| hexsha (stringlengths 40–40) | size (int64 1–1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–239) | max_stars_repo_name (stringlengths 5–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–239) | max_issues_repo_name (stringlengths 5–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–239) | max_forks_repo_name (stringlengths 5–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 1–1.03M) | avg_line_length (float64 1–958k) | max_line_length (int64 1–1.03M) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
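Each row below follows this schema, with the `content` column holding the raw Python source. As a minimal, purely illustrative sketch of how such a snapshot could be inspected (the file name and storage format here are assumptions, not part of this dump), the metadata columns can be loaded and filtered with pandas:

```python
import pandas as pd

# Hypothetical local export of the rows shown below (path and format are assumptions).
df = pd.read_parquet("python_files_snapshot.parquet")

# Example filter: MIT-licensed files with at least one star and short average lines.
mask = (
    df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses)
    & (df["max_stars_count"].fillna(0) >= 1)
    & (df["avg_line_length"] < 60)
)
print(df.loc[mask, ["max_stars_repo_path", "max_stars_repo_name", "size"]])
```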
| 79511842dbabcea06a7c53ac39ceb0d361da63c1 | 2,868 | py | Python | pyqtgraph/exporters/PrintExporter.py | marsipu/pyqtgraph | eb8965c2f4b8ef9393eeeb857bc23d5faf2c1baf | ["MIT"] | 1 | 2021-07-09T23:27:37.000Z | 2021-07-09T23:27:37.000Z | pyqtgraph/exporters/PrintExporter.py | tom00ti/pyqtgraph | 6b4385ce0d0f9078aa22e2e27aa5307271e95ae1 | ["MIT"] | null | null | null | pyqtgraph/exporters/PrintExporter.py | tom00ti/pyqtgraph | 6b4385ce0d0f9078aa22e2e27aa5307271e95ae1 | ["MIT"] | null | null | null |
from .Exporter import Exporter
from ..parametertree import Parameter
from ..Qt import QtGui, QtCore, QtSvg
import re
translate = QtCore.QCoreApplication.translate
__all__ = ['PrintExporter']
#__all__ = [] ## Printer is disabled for now--does not work very well.
class PrintExporter(Exporter):
Name = "Printer"
def __init__(self, item):
Exporter.__init__(self, item)
tr = self.getTargetRect()
self.params = Parameter(name='params', type='group', children=[
{'name': 'width', 'title': translate("Exporter", 'width'), 'type': 'float', 'value': 0.1,
'limits': (0, None), 'suffix': 'm', 'siPrefix': True},
{'name': 'height', 'title': translate("Exporter", 'height'), 'type': 'float',
'value': (0.1 * tr.height()) / tr.width(), 'limits': (0, None), 'suffix': 'm', 'siPrefix': True},
])
self.params.param('width').sigValueChanged.connect(self.widthChanged)
self.params.param('height').sigValueChanged.connect(self.heightChanged)
def widthChanged(self):
sr = self.getSourceRect()
ar = sr.height() / sr.width()
self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = sr.width() / sr.height()
self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
def parameters(self):
return self.params
def export(self, fileName=None):
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
dialog = QtGui.QPrintDialog(printer)
dialog.setWindowTitle(translate('Exporter', "Print Document"))
if dialog.exec_() != QtGui.QDialog.DialogCode.Accepted:
return
#dpi = QtGui.QDesktopWidget().physicalDpiX()
#self.svg.setSize(QtCore.QSize(100,100))
#self.svg.setResolution(600)
#res = printer.resolution()
sr = self.getSourceRect()
#res = sr.width() * .4 / (self.params['width'] * 100 / 2.54)
res = QtGui.QGuiApplication.primaryScreen().physicalDotsPerInchX()
printer.setResolution(res)
rect = printer.pageRect()
center = rect.center()
h = self.params['height'] * res * 100. / 2.54
w = self.params['width'] * res * 100. / 2.54
x = center.x() - w/2.
y = center.y() - h/2.
targetRect = QtCore.QRect(x, y, w, h)
sourceRect = self.getSourceRect()
painter = QtGui.QPainter(printer)
try:
self.setExportMode(True, {'painter': painter})
self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
finally:
self.setExportMode(False)
painter.end()
#PrintExporter.register()
| 39.287671 | 110 | 0.602859 |
| 795119d6a65d74e29656bde466342abc699b627b | 650 | py | Python | bin/cli.py | rmit-ir/joint-cascade-ranking | fa53c4447ce0ebe380d0ebbb5017eb7b1ac015e3 | ["MIT"] | 9 | 2018-12-03T12:29:54.000Z | 2021-08-18T02:21:34.000Z | bin/cli.py | rmit-ir/joint-cascade-ranking | fa53c4447ce0ebe380d0ebbb5017eb7b1ac015e3 | ["MIT"] | 1 | 2019-11-05T14:54:50.000Z | 2019-11-20T01:12:45.000Z | bin/cli.py | rmit-ir/joint-cascade-ranking | fa53c4447ce0ebe380d0ebbb5017eb7b1ac015e3 | ["MIT"] | 4 | 2019-10-16T02:55:11.000Z | 2021-12-05T00:23:29.000Z |
class AttrDict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
@classmethod
def from_parseargs(cls, args):
config = cls()
config.merge_parseargs(args)
return config
@classmethod
def from_attr_dict(cls, other):
config = cls()
for key, val in other.items():
setattr(config, key, val)
return config
def merge_parseargs(self, args):
for key, val in vars(args).items():
# this class is a `dict` so use `in` instead of `hasattr`
if key in self.keys() and val is not None:
setattr(self, key, val)
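# Illustrative behaviour (hypothetical values): merge_parseargs only overrides keys that
# already exist in the config and whose parsed value is not None, e.g.
#     cfg = AttrDict(lr=0.1, depth=3)
#     cfg.merge_parseargs(argparse.Namespace(lr=0.5, depth=None, extra=1))
#     cfg  ->  {'lr': 0.5, 'depth': 3}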
| 28.26087 | 69 | 0.589231 |
| 79511bd93dd29d8fab4047de060459b0fe8f22c1 | 7,841 | py | Python | bipartite/matching.py | severmore/pygraph | 1ad4c0f6c287ef5fd4c0269495ff890b8be8a6e0 | ["MIT"] | 2 | 2018-06-30T10:19:41.000Z | 2018-07-02T09:57:25.000Z | bipartite/matching.py | severmore/pygraph | 1ad4c0f6c287ef5fd4c0269495ff890b8be8a6e0 | ["MIT"] | 10 | 2018-06-28T16:19:21.000Z | 2018-07-06T11:48:03.000Z | bipartite/matching.py | severmore/pygraphs | 1ad4c0f6c287ef5fd4c0269495ff890b8be8a6e0 | ["MIT"] | null | null | null |
"""
Created by Ivanov Roman and Maxim Dudin.
https://github.com/severmore/pygraphs
"""
from collections import deque
def euler_partition(graph, sustain_graph=False):
"""
Finds an Euler partition of a graph. An Euler partition is a partition of
the edges of a graph into open and closed paths, such that each vertex of odd
degree is the end of exactly one open path, and each vertex of even degree is
the end of no open path [1].
In this implementation an Euler partition is represented as a list of paths,
each path given as the list of vertices it passes through. For cases where the
edges themselves should be included, a modification of the method should be
considered.
After this method finishes, all edges of the initial graph will have been
removed, as the algorithm requires. To avoid this, set `sustain_graph` to True.
Args:
graph(:obj:`Graph`) - a graph for which a partition is to be found.
sustain_graph(bool) - if set to True, the graph edges will be copied.
Return:
:obj:`list` of :obj:`list` of int: an Euler partition representation, that is,
a list of paths. Each path is given by the list of vertices it passes
through; the edges themselves are omitted.
References:
[1] Harold N. Gabow. Using Euler Partition to Edge Color Bipartite
Multigraphs // The International Journal of Computer and Information
Sciences, Vol. 5, No. 4, 1976.
"""
if sustain_graph:
# NOTE: copying of the graph edges is not implemented yet; the edges of
# `graph` are still consumed by the algorithm below.
pass
partition = list()
# Populate Q in reverse order comparing to [1]
queue = deque()
for vertex in graph.get_vertices():
if graph.degree(vertex) % 2:
queue.append(vertex)
else:
queue.appendleft(vertex)
while queue:
start = queue.pop()
if graph.degree(start):
path = [start]
pivot = start
while graph.degree(pivot):
pivot_next = graph.edges[pivot][0]
graph.remove_edge(pivot, pivot_next)
path.append(pivot_next)
pivot = pivot_next
partition.append(path)
if graph.degree(start):
queue.appendleft(start)
return partition
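# Illustrative usage (hypothetical, mirroring the UDGraph example in __main__ below).
# Note that the call consumes the edges of `g`; the exact paths returned depend on
# vertex and edge ordering:
#     g = bipartite.graph.UDGraph(edges=[(0, 2), (1, 2), (1, 3)])
#     euler_partition(g)   ->  e.g. [[0, 2, 1, 3]]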
def euler_split(graph):
"""
Returns an Euler split of a graph. An Euler split is a pair of subgraphs of
the initial graph such that the vertex set of both subgraphs remains
unchanged, the edges of the graph are the disjoint union of the subgraphs'
edges, and the split is obtained by alternately placing the edges of the paths
of the graph's Euler partition into the two subgraphs.
Args:
graph(:obj:`Graph`) - a graph to split.
Returns:
:obj:`Graph`, :obj:`Graph` - the two subgraphs forming the Euler split.
References:
[1] Harold N. Gabow. Using Euler Partition to Edge Color Bipartite
Multigraphs // The International Journal of Computer and Information
Sciences, Vol. 5, No. 4, 1976.
"""
G1 = graph.__class__(vertices_num=graph.vertices_num)
G2 = graph.__class__(vertices_num=graph.vertices_num)
partition = euler_partition(graph)
for path in partition:
v_prev, path = path[0], path[1:]
for index, vertex in enumerate(path):
if index % 2:
G1.add_edge(v_prev, vertex)
else:
G2.add_edge(v_prev, vertex)
v_prev = vertex
G1.update_max_degree()
G2.update_max_degree()
return G1, G2
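# Worked example (hypothetical): splitting a graph whose Euler partition is the single
# path [0, 2, 1, 3] places its edges alternately, giving G2 = {(0, 2), (1, 3)} and
# G1 = {(2, 1)} with the edge ordering used above.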
def _covering_partition(graph):
"""
Finds a Cole-Hopcroft graph partition - a partition into two subgraphs such that
the following properties hold:
- both subgraphs have the same vertices as the graph,
- the disjoint union of their edges forms the edges of the initial graph,
- the set of vertices having maximum degree in the graph also has maximum
degree in each subgraph.
If the maximum degree of the initial graph is even then Cole-Hopcroft
partition is just an Euler split.
Args:
graph(:obj:`Graph`) - a graph to split
Returns:
:obj:`Graph`, :obj:`Graph` - two subgraphs of `graph`.
References:
[2] Richard Cole, and John Hopcroft. On Edge Coloring Bipartite Graphs //
SIAM Journal on Computing, Vol. 11, No. 3, pp. 540-546, 1982.
"""
if graph.max_degree % 2 == 0:
return euler_split(graph)
D = graph.max_degree
k, d = D // 4, 1
if D % 4 == 3:
k, d = k + 1, -1
# Find M-containing set - a set of max degree vertices in a graph.
M = { v for v in graph.get_vertices() if graph.degree(v) == D }
H1, H2 = euler_split(graph)
# M1 is the subset of M whose vertices have degree 2 * k + d in H1. If M1 has
# fewer vertices than half of M then swap H1 and H2, and M1 and M2, so that M1
# holds at least half of the maximum degree vertices.
M1 = { v for v in M if H1.degree(v) == 2 * k + d }
M2 = M - M1
if len(M1) < len(M) / 2:
H2, H1 = H1, H2
M2, M1 = M1, M2
# Iterate until, in both subgraphs, the set of max degree vertices equals M
while M2:
H21, H22 = euler_split(H2)
M21 = { v for v in M2 if H21.degree(v) == k + d }
M22 = M2 - M21
if len(M21) < len(M2) / 2:
H22, H21 = H21, H22
M22, M21 = M21, M22
H1.union(H21)
H2 = H22
if k % 2:
k = k // 2  # integer division; plain `/` would make k a float under Python 3
else:
H1, H2 = H2, H1
k = (D - k) // 2
d = -d
M1 = M1.union(M21)
M2 = M22
H1.update_max_degree()
H2.update_max_degree()
return H1, H2
def covering_matching(graph, sustain_graph=True):
"""
Finds a matching covering the maximum degree vertices of a graph. A matching
is a subset of the graph edges such that no two edges share a common vertex.
A matching is said to cover a set of vertices if for every vertex in this set
there is an edge of the matching that contains it.
After this method finishes, all edges of the initial graph will have been
removed, as the algorithm requires. If it is necessary to keep the graph
edges, set `sustain_graph` to True; the method then returns both the matching
and the rest of the edges. If one needs to recover the initial graph, just
union these edges into a single graph.
Args:
graph(:obj:`Graph`) - a graph for which the covering matching is to be found.
sustain_graph(bool) - if set to True, the graph edges excluding the edges
of the matching found will also be returned.
Returns:
:obj:`Graph`, :obj:`Graph` - the matching and the rest of
`graph` if `sustain_graph` is set to True.
:obj:`Graph` - the matching found if `sustain_graph`
is set to False.
References:
[2] Richard Cole, and John Hopcroft. On Edge Coloring Bipartite Graphs //
SIAM Journal on Computing, Vol. 11, No. 3, pp. 540-546, 1982.
"""
if sustain_graph:
matching, rest = graph, graph.__class__()
# The first iteration of the cycle is coded separately to avoid superfluous
# copying of graph edges to `rest`.
if matching.max_degree > 1:
matching, rest = _covering_partition(matching)
if matching.max_degree > rest.max_degree:
matching, rest = rest, matching
while matching.max_degree > 1:
G1, G2 = _covering_partition(matching)
if G1.max_degree > G2.max_degree:
G1, G2 = G2, G1
matching = G1
rest.union(G2)
return matching, rest
else:
matching = graph
while matching.max_degree > 1:
G1, G2 = _covering_partition(matching)
matching = G1 if G1.max_degree < G2.max_degree else G2
return matching
if __name__ == '__main__':
import bipartite.graph
graph = bipartite.graph.UDGraph(edges=
[ (0, 3), (0, 4), (1, 3), (1, 4), (1, 5), (2, 3)])
print('graph: ', graph)
matching, rest = covering_matching(graph)
print('matching: ', matching)
print('rest: ', repr(rest))
rest.union(matching)
graph = bipartite.graph.UDGraph(edges=
[ (0, 3), (0, 4), (1, 3), (1, 4), (1, 5), (2, 3)])
print('restored: ', rest)
print('equality of rest and graph: ', rest == graph)
| 27.416084 | 80 | 0.661268 |
| 79511e61ff4229cda4d0f13805e1c20e75406481 | 11,885 | py | Python | jx_telebot.py | jxwhat/telegram_bot | f984eeac70efeca99c89c7e3436e88de65ed99d1 | ["MIT"] | null | null | null | jx_telebot.py | jxwhat/telegram_bot | f984eeac70efeca99c89c7e3436e88de65ed99d1 | ["MIT"] | null | null | null | jx_telebot.py | jxwhat/telegram_bot | f984eeac70efeca99c89c7e3436e88de65ed99d1 | ["MIT"] | null | null | null |
# this file provides supporting stuff for the telegram bot
import numpy as np
import pandas as pd
import yfinance as yf
import re
import random
from datetime import datetime, timedelta
from datetime import date as datetype ## clumsy code but works for now
import requests
vocab_dict = {
'no_change': ['remained flat at {}'],
'small_up': ['inched up {}', 'edged up {}'],
'reg_up': ['gained {}', 'climbed {}', 'advanced {}', 'rose {}', 'added {}', 'was {} higher'],
'big_up': ['jumped {}', 'surged {}', 'rallied {}', 'shot up {}', 'spiked up {}'],
'small_down': ['inched down {}', 'edged down {}'],
'reg_down': ['slid {}', 'dropped {}', 'declined {}', 'fell by {}', 'eased {}', 'retreated by {}', 'slipped by {}', 'was {} lower', 'pulled back {}', 'shedded {}'],
'big_down': ['sunk {}', 'tumbled {}', 'collapsed {}', 'slumped {}']
}
def write_sentence(pct_chg, close_price, vocab_dict=vocab_dict, precision=2):
'''
Crafts a descriptive sentence given the parameters passed
'''
if pct_chg < -0.025:
a = 'big_down'
elif -0.025 <= pct_chg < -0.002:
a = 'reg_down'
elif -0.002 <= pct_chg < 0:
a = 'small_down'
elif pct_chg == 0:
a = 'no_change'
elif 0 < pct_chg < 0.002:
a = 'small_up'
elif 0.002 <= pct_chg < 0.025:
a = 'reg_up'
elif 0.025 <= pct_chg:
a = 'big_up'
else:
if pd.isnull(pct_chg) and pd.isnull(close_price):
return 'has not been traded yet'
else:
return "was unchanged today"
if a == 'no_change':
out_str = 'remained flat at ' + str(round(close_price, 2))
else:
out_str = random.choice(vocab_dict[a]).format(str(round(abs(pct_chg) * 100, 2)) + '%')
if out_str[-1] == '%':
out_str += ' to '
else:
out_str += ' at '
out_str += str(round(close_price, precision))
return out_str
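# Illustrative outputs (the verb is drawn at random from vocab_dict, so wording varies):
#     write_sentence(pct_chg=0.031, close_price=3250.5)                ->  'jumped 3.1% to 3250.5'
#     write_sentence(pct_chg=-0.001, close_price=1.3542, precision=4)  ->  'edged down 0.1% to 1.3542'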
def format_content(df):
'''
writes sentences for every row in the df,
provided that rows are indexed by the tickers
returns a dictionary of sentences, with tickers as key
'''
out = {}
for i in range(len(df)):
# check if the series is a currency, if so use 4dp precision
if len(str(df.iloc[i]['short_name'])) == 7 and str(df.iloc[i]['short_name'][3]) == '/':
out[df.iloc[i].name] = write_sentence(pct_chg=df.iloc[i][2], close_price=df.iloc[i][1], precision=4)
else:
out[df.iloc[i].name] = write_sentence(pct_chg=df.iloc[i][2], close_price=df.iloc[i][1])
return out
def tellme(a):
if all(x > 0 for x in a):
return "up"
elif all(x < 0 for x in a):
return "down"
else:
return "mixed"
def message_draft_pm(df, author, date=datetime.today(), out_path=None):
'''
Drafts a message given a reference df, template, and date
Saves message as a txt file if provided an out_path
'''
weekday = date.strftime(format='%A')
now = date.strftime(format='%I:%M %p')
date = date.strftime(format='%d %b %Y')
if date[0] == '0':
date = date[1:]
template = './msg_templates/msg_template_pm.txt'
sentences = format_content(df)
with open(template, 'r') as f:
text = f.read()
bank_rets = df.loc[['D05.SI', 'U11.SI', 'O39.SI']].pct_chg.values
bank_mvmt = tellme(bank_rets)
textformats = {
'weekday': weekday,
'now': now,
'msg_date': date,
'author_name': author,
'sti': sentences['^STI'],
'hsi': sentences['^HSI'],
'csi': sentences['000300.SS'],
'sse': sentences['000001.SS'],
'chnxt': sentences['399006.SZ'],
'n225': sentences['^N225'],
'asx': sentences['^AXJO'],
'aord': sentences['^AORD'],
'ftse': sentences['^FTSE'],
'cac': sentences['^FCHI'],
'dax': sentences['^GDAXI'],
'sxxp': sentences['^STOXX'],
'sx5e': sentences['^STOXX50E'],
'spf': df.loc['ES=F', 'short_name'],
'spfs': sentences['ES=F'],
'nqf': df.loc['NQ=F', 'short_name'],
'nqfs': sentences['NQ=F'],
'sgdx': sentences['SGD=X'],
'tnx': sentences['^TNX'],
'gcf': df.loc['GC=F', 'short_name'],
'gcfs': sentences['GC=F'],
'clf': df.loc['CL=F', 'short_name'],
'clfs': sentences['CL=F'],
'dbs': sentences['D05.SI'],
'uob': sentences['U11.SI'],
'ocbc': sentences['O39.SI'],
'bank_mvmt': bank_mvmt
}
text = text.format(**textformats)
if out_path != None:
with open(out_path, 'w') as f:
f.write(text)
return text
def message_draft_am(df, author, date=datetime.today(), out_path=None):
'''
Drafts a message given a reference df, template, and date
Saves message as a txt file if provided an out_path
'''
weekday_today = date.strftime(format='%A')
weekday = (date.date() - timedelta(1)).strftime(format='%A')
now = date.strftime(format='%I:%M %p')
date = date.strftime(format='%d %b %Y')
if date[0] == '0':
date = date[1:]
template = './msg_templates/msg_template_am.txt'
sentences = format_content(df)
with open(template, 'r') as f:
text = f.read()
textformats = {
'weekday': weekday,
'weekday_today': weekday_today,
'msg_date': date,
'now': now,
'author_name': author,
'spx': sentences['^GSPC'],
'ndq': sentences['^IXIC'],
'dji': sentences['^DJI'],
'fb': sentences['FB'],
'amzn': sentences['AMZN'],
'aapl': sentences['AAPL'],
'goog': sentences['GOOG'],
'ftse': sentences['^FTSE'],
'cac': sentences['^FCHI'],
'dax': sentences['^GDAXI'],
'sxxp': sentences['^STOXX'],
'sx5e': sentences['^STOXX50E'],
'sgdx': sentences['SGD=X'],
'tnx': sentences['^TNX'],
'gcf': df.loc['GC=F', 'short_name'],
'gcfs': sentences['GC=F'],
'clf': df.loc['CL=F', 'short_name'],
'clfs': sentences['CL=F']
}
text = text.format(**textformats)
if out_path != None:
with open(out_path, 'w') as f:
f.write(text)
return text
def get_prev_summary(tick_list, date, series_type=None):
'''
Get descriptive statistics of tickers given:
tick list - list of tickers
date - reference date
Returns data in a DataFrame
'''
if not isinstance(date, datetype):
date = datetime.strptime(date, '%Y-%m-%d')  # parse string dates (mirrors create_csv/update_csv)
names = []
close_price = []
pct_chg = []
for t in tick_list:
print('Retrieving data for', str(t))
# initialise the ticker, get quotes and info
ticker = yf.Ticker(t)
quotes = ticker.history(period='5d') #5d quote used as 1d has some issues
# append short name
try:
info = ticker.info
names.append(info['shortName'])
except:
names.append(np.nan)
# append close prices
try:
close_price.append(quotes.loc[date, 'Close'])
except:
close_price.append(np.nan)
# this part handles percent changes
try:
a = quotes.pct_change().loc[date, 'Close']
except:
a = np.nan
if pd.isnull(a):
try:
spare_df = return_csv(series_type)
mydate = spare_df.iloc[[spare_df.shape[0]-1]].index.values[0]
print("{} doesn't have historical data. Attempting to retrieve values from local data dated {}.".format(t, str(mydate)[:10]))
prev_close = spare_df.iloc[[spare_df.shape[0]-1]].loc[:, t][0]
today_close = quotes.loc[date, 'Close']
pct_chg.append(today_close/prev_close - 1)
except:
print('Data for {} is unavailable, filling with NaN value instead.'.format(t))
pct_chg.append(np.nan)
else:
pct_chg.append(a)
output = pd.DataFrame({
'short_name': names,
'close_price': close_price,
'pct_chg': pct_chg
}, index=tick_list)
return output
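# Illustrative call (hypothetical tickers/date; needs network access for yfinance):
#     df = get_prev_summary(['^GSPC', '^IXIC'], date='2021-07-09', series_type='useu')
#     -> DataFrame indexed by ticker with columns short_name, close_price and pct_chg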
def create_csv(df, date, path):
'''
creates a csv containing closing price timeseries.
doesn't really need to be called day-to-day
'''
if not isinstance(date, datetype):
date = datetime.strptime(date, '%Y-%m-%d')
data_df = pd.DataFrame(data = df['close_price'].values.reshape(df.shape[0], 1).T,
columns = df.index,
index = pd.DatetimeIndex([date]))
data_df.index.name = 'date'
data_df.to_csv(path, encoding='utf-8-sig')
def update_csv(df, date, path):
'''
updates the csv file containing the closing price timeseries
'''
if not isinstance(date, datetype):
date = datetime.strptime(date, '%Y-%m-%d')
data_df = pd.DataFrame(data = df['close_price'].values.reshape(df.shape[0], 1).T,
columns = df.index,
index = pd.DatetimeIndex([date]))
data_df.index.name = 'date'
source_df = pd.read_csv(path, index_col='date')
source_df = source_df.append(data_df)
source_df.to_csv(path, encoding='utf-8-sig')
def undo_csv(path):
'''
removes the last row of the csv file
'''
df = pd.read_csv(path, index_col='date')
df.iloc[:len(df)-1].to_csv(path)
def retrieve_csv(path):
df = pd.read_csv(path, parse_dates=True, index_col='date')
return df
def return_csv(series_type):
path_dict = {
'apac': './data_files/apac_timeseries.csv',
'useu': './data_files/useu_timeseries.csv',
'othr': './data_files/othr_timeseries.csv'
}
if series_type not in path_dict.keys():
raise ValueError("Bad series_type passed. Series type must be one of the following: 'apac', 'useu', 'othr'.")
return retrieve_csv(path_dict[series_type])
class telegram_bot():
'''
contains info on our telegram bot
'''
def __init__(self):
self.token = '<YOUR_BOT_TOKEN_HERE>'
self.channel_id = '<YOUR_CHANNEL_ID_HERE>'
self.api_url = 'https://api.telegram.org/bot{}/sendMessage'.format(self.token)
def get_details(self):
return self.token, self.channel_id, self.api_url
def __msgformat__(self, text):
replacements = {
'.': '\\.',
'>': '\\>',
'-': '\\-',
'(': '\\(',
')': '\\)',
'+': '\\+'
}
replacements = dict((re.escape(k), v) for k, v in replacements.items())
pattern = re.compile("|".join(replacements.keys()))
return pattern.sub(lambda m: replacements[re.escape(m.group(0))], text)
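# Illustrative escaping for Telegram's MarkdownV2 parser:
#     self.__msgformat__('-1.2% (est.)')  ->  '\-1\.2% \(est\.\)'
# i.e. the characters . > - ( ) + are each prefixed with a backslash.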
def send_msg_str(self, str_in):
'''
Sends a message using the telegram bot, given:
str_in: a string to be used as the message content
'''
content = self.__msgformat__(str_in)
payload = {
'chat_id': '-100'+self.channel_id,
'text': content,
'parse_mode': 'MarkdownV2'
}
r = requests.post(self.api_url, data=payload)
return r.json()
def send_msg_file(self, path):
'''
Sends a message using the telegram bot, given:
path: a path to a .txt file containing the message content
'''
with open(path, 'r') as f:
content = f.read()
content = self.__msgformat__(content)
payload = {
'chat_id': '-100'+self.channel_id,
'text': content,
'parse_mode': 'MarkdownV2'
}
r = requests.post(url=self.api_url, data=payload)
return r.json()
| 33.198324 | 168 | 0.549011 |
| 79511e883f0726820a155db9a2df66110bdaecac | 5,182 | py | Python | src/stk/molecular/functional_groups/factories/boronic_acid_factory.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | ["MIT"] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/molecular/functional_groups/factories/boronic_acid_factory.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | ["MIT"] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/molecular/functional_groups/factories/boronic_acid_factory.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | ["MIT"] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z |
"""
Boronic Acid Factory
====================
"""
from __future__ import annotations
import typing
from collections import abc
from .functional_group_factory import FunctionalGroupFactory
from .utilities import get_atom_ids
from ..functional_groups import BoronicAcid
from ...molecule import Molecule
from ...atoms import O, H, B
__all__ = (
'BoronicAcidFactory',
)
_ValidIndex = typing.Literal[0, 1, 2, 3, 4, 5]
class BoronicAcidFactory(FunctionalGroupFactory):
"""
Creates :class:`.BoronicAcid` instances.
Creates functional groups from substructures, which match the
``[*][B]([O][H])[O][H]`` functional group string.
Examples:
*Creating Functional Groups with the Factory*
You want to create a building block which has
:class:`.BoronicAcid` functional groups. You want the boron
atom in those functional groups to be the *bonder* atom and the
OH groups to be *deleter* atoms.
.. testcode:: creating-functional-groups-with-the-factory
import stk
building_block = stk.BuildingBlock(
smiles='OB(O)CCCB(O)O',
functional_groups=(stk.BoronicAcidFactory(), ),
)
.. testcode:: creating-functional-groups-with-the-factory
:hide:
assert all(
isinstance(functional_group, stk.BoronicAcid)
for functional_group
in building_block.get_functional_groups()
)
assert building_block.get_num_functional_groups() == 2
*Changing the Bonder and Deleter Atoms*
You want to create a building block which has
:class:`.BoronicAcid` functional groups. You want the oxygen
atoms to be treated as *bonder* atoms, and the hydrogen atoms
to be treated as *deleter* atoms.
.. testcode:: changing-the-bonder-and-deleter-atoms
import stk
boronic_acid_factory = stk.BoronicAcidFactory(
# The indices of the oxygen atoms in the functional
# group string (see docstring) are 2 and 4.
bonders=(2, 4),
# The indices of the hydrogen atoms in the
# functional group string (see docstring) are 3 and 5.
deleters=(3, 5),
)
building_block = stk.BuildingBlock(
smiles='OB(O)CCCB(O)O',
functional_groups=(boronic_acid_factory, ),
)
.. testcode:: changing-the-bonder-and-deleter-atoms
:hide:
fg1, fg2 = building_block.get_functional_groups()
assert fg1.get_num_bonders() == 2
assert sum(1 for _ in fg1.get_deleters()) == 2
assert fg2.get_num_bonders() == 2
assert sum(1 for _ in fg2.get_deleters()) == 2
assert all(
isinstance(atom, stk.O)
for functional_group
in building_block.get_functional_groups()
for atom
in functional_group.get_bonders()
)
assert all(
isinstance(atom, stk.H)
for functional_group
in building_block.get_functional_groups()
for atom
in functional_group.get_deleters()
)
See Also:
:class:`.GenericFunctionalGroup`
Defines *bonders* and *deleters*.
"""
def __init__(
self,
bonders: tuple[_ValidIndex, ...] = (1, ),
deleters: tuple[_ValidIndex, ...] = (2, 3, 4, 5),
placers: typing.Optional[tuple[_ValidIndex, ...]] = None,
) -> None:
"""
Initialize a :class:`.BoronicAcidFactory` instance.
Parameters:
bonders:
The indices of atoms in the functional group string,
which are *bonder* atoms.
deleters:
The indices of atoms in the functional group string,
which are *deleter* atoms.
placers:
The indices of atoms in the functional group string,
which are *placer* atoms. If ``None``, `bonders` will
be used.
"""
self._bonders = bonders
self._deleters = deleters
self._placers = bonders if placers is None else placers
def get_functional_groups(
self,
molecule: Molecule,
) -> abc.Iterable[BoronicAcid]:
for atom_ids in get_atom_ids(
query='[*][B]([O][H])[O][H]',
molecule=molecule,
):
atoms = tuple(molecule.get_atoms(atom_ids))
yield BoronicAcid(
boron=typing.cast(B, atoms[1]),
oxygen1=typing.cast(O, atoms[2]),
hydrogen1=typing.cast(H, atoms[3]),
oxygen2=typing.cast(O, atoms[4]),
hydrogen2=typing.cast(H, atoms[5]),
atom=atoms[0],
bonders=tuple(atoms[i] for i in self._bonders),
deleters=tuple(atoms[i] for i in self._deleters),
placers=tuple(atoms[i] for i in self._placers),
)
| 30.662722 | 71 | 0.564068 |
| 79511f8809fe03a5ecbce619085d46fad3459e42 | 3,327 | py | Python | python/kfserving/test/test_v1beta1_alibi_explainer_spec.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | 6 | 2022-02-15T21:54:19.000Z | 2022-02-16T21:18:54.000Z | python/kfserving/test/test_v1beta1_alibi_explainer_spec.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | 635 | 2021-01-29T07:06:06.000Z | 2022-03-31T09:09:20.000Z | python/kfserving/test/test_v1beta1_alibi_explainer_spec.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | ["Apache-2.0"] | 4 | 2021-02-15T23:02:53.000Z | 2022-01-27T22:54:16.000Z |
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfserving
from kfserving.models.v1beta1_alibi_explainer_spec import V1beta1AlibiExplainerSpec # noqa: E501
from kfserving.rest import ApiException
class TestV1beta1AlibiExplainerSpec(unittest.TestCase):
"""V1beta1AlibiExplainerSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1AlibiExplainerSpec
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfserving.models.v1beta1_alibi_explainer_spec.V1beta1AlibiExplainerSpec() # noqa: E501
if include_optional :
return V1beta1AlibiExplainerSpec(
args = [
'0'
],
command = [
'0'
],
config = {
'key' : '0'
},
env = [
None
],
env_from = [
None
],
image = '0',
image_pull_policy = '0',
lifecycle = None,
liveness_probe = None,
name = '0',
ports = [
None
],
readiness_probe = None,
resources = None,
runtime_version = '0',
security_context = None,
startup_probe = None,
stdin = True,
stdin_once = True,
storage_uri = '0',
termination_message_path = '0',
termination_message_policy = '0',
tty = True,
type = '0',
volume_devices = [
None
],
volume_mounts = [
None
],
working_dir = '0'
)
else :
return V1beta1AlibiExplainerSpec(
name = '0',
type = '0',
)
def testV1beta1AlibiExplainerSpec(self):
"""Test V1beta1AlibiExplainerSpec"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 30.245455 | 105 | 0.533514 |
| 79511fa50dbbf3c85a67a7e66a81564151308499 | 1,011 | py | Python | airflow/www/gunicorn_config.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | ["Apache-2.0"] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/www/gunicorn_config.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | ["Apache-2.0"] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/www/gunicorn_config.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | ["Apache-2.0"] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import setproctitle
from airflow import settings
def post_worker_init(dummy_worker):
setproctitle.setproctitle(
settings.GUNICORN_WORKER_READY_PREFIX + setproctitle.getproctitle()
)
| 34.862069 | 75 | 0.771513 |
| 79512037860f60e2420827be48ddfa72ece0fcaa | 14,680 | py | Python | _code_generation/mof.py | osgirl/dractor | aa0ba9f5e4802ae365df3cb7d51c31b8d1b2349b | ["Apache-2.0"] | null | null | null | _code_generation/mof.py | osgirl/dractor | aa0ba9f5e4802ae365df3cb7d51c31b8d1b2349b | ["Apache-2.0"] | 2 | 2019-03-17T15:40:39.000Z | 2019-03-17T15:40:41.000Z | _code_generation/mof.py | osgirl/dractor | aa0ba9f5e4802ae365df3cb7d51c31b8d1b2349b | ["Apache-2.0"] | null | null | null |
# Copyright (C) 2017 Verizon. All Rights Reserved.
#
# File: mof.py
# Author: John Hickey, Phil Chandler
# Date: 2017-02-17
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plumbing for converting Dell MOF files into Python objects.
"""
#
# Imports
#
# Python standard library
import collections
import glob
import logging
import os
# Third party
import textx.metamodel
# this project
import _code_generation.exceptions as my_exceptions
#
# Module variables
#
# logger
_LOGGER = logging.getLogger(__name__)
#
# Custom classes for text meta model
#
class Qualified(object):
"""
We have qualifiers at class, function, and argument levels in MOF
files. This is an attempt to unify the parsing of qualifiers and
provide the name property.
"""
def __init__(self, name, qualifiers):
self._name = name
self.qualifiers = {}
for qualifier in qualifiers:
if hasattr(qualifier, 'value'):
value = qualifier.value
elif hasattr(qualifier, 'values'):
value = qualifier.values
elif qualifier.__class__.__name__ == 'NegativeKeyword':
value = False
else:
value = True
# All qualifiers have a 'name'
# Since the capitalization of qualifiers is inconsistent,
# we are just going to squash case for everything
self.qualifiers[qualifier.name.lower()] = value
# Make sure we don't have multiple cases of the same key
assert len(self.qualifiers) == len(qualifiers)
@property
def name(self):
""" Return the Pythonic Name """
return self._name.replace("[]", "")
@property
def valuemap(self):
""" Return the ValueMap from the qualifiers as a python dictionary """
values = self.qualifiers.get('values')
valuemap = self.qualifiers.get('valuemap')
final_mapping = {}
if values and valuemap:
raw_mapping = dict(zip(valuemap, values))
final_mapping = {}
for raw_key, raw_value in raw_mapping.items():
final_mapping[raw_key.strip()] = raw_value.strip()
return final_mapping
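# Illustrative (hypothetical qualifiers): ValueMap = ["0 ", "1"] with
# Values = ["Disabled", " Enabled"] yields {"0": "Disabled", "1": "Enabled"},
# i.e. the two lists are zipped and whitespace is stripped from keys and values.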
@property
def docstring(self):
""" Return a docstring generated from the qualifiers """
raw_description = self.qualifiers.get("description")
# short circuit if there isn't a description to process
if not raw_description:
_LOGGER.debug("No raw description found in MOF, substituting placeholder")
return "No documentation in MOF"
# process the raw description, normalizing whitespace and special characters
_LOGGER.debug("Normalizing raw description from MOF:\n%s", raw_description)
normalized_lines = []
for raw_line in raw_description:
# split to normalize \n in the entry
normalized_line_elements = []
for text in raw_line.split():
# strip leading/trailing whitespace
stripped_text = text.strip()
# escape any special rst characters
escaped_text = stripped_text.replace('*', '\\*')
# add to normalized line elements
normalized_line_elements.append(escaped_text)
# create normalized line and save it
normalized_line = " ".join(normalized_line_elements)
normalized_lines.append(normalized_line)
# create and return the normalized line block
normalized_description = "\n".join(normalized_lines)
_LOGGER.debug("Normalized description is:\n%s", normalized_description)
return normalized_description
class MOFClass(Qualified):
""" MOF Class """
def __init__(self, name, qualifiers, parent_class, members):
"""
Our MOF classes consist of members, which are functions, and
qualifiers
"""
self.parent_class = parent_class
self.members = members
super(MOFClass, self).__init__(name, qualifiers)
@property
def attributes(self):
"""
Return all methods that don't take arguments, these are populated
by get/enumerate factory classes
"""
attributes = []
for member in self.members:
if member.attribute:
attributes.append(member)
return attributes
@property
def attributes_metadata(self):
"""
Return the value mapping and qualifiers for every attribute as a dictionary.
We can't do fancy things if we embed these in the functions like we do for
methods.
"""
attribute_meta = collections.defaultdict(dict)
for attribute in self.attributes:
attribute_meta[attribute.name]['valuemap'] = attribute.valuemap
attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers
return dict(attribute_meta)
@property
def methods(self):
""" Return all methods that require invoke """
methods = []
for member in self.members:
if not member.attribute:
methods.append(member)
return methods
@property
def dcim_parents(self):
""" Return parent classes in dractor/DCIM.py for code autogeneration """
parents = []
if self.attributes:
parents.append('DCIMAttributeObject')
if self.methods:
parents.append('DCIMMethodObject')
return parents
@property
def key(self):
""" Return the name of our key """
for member in self.members:
if member.key:
return member.name
@property
def mof_metadata(self):
""" Return a dictionary representation of the MOF file """
mof_dict = collections.defaultdict(dict)
mof_dict['class'] = self.name
mof_dict['parent_class'] = self.parent_class
mof_dict['qualifiers'] = self.qualifiers
for func in self.members:
mof_dict['functions'].update(func.mof_metadata)
return dict(mof_dict)
class Function(Qualified):
""" Member function """
def __init__(self, name, parent, qualifiers, return_type, arguments,
default): # pylint: disable=too-many-arguments
super(Function, self).__init__(name, qualifiers)
self.parent = parent
self.arguments = arguments
self.return_type = return_type
self.default = default
@property
def key(self):
""" Is this a key property? """
return self.qualifiers.get("key", False)
@property
def required_inputs(self):
""" Return arguments that have a Required qualifier """
inputs = []
for arg in self.arguments:
if arg.IN and arg.required:
inputs.append(arg)
return inputs
@property
def optional_inputs(self):
""" Return all arguments without the Required qualifier """
inputs = []
for arg in self.arguments:
if arg.IN and not arg.required:
inputs.append(arg)
return inputs
@property
def inputs(self):
""" Return all arguments, required and optional """
inputs = []
for arg in self.arguments:
if arg.IN:
inputs.append(arg)
return inputs
@property
def outputs(self):
""" Return all return values """
outputs = []
for arg in self.arguments:
if arg.OUT:
outputs.append(arg)
return outputs
@property
def arg_str(self):
""" Return a pythonic string of args """
args = ['self']
args.extend([x.name for x in self.required_inputs])
args.extend(["{}=None".format(x.name) for x in self.optional_inputs])
return ", ".join(args)
@property
def attribute(self):
""" Is this function an attribute or method """
return not bool(self.arguments)
@property
def mof_metadata(self):
""" Return all metadata """
func_dict = collections.defaultdict(dict)
func_dict[self.name]['qualifiers'] = self.qualifiers
func_dict[self.name]['valuemap'] = self.valuemap
func_dict[self.name]['return_type'] = self.return_type
func_dict[self.name]['optional_inputs'] = {}
func_dict[self.name]['required_inputs'] = {}
func_dict[self.name]['outputs'] = {}
for arg in self.required_inputs:
func_dict[self.name]['required_inputs'].update(arg.mof_metadata)
for arg in self.optional_inputs:
func_dict[self.name]['optional_inputs'].update(arg.mof_metadata)
for arg in self.outputs:
func_dict[self.name]['outputs'].update(arg.mof_metadata)
# For the return value
return dict(func_dict)
class FunctionArg(Qualified):
""" Arguments have metadata too """
def __init__(self, name, parent, qualifiers, ctype):
super(FunctionArg, self).__init__(name, qualifiers)
self.parent = parent
self.ctype = ctype
if '[]' in name or '[]' in ctype: # pylint: disable=simplifiable-if-statement
self.is_list = True
else:
self.is_list = False
@property
def IN(self): # pylint: disable=invalid-name
""" Is this a return value or input value """
# This is complicated a little bit by args like
# DCIM_PhysicalComputerSystemView.SetOneTimeBootSource
# which has a required arg that specifies neither in
# or out.
if 'out' in self.qualifiers:
return False
return self.qualifiers.get("in", True)
@property
def OUT(self): # pylint: disable=invalid-name
""" Is this a return value or input value """
# Most mof files are OUT, but some say Out...
return self.qualifiers.get("out", False)
@property
def required(self):
""" Is this a required arg """
return bool(self.qualifiers.get("required", False))
@property
def arg_type(self):
""" Return a pythonic type for this argument """
arg_type = self.ctype
if 'int' in arg_type:
arg_type = 'int'
if self.is_list:
arg_type = 'list of {}'.format(arg_type)
if 'required' in self.qualifiers:
arg_type = "{}, optional".format(arg_type)
return arg_type
@property
def mapping_description(self):
"""
Return a docstring friendly explanation of how this argument is
mapped
"""
mapping_description_lines = []
if self.valuemap:
for value in sorted(self.valuemap.keys()):
mapping = self.valuemap[value]
mapping_description_lines.append("'{}' <-> '{}'\n".format(value, mapping))
return mapping_description_lines
@property
def mof_metadata(self):
""" Return all the information we know about this arg as a dictionary """
arg_dict = collections.defaultdict(dict)
arg_dict[self.name]['type'] = self.arg_type
arg_dict[self.name]['qualifiers'] = self.qualifiers
arg_dict[self.name]['valuemap'] = self.valuemap
return dict(arg_dict)
#
# MOF parser class
#
class MOFParser(object):
"""Parser for MOF data files"""
# location of textx metamodel config
META_MODEL_PATH = os.path.join(os.path.dirname(__file__), 'data', 'textex', 'dcim_mof_parse.tx')
# custom meta model classes
META_MODEL_CLASSES = [MOFClass, Function, FunctionArg]
# location of mof files
MOF_DIR = os.path.join(os.path.dirname(__file__), 'data', 'mof')
# info on a candidate MOF file
MOFFileEntry = collections.namedtuple('MOFFileEntry', field_names=['base_name', 'path'])
@staticmethod
def find_avail_mof_files(dell_version):
"""Collect list of available MOF files given the dell version"""
assert dell_version is not None
mof_path = os.path.join(MOFParser.MOF_DIR, dell_version)
entries = []
for mof_file_name in glob.glob('{}/*.[Mm][Oo][Ff]'.format(mof_path)):
mof_file_path = os.path.join(mof_path, mof_file_name)
mof_file_base_name = os.path.basename(mof_file_name)
entry = MOFParser.MOFFileEntry(base_name=mof_file_base_name, path=mof_file_path)
entries.append(entry)
_LOGGER.debug("Collected this list of available mof files for dell version %s : %s",
dell_version, entries)
return entries
def __init__(self):
_LOGGER.debug("Load textx metamodel for MOF parsing")
try:
metamodel = textx.metamodel.metamodel_from_file(
MOFParser.META_MODEL_PATH, classes=MOFParser.META_MODEL_CLASSES)
except Exception as error:
raise my_exceptions.CodeGenError("Fatal error loading textx metamodel for MOF parsing",
error)
else:
_LOGGER.debug("Successfully loaded text metamodel from %s : %s",
MOFParser.META_MODEL_PATH, metamodel)
self._meta_model = metamodel
def parser_mof_file(self, mof_file_entry):
"""Parse a single MOF file entry and return the resulting MOFClass model."""
assert isinstance(mof_file_entry, MOFParser.MOFFileEntry)
_LOGGER.info("Begin parsing MOF file: %s", mof_file_entry)
try:
mof_class = self._meta_model.model_from_file(mof_file_entry.path)
_LOGGER.debug("successfully parsed mof file")
except Exception as error:
raise my_exceptions.CodeGenError(
"Fatal error while parsing MOF file {}".format(mof_file_entry.path), error)
# now check if it has members
if not mof_class.members:
raise my_exceptions.SkipMOFFile(
"MOF class in MOF file {} has no members, so skipping it".format(
mof_file_entry.path))
_LOGGER.info("Finished parsing MOF file: %s", mof_file_entry)
return mof_class
| 30.143737 | 100 | 0.618188 |
| 795120d9661a627b7941be18f854f41be190ccda | 6,891 | py | Python | src/view/helpDialog.py | Curtin-Timescales-of-Mineral-Systems/UPb-Unmixer | 4391a77bbb7c198955af8637ed10854245cd15c3 | ["MIT"] | 1 | 2021-02-17T08:10:21.000Z | 2021-02-17T08:10:21.000Z | src/view/helpDialog.py | Curtin-Timescales-of-Mineral-Systems/UPb-Unmixer | 4391a77bbb7c198955af8637ed10854245cd15c3 | ["MIT"] | null | null | null | src/view/helpDialog.py | Curtin-Timescales-of-Mineral-Systems/UPb-Unmixer | 4391a77bbb7c198955af8637ed10854245cd15c3 | ["MIT"] | null | null | null |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QLabel, QTabWidget, QVBoxLayout, QWidget, QDialog, QLayout
from utils import config, string, calculations
class HelpDialog(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle(config.TITLE + " help")
about_label: QLabel = QLabel(self.getAboutHelpText())
about_label.title = "About"
inputs_label: QLabel = QLabel(self.getInputsHelpText())
inputs_label.title = "Inputs"
processing_label: QLabel = QLabel(self.getProcessingHelpText())
processing_label.title = "Processing"
outputs_label: QLabel = QLabel(self.getOutputsHelpText())
outputs_label.title = "Outputs"
tab_widget = QTabWidget()
for label in [about_label, inputs_label, processing_label, outputs_label]:
label.setTextFormat(Qt.RichText)
label.setWordWrap(True)
label.setTextInteractionFlags(Qt.TextSelectableByMouse|label.textInteractionFlags())
layout = QVBoxLayout()
layout.addWidget(label, 0, Qt.AlignTop)
widget = QWidget()
widget.setLayout(layout)
tab_widget.addTab(widget, label.title)
layout: QLayout = QVBoxLayout()
layout.addWidget(tab_widget)
self.setLayout(layout)
def _getStandardInputHelp(self) -> str:
return \
"Data can be parsed from a range of csv file layouts by specifying which columns the required values are " \
"in. Columns can be referred to either by using:" \
"<ul>" \
" <li> numbers (1, 2, 3, ..., 26, 27, ...)" \
" <li> letters (A, B, C, ..., Z, AA, ...)" \
"</ul>" \
"Different uncertainty formats are also supported:" \
"<ul>" \
" <li> percentage vs absolute" \
" <li> 1σ vs 2σ" \
"</ul>" \
"If a row in the imported data is invalid then it will be highlighted in <font color='red'>RED</font>." \
"<br>" \
"Symbols are not supported in column headings (e.g., ±, σ). Use only English alphabetic characters or " \
"numerals."
def _getStandardProcessingHelp(self) -> str:
return \
"Constants used:" \
"<ul>" \
"<li> ²³⁸U/²³⁵U ratio " + " " * 9 + " = " + string.getConstantStr(calculations.U238U235_RATIO) + \
"<li> ²³⁸U decay constant " + " " * 1 + " = " + string.getConstantStr(calculations.U238_DECAY_CONSTANT) + \
"<li> ²³⁵U decay constant " + " " * 1 + " = " + string.getConstantStr(calculations.U235_DECAY_CONSTANT) + \
"<li> ²³²Th decay constant = " + string.getConstantStr(calculations.TH232_DECAY_CONSTANT) + \
"<li> Avogadro constant " + " " * 3 + " = " + string.getConstantStr(calculations.AVOGADRO_NUMBER) + \
"<ul>"
def _getStandardOutputsHelp(self) -> str:
return \
"The plot may be fully customised (markers, colours, scale etc.) using the " \
"button in the toolbar at the bottom. The plot can also be saved to various image formats." \
"<br><br>" \
"When the calculated values are exported back to a CSV file, the values are appended to the end of the " \
"columns of the original CSV file."
def getInputsHelpText(self) -> str:
return \
"Input data required: <ul>" \
"<li> known age (in Ma) of rim (younger) component in mixture" \
"<li> measured ²³⁸U/²⁰⁶Pb and ²⁰⁷Pb/²⁰⁶Pb ratios" \
"<li> uncertainties for all of the above" \
"<li> U and Th concentrations (ppm)" \
"</ul>" + \
self._getStandardInputHelp()
def getProcessingHelpText(self) -> str:
return \
"Processing the data will attempt to calculate a reconstructed core age with associated uncertainty values." \
"<br><br>" \
"Uncertainties are calculated using second order error propagation. " \
"Due to the non-linearity of the concordia curve, the resulting uncertainties are not symmetric. " \
"In particular the upper uncertainty will be larger than the lower uncertainty." \
"<br><br>" \
"Rows with unsuccessful calculations will be highlighted in <font color='" + config.INVALID_CALCULATION_COLOUR + "'>ORANGE</font>." \
"<br><br>" \
"Rows with successful calculations that have a total score of < 0.5 will be highlighted in <font color='" + config.REJECTED_CALCULATION_COLOUR + "'>YELLOW</font>." \
"<br><br>" \
"Rows with successful calculations that have a total score of >= 0.5 will be highlighted in <font color='" + config.VALID_CALCULATION_COLOUR + "'>GREEN</font>." \
"<br><br>" + \
self._getStandardProcessingHelp()
def getOutputsHelpText(self) -> str:
return \
"The following values are output:" \
"<ul>" \
"<li> a reconstructed core age (in Ma)" \
"<li> reconstructed core ²³⁸U/²⁰⁶Pb and ²⁰⁷Pb/²⁰⁶Pb ratios " \
"<li> uncertainties for all of the above" \
"<li> metamict score for the reconstructed age" \
"<li> precision score for the reconstructed age" \
"<li> core:rim score for the reconstructed age" \
"<li> total score for the reconstructed age" \
"</ul>" \
"Ages with a total score of < 0.5 should be considered unreliable. " \
"See the paper for more details on how individual scores are calculated." \
"<br><br>" \
"Clicking on an individual row permits visual inspection of the full solution " \
"on the inverse Concordia plot. " \
"Selecting multiple rows permits visual inspection of the final calculated ages of " \
"all the rows in the selection." \
"<br><br>" + \
self._getStandardOutputsHelp()
def getAboutHelpText(self) -> str:
link = "https://github.com/Curtin-Timescales-of-Mineral-Systems/UPb-Unmixer/issues"
return \
'This program is accompanied by the following paper, which should be cited if this program is used in your results:' \
'<p style="text-align: center">Hugo K.H. Olierook, Christopher L. Kirkland, Milo Barham,' \
'<br>Matthew L. Daggitt, Julie Hollis, Michael Hartnady, ' \
'<br>Extracting meaningful U-Pb ages from core–rim mixtures, Gondwana Research, 2020</p>' \
'<br><br>' \
'Please report any feedback or issues that you may have with this program on the ' \
'Github page at: <p style="text-align: center"><a href="' + link + '">' + link + '</a></p>'
| 51.044444 | 181 | 0.595269 |
| 79512226bda6594208f2df1b5e7f4c6bf8546918 | 7,380 | py | Python | build/lib/WORC/classification/AdvancedSampler.py | Sikerdebaard/PREDICTFastr | e1f172c3606e6f33edf58008f958dcd1c0ac5b7b | ["ECL-2.0", "Apache-2.0"] | null | null | null | build/lib/WORC/classification/AdvancedSampler.py | Sikerdebaard/PREDICTFastr | e1f172c3606e6f33edf58008f958dcd1c0ac5b7b | ["ECL-2.0", "Apache-2.0"] | null | null | null | build/lib/WORC/classification/AdvancedSampler.py | Sikerdebaard/PREDICTFastr | e1f172c3606e6f33edf58008f958dcd1c0ac5b7b | ["ECL-2.0", "Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.utils import check_random_state
import numpy as np
from sklearn.externals import six
from ghalton import Halton
# from sobol_seq import i4_sobol_generate as Sobol  # needed only if method='Sobol' is used below
import scipy
from scipy.stats import uniform
import math
class log_uniform():
def __init__(self, loc=-1, scale=0, base=10):
self.loc = loc
self.scale = scale
self.base = base
self.uniform_dist = uniform(loc=self.loc, scale=self.scale)
def rvs(self, size=None, random_state=None):
if size is None:
return np.power(self.base, self.uniform_dist.rvs(random_state=random_state))
else:
return np.power(self.base, self.uniform_dist.rvs(size=size, random_state=random_state))
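# Illustrative: log_uniform(loc=-3, scale=6) draws an exponent uniformly from [-3, 3]
# and returns 10**exponent, i.e. values spread log-uniformly between 1e-3 and 1e3
# (a common way to search an SVM C or gamma range).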
class discrete_uniform():
def __init__(self, loc=-1, scale=0):
self.loc = loc
self.scale = scale
self.uniform_dist = uniform(loc=self.loc, scale=self.scale)
def rvs(self, size=None, random_state=None):
if size is None:
return int(self.uniform_dist.rvs(random_state=random_state))
else:
return int(self.uniform_dist.rvs(size=size, random_state=random_state))
class exp_uniform():
def __init__(self, loc=-1, scale=0, base=math.e):
self.loc = loc
self.scale = scale
self.base = base
def rvs(self, size=None, random_state=None):
uniform_dist = uniform(loc=self.loc, scale=self.scale)
if size is None:
return np.power(self.base, uniform_dist.rvs(random_state=random_state))
else:
return np.power(self.base, uniform_dist.rvs(size=size, random_state=random_state))
class AdvancedSampler(object):
"""Generator on parameters sampled from given distributions using
numerical sequences. Based on the sklearn ParameterSampler.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from WORC.classification.AdvancedSampler import HaltonSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(HaltonSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None,
method='Halton'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
self.method = method
if method == 'Halton':
self.Halton = Halton(len(self.param_distributions.keys()))
def __iter__(self):
# Create a random state to be used
rnd = check_random_state(self.random_state)
# Generate the sequence generator
if self.method == 'Halton':
sequence = self.Halton.get(self.n_iter)
elif self.method == 'Sobol':
sequence = Sobol(len(self.param_distributions.keys()), self.n_iter)
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for i in six.moves.range(self.n_iter):
sample = sequence[i]
params = dict()
for ind, (k, v) in enumerate(items):
point = sample[ind]
# Check if the parameter space is a distribution or a list
if hasattr(v, "rvs"):
print(point)
# Parameter space is a distribution, hence sample
params[k] = v.ppf(point)
else:
# Parameter space is a list, so select an index
point = int(round(point*float(len(v) - 1)))
print(point)
params[k] = v[point]
yield params
# For reproducibility, reset sampler if needed
if self.method == 'Halton':
self.Halton.reset()
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
if __name__ == '__main__':
random_seed = np.random.randint(1, 5000)
random_state = check_random_state(random_seed)
param_distributions = {'kernel': ['poly', 'RGB'],
'C': scipy.stats.uniform(loc=0, scale=1E6),
'degree': scipy.stats.uniform(loc=1, scale=6),
'coef0': scipy.stats.uniform(loc=0, scale=1),
'gamma': scipy.stats.uniform(loc=1E-5, scale=1),
'histogram_features': ['True', 'False']}
n_iter = 6
method = 'Sobol'
    sampled_params = AdvancedSampler(param_distributions,
                                     n_iter,
                                     random_state,
                                     method=method)
for s in sampled_params:
print(s)
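    # Sketch (illustrative only): the sampled dictionaries are meant to feed a
    # model-selection loop. ``make_estimator`` and ``evaluate`` below are
    # hypothetical placeholders, not part of WORC's API.
    #
    #     best_params, best_value = None, -float('inf')
    #     for params in AdvancedSampler(param_distributions, n_iter, random_state):
    #         estimator = make_estimator(**params)   # hypothetical factory
    #         value = evaluate(estimator)            # hypothetical scoring routine
    #         if value > best_value:
    #             best_params, best_value = params, value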
| 37.653061
| 99
| 0.628862
|
795124f5956467b4e725d767c1b0c0765f150e03
| 257
|
py
|
Python
|
0x04-python-more_data_structures/10-best_score.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/10-best_score.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/10-best_score.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def best_score(a_dictionary):
    """Return the key with the biggest integer value (None if empty)."""
    if not a_dictionary:
        return None
    res = None
    m = None
    for k, v in a_dictionary.items():
        if m is None or v > m:
            res = k
            m = v
    return res
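# Illustrative check (values are arbitrary examples, not from the task statement):
#     >>> best_score({'John': 12, 'Bob': 14, 'Alice': 9})
#     'Bob'
#     >>> best_score({}) is None and best_score(None) is None
#     True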
| 18.357143
| 41
| 0.44358
|
79512638a56b332004747d466d3ec9317b9bbb41
| 3,661
|
py
|
Python
|
profiles_project/profiles_api/views.py
|
aseem-hegshetye/Django_Rest_Api
|
663693b4003d012029fc3365f01b171824229344
|
[
"MIT"
] | null | null | null |
profiles_project/profiles_api/views.py
|
aseem-hegshetye/Django_Rest_Api
|
663693b4003d012029fc3365f01b171824229344
|
[
"MIT"
] | 15
|
2020-06-06T00:44:36.000Z
|
2022-03-12T00:18:42.000Z
|
profiles_project/profiles_api/views.py
|
aseem-hegshetye/Django_Rest_Api
|
663693b4003d012029fc3365f01b171824229344
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.views import APIView, Response, status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from . import models
from . import permissions
from .serializers import *
class HelloAPIView(APIView):
""" Test API View"""
def get(self, request, format=None):
api_list = [
'item1',
'item2'
]
return Response({'message': 'test view', 'data': api_list}, status=status.HTTP_200_OK)
def post(self, request, format=None):
"""create hello message with our name """
serializer = HelloSerializer(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def put(self, request, pk=None):
        return Response({'method': 'PUT'})
    def patch(self, request, pk=None):
        return Response({'method': 'PATCH'})
    def delete(self, request, pk=None):
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
""" test api view set"""
    serializer_class = HelloSerializer  # declare serializer_class at the top so Django REST framework renders it in the browsable API
def list(self, request):
api_viewset = [
'asidas',
'aosidjais'
]
return Response({'message': api_viewset}, status=status.HTTP_200_OK)
def create(self, request):
""" create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'hello {name}'
return Response({'message': message})
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
return Response({'method': 'get'})
def update(self, request, pk=None):
return Response({'method': 'put'})
def partial_update(self, request, pk=None):
return Response({'method': 'patch'})
def destroy(self, request, pk=None):
return Response({'method': 'delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""
handle creating and updating profile
authentication -
"""
serializer_class = UserProfileSerializer
queryset = models.CustomUser.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('first_name', 'email')
class UserLoginApiView(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
# token: 3b582ecfa00f0363ad7d306f006d0af2bf84ddc4
class UserProfileFeedViewSet(viewsets.ModelViewSet):
""" handle profile feed updates and creation"""
queryset = models.ProfileFeedItem.objects.all()
serializer_class = ProfileFeedSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (
permissions.UpdateOwnStatus,
IsAuthenticated
)
def perform_create(self, serializer):
""" overwritting POST request made to create new object"""
serializer.save(user_profile=self.request.user)
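# Sketch of how these views are typically wired up in urls.py (illustrative;
# the route names and module layout below are assumptions, not taken from this project):
#
#     from django.urls import path, include
#     from rest_framework.routers import DefaultRouter
#     from . import views
#
#     router = DefaultRouter()
#     router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
#     router.register('profile', views.UserProfileViewSet)
#     router.register('feed', views.UserProfileFeedViewSet)
#
#     urlpatterns = [
#         path('hello-view/', views.HelloAPIView.as_view()),
#         path('login/', views.UserLoginApiView.as_view()),
#         path('', include(router.urls)),
#     ]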
| 32.114035
| 126
| 0.679869
|
795126b292ac556dd3a62d41d01831097b39fc84
| 3,524
|
py
|
Python
|
PyAxe/AFTP.py
|
sunjinopensource/PyAxe
|
4b01d0aec83006f9ab366acbefbe7a2bcd5bee10
|
[
"MIT"
] | null | null | null |
PyAxe/AFTP.py
|
sunjinopensource/PyAxe
|
4b01d0aec83006f9ab366acbefbe7a2bcd5bee10
|
[
"MIT"
] | null | null | null |
PyAxe/AFTP.py
|
sunjinopensource/PyAxe
|
4b01d0aec83006f9ab366acbefbe7a2bcd5bee10
|
[
"MIT"
] | null | null | null |
import os
import ftplib
import re
from . import ALog
ANONYMOUS_USERPASS = ('', '')
class FileStat:
def __init__(self):
self.isDir = False
self.permissionBits = '' # 'rwxrwxrwx'
self.name = ''
self.size = 0
def dir(host, targetDir='/', userpass=ANONYMOUS_USERPASS):
ret = []
def parseLine(line):
"""
drwxr-xr-x 1 ftp ftp 0 Nov 02 15:47 TestDir
-rw-r--r-- 1 ftp ftp 0 Nov 02 14:51 test.txt
-rwxr-xr-x 1 ftp ftp 2943704 Aug 02 2016 cpuz_x32.exe
-rw-r--r-- 1 ftp ftp 463451034 Jul 25 2016 exe_7v7_20160725_130231_master_a1b66ed_svn1206.rar
"""
pattern = re.compile(r'([dl\-])([r\-][w\-][xsStT\-][r\-][w\-][xsStT\-][r\-][w\-][xsStT\-])\s+(\d+)\s+(\S+?)\s+(\S+?)\s+(\d+)\s+([a-zA-Z]{3})\s+(\d{2})\s+(\S+?)\s+(.*)')
result = pattern.match(line)
dir, permissionBits, fileCount, user, group, size, month, day, yearOrTime, name = result.groups()
stat = FileStat()
stat.isDir = dir == 'd'
stat.permissionBits = permissionBits
stat.name = name
stat.size = int(size)
ret.append(stat)
with ftplib.FTP(host) as ftp:
ftp.login(userpass[0], userpass[1])
ftp.cwd(targetDir)
ftp.dir(parseLine)
return ret
def upload(host, localFilePath, targetDir='/', userpass=ANONYMOUS_USERPASS):
"""
    Upload a local file into the target directory on the FTP server.
    The target directory must already exist.
eg. upload('192.168.3.250', 'C:\\test\\a.txt', 'A/B') ==> A/B/a.txt
"""
ALog.info('-=> ftp upload(%s, %s, %s)', host, localFilePath, targetDir)
with ftplib.FTP(host) as ftp:
ftp.login(userpass[0], userpass[1])
ftp.cwd(targetDir)
        with open(localFilePath, 'rb') as f:
            ftp.storbinary('STOR %s' % os.path.basename(localFilePath), f)
def download(host, targetFilePath, localDir='.', userpass=ANONYMOUS_USERPASS):
"""
    Download the target file into a local directory.
    The local directory must already exist.
"""
ALog.info('-=> ftp download(%s, %s, %s)', host, targetFilePath, localDir)
targetDir = os.path.dirname(targetFilePath)
targetFileName = os.path.basename(targetFilePath)
with ftplib.FTP(host) as ftp:
ftp.login(userpass[0], userpass[1])
ftp.cwd(targetDir)
        with open(os.path.join(localDir, targetFileName), 'wb') as f:
            ftp.retrbinary('RETR %s' % targetFileName, f.write)
def moveFile(host, srcPath, dstPath, userpass=ANONYMOUS_USERPASS):
"""
    Move a file from the source path to the destination path on the FTP server.
"""
ALog.info('-=> ftp move(%s, %s, %s)', host, srcPath, dstPath)
with ftplib.FTP(host) as ftp:
ftp.login(userpass[0], userpass[1])
ftp.rename(srcPath, dstPath)
def isDir(host, path, userpass=ANONYMOUS_USERPASS):
"""
    Return whether the target directory exists on the FTP server.
"""
try:
with ftplib.FTP(host) as ftp:
ftp.login(userpass[0], userpass[1])
ftp.cwd(path)
return True
    except Exception:
return False
def isFile(host, path, userpass=ANONYMOUS_USERPASS):
"""
    Return whether the target file exists on the FTP server.
"""
targetDir = os.path.dirname(path)
targetName = os.path.basename(path)
try:
files = dir(host, targetDir, userpass)
    except Exception:
return False
for file in files:
if not file.isDir and file.name == targetName:
return True
return False
def exists(host, path, userpass=ANONYMOUS_USERPASS):
"""
    Return whether the target (directory or file) exists on the FTP server.
"""
return isDir(host, path, userpass) or isFile(host, path, userpass)
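# Illustrative usage sketch (host, paths and credentials below are placeholders):
#
#     from PyAxe import AFTP
#
#     if AFTP.exists('192.168.3.250', '/A/B/test.txt'):
#         AFTP.download('192.168.3.250', '/A/B/test.txt', localDir='C:\\tmp')
#     for stat in AFTP.dir('192.168.3.250', '/A/B'):
#         print(stat.name, stat.size, 'dir' if stat.isDir else 'file')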
| 30.119658
| 177
| 0.571793
|
7951277a710e2424e66291bfe8c36119cc185bbd
| 929
|
py
|
Python
|
RecoHGCal/TICL/python/SimTracksters_cff.py
|
soumyadipbarman/cmssw
|
1e8e5a42bd6a4f7eb5a4dc2523ca21b04b687658
|
[
"Apache-2.0"
] | 13
|
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
RecoHGCal/TICL/python/SimTracksters_cff.py
|
soumyadipbarman/cmssw
|
1e8e5a42bd6a4f7eb5a4dc2523ca21b04b687658
|
[
"Apache-2.0"
] | 640
|
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
RecoHGCal/TICL/python/SimTracksters_cff.py
|
soumyadipbarman/cmssw
|
1e8e5a42bd6a4f7eb5a4dc2523ca21b04b687658
|
[
"Apache-2.0"
] | 51
|
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
import FWCore.ParameterSet.Config as cms
from RecoHGCal.TICL.trackstersFromSimClustersProducer_cfi import trackstersFromSimClustersProducer as _trackstersFromSimClustersProducer
from RecoHGCal.TICL.filteredLayerClustersProducer_cfi import filteredLayerClustersProducer as _filteredLayerClustersProducer
# CA - PATTERN RECOGNITION
filteredLayerClustersSimTracksters = _filteredLayerClustersProducer.clone(
clusterFilter = "ClusterFilterByAlgoAndSize",
algo_number = 8,
min_cluster_size = 0, # inclusive
iteration_label = "ticlSimTracksters"
)
ticlSimTracksters = _trackstersFromSimClustersProducer.clone(
)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
premix_stage2.toModify(ticlSimTracksters,
simclusters = "mixData:MergedCaloTruth",
caloparticles = "mixData:MergedCaloTruth",
)
ticlSimTrackstersTask = cms.Task(filteredLayerClustersSimTracksters, ticlSimTracksters)
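# Minimal sketch of pulling this fragment into a job configuration (the process
# name and wiring below are illustrative assumptions, not part of this file):
#
#     import FWCore.ParameterSet.Config as cms
#     process = cms.Process("TICL")
#     process.load("RecoHGCal.TICL.SimTracksters_cff")
#     # ticlSimTrackstersTask is then associated with whichever Path/Schedule
#     # runs the TICL reconstruction sequence.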
| 34.407407
| 136
| 0.8493
|
7951277be2e0540b73a805c24f3393759fef015e
| 189,965
|
py
|
Python
|
qiskit/circuit/quantumcircuit.py
|
itoko/qiskit-terra
|
5c61d8f2df5a2255e1f31a53be79a45a1abe0cd1
|
[
"Apache-2.0"
] | 1,456
|
2017-08-05T16:33:05.000Z
|
2018-06-05T04:15:35.000Z
|
qiskit/circuit/quantumcircuit.py
|
itoko/qiskit-terra
|
5c61d8f2df5a2255e1f31a53be79a45a1abe0cd1
|
[
"Apache-2.0"
] | 365
|
2017-08-04T06:09:16.000Z
|
2018-06-05T08:33:37.000Z
|
qiskit/circuit/quantumcircuit.py
|
itoko/qiskit-terra
|
5c61d8f2df5a2255e1f31a53be79a45a1abe0cd1
|
[
"Apache-2.0"
] | 463
|
2017-08-05T04:10:01.000Z
|
2018-06-05T06:43:21.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=bad-docstring-quotes,invalid-name
"""Quantum circuit object."""
import copy
import itertools
import functools
import multiprocessing as mp
import string
import re
from collections import OrderedDict, defaultdict, namedtuple
from typing import (
Union,
Optional,
List,
Dict,
Tuple,
Type,
TypeVar,
Sequence,
Callable,
Mapping,
Set,
Iterable,
)
import typing
import numpy as np
from qiskit.exceptions import QiskitError, MissingOptionalLibraryError
from qiskit.utils.multiprocessing import is_main_process
from qiskit.circuit.instruction import Instruction
from qiskit.circuit.gate import Gate
from qiskit.circuit.parameter import Parameter
from qiskit.qasm.qasm import Qasm
from qiskit.qasm.exceptions import QasmError
from qiskit.circuit.exceptions import CircuitError
from qiskit.utils.deprecation import deprecate_function
from .parameterexpression import ParameterExpression, ParameterValueType
from .quantumregister import QuantumRegister, Qubit, AncillaRegister, AncillaQubit
from .classicalregister import ClassicalRegister, Clbit
from .parametertable import ParameterTable, ParameterView
from .parametervector import ParameterVector, ParameterVectorElement
from .instructionset import InstructionSet
from .register import Register
from .bit import Bit
from .quantumcircuitdata import QuantumCircuitData
from .delay import Delay
from .measure import Measure
from .reset import Reset
try:
import pygments
from pygments.formatters import Terminal256Formatter # pylint: disable=no-name-in-module
from qiskit.qasm.pygments import OpenQASMLexer # pylint: disable=ungrouped-imports
from qiskit.qasm.pygments import QasmTerminalStyle # pylint: disable=ungrouped-imports
HAS_PYGMENTS = True
except Exception: # pylint: disable=broad-except
HAS_PYGMENTS = False
if typing.TYPE_CHECKING:
import qiskit # pylint: disable=cyclic-import
BitLocations = namedtuple("BitLocations", ("index", "registers"))
# The following types are not marked private to avoid leaking this "private/public" abstraction out
# into the documentation. They are not imported by circuit.__init__, nor are they meant to be.
# Arbitrary type variables for marking up generics.
S = TypeVar("S")
T = TypeVar("T")
# Type of the elements of QuantumCircuit._data.
DataElement = Tuple[Instruction, List[Qubit], List[Clbit]]
# Types that can be coerced to a valid Qubit specifier in a circuit.
QubitSpecifier = Union[
Qubit,
QuantumRegister,
int,
slice,
Sequence[Union[Qubit, int]],
]
# Types that can be coerced to a valid Clbit specifier in a circuit.
ClbitSpecifier = Union[
Clbit,
ClassicalRegister,
int,
slice,
Sequence[Union[Clbit, int]],
]
# Generic type which is either :obj:`~Qubit` or :obj:`~Clbit`, used to specify types of functions
# which operate on either type of bit, but not both at the same time.
BitType = TypeVar("BitType", Qubit, Clbit)
# Regex pattern to match valid OpenQASM identifiers
VALID_QASM2_IDENTIFIER = re.compile("[a-z][a-zA-Z_0-9]*")
class QuantumCircuit:
"""Create a new circuit.
A circuit is a list of instructions bound to some registers.
Args:
regs (list(:class:`Register`) or list(``int``) or list(list(:class:`Bit`))): The
registers to be included in the circuit.
* If a list of :class:`Register` objects, represents the :class:`QuantumRegister`
and/or :class:`ClassicalRegister` objects to include in the circuit.
For example:
* ``QuantumCircuit(QuantumRegister(4))``
* ``QuantumCircuit(QuantumRegister(4), ClassicalRegister(3))``
* ``QuantumCircuit(QuantumRegister(4, 'qr0'), QuantumRegister(2, 'qr1'))``
* If a list of ``int``, the amount of qubits and/or classical bits to include in
the circuit. It can either be a single int for just the number of quantum bits,
or 2 ints for the number of quantum bits and classical bits, respectively.
For example:
* ``QuantumCircuit(4) # A QuantumCircuit with 4 qubits``
* ``QuantumCircuit(4, 3) # A QuantumCircuit with 4 qubits and 3 classical bits``
* If a list of python lists containing :class:`Bit` objects, a collection of
:class:`Bit` s to be added to the circuit.
name (str): the name of the quantum circuit. If not set, an
automatically generated string will be assigned.
global_phase (float or ParameterExpression): The global phase of the circuit in radians.
metadata (dict): Arbitrary key value metadata to associate with the
circuit. This gets stored as free-form data in a dict in the
:attr:`~qiskit.circuit.QuantumCircuit.metadata` attribute. It will
not be directly used in the circuit.
Raises:
CircuitError: if the circuit name, if given, is not valid.
Examples:
Construct a simple Bell state circuit.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
qc.draw()
Construct a 5-qubit GHZ circuit.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(5)
qc.h(0)
qc.cx(0, range(1, 5))
qc.measure_all()
Construct a 4-qubit Bernstein-Vazirani circuit using registers.
.. jupyter-execute::
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
qr = QuantumRegister(3, 'q')
anc = QuantumRegister(1, 'ancilla')
cr = ClassicalRegister(3, 'c')
qc = QuantumCircuit(qr, anc, cr)
qc.x(anc[0])
qc.h(anc[0])
qc.h(qr[0:3])
qc.cx(qr[0:3], anc[0])
qc.h(qr[0:3])
qc.barrier(qr)
qc.measure(qr, cr)
qc.draw()
"""
instances = 0
prefix = "circuit"
# Class variable OPENQASM header
header = "OPENQASM 2.0;"
extension_lib = 'include "qelib1.inc";'
def __init__(
self,
*regs: Union[Register, int, Sequence[Bit]],
name: Optional[str] = None,
global_phase: ParameterValueType = 0,
metadata: Optional[Dict] = None,
):
if any(not isinstance(reg, (list, QuantumRegister, ClassicalRegister)) for reg in regs):
# check if inputs are integers, but also allow e.g. 2.0
try:
valid_reg_size = all(reg == int(reg) for reg in regs)
except (ValueError, TypeError):
valid_reg_size = False
if not valid_reg_size:
raise CircuitError(
"Circuit args must be Registers or integers. (%s '%s' was "
"provided)" % ([type(reg).__name__ for reg in regs], regs)
)
regs = tuple(int(reg) for reg in regs) # cast to int
self._base_name = None
if name is None:
self._base_name = self.cls_prefix()
self._name_update()
elif not isinstance(name, str):
raise CircuitError(
"The circuit name should be a string (or None to auto-generate a name)."
)
else:
self._base_name = name
self.name = name
self._increment_instances()
# Data contains a list of instructions and their contexts,
# in the order they were applied.
self._data = []
# A stack to hold the instruction sets that are being built up during for-, if- and
# while-block construction. These are stored as a stripped down sequence of instructions,
# and sets of qubits and clbits, rather than a full QuantumCircuit instance because the
# builder interfaces need to wait until they are completed before they can fill in things
# like `break` and `continue`. This is because these instructions need to "operate" on the
# full width of bits, but the builder interface won't know what bits are used until the end.
self._control_flow_scopes = []
self.qregs = []
self.cregs = []
self._qubits = []
self._clbits = []
# Dict mapping Qubit or Clbit instances to tuple comprised of 0) the
# corresponding index in circuit.{qubits,clbits} and 1) a list of
# Register-int pairs for each Register containing the Bit and its index
# within that register.
self._qubit_indices = {}
self._clbit_indices = {}
self._ancillas = []
self._calibrations = defaultdict(dict)
self.add_register(*regs)
# Parameter table tracks instructions with variable parameters.
self._parameter_table = ParameterTable()
# Cache to avoid re-sorting parameters
self._parameters = None
self._layout = None
self._global_phase: ParameterValueType = 0
self.global_phase = global_phase
self.duration = None
self.unit = "dt"
if not isinstance(metadata, dict) and metadata is not None:
raise TypeError("Only a dictionary or None is accepted for circuit metadata")
self._metadata = metadata
@property
def data(self) -> QuantumCircuitData:
"""Return the circuit data (instructions and context).
Returns:
QuantumCircuitData: a list-like object containing the tuples for the circuit's data.
Each tuple is in the format ``(instruction, qargs, cargs)``, where instruction is an
Instruction (or subclass) object, qargs is a list of Qubit objects, and cargs is a
list of Clbit objects.
"""
return QuantumCircuitData(self)
@data.setter
def data(
self, data_input: List[Tuple[Instruction, List[QubitSpecifier], List[ClbitSpecifier]]]
):
"""Sets the circuit data from a list of instructions and context.
Args:
data_input (list): A list of instructions with context
in the format (instruction, qargs, cargs), where Instruction
is an Instruction (or subclass) object, qargs is a list of
Qubit objects, and cargs is a list of Clbit objects.
"""
# If data_input is QuantumCircuitData(self), clearing self._data
# below will also empty data_input, so make a shallow copy first.
data_input = data_input.copy()
self._data = []
self._parameter_table = ParameterTable()
for inst, qargs, cargs in data_input:
self.append(inst, qargs, cargs)
@property
def calibrations(self) -> dict:
"""Return calibration dictionary.
The custom pulse definition of a given gate is of the form
{'gate_name': {(qubits, params): schedule}}
"""
return dict(self._calibrations)
@calibrations.setter
def calibrations(self, calibrations: dict):
"""Set the circuit calibration data from a dictionary of calibration definition.
Args:
calibrations (dict): A dictionary of input in the format
{'gate_name': {(qubits, gate_params): schedule}}
"""
self._calibrations = defaultdict(dict, calibrations)
@property
def metadata(self) -> dict:
"""The user provided metadata associated with the circuit
The metadata for the circuit is a user provided ``dict`` of metadata
for the circuit. It will not be used to influence the execution or
operation of the circuit, but it is expected to be passed between
all transforms of the circuit (ie transpilation) and that providers will
associate any circuit metadata with the results it returns from
execution of that circuit.
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: Optional[dict]):
"""Update the circuit metadata"""
if not isinstance(metadata, dict) and metadata is not None:
raise TypeError("Only a dictionary or None is accepted for circuit metadata")
self._metadata = metadata
def __str__(self) -> str:
return str(self.draw(output="text"))
def __eq__(self, other) -> bool:
if not isinstance(other, QuantumCircuit):
return False
# TODO: remove the DAG from this function
from qiskit.converters import circuit_to_dag
return circuit_to_dag(self) == circuit_to_dag(other)
@classmethod
def _increment_instances(cls):
cls.instances += 1
@classmethod
def cls_instances(cls) -> int:
"""Return the current number of instances of this class,
useful for auto naming."""
return cls.instances
@classmethod
def cls_prefix(cls) -> str:
"""Return the prefix to use for auto naming."""
return cls.prefix
def _name_update(self) -> None:
"""update name of instance using instance number"""
if not is_main_process():
pid_name = f"-{mp.current_process().pid}"
else:
pid_name = ""
self.name = f"{self._base_name}-{self.cls_instances()}{pid_name}"
def has_register(self, register: Register) -> bool:
"""
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
"""
has_reg = False
if isinstance(register, QuantumRegister) and register in self.qregs:
has_reg = True
elif isinstance(register, ClassicalRegister) and register in self.cregs:
has_reg = True
return has_reg
def reverse_ops(self) -> "QuantumCircuit":
"""Reverse the circuit by reversing the order of instructions.
This is done by recursively reversing all instructions.
It does not invert (adjoint) any gate.
Returns:
QuantumCircuit: the reversed circuit.
Examples:
input:
.. parsed-literal::
┌───┐
q_0: ┤ H ├─────■──────
└───┘┌────┴─────┐
q_1: ─────┤ RX(1.57) ├
└──────────┘
output:
.. parsed-literal::
┌───┐
q_0: ─────■──────┤ H ├
┌────┴─────┐└───┘
q_1: ┤ RX(1.57) ├─────
└──────────┘
"""
reverse_circ = QuantumCircuit(
self.qubits, self.clbits, *self.qregs, *self.cregs, name=self.name + "_reverse"
)
for inst, qargs, cargs in reversed(self.data):
reverse_circ._append(inst.reverse_ops(), qargs, cargs)
reverse_circ.duration = self.duration
reverse_circ.unit = self.unit
return reverse_circ
def reverse_bits(self) -> "QuantumCircuit":
"""Return a circuit with the opposite order of wires.
The circuit is "vertically" flipped. If a circuit is
defined over multiple registers, the resulting circuit will have
the same registers but with their order flipped.
This method is useful for converting a circuit written in little-endian
convention to the big-endian equivalent, and vice versa.
Returns:
QuantumCircuit: the circuit with reversed bit order.
Examples:
input:
.. parsed-literal::
┌───┐
q_0: ┤ H ├─────■──────
└───┘┌────┴─────┐
q_1: ─────┤ RX(1.57) ├
└──────────┘
output:
.. parsed-literal::
┌──────────┐
q_0: ─────┤ RX(1.57) ├
┌───┐└────┬─────┘
q_1: ┤ H ├─────■──────
└───┘
"""
circ = QuantumCircuit(
*reversed(self.qregs),
*reversed(self.cregs),
name=self.name,
global_phase=self.global_phase,
)
num_qubits = self.num_qubits
num_clbits = self.num_clbits
old_qubits = self.qubits
old_clbits = self.clbits
new_qubits = circ.qubits
new_clbits = circ.clbits
for inst, qargs, cargs in self.data:
new_qargs = [new_qubits[num_qubits - old_qubits.index(q) - 1] for q in qargs]
new_cargs = [new_clbits[num_clbits - old_clbits.index(c) - 1] for c in cargs]
circ._append(inst, new_qargs, new_cargs)
return circ
def inverse(self) -> "QuantumCircuit":
"""Invert (take adjoint of) this circuit.
This is done by recursively inverting all gates.
Returns:
QuantumCircuit: the inverted circuit
Raises:
CircuitError: if the circuit cannot be inverted.
Examples:
input:
.. parsed-literal::
┌───┐
q_0: ┤ H ├─────■──────
└───┘┌────┴─────┐
q_1: ─────┤ RX(1.57) ├
└──────────┘
output:
.. parsed-literal::
┌───┐
q_0: ──────■──────┤ H ├
┌─────┴─────┐└───┘
q_1: ┤ RX(-1.57) ├─────
└───────────┘
"""
inverse_circ = QuantumCircuit(
self.qubits,
self.clbits,
*self.qregs,
*self.cregs,
name=self.name + "_dg",
global_phase=-self.global_phase,
)
for inst, qargs, cargs in reversed(self._data):
inverse_circ._append(inst.inverse(), qargs, cargs)
return inverse_circ
def repeat(self, reps: int) -> "QuantumCircuit":
"""Repeat this circuit ``reps`` times.
Args:
reps (int): How often this circuit should be repeated.
Returns:
QuantumCircuit: A circuit containing ``reps`` repetitions of this circuit.
"""
repeated_circ = QuantumCircuit(
self.qubits, self.clbits, *self.qregs, *self.cregs, name=self.name + f"**{reps}"
)
# benefit of appending instructions: decomposing shows the subparts, i.e. the power
# is actually `reps` times this circuit, and it is currently much faster than `compose`.
if reps > 0:
try: # try to append as gate if possible to not disallow to_gate
inst: Instruction = self.to_gate()
except QiskitError:
inst = self.to_instruction()
for _ in range(reps):
repeated_circ._append(inst, self.qubits, self.clbits)
return repeated_circ
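    # Example (sketch): a one-qubit circuit with a single X gate, repeated three
    # times, wraps the circuit as a gate and appends it ``reps`` times:
    #     qc = QuantumCircuit(1)
    #     qc.x(0)
    #     qc3 = qc.repeat(3)
    #     qc3.decompose().count_ops()   # -> {'x': 3}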
def power(self, power: float, matrix_power: bool = False) -> "QuantumCircuit":
"""Raise this circuit to the power of ``power``.
If ``power`` is a positive integer and ``matrix_power`` is ``False``, this implementation
defaults to calling ``repeat``. Otherwise, if the circuit is unitary, the matrix is
computed to calculate the matrix power.
Args:
power (float): The power to raise this circuit to.
matrix_power (bool): If True, the circuit is converted to a matrix and then the
matrix power is computed. If False, and ``power`` is a positive integer,
the implementation defaults to ``repeat``.
Raises:
CircuitError: If the circuit needs to be converted to a gate but it is not unitary.
Returns:
QuantumCircuit: A circuit implementing this circuit raised to the power of ``power``.
"""
if power >= 0 and isinstance(power, (int, np.integer)) and not matrix_power:
return self.repeat(power)
# attempt conversion to gate
if self.num_parameters > 0:
raise CircuitError(
"Cannot raise a parameterized circuit to a non-positive power "
"or matrix-power, please bind the free parameters: "
"{}".format(self.parameters)
)
try:
gate = self.to_gate()
except QiskitError as ex:
raise CircuitError(
"The circuit contains non-unitary operations and cannot be "
"controlled. Note that no qiskit.circuit.Instruction objects may "
"be in the circuit for this operation."
) from ex
power_circuit = QuantumCircuit(self.qubits, self.clbits, *self.qregs, *self.cregs)
power_circuit.append(gate.power(power), list(range(gate.num_qubits)))
return power_circuit
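    # Example (sketch): positive integer powers reduce to ``repeat``; fractional
    # or matrix powers are computed from the circuit's unitary via ``Gate.power``:
    #     qc = QuantumCircuit(1)
    #     qc.x(0)
    #     qc.power(2)     # same as qc.repeat(2)
    #     qc.power(0.5)   # circuit wrapping a unitary implementing sqrt(X)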
def control(
self,
num_ctrl_qubits: int = 1,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> "QuantumCircuit":
"""Control this circuit on ``num_ctrl_qubits`` qubits.
Args:
num_ctrl_qubits (int): The number of control qubits.
label (str): An optional label to give the controlled operation for visualization.
ctrl_state (str or int): The control state in decimal or as a bitstring
(e.g. '111'). If None, use ``2**num_ctrl_qubits - 1``.
Returns:
QuantumCircuit: The controlled version of this circuit.
Raises:
CircuitError: If the circuit contains a non-unitary operation and cannot be controlled.
"""
try:
gate = self.to_gate()
except QiskitError as ex:
raise CircuitError(
"The circuit contains non-unitary operations and cannot be "
"controlled. Note that no qiskit.circuit.Instruction objects may "
"be in the circuit for this operation."
) from ex
controlled_gate = gate.control(num_ctrl_qubits, label, ctrl_state)
control_qreg = QuantumRegister(num_ctrl_qubits)
controlled_circ = QuantumCircuit(
control_qreg, self.qubits, *self.qregs, name=f"c_{self.name}"
)
controlled_circ.append(controlled_gate, controlled_circ.qubits)
return controlled_circ
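    # Example (sketch): controlling a small circuit prepends the control register
    # to the original qubits:
    #     bell = QuantumCircuit(2)
    #     bell.h(0)
    #     bell.cx(0, 1)
    #     controlled = bell.control(num_ctrl_qubits=1)   # 3-qubit circuit named "c_..."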
@deprecate_function(
"The QuantumCircuit.combine() method is being deprecated. "
"Use the compose() method which is more flexible w.r.t "
"circuit register compatibility."
)
def combine(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""DEPRECATED - Returns rhs appended to self if self contains compatible registers.
Two circuits are compatible if they contain the same registers
or if they contain different registers with unique names. The
returned circuit will contain all unique registers between both
circuits.
Return self + rhs as a new object.
Args:
rhs (QuantumCircuit): The quantum circuit to append to the right hand side.
Returns:
QuantumCircuit: Returns a new QuantumCircuit object
Raises:
QiskitError: if the rhs circuit is not compatible
"""
# Check registers in LHS are compatible with RHS
self._check_compatible_regs(rhs)
# Make new circuit with combined registers
combined_qregs = copy.deepcopy(self.qregs)
combined_cregs = copy.deepcopy(self.cregs)
for element in rhs.qregs:
if element not in self.qregs:
combined_qregs.append(element)
for element in rhs.cregs:
if element not in self.cregs:
combined_cregs.append(element)
circuit = QuantumCircuit(*combined_qregs, *combined_cregs)
for instruction_context in itertools.chain(self.data, rhs.data):
circuit._append(*instruction_context)
circuit.global_phase = self.global_phase + rhs.global_phase
for gate, cals in rhs.calibrations.items():
for key, sched in cals.items():
circuit.add_calibration(gate, qubits=key[0], schedule=sched, params=key[1])
for gate, cals in self.calibrations.items():
for key, sched in cals.items():
circuit.add_calibration(gate, qubits=key[0], schedule=sched, params=key[1])
return circuit
@deprecate_function(
"The QuantumCircuit.extend() method is being deprecated. Use the "
"compose() (potentially with the inplace=True argument) and tensor() "
"methods which are more flexible w.r.t circuit register compatibility."
)
def extend(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""DEPRECATED - Append QuantumCircuit to the RHS if it contains compatible registers.
Two circuits are compatible if they contain the same registers
or if they contain different registers with unique names. The
returned circuit will contain all unique registers between both
circuits.
Modify and return self.
Args:
rhs (QuantumCircuit): The quantum circuit to append to the right hand side.
Returns:
QuantumCircuit: Returns this QuantumCircuit object (which has been modified)
Raises:
QiskitError: if the rhs circuit is not compatible
"""
# Check registers in LHS are compatible with RHS
self._check_compatible_regs(rhs)
# Add new registers
for element in rhs.qregs:
if element not in self.qregs:
self.add_register(element)
for element in rhs.cregs:
if element not in self.cregs:
self.add_register(element)
# Copy the circuit data if rhs and self are the same, otherwise the data of rhs is
# appended to both self and rhs resulting in an infinite loop
data = rhs.data.copy() if rhs is self else rhs.data
# Add new gates
for instruction_context in data:
self._append(*instruction_context)
self.global_phase += rhs.global_phase
for gate, cals in rhs.calibrations.items():
for key, sched in cals.items():
self.add_calibration(gate, qubits=key[0], schedule=sched, params=key[1])
return self
def compose(
self,
other: Union["QuantumCircuit", Instruction],
qubits: Optional[Sequence[Union[Qubit, int]]] = None,
clbits: Optional[Sequence[Union[Clbit, int]]] = None,
front: bool = False,
inplace: bool = False,
wrap: bool = False,
) -> Optional["QuantumCircuit"]:
"""Compose circuit with ``other`` circuit or instruction, optionally permuting wires.
``other`` can be narrower or of equal width to ``self``.
Args:
other (qiskit.circuit.Instruction or QuantumCircuit):
(sub)circuit or instruction to compose onto self. If not a :obj:`.QuantumCircuit`,
this can be anything that :obj:`.append` will accept.
qubits (list[Qubit|int]): qubits of self to compose onto.
clbits (list[Clbit|int]): clbits of self to compose onto.
front (bool): If True, front composition will be performed (not implemented yet).
inplace (bool): If True, modify the object. Otherwise return composed circuit.
wrap (bool): If True, wraps the other circuit into a gate (or instruction, depending on
whether it contains only unitary instructions) before composing it onto self.
Returns:
QuantumCircuit: the composed circuit (returns None if inplace==True).
Raises:
CircuitError: if composing on the front.
QiskitError: if ``other`` is wider or there are duplicate edge mappings.
Examples::
lhs.compose(rhs, qubits=[3, 2], inplace=True)
.. parsed-literal::
┌───┐ ┌─────┐ ┌───┐
lqr_1_0: ───┤ H ├─── rqr_0: ──■──┤ Tdg ├ lqr_1_0: ───┤ H ├───────────────
├───┤ ┌─┴─┐└─────┘ ├───┤
lqr_1_1: ───┤ X ├─── rqr_1: ┤ X ├─────── lqr_1_1: ───┤ X ├───────────────
┌──┴───┴──┐ └───┘ ┌──┴───┴──┐┌───┐
lqr_1_2: ┤ U1(0.1) ├ + = lqr_1_2: ┤ U1(0.1) ├┤ X ├───────
└─────────┘ └─────────┘└─┬─┘┌─────┐
lqr_2_0: ─────■───── lqr_2_0: ─────■───────■──┤ Tdg ├
┌─┴─┐ ┌─┴─┐ └─────┘
lqr_2_1: ───┤ X ├─── lqr_2_1: ───┤ X ├───────────────
└───┘ └───┘
lcr_0: 0 ═══════════ lcr_0: 0 ═══════════════════════
lcr_1: 0 ═══════════ lcr_1: 0 ═══════════════════════
"""
if inplace:
dest = self
else:
dest = self.copy()
if wrap:
try:
other = other.to_gate()
except QiskitError:
other = other.to_instruction()
if not isinstance(other, QuantumCircuit):
if qubits is None:
qubits = list(range(other.num_qubits))
if clbits is None:
clbits = list(range(other.num_clbits))
if front:
dest.data.insert(0, (other, qubits, clbits))
else:
dest.append(other, qargs=qubits, cargs=clbits)
if inplace:
return None
return dest
instrs = other.data
if other.num_qubits > self.num_qubits or other.num_clbits > self.num_clbits:
raise CircuitError(
"Trying to compose with another QuantumCircuit which has more 'in' edges."
)
# number of qubits and clbits must match number in circuit or None
identity_qubit_map = dict(zip(other.qubits, self.qubits))
identity_clbit_map = dict(zip(other.clbits, self.clbits))
if qubits is None:
qubit_map = identity_qubit_map
elif len(qubits) != len(other.qubits):
raise CircuitError(
f"Number of items in qubits parameter ({len(qubits)}) does not"
f" match number of qubits in the circuit ({len(other.qubits)})."
)
else:
qubit_map = {
other.qubits[i]: (self.qubits[q] if isinstance(q, int) else q)
for i, q in enumerate(qubits)
}
if clbits is None:
clbit_map = identity_clbit_map
elif len(clbits) != len(other.clbits):
raise CircuitError(
f"Number of items in clbits parameter ({len(clbits)}) does not"
f" match number of clbits in the circuit ({len(other.clbits)})."
)
else:
clbit_map = {
other.clbits[i]: (self.clbits[c] if isinstance(c, int) else c)
for i, c in enumerate(clbits)
}
edge_map = {**qubit_map, **clbit_map} or {**identity_qubit_map, **identity_clbit_map}
mapped_instrs = []
for instr, qargs, cargs in instrs:
n_qargs = [edge_map[qarg] for qarg in qargs]
n_cargs = [edge_map[carg] for carg in cargs]
n_instr = instr.copy()
if instr.condition is not None:
from qiskit.dagcircuit import DAGCircuit # pylint: disable=cyclic-import
n_instr.condition = DAGCircuit._map_condition(edge_map, instr.condition, self.cregs)
mapped_instrs.append((n_instr, n_qargs, n_cargs))
if front:
# adjust new instrs before original ones and update all parameters
dest._data = mapped_instrs + dest._data
dest._parameter_table.clear()
for instr, _, _ in dest._data:
dest._update_parameter_table(instr)
else:
# just append new instrs and parameters
dest._data += mapped_instrs
for instr, _, _ in mapped_instrs:
dest._update_parameter_table(instr)
for gate, cals in other.calibrations.items():
dest._calibrations[gate].update(cals)
dest.global_phase += other.global_phase
if inplace:
return None
return dest
def tensor(self, other: "QuantumCircuit", inplace: bool = False) -> Optional["QuantumCircuit"]:
"""Tensor ``self`` with ``other``.
Remember that in the little-endian convention the leftmost operation will be at the bottom
of the circuit. See also
[the docs](qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html)
for more information.
.. parsed-literal::
┌────────┐ ┌─────┐ ┌─────┐
q_0: ┤ bottom ├ ⊗ q_0: ┤ top ├ = q_0: ─┤ top ├──
└────────┘ └─────┘ ┌┴─────┴─┐
q_1: ┤ bottom ├
└────────┘
Args:
other (QuantumCircuit): The other circuit to tensor this circuit with.
inplace (bool): If True, modify the object. Otherwise return composed circuit.
Examples:
.. jupyter-execute::
from qiskit import QuantumCircuit
top = QuantumCircuit(1)
top.x(0);
bottom = QuantumCircuit(2)
bottom.cry(0.2, 0, 1);
tensored = bottom.tensor(top)
print(tensored.draw())
Returns:
QuantumCircuit: The tensored circuit (returns None if inplace==True).
"""
num_qubits = self.num_qubits + other.num_qubits
num_clbits = self.num_clbits + other.num_clbits
        # If a user defined both circuits via register sizes and not with named registers
# (e.g. QuantumCircuit(2, 2)) then we have a naming collision, as the registers are by
# default called "q" resp. "c". To still allow tensoring we define new registers of the
# correct sizes.
if (
len(self.qregs) == len(other.qregs) == 1
and self.qregs[0].name == other.qregs[0].name == "q"
):
# check if classical registers are in the circuit
if num_clbits > 0:
dest = QuantumCircuit(num_qubits, num_clbits)
else:
dest = QuantumCircuit(num_qubits)
# handle case if ``measure_all`` was called on both circuits, in which case the
# registers are both named "meas"
elif (
len(self.cregs) == len(other.cregs) == 1
and self.cregs[0].name == other.cregs[0].name == "meas"
):
cr = ClassicalRegister(self.num_clbits + other.num_clbits, "meas")
dest = QuantumCircuit(*other.qregs, *self.qregs, cr)
# Now we don't have to handle any more cases arising from special implicit naming
else:
dest = QuantumCircuit(
other.qubits,
self.qubits,
other.clbits,
self.clbits,
*other.qregs,
*self.qregs,
*other.cregs,
*self.cregs,
)
# compose self onto the output, and then other
dest.compose(other, range(other.num_qubits), range(other.num_clbits), inplace=True)
dest.compose(
self,
range(other.num_qubits, num_qubits),
range(other.num_clbits, num_clbits),
inplace=True,
)
# Replace information from tensored circuit into self when inplace = True
if inplace:
self.__dict__.update(dest.__dict__)
return None
return dest
@property
def qubits(self) -> List[Qubit]:
"""
Returns a list of quantum bits in the order that the registers were added.
"""
return self._qubits
@property
def clbits(self) -> List[Clbit]:
"""
Returns a list of classical bits in the order that the registers were added.
"""
return self._clbits
@property
def ancillas(self) -> List[AncillaQubit]:
"""
Returns a list of ancilla bits in the order that the registers were added.
"""
return self._ancillas
@deprecate_function(
"The QuantumCircuit.__add__() method is being deprecated."
"Use the compose() method which is more flexible w.r.t "
"circuit register compatibility."
)
def __add__(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""Overload + to implement self.combine."""
return self.combine(rhs)
@deprecate_function(
"The QuantumCircuit.__iadd__() method is being deprecated. Use the "
"compose() (potentially with the inplace=True argument) and tensor() "
"methods which are more flexible w.r.t circuit register compatibility."
)
def __iadd__(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""Overload += to implement self.extend."""
return self.extend(rhs)
def __and__(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""Overload & to implement self.compose."""
return self.compose(rhs)
def __iand__(self, rhs: "QuantumCircuit") -> "QuantumCircuit":
"""Overload &= to implement self.compose in place."""
self.compose(rhs, inplace=True)
return self
def __xor__(self, top: "QuantumCircuit") -> "QuantumCircuit":
"""Overload ^ to implement self.tensor."""
return self.tensor(top)
def __ixor__(self, top: "QuantumCircuit") -> "QuantumCircuit":
"""Overload ^= to implement self.tensor in place."""
self.tensor(top, inplace=True)
return self
def __len__(self) -> int:
"""Return number of operations in circuit."""
return len(self._data)
@typing.overload
def __getitem__(self, item: int) -> DataElement:
...
@typing.overload
def __getitem__(self, item: slice) -> List[DataElement]:
...
def __getitem__(self, item):
"""Return indexed operation."""
return self._data[item]
@staticmethod
def cast(value: S, type_: Callable[..., T]) -> Union[S, T]:
"""Best effort to cast value to type. Otherwise, returns the value."""
try:
return type_(value)
except (ValueError, TypeError):
return value
def qbit_argument_conversion(self, qubit_representation: QubitSpecifier) -> List[Qubit]:
"""
Converts several qubit representations (such as indexes, range, etc.)
into a list of qubits.
Args:
qubit_representation (Object): representation to expand
Returns:
List(Qubit): the resolved instances of the qubits.
"""
return _bit_argument_conversion(
qubit_representation, self.qubits, self._qubit_indices, Qubit
)
def cbit_argument_conversion(self, clbit_representation: ClbitSpecifier) -> List[Clbit]:
"""
Converts several classical bit representations (such as indexes, range, etc.)
into a list of classical bits.
Args:
clbit_representation (Object): representation to expand
Returns:
List(tuple): Where each tuple is a classical bit.
"""
return _bit_argument_conversion(
clbit_representation, self.clbits, self._clbit_indices, Clbit
)
def _resolve_classical_resource(self, specifier):
"""Resolve a single classical resource specifier into a concrete resource, raising an error
if the specifier is invalid.
This is slightly different to :meth:`.cbit_argument_conversion`, because it should not
unwrap :obj:`.ClassicalRegister` instances into lists, and in general it should not allow
iterables or broadcasting. It is expected to be used as a callback for things like
:meth:`.InstructionSet.c_if` to check the validity of their arguments.
Args:
specifier (Union[Clbit, ClassicalRegister, int]): a specifier of a classical resource
present in this circuit. An ``int`` will be resolved into a :obj:`.Clbit` using the
same conventions as measurement operations on this circuit use.
Returns:
Union[Clbit, ClassicalRegister]: the resolved resource.
Raises:
CircuitError: if the resource is not present in this circuit, or if the integer index
passed is out-of-bounds.
"""
if isinstance(specifier, Clbit):
if specifier not in self._clbit_indices:
raise CircuitError(f"Clbit {specifier} is not present in this circuit.")
return specifier
if isinstance(specifier, ClassicalRegister):
# This is linear complexity for something that should be constant, but QuantumCircuit
# does not currently keep a hashmap of registers, and requires non-trivial changes to
            # how it exposes its registers publicly before such a map can be safely stored so it
# doesn't miss updates. (Jake, 2021-11-10).
if specifier not in self.cregs:
raise CircuitError(f"Register {specifier} is not present in this circuit.")
return specifier
if isinstance(specifier, int):
try:
return self._clbits[specifier]
except IndexError:
raise CircuitError(f"Classical bit index {specifier} is out-of-range.") from None
raise CircuitError(f"Unknown classical resource specifier: '{specifier}'.")
def append(
self,
instruction: Instruction,
qargs: Optional[Sequence[QubitSpecifier]] = None,
cargs: Optional[Sequence[ClbitSpecifier]] = None,
) -> InstructionSet:
"""Append one or more instructions to the end of the circuit, modifying
the circuit in place. Expands qargs and cargs.
Args:
instruction (qiskit.circuit.Instruction): Instruction instance to append
qargs (list(argument)): qubits to attach instruction to
cargs (list(argument)): clbits to attach instruction to
Returns:
qiskit.circuit.Instruction: a handle to the instruction that was just added
Raises:
CircuitError: if object passed is a subclass of Instruction
CircuitError: if object passed is neither subclass nor an instance of Instruction
"""
# Convert input to instruction
if not isinstance(instruction, Instruction) and not hasattr(instruction, "to_instruction"):
if issubclass(instruction, Instruction):
raise CircuitError(
"Object is a subclass of Instruction, please add () to "
"pass an instance of this object."
)
raise CircuitError(
"Object to append must be an Instruction or have a to_instruction() method."
)
if not isinstance(instruction, Instruction) and hasattr(instruction, "to_instruction"):
instruction = instruction.to_instruction()
if not isinstance(instruction, Instruction):
raise CircuitError("object is not an Instruction.")
# Make copy of parameterized gate instances
if hasattr(instruction, "params"):
is_parameter = any(isinstance(param, Parameter) for param in instruction.params)
if is_parameter:
instruction = copy.deepcopy(instruction)
expanded_qargs = [self.qbit_argument_conversion(qarg) for qarg in qargs or []]
expanded_cargs = [self.cbit_argument_conversion(carg) for carg in cargs or []]
if self._control_flow_scopes:
appender = self._control_flow_scopes[-1].append
requester = self._control_flow_scopes[-1].request_classical_resource
else:
appender = self._append
requester = self._resolve_classical_resource
instructions = InstructionSet(resource_requester=requester)
for qarg, carg in instruction.broadcast_arguments(expanded_qargs, expanded_cargs):
self._check_dups(qarg)
instructions.add(appender(instruction, qarg, carg), qarg, carg)
return instructions
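    # Example (sketch): ``append`` is the generic entry point that the named gate
    # methods delegate to; ``HGate``/``CXGate`` come from qiskit.circuit.library:
    #     from qiskit.circuit.library import HGate, CXGate
    #     qc = QuantumCircuit(2)
    #     qc.append(HGate(), [0])        # equivalent to qc.h(0)
    #     qc.append(CXGate(), [0, 1])    # equivalent to qc.cx(0, 1)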
def _append(
self,
instruction: Instruction,
qargs: Sequence[Qubit],
cargs: Sequence[Clbit],
) -> Instruction:
"""Append an instruction to the end of the circuit, modifying the circuit in place.
.. warning::
This is an internal fast-path function, and it is the responsibility of the caller to
ensure that all the arguments are valid; there is no error checking here. In
particular, all the qubits and clbits must already exist in the circuit and there can be
no duplicates in the list.
.. note::
This function may be used by callers other than :obj:`.QuantumCircuit` when the caller
is sure that all error-checking, broadcasting and scoping has already been performed,
and the only reference to the circuit the instructions are being appended to is within
that same function. In particular, it is not safe to call
:meth:`QuantumCircuit._append` on a circuit that is received by a function argument.
This is because :meth:`.QuantumCircuit._append` will not recognise the scoping
constructs of the control-flow builder interface.
Args:
instruction: Instruction instance to append
qargs: Qubits to attach the instruction to.
cargs: Clbits to attach the instruction to.
Returns:
Instruction: a handle to the instruction that was just added
:meta public:
"""
self._data.append((instruction, qargs, cargs))
self._update_parameter_table(instruction)
# mark as normal circuit if a new instruction is added
self.duration = None
self.unit = "dt"
return instruction
def _update_parameter_table(self, instruction: Instruction) -> Instruction:
for param_index, param in enumerate(instruction.params):
if isinstance(param, (ParameterExpression, QuantumCircuit)):
# Scoped constructs like the control-flow ops use QuantumCircuit as a parameter.
atomic_parameters = set(param.parameters)
else:
atomic_parameters = set()
for parameter in atomic_parameters:
if parameter in self._parameter_table:
if not self._check_dup_param_spec(
self._parameter_table[parameter], instruction, param_index
):
self._parameter_table[parameter].append((instruction, param_index))
else:
if parameter.name in self._parameter_table.get_names():
raise CircuitError(f"Name conflict on adding parameter: {parameter.name}")
self._parameter_table[parameter] = [(instruction, param_index)]
# clear cache if new parameter is added
self._parameters = None
return instruction
def _check_dup_param_spec(
self,
parameter_spec_list: Sequence[Tuple[Instruction, int]],
instruction: Instruction,
param_index: int,
) -> bool:
for spec in parameter_spec_list:
if spec[0] is instruction and spec[1] == param_index:
return True
return False
def add_register(self, *regs: Union[Register, int, Sequence[Bit]]) -> None:
"""Add registers."""
if not regs:
return
if any(isinstance(reg, int) for reg in regs):
# QuantumCircuit defined without registers
if len(regs) == 1 and isinstance(regs[0], int):
# QuantumCircuit with anonymous quantum wires e.g. QuantumCircuit(2)
if regs[0] == 0:
regs = tuple()
else:
regs = (QuantumRegister(regs[0], "q"),)
elif len(regs) == 2 and all(isinstance(reg, int) for reg in regs):
# QuantumCircuit with anonymous wires e.g. QuantumCircuit(2, 3)
if regs[0] == 0:
qregs = tuple()
else:
qregs = (QuantumRegister(regs[0], "q"),)
if regs[1] == 0:
cregs = tuple()
else:
cregs = (ClassicalRegister(regs[1], "c"),)
regs = qregs + cregs
else:
raise CircuitError(
"QuantumCircuit parameters can be Registers or Integers."
" If Integers, up to 2 arguments. QuantumCircuit was called"
" with %s." % (regs,)
)
for register in regs:
if isinstance(register, Register) and any(
register.name == reg.name for reg in self.qregs + self.cregs
):
raise CircuitError('register name "%s" already exists' % register.name)
if isinstance(register, AncillaRegister):
for bit in register:
if bit not in self._qubit_indices:
self._ancillas.append(bit)
if isinstance(register, QuantumRegister):
self.qregs.append(register)
for idx, bit in enumerate(register):
if bit in self._qubit_indices:
self._qubit_indices[bit].registers.append((register, idx))
else:
self._qubits.append(bit)
self._qubit_indices[bit] = BitLocations(
len(self._qubits) - 1, [(register, idx)]
)
elif isinstance(register, ClassicalRegister):
self.cregs.append(register)
for idx, bit in enumerate(register):
if bit in self._clbit_indices:
self._clbit_indices[bit].registers.append((register, idx))
else:
self._clbits.append(bit)
self._clbit_indices[bit] = BitLocations(
len(self._clbits) - 1, [(register, idx)]
)
elif isinstance(register, list):
self.add_bits(register)
else:
raise CircuitError("expected a register")
def add_bits(self, bits: Iterable[Bit]) -> None:
"""Add Bits to the circuit."""
duplicate_bits = set(self._qubit_indices).union(self._clbit_indices).intersection(bits)
if duplicate_bits:
raise CircuitError(f"Attempted to add bits found already in circuit: {duplicate_bits}")
for bit in bits:
if isinstance(bit, AncillaQubit):
self._ancillas.append(bit)
if isinstance(bit, Qubit):
self._qubits.append(bit)
self._qubit_indices[bit] = BitLocations(len(self._qubits) - 1, [])
elif isinstance(bit, Clbit):
self._clbits.append(bit)
self._clbit_indices[bit] = BitLocations(len(self._clbits) - 1, [])
else:
raise CircuitError(
"Expected an instance of Qubit, Clbit, or "
"AncillaQubit, but was passed {}".format(bit)
)
def find_bit(self, bit: Bit) -> BitLocations:
"""Find locations in the circuit which can be used to reference a given :obj:`~Bit`.
Args:
bit (Bit): The bit to locate.
Returns:
namedtuple(int, List[Tuple(Register, int)]): A 2-tuple. The first element (``index``)
contains the index at which the ``Bit`` can be found (in either
:obj:`~QuantumCircuit.qubits`, :obj:`~QuantumCircuit.clbits`, depending on its
type). The second element (``registers``) is a list of ``(register, index)``
pairs with an entry for each :obj:`~Register` in the circuit which contains the
:obj:`~Bit` (and the index in the :obj:`~Register` at which it can be found).
Notes:
The circuit index of an :obj:`~AncillaQubit` will be its index in
:obj:`~QuantumCircuit.qubits`, not :obj:`~QuantumCircuit.ancillas`.
Raises:
CircuitError: If the supplied :obj:`~Bit` was of an unknown type.
CircuitError: If the supplied :obj:`~Bit` could not be found on the circuit.
"""
try:
if isinstance(bit, Qubit):
return self._qubit_indices[bit]
elif isinstance(bit, Clbit):
return self._clbit_indices[bit]
else:
raise CircuitError(f"Could not locate bit of unknown type: {type(bit)}")
except KeyError as err:
raise CircuitError(
f"Could not locate provided bit: {bit}. Has it been added to the QuantumCircuit?"
) from err
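    # Example (sketch): locating a register bit returns both its flat index and
    # its (register, index) pairs:
    #     qr = QuantumRegister(3, 'q')
    #     qc = QuantumCircuit(qr)
    #     qc.find_bit(qr[1])   # BitLocations(index=1, registers=[(QuantumRegister(3, 'q'), 1)])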
def _check_dups(self, qubits: Sequence[Qubit]) -> None:
"""Raise exception if list of qubits contains duplicates."""
squbits = set(qubits)
if len(squbits) != len(qubits):
raise CircuitError("duplicate qubit arguments")
def to_instruction(
self,
parameter_map: Optional[Dict[Parameter, ParameterValueType]] = None,
label: Optional[str] = None,
) -> Instruction:
"""Create an Instruction out of this circuit.
Args:
parameter_map(dict): For parameterized circuits, a mapping from
parameters in the circuit to parameters to be used in the
instruction. If None, existing circuit parameters will also
parameterize the instruction.
label (str): Optional gate label.
Returns:
qiskit.circuit.Instruction: a composite instruction encapsulating this circuit
(can be decomposed back)
"""
from qiskit.converters.circuit_to_instruction import circuit_to_instruction
return circuit_to_instruction(self, parameter_map, label=label)
def to_gate(
self,
parameter_map: Optional[Dict[Parameter, ParameterValueType]] = None,
label: Optional[str] = None,
) -> Gate:
"""Create a Gate out of this circuit.
Args:
parameter_map(dict): For parameterized circuits, a mapping from
parameters in the circuit to parameters to be used in the
gate. If None, existing circuit parameters will also
parameterize the gate.
label (str): Optional gate label.
Returns:
Gate: a composite gate encapsulating this circuit
(can be decomposed back)
"""
from qiskit.converters.circuit_to_gate import circuit_to_gate
return circuit_to_gate(self, parameter_map, label=label)
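    # Example (sketch): wrapping a circuit as a gate and reusing it elsewhere;
    # use to_instruction() instead when the circuit contains non-unitary operations:
    #     sub = QuantumCircuit(2, name='bell')
    #     sub.h(0)
    #     sub.cx(0, 1)
    #     outer = QuantumCircuit(3)
    #     outer.append(sub.to_gate(), [1, 2])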
def decompose(
self,
gates_to_decompose: Optional[
Union[Type[Gate], Sequence[Type[Gate]], Sequence[str], str]
] = None,
) -> "QuantumCircuit":
"""Call a decomposition pass on this circuit,
to decompose one level (shallow decompose).
Args:
gates_to_decompose (str or list(str)): optional subset of gates to decompose.
Defaults to all gates in circuit.
Returns:
QuantumCircuit: a circuit one level decomposed
"""
# pylint: disable=cyclic-import
from qiskit.transpiler.passes.basis.decompose import Decompose
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.converters.dag_to_circuit import dag_to_circuit
pass_ = Decompose(gates_to_decompose=gates_to_decompose)
decomposed_dag = pass_.run(circuit_to_dag(self))
return dag_to_circuit(decomposed_dag)
def _check_compatible_regs(self, rhs: "QuantumCircuit") -> None:
"""Raise exception if the circuits are defined on incompatible registers"""
list1 = self.qregs + self.cregs
list2 = rhs.qregs + rhs.cregs
for element1 in list1:
for element2 in list2:
if element2.name == element1.name:
if element1 != element2:
raise CircuitError(
"circuits are not compatible:"
f" registers {element1} and {element2} not compatible"
)
def _unique_register_name(self, prefix: str = "") -> str:
"""Generate a register name with the given prefix, which is unique within this circuit."""
used = {
reg.name[len(prefix) :]
for reg in itertools.chain(self.qregs, self.cregs)
if reg.name.startswith(prefix)
}
characters = (string.digits + string.ascii_letters) if prefix else string.ascii_letters
for parts in itertools.chain.from_iterable(
itertools.product(characters, repeat=n) for n in itertools.count(1)
):
name = "".join(parts)
if name not in used:
return prefix + name
# This isn't actually reachable because the above loop is infinite.
return prefix
def qasm(
self,
formatted: bool = False,
filename: Optional[str] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""Return OpenQASM string.
Args:
formatted (bool): Return formatted Qasm string.
filename (str): Save Qasm to file with name 'filename'.
encoding (str): Optionally specify the encoding to use for the
output file if ``filename`` is specified. By default this is
set to the system's default encoding (ie whatever
``locale.getpreferredencoding()`` returns) and can be set to
any valid codec or alias from stdlib's
`codec module <https://docs.python.org/3/library/codecs.html#standard-encodings>`__
Returns:
str: If formatted=False.
Raises:
MissingOptionalLibraryError: If pygments is not installed and ``formatted`` is
``True``.
QasmError: If circuit has free parameters.
"""
if self.num_parameters > 0:
raise QasmError("Cannot represent circuits with unbound parameters in OpenQASM 2.")
existing_gate_names = [
"barrier",
"measure",
"reset",
"u3",
"u2",
"u1",
"cx",
"id",
"u0",
"u",
"p",
"x",
"y",
"z",
"h",
"s",
"sdg",
"t",
"tdg",
"rx",
"ry",
"rz",
"sx",
"sxdg",
"cz",
"cy",
"swap",
"ch",
"ccx",
"cswap",
"crx",
"cry",
"crz",
"cu1",
"cp",
"cu3",
"csx",
"cu",
"rxx",
"rzz",
"rccx",
"rc3x",
"c3x",
"c3sx",
"c4x",
]
existing_composite_circuits = []
string_temp = self.header + "\n"
string_temp += self.extension_lib + "\n"
for register in self.qregs:
string_temp += register.qasm() + "\n"
for register in self.cregs:
string_temp += register.qasm() + "\n"
bit_labels = {
bit: "%s[%d]" % (reg.name, idx)
for reg in self.qregs + self.cregs
for (idx, bit) in enumerate(reg)
}
regless_qubits = set(self.qubits) - {bit for reg in self.qregs for bit in reg}
regless_clbits = set(self.clbits) - {bit for reg in self.cregs for bit in reg}
if regless_qubits:
register_name = self._unique_register_name("qregless_")
string_temp += f"qreg {register_name}[{len(regless_qubits)}];\n"
bit_labels.update(
{bit: f"{register_name}[{idx}]" for idx, bit in enumerate(regless_qubits)}
)
if regless_clbits:
register_name = self._unique_register_name("cregless_")
string_temp += f"creg {register_name}[{len(regless_clbits)}];\n"
bit_labels.update(
{bit: f"{register_name}[{idx}]" for idx, bit in enumerate(regless_clbits)}
)
for instruction, qargs, cargs in self._data:
if instruction.name == "measure":
qubit = qargs[0]
clbit = cargs[0]
string_temp += "{} {} -> {};\n".format(
instruction.qasm(),
bit_labels[qubit],
bit_labels[clbit],
)
else:
# Check instructions names or label are valid
if not VALID_QASM2_IDENTIFIER.fullmatch(instruction.name):
instruction = instruction.copy(name=_qasm_escape_gate_name(instruction.name))
# decompose gate using definitions if they are not defined in OpenQASM2
if (
instruction.name not in existing_gate_names
and instruction not in existing_composite_circuits
):
if instruction.name in [
existing.name for existing in existing_composite_circuits
]:
# append instruction id to name of instruction copy to make it unique
instruction = instruction.copy(name=f"{instruction.name}_{id(instruction)}")
existing_composite_circuits.append(instruction)
_add_sub_instruction_to_existing_composite_circuits(
instruction, existing_gate_names, existing_composite_circuits
)
# Insert qasm representation of the original instruction
string_temp += "{} {};\n".format(
instruction.qasm(),
",".join([bit_labels[j] for j in qargs + cargs]),
)
# insert gate definitions
string_temp = _insert_composite_gate_definition_qasm(
string_temp, existing_composite_circuits, self.extension_lib
)
if filename:
with open(filename, "w+", encoding=encoding) as file:
file.write(string_temp)
if formatted:
if not HAS_PYGMENTS:
raise MissingOptionalLibraryError(
libname="pygments>2.4",
name="formatted QASM output",
pip_install="pip install pygments",
)
code = pygments.highlight(
string_temp, OpenQASMLexer(), Terminal256Formatter(style=QasmTerminalStyle)
)
print(code)
return None
else:
return string_temp
def draw(
self,
output: Optional[str] = None,
scale: Optional[float] = None,
filename: Optional[str] = None,
style: Optional[Union[dict, str]] = None,
interactive: bool = False,
plot_barriers: bool = True,
reverse_bits: bool = False,
justify: Optional[str] = None,
vertical_compression: Optional[str] = "medium",
idle_wires: bool = True,
with_layout: bool = True,
fold: Optional[int] = None,
# The type of ax is matplotlib.axes.Axes, but this is not a fixed dependency, so cannot be
# safely forward-referenced.
ax: Optional[typing.Any] = None,
initial_state: bool = False,
cregbundle: bool = True,
):
"""Draw the quantum circuit. Use the output parameter to choose the drawing format:
**text**: ASCII art TextDrawing that can be printed in the console.
**matplotlib**: images with color rendered purely in Python.
**latex**: high-quality images compiled via latex.
**latex_source**: raw uncompiled latex output.
Args:
output (str): select the output method to use for drawing the circuit.
Valid choices are ``text``, ``mpl``, ``latex``, ``latex_source``.
By default the `text` drawer is used unless the user config file
(usually ``~/.qiskit/settings.conf``) has an alternative backend set
as the default. For example, ``circuit_drawer = latex``. If the output
kwarg is set, that backend will always be used over the default in
the user config file.
scale (float): scale of image to draw (shrink if < 1.0). Only used by
the `mpl`, `latex` and `latex_source` outputs. Defaults to 1.0.
filename (str): file path to save image to. Defaults to None.
style (dict or str): dictionary of style or file name of style json file.
This option is only used by the `mpl` or `latex` output type.
If `style` is a str, it is used as the path to a json file
which contains a style dict. The file will be opened, parsed, and
then any style elements in the dict will replace the default values
in the input dict. A file to be loaded must end in ``.json``, but
the name entered here can omit ``.json``. For example,
``style='iqx.json'`` or ``style='iqx'``.
If `style` is a dict and the ``'name'`` key is set, that name
will be used to load a json file, followed by loading the other
items in the style dict. For example, ``style={'name': 'iqx'}``.
If `style` is not a str and `name` is not a key in the style dict,
then the default value from the user config file (usually
``~/.qiskit/settings.conf``) will be used, for example,
``circuit_mpl_style = iqx``.
If none of these are set, the `default` style will be used.
The search path for style json files can be specified in the user
config, for example,
``circuit_mpl_style_path = /home/user/styles:/home/user``.
See: :class:`~qiskit.visualization.qcstyle.DefaultStyle` for more
information on the contents.
interactive (bool): when set to true, show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored. Defaults to False.
reverse_bits (bool): when set to True, reverse the bit order inside
registers for the output visualization. Defaults to False.
plot_barriers (bool): enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): options are ``left``, ``right`` or ``none``. If
anything else is supplied, it defaults to left justified. It refers
to where gates should be placed in the output circuit if there is
an option. ``none`` results in each gate being placed in its own
column.
vertical_compression (string): ``high``, ``medium`` or ``low``. It
merges the lines generated by the `text` output so the drawing
will take less vertical room. Default is ``medium``. Only used by
the `text` output, will be silently ignored otherwise.
idle_wires (bool): include idle wires (wires with no circuit elements)
in output visualization. Default is True.
with_layout (bool): include layout information, with labels on the
physical layout. Default is True.
fold (int): sets pagination. It can be disabled using -1. In `text`,
sets the length of the lines. This is useful when the drawing does
not fit in the console. If None (default), it will try to guess the
console width using ``shutil.get_terminal_size()``. However, if
running in jupyter, the default line length is set to 80 characters.
In `mpl`, it is the number of (visual) layers before folding.
Default is 25.
ax (matplotlib.axes.Axes): Only used by the `mpl` backend. An optional
Axes object to be used for the visualization output. If none is
specified, a new matplotlib Figure will be created and used.
Additionally, if specified there will be no returned Figure since
it is redundant.
initial_state (bool): optional. Adds ``|0>`` in the beginning of the wire.
Default is False.
cregbundle (bool): optional. If set True, bundle classical registers.
Default is True.
Returns:
:class:`TextDrawing` or :class:`matplotlib.figure` or :class:`PIL.Image` or
:class:`str`:
* `TextDrawing` (output='text')
A drawing that can be printed as ascii art.
* `matplotlib.figure.Figure` (output='mpl')
A matplotlib figure object for the circuit diagram.
* `PIL.Image` (output='latex')
An in-memory representation of the image of the circuit diagram.
* `str` (output='latex_source')
The LaTeX source code for visualizing the circuit diagram.
Raises:
VisualizationError: when an invalid output method is selected
ImportError: when the output methods requires non-installed libraries.
Example:
.. jupyter-execute::
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q, c)
qc.draw(output='mpl', style={'backgroundcolor': '#EEEEEE'})
"""
# pylint: disable=cyclic-import
from qiskit.visualization import circuit_drawer
return circuit_drawer(
self,
scale=scale,
filename=filename,
style=style,
output=output,
interactive=interactive,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold,
ax=ax,
initial_state=initial_state,
cregbundle=cregbundle,
)
def size(self, filter_function: Optional[callable] = lambda x: not x[0]._directive) -> int:
"""Returns total number of instructions in circuit.
Args:
filter_function (callable): a function to filter out some instructions.
Should take as input a tuple of (Instruction, list(Qubit), list(Clbit)).
By default filters out "directives", such as barrier or snapshot.
Returns:
int: Total number of gate operations.
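Example:
An illustrative sketch; the circuit below is arbitrary, and the second call passes a custom filter that also counts directives such as the barrier.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.barrier()
print(qc.size())  # 2, the barrier directive is filtered out by default
print(qc.size(lambda instr: True))  # 3, count every instruction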
"""
return sum(map(filter_function, self._data))
def depth(self, filter_function: Optional[callable] = lambda x: not x[0]._directive) -> int:
"""Return circuit depth (i.e., length of critical path).
Args:
filter_function (callable): a function to filter out some instructions.
Should take as input a tuple of (Instruction, list(Qubit), list(Clbit)).
By default filters out "directives", such as barrier or snapshot.
Returns:
int: Depth of circuit.
Notes:
The circuit depth and the DAG depth need not be the
same.
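Example:
An illustrative sketch; the gates below are arbitrary and chosen so that the two Hadamards can run in parallel.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(3)
qc.h(0)
qc.h(1)
qc.cx(0, 1)
qc.x(2)
print(qc.depth())  # 2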
"""
# Assign each bit in the circuit a unique integer
# to index into op_stack.
bit_indices = {bit: idx for idx, bit in enumerate(self.qubits + self.clbits)}
# If no bits, return 0
if not bit_indices:
return 0
# A list that holds the height of each qubit
# and classical bit.
op_stack = [0] * len(bit_indices)
# Here we are playing a modified version of
# Tetris where we stack gates, but multi-qubit
# gates or measurements have a block for each
# qubit or cbit involved, connected by a virtual
# line so that they are all stacked at the same depth.
# Conditional gates act on all cbits in the register
# they are conditioned on.
# The max stack height is the circuit depth.
for instr, qargs, cargs in self._data:
levels = []
reg_ints = []
for ind, reg in enumerate(qargs + cargs):
# Add to the stacks of the qubits and
# cbits used in the gate.
reg_ints.append(bit_indices[reg])
if filter_function((instr, qargs, cargs)):
levels.append(op_stack[reg_ints[ind]] + 1)
else:
levels.append(op_stack[reg_ints[ind]])
# Assuming here that there are no conditional
# snapshots or barriers ever.
if instr.condition:
# Controls operate over all bits of a classical register
# or over a single bit
if isinstance(instr.condition[0], Clbit):
condition_bits = [instr.condition[0]]
else:
condition_bits = instr.condition[0]
for cbit in condition_bits:
idx = bit_indices[cbit]
if idx not in reg_ints:
reg_ints.append(idx)
levels.append(op_stack[idx] + 1)
max_level = max(levels)
for ind in reg_ints:
op_stack[ind] = max_level
return max(op_stack)
def width(self) -> int:
"""Return number of qubits plus clbits in circuit.
Returns:
int: Width of circuit.
"""
return len(self.qubits) + len(self.clbits)
@property
def num_qubits(self) -> int:
"""Return number of qubits."""
return len(self.qubits)
@property
def num_ancillas(self) -> int:
"""Return the number of ancilla qubits."""
return len(self.ancillas)
@property
def num_clbits(self) -> int:
"""Return number of classical bits."""
return len(self.clbits)
# The stringified return type is because OrderedDict can't be subscripted before Python 3.9, and
# typing.OrderedDict wasn't added until 3.7.2. It can be turned into a proper type once 3.6
# support is dropped.
def count_ops(self) -> "OrderedDict[Instruction, int]":
"""Count each operation kind in the circuit.
Returns:
OrderedDict: a breakdown of how many operations of each kind, sorted by amount.
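Example:
A minimal sketch; the circuit is arbitrary and only shows the shape of the returned mapping.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
print(qc.count_ops())  # e.g. OrderedDict([('measure', 2), ('h', 1), ('cx', 1)])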
"""
count_ops: Dict[Instruction, int] = {}
for instr, _, _ in self._data:
count_ops[instr.name] = count_ops.get(instr.name, 0) + 1
return OrderedDict(sorted(count_ops.items(), key=lambda kv: kv[1], reverse=True))
def num_nonlocal_gates(self) -> int:
"""Return number of non-local gates (i.e. involving 2+ qubits).
Conditional nonlocal gates are also included.
"""
multi_qubit_gates = 0
for instr, _, _ in self._data:
if instr.num_qubits > 1 and not instr._directive:
multi_qubit_gates += 1
return multi_qubit_gates
def get_instructions(self, name: str) -> List[DataElement]:
"""Get instructions matching name.
Args:
name (str): The name of the instruction to match.
Returns:
list(tuple): list of (instruction, qargs, cargs).
"""
return [match for match in self._data if match[0].name == name]
def num_connected_components(self, unitary_only: bool = False) -> int:
"""How many non-entangled subcircuits can the circuit be factored to.
Args:
unitary_only (bool): Compute only unitary part of graph.
Returns:
int: Number of connected components in circuit.
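Example:
An illustrative sketch; the gates below are arbitrary and leave qubits 2 and 3 as separate unitary factors.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(4)
qc.h(0)
qc.cx(0, 1)  # qubits 0 and 1 become one entangled block
qc.x(2)
print(qc.num_connected_components(unitary_only=True))  # 3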
"""
# Convert registers to ints (as done in depth).
bits = self.qubits if unitary_only else (self.qubits + self.clbits)
bit_indices = {bit: idx for idx, bit in enumerate(bits)}
# Start with each qubit or cbit being its own subgraph.
sub_graphs = [[bit] for bit in range(len(bit_indices))]
num_sub_graphs = len(sub_graphs)
# Here we are traversing the gates and looking to see
# which of the sub_graphs the gate joins together.
for instr, qargs, cargs in self._data:
if unitary_only:
args = qargs
num_qargs = len(args)
else:
args = qargs + cargs
num_qargs = len(args) + (1 if instr.condition else 0)
if num_qargs >= 2 and not instr._directive:
graphs_touched = []
num_touched = 0
# Controls necessarily join all the cbits in the
# register that they use.
if not unitary_only:
for bit in instr.condition_bits:
idx = bit_indices[bit]
for k in range(num_sub_graphs):
if idx in sub_graphs[k]:
graphs_touched.append(k)
break
for item in args:
reg_int = bit_indices[item]
for k in range(num_sub_graphs):
if reg_int in sub_graphs[k]:
if k not in graphs_touched:
graphs_touched.append(k)
break
graphs_touched = list(set(graphs_touched))
num_touched = len(graphs_touched)
# If the gate touches more than one subgraph
# join those graphs together and return
# reduced number of subgraphs
if num_touched > 1:
connections = []
for idx in graphs_touched:
connections.extend(sub_graphs[idx])
_sub_graphs = []
for idx in range(num_sub_graphs):
if idx not in graphs_touched:
_sub_graphs.append(sub_graphs[idx])
_sub_graphs.append(connections)
sub_graphs = _sub_graphs
num_sub_graphs -= num_touched - 1
# Cannot go lower than one so break
if num_sub_graphs == 1:
break
return num_sub_graphs
def num_unitary_factors(self) -> int:
"""Computes the number of tensor factors in the unitary
(quantum) part of the circuit only.
"""
return self.num_connected_components(unitary_only=True)
def num_tensor_factors(self) -> int:
"""Computes the number of tensor factors in the unitary
(quantum) part of the circuit only.
Notes:
This is here for backwards compatibility, and will be
removed in a future release of Qiskit. You should call
`num_unitary_factors` instead.
"""
return self.num_unitary_factors()
def copy(self, name: Optional[str] = None) -> "QuantumCircuit":
"""Copy the circuit.
Args:
name (str): Name to be given to the copied circuit. If None, then the name stays the same.
Returns:
QuantumCircuit: a deepcopy of the current circuit, with the specified name
"""
cpy = copy.copy(self)
# copy registers correctly, in copy.copy they are only copied via reference
cpy.qregs = self.qregs.copy()
cpy.cregs = self.cregs.copy()
cpy._qubits = self._qubits.copy()
cpy._ancillas = self._ancillas.copy()
cpy._clbits = self._clbits.copy()
cpy._qubit_indices = self._qubit_indices.copy()
cpy._clbit_indices = self._clbit_indices.copy()
instr_instances = {id(instr): instr for instr, _, __ in self._data}
instr_copies = {id_: instr.copy() for id_, instr in instr_instances.items()}
cpy._parameter_table = ParameterTable(
{
param: [
(instr_copies[id(instr)], param_index)
for instr, param_index in self._parameter_table[param]
]
for param in self._parameter_table
}
)
cpy._data = [
(instr_copies[id(inst)], qargs.copy(), cargs.copy())
for inst, qargs, cargs in self._data
]
cpy._calibrations = copy.deepcopy(self._calibrations)
cpy._metadata = copy.deepcopy(self._metadata)
if name:
cpy.name = name
return cpy
def _create_creg(self, length: int, name: str) -> ClassicalRegister:
"""Creates a creg, checking if ClassicalRegister with same name exists"""
if name in [creg.name for creg in self.cregs]:
save_prefix = ClassicalRegister.prefix
ClassicalRegister.prefix = name
new_creg = ClassicalRegister(length)
ClassicalRegister.prefix = save_prefix
else:
new_creg = ClassicalRegister(length, name)
return new_creg
def _create_qreg(self, length: int, name: str) -> QuantumRegister:
"""Creates a qreg, checking if QuantumRegister with same name exists"""
if name in [qreg.name for qreg in self.qregs]:
save_prefix = QuantumRegister.prefix
QuantumRegister.prefix = name
new_qreg = QuantumRegister(length)
QuantumRegister.prefix = save_prefix
else:
new_qreg = QuantumRegister(length, name)
return new_qreg
def reset(self, qubit: QubitSpecifier) -> InstructionSet:
"""Reset the quantum bit(s) to their default state.
Args:
qubit: qubit(s) to reset.
Returns:
qiskit.circuit.InstructionSet: handle to the added instruction.
"""
return self.append(Reset(), [qubit], [])
def measure(self, qubit: QubitSpecifier, cbit: ClbitSpecifier) -> InstructionSet:
"""Measure quantum bit into classical bit (tuples).
Args:
qubit: qubit to measure.
cbit: classical bit to place the measurement in.
Returns:
qiskit.circuit.InstructionSet: handle to the added instructions.
Raises:
CircuitError: if arguments have bad format.
"""
return self.append(Measure(), [qubit], [cbit])
def measure_active(self, inplace: bool = True) -> Optional["QuantumCircuit"]:
"""Adds measurement to all non-idle qubits. Creates a new ClassicalRegister with
a size equal to the number of non-idle qubits being measured.
Returns a new circuit with measurements if `inplace=False`.
Args:
inplace (bool): All measurements inplace or return new circuit.
Returns:
QuantumCircuit: Returns circuit with measurements when `inplace = False`.
"""
from qiskit.converters.circuit_to_dag import circuit_to_dag
if inplace:
circ = self
else:
circ = self.copy()
dag = circuit_to_dag(circ)
qubits_to_measure = [qubit for qubit in circ.qubits if qubit not in dag.idle_wires()]
new_creg = circ._create_creg(len(qubits_to_measure), "measure")
circ.add_register(new_creg)
circ.barrier()
circ.measure(qubits_to_measure, new_creg)
if not inplace:
return circ
else:
return None
def measure_all(
self, inplace: bool = True, add_bits: bool = True
) -> Optional["QuantumCircuit"]:
"""Adds measurement to all qubits.
By default, adds new classical bits in a :obj:`.ClassicalRegister` to store these
measurements. If ``add_bits=False``, the results of the measurements will instead be stored
in the already existing classical bits, with qubit ``n`` being measured into classical bit
``n``.
Returns a new circuit with measurements if ``inplace=False``.
Args:
inplace (bool): All measurements inplace or return new circuit.
add_bits (bool): Whether to add new bits to store the results.
Returns:
QuantumCircuit: Returns circuit with measurements when ``inplace=False``.
Raises:
CircuitError: if ``add_bits=False`` but there are not enough classical bits.
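Example:
A minimal sketch; the Bell-state preparation below is arbitrary.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()  # adds a 2-bit "meas" register, a barrier and two measurements
print(qc.draw())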
"""
if inplace:
circ = self
else:
circ = self.copy()
if add_bits:
new_creg = circ._create_creg(len(circ.qubits), "meas")
circ.add_register(new_creg)
circ.barrier()
circ.measure(circ.qubits, new_creg)
else:
if len(circ.clbits) < len(circ.qubits):
raise CircuitError(
"The number of classical bits must be equal or greater than "
"the number of qubits."
)
circ.barrier()
circ.measure(circ.qubits, circ.clbits[0 : len(circ.qubits)])
if not inplace:
return circ
else:
return None
def remove_final_measurements(self, inplace: bool = True) -> Optional["QuantumCircuit"]:
"""Removes final measurements and barriers on all qubits if they are present.
Deletes the classical registers that were used to store the values from these measurements
that become idle as a result of this operation, and deletes classical bits that are
referenced only by removed registers, or that aren't referenced at all but have
become idle as a result of this operation.
Measurements and barriers are considered final if they are
followed by no other operations (aside from other measurements or barriers.)
Args:
inplace (bool): All measurements removed inplace or return new circuit.
Returns:
QuantumCircuit: Returns the resulting circuit when ``inplace=False``, else None.
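Example:
An illustrative sketch; the circuit below is arbitrary, and the measurements added by ``measure_all`` are removed again.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
bare = qc.remove_final_measurements(inplace=False)
print(bare.draw())  # barrier, measurements and the now-idle "meas" register are gone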
"""
# pylint: disable=cyclic-import
from qiskit.transpiler.passes import RemoveFinalMeasurements
from qiskit.converters import circuit_to_dag
if inplace:
circ = self
else:
circ = self.copy()
dag = circuit_to_dag(circ)
remove_final_meas = RemoveFinalMeasurements()
new_dag = remove_final_meas.run(dag)
kept_cregs = set(new_dag.cregs.values())
kept_clbits = set(new_dag.clbits)
# Filter only cregs/clbits still in new DAG, preserving original circuit order
cregs_to_add = [creg for creg in circ.cregs if creg in kept_cregs]
clbits_to_add = [clbit for clbit in circ._clbits if clbit in kept_clbits]
# Clear cregs and clbits
circ.cregs = []
circ._clbits = []
circ._clbit_indices = {}
# We must add the clbits first to preserve the original circuit
# order. This way, add_register never adds clbits and just
# creates registers that point to them.
circ.add_bits(clbits_to_add)
for creg in cregs_to_add:
circ.add_register(creg)
# Clear instruction info
circ.data.clear()
circ._parameter_table.clear()
# Set circ instructions to match the new DAG
for node in new_dag.topological_op_nodes():
# Get arguments for classical condition (if any)
inst = node.op.copy()
circ.append(inst, node.qargs, node.cargs)
if not inplace:
return circ
else:
return None
@staticmethod
def from_qasm_file(path: str) -> "QuantumCircuit":
"""Take in a QASM file and generate a QuantumCircuit object.
Args:
path (str): Path to the file for a QASM program
Return:
QuantumCircuit: The QuantumCircuit object for the input QASM
"""
qasm = Qasm(filename=path)
return _circuit_from_qasm(qasm)
@staticmethod
def from_qasm_str(qasm_str: str) -> "QuantumCircuit":
"""Take in a QASM string and generate a QuantumCircuit object.
Args:
qasm_str (str): A QASM program string
Return:
QuantumCircuit: The QuantumCircuit object for the input QASM
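Example:
A minimal sketch; the OpenQASM 2 program below is arbitrary and is written on a single line (the parser treats newlines as ordinary whitespace) so that no triple-quoted string is needed here.
.. jupyter-execute::
from qiskit import QuantumCircuit
program = 'OPENQASM 2.0; include "qelib1.inc"; qreg q[2]; creg c[2]; h q[0]; cx q[0],q[1]; measure q -> c;'
qc = QuantumCircuit.from_qasm_str(program)
print(qc.draw())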
"""
qasm = Qasm(data=qasm_str)
return _circuit_from_qasm(qasm)
@property
def global_phase(self) -> ParameterValueType:
"""Return the global phase of the circuit in radians."""
return self._global_phase
@global_phase.setter
def global_phase(self, angle: ParameterValueType):
"""Set the phase of the circuit.
Args:
angle (float, ParameterExpression): radians
"""
if isinstance(angle, ParameterExpression) and angle.parameters:
self._global_phase = angle
else:
# Set the phase to the [0, 2π) interval
angle = float(angle)
if not angle:
self._global_phase = 0
else:
self._global_phase = angle % (2 * np.pi)
@property
def parameters(self) -> ParameterView:
"""Convenience function to get the parameters defined in the parameter table."""
# parameters from gates
if self._parameters is None:
unsorted = self._unsorted_parameters()
self._parameters = sorted(unsorted, key=functools.cmp_to_key(_compare_parameters))
# return as parameter view, which implements the set and list interface
return ParameterView(self._parameters)
@property
def num_parameters(self) -> int:
"""Convenience function to get the number of parameter objects in the circuit."""
return len(self._unsorted_parameters())
def _unsorted_parameters(self) -> Set[Parameter]:
"""Efficiently get all parameters in the circuit, without any sorting overhead."""
parameters = set(self._parameter_table)
if isinstance(self.global_phase, ParameterExpression):
parameters.update(self.global_phase.parameters)
return parameters
def assign_parameters(
self,
parameters: Union[Mapping[Parameter, ParameterValueType], Sequence[ParameterValueType]],
inplace: bool = False,
) -> Optional["QuantumCircuit"]:
"""Assign parameters to new parameters or values.
The keys of the parameter dictionary must be Parameter instances in the current circuit. The
values of the dictionary can either be numeric values or new parameter objects.
The values can be assigned to the current circuit object or to a copy of it.
Args:
parameters (dict or iterable): Either a dictionary or iterable specifying the new
parameter values. If a dict, it specifies the mapping from ``current_parameter`` to
``new_parameter``, where ``new_parameter`` can be a new parameter object or a
numeric value. If an iterable, the elements are assigned to the existing parameters
in the order of ``QuantumCircuit.parameters``.
inplace (bool): If False, a copy of the circuit with the bound parameters is
returned. If True the circuit instance itself is modified.
Raises:
CircuitError: If parameters is a dict and contains parameters not present in the
circuit.
ValueError: If parameters is a list/array and the length mismatches the number of free
parameters in the circuit.
Returns:
Optional(QuantumCircuit): A copy of the circuit with bound parameters, if
``inplace`` is False, otherwise None.
Examples:
Create a parameterized circuit and assign the parameters in-place.
.. jupyter-execute::
from qiskit.circuit import QuantumCircuit, Parameter
circuit = QuantumCircuit(2)
params = [Parameter('A'), Parameter('B'), Parameter('C')]
circuit.ry(params[0], 0)
circuit.crx(params[1], 0, 1)
print('Original circuit:')
print(circuit.draw())
circuit.assign_parameters({params[0]: params[2]}, inplace=True)
print('Assigned in-place:')
print(circuit.draw())
Bind the values out-of-place and get a copy of the original circuit.
.. jupyter-execute::
from qiskit.circuit import QuantumCircuit, ParameterVector
circuit = QuantumCircuit(2)
params = ParameterVector('P', 2)
circuit.ry(params[0], 0)
circuit.crx(params[1], 0, 1)
bound_circuit = circuit.assign_parameters({params[0]: 1, params[1]: 2})
print('Bound circuit:')
print(bound_circuit.draw())
print('The original circuit is unchanged:')
print(circuit.draw())
"""
# replace in self or in a copy depending on the value of inplace
if inplace:
bound_circuit = self
else:
bound_circuit = self.copy()
self._increment_instances()
bound_circuit._name_update()
if isinstance(parameters, dict):
# unroll the parameter dictionary (needed if e.g. it contains a ParameterVector)
unrolled_param_dict = self._unroll_param_dict(parameters)
unsorted_parameters = self._unsorted_parameters()
# check that all param_dict items are in the _parameter_table for this circuit
params_not_in_circuit = [
param_key
for param_key in unrolled_param_dict
if param_key not in unsorted_parameters
]
if len(params_not_in_circuit) > 0:
raise CircuitError(
"Cannot bind parameters ({}) not present in the circuit.".format(
", ".join(map(str, params_not_in_circuit))
)
)
# replace the parameters with a new Parameter ("substitute") or numeric value ("bind")
for parameter, value in unrolled_param_dict.items():
bound_circuit._assign_parameter(parameter, value)
else:
if len(parameters) != self.num_parameters:
raise ValueError(
"Mismatching number of values and parameters. For partial binding "
"please pass a dictionary of {parameter: value} pairs."
)
# use a copy of the parameters, to ensure we don't change the contents of
# self.parameters while iterating over them
fixed_parameters_copy = self.parameters.copy()
for i, value in enumerate(parameters):
bound_circuit._assign_parameter(fixed_parameters_copy[i], value)
return None if inplace else bound_circuit
def bind_parameters(
self, values: Union[Mapping[Parameter, float], Sequence[float]]
) -> "QuantumCircuit":
"""Assign numeric parameters to values yielding a new circuit.
To assign new Parameter objects or bind the values in-place, without yielding a new
circuit, use the :meth:`assign_parameters` method.
Args:
values (dict or iterable): {parameter: value, ...} or [value1, value2, ...]
Raises:
CircuitError: If values is a dict and contains parameters not present in the circuit.
TypeError: If values contains a ParameterExpression.
Returns:
QuantumCircuit: copy of self with assignment substitution.
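Example:
A minimal sketch; the parameter name ``theta`` and the value 3.14 are arbitrary.
.. jupyter-execute::
from qiskit.circuit import Parameter, QuantumCircuit
theta = Parameter('theta')
qc = QuantumCircuit(1)
qc.rx(theta, 0)
bound = qc.bind_parameters({theta: 3.14})
print(bound.draw())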
"""
if isinstance(values, dict):
if any(isinstance(value, ParameterExpression) for value in values.values()):
raise TypeError(
"Found ParameterExpression in values; use assign_parameters() instead."
)
return self.assign_parameters(values)
else:
if any(isinstance(value, ParameterExpression) for value in values):
raise TypeError(
"Found ParameterExpression in values; use assign_parameters() instead."
)
return self.assign_parameters(values)
def _unroll_param_dict(
self, value_dict: Mapping[Parameter, ParameterValueType]
) -> Dict[Parameter, ParameterValueType]:
unrolled_value_dict: Dict[Parameter, ParameterValueType] = {}
for (param, value) in value_dict.items():
if isinstance(param, ParameterVector):
if not len(param) == len(value):
raise CircuitError(
"ParameterVector {} has length {}, which "
"differs from value list {} of "
"len {}".format(param, len(param), value, len(value))
)
unrolled_value_dict.update(zip(param, value))
# pass anything else except number through. error checking is done in assign_parameter
elif isinstance(param, (ParameterExpression, str)) or param is None:
unrolled_value_dict[param] = value
return unrolled_value_dict
def _assign_parameter(self, parameter: Parameter, value: ParameterValueType) -> None:
"""Update this circuit where instances of ``parameter`` are replaced by ``value``, which
can be either a numeric value or a new parameter expression.
Args:
parameter (ParameterExpression): Parameter to be bound
value (Union(ParameterExpression, float, int)): A numeric or parametric expression to
replace instances of ``parameter``.
Raises:
RuntimeError: if some internal logic error has caused the circuit instruction sequence
and the parameter table to become out of sync, and the table now contains a
reference to a value that cannot be assigned.
"""
# parameter might be in global phase only
if parameter in self._parameter_table.keys():
for instr, param_index in self._parameter_table[parameter]:
assignee = instr.params[param_index]
# Normal ParameterExpression.
if isinstance(assignee, ParameterExpression):
new_param = assignee.assign(parameter, value)
# if fully bound, validate
if len(new_param.parameters) == 0:
instr.params[param_index] = instr.validate_parameter(new_param)
else:
instr.params[param_index] = new_param
self._rebind_definition(instr, parameter, value)
# Scoped block of a larger instruction.
elif isinstance(assignee, QuantumCircuit):
# It's possible that someone may re-use a loop body, so we need to mutate the
# parameter vector with a new circuit, rather than mutating the body.
instr.params[param_index] = assignee.assign_parameters({parameter: value})
else:
raise RuntimeError( # pragma: no cover
"The ParameterTable or data of this QuantumCircuit have become out-of-sync."
f"\nParameterTable: {self._parameter_table}"
f"\nData: {self.data}"
)
if isinstance(value, ParameterExpression):
entry = self._parameter_table.pop(parameter)
for new_parameter in value.parameters:
if new_parameter in self._parameter_table:
self._parameter_table[new_parameter].extend(entry)
else:
self._parameter_table[new_parameter] = entry
else:
del self._parameter_table[parameter] # clear evaluated expressions
if (
isinstance(self.global_phase, ParameterExpression)
and parameter in self.global_phase.parameters
):
self.global_phase = self.global_phase.assign(parameter, value)
# clear parameter cache
self._parameters = None
self._assign_calibration_parameters(parameter, value)
def _assign_calibration_parameters(
self, parameter: Parameter, value: ParameterValueType
) -> None:
"""Update parameterized pulse gate calibrations, if there are any which contain
``parameter``. This updates the calibration mapping as well as the gate definition
``Schedule``s, which also may contain ``parameter``.
"""
new_param: ParameterValueType
for cals in self.calibrations.values():
for (qubit, cal_params), schedule in copy.copy(cals).items():
if any(
isinstance(p, ParameterExpression) and parameter in p.parameters
for p in cal_params
):
del cals[(qubit, cal_params)]
new_cal_params = []
for p in cal_params:
if isinstance(p, ParameterExpression) and parameter in p.parameters:
new_param = p.assign(parameter, value)
if not new_param.parameters:
new_param = float(new_param)
new_cal_params.append(new_param)
else:
new_cal_params.append(p)
schedule.assign_parameters({parameter: value})
cals[(qubit, tuple(new_cal_params))] = schedule
def _rebind_definition(
self, instruction: Instruction, parameter: Parameter, value: ParameterValueType
) -> None:
if instruction._definition:
for op, _, _ in instruction._definition:
for idx, param in enumerate(op.params):
if isinstance(param, ParameterExpression) and parameter in param.parameters:
if isinstance(value, ParameterExpression):
op.params[idx] = param.subs({parameter: value})
else:
op.params[idx] = param.bind({parameter: value})
self._rebind_definition(op, parameter, value)
def barrier(self, *qargs: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.Barrier`. If qargs is empty, applies to all qubits in the
circuit.
Returns:
qiskit.circuit.InstructionSet: handle to the added instructions.
"""
from .barrier import Barrier
qubits: List[QubitSpecifier] = []
if not qargs:  # no qargs given -> apply to all qubits
qubits.extend(self.qubits)
for qarg in qargs:
if isinstance(qarg, QuantumRegister):
qubits.extend([qarg[j] for j in range(qarg.size)])
elif isinstance(qarg, list):
qubits.extend(qarg)
elif isinstance(qarg, range):
qubits.extend(list(qarg))
elif isinstance(qarg, slice):
qubits.extend(self.qubits[qarg])
else:
qubits.append(qarg)
return self.append(Barrier(len(qubits)), qubits, [])
def delay(
self,
duration: ParameterValueType,
qarg: Optional[QubitSpecifier] = None,
unit: str = "dt",
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.Delay`. If qarg is None, applies to all qubits.
When applying to multiple qubits, delays with the same duration will be created.
Args:
duration (int or float or ParameterExpression): duration of the delay.
qarg (Object): qubit argument to apply this delay.
unit (str): unit of the duration. Supported units: 's', 'ms', 'us', 'ns', 'ps', 'dt'.
Default is ``dt``, i.e. integer time unit depending on the target backend.
Returns:
qiskit.circuit.InstructionSet: handle to the added instructions.
Raises:
CircuitError: if arguments have bad format.
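Example:
An illustrative sketch; the durations and units below are arbitrary.
.. jupyter-execute::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.x(0)
qc.delay(100, 0, unit='ns')  # 100 ns delay on qubit 0 only
qc.delay(16)  # 16 dt delay on every qubit
print(qc.draw())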
"""
qubits: List[QubitSpecifier] = []
if qarg is None: # -> apply delays to all qubits
for q in self.qubits:
qubits.append(q)
else:
if isinstance(qarg, QuantumRegister):
qubits.extend([qarg[j] for j in range(qarg.size)])
elif isinstance(qarg, list):
qubits.extend(qarg)
elif isinstance(qarg, (range, tuple)):
qubits.extend(list(qarg))
elif isinstance(qarg, slice):
qubits.extend(self.qubits[qarg])
else:
qubits.append(qarg)
instructions = InstructionSet(resource_requester=self._resolve_classical_resource)
for q in qubits:
inst = (Delay(duration, unit), [q], [])
self.append(*inst)
instructions.add(*inst)
return instructions
def h(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.HGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.h import HGate
return self.append(HGate(), [qubit], [])
def ch(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CHGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.h import CHGate
return self.append(
CHGate(label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def i(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.IGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.i import IGate
return self.append(IGate(), [qubit], [])
def id(self, qubit: QubitSpecifier) -> InstructionSet: # pylint: disable=invalid-name
"""Apply :class:`~qiskit.circuit.library.IGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
See also:
QuantumCircuit.i: the same function.
"""
return self.i(qubit)
def ms(self, theta: ParameterValueType, qubits: Sequence[QubitSpecifier]) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.generalized_gates.gms.MSGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
qubits: The qubits to apply the gate to.
Returns:
A handle to the instructions created.
"""
# pylint: disable=cyclic-import
from .library.generalized_gates.gms import MSGate
return self.append(MSGate(len(qubits), theta), qubits)
def p(self, theta: ParameterValueType, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.PhaseGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.p import PhaseGate
return self.append(PhaseGate(theta), [qubit], [])
def cp(
self,
theta: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CPhaseGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.p import CPhaseGate
return self.append(
CPhaseGate(theta, label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def mcp(
self,
lam: ParameterValueType,
control_qubits: Sequence[QubitSpecifier],
target_qubit: QubitSpecifier,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.MCPhaseGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
lam: The angle of the rotation.
control_qubits: The qubits used as the controls.
target_qubit: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.p import MCPhaseGate
num_ctrl_qubits = len(control_qubits)
return self.append(
MCPhaseGate(lam, num_ctrl_qubits), control_qubits[:] + [target_qubit], []
)
def r(
self, theta: ParameterValueType, phi: ParameterValueType, qubit: QubitSpecifier
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
phi: The angle of the axis of rotation in the x-y plane.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.r import RGate
return self.append(RGate(theta, phi), [qubit], [])
def rv(
self,
vx: ParameterValueType,
vy: ParameterValueType,
vz: ParameterValueType,
qubit: QubitSpecifier,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RVGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Rotation around an arbitrary rotation axis :math:`v`, where :math:`|v|` is the angle of
rotation in radians.
Args:
vx: x-component of the rotation axis.
vy: y-component of the rotation axis.
vz: z-component of the rotation axis.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.generalized_gates.rv import RVGate
return self.append(RVGate(vx, vy, vz), [qubit], [])
def rccx(
self,
control_qubit1: QubitSpecifier,
control_qubit2: QubitSpecifier,
target_qubit: QubitSpecifier,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RCCXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit1: The qubit(s) used as the first control.
control_qubit2: The qubit(s) used as the second control.
target_qubit: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.x import RCCXGate
return self.append(RCCXGate(), [control_qubit1, control_qubit2, target_qubit], [])
def rcccx(
self,
control_qubit1: QubitSpecifier,
control_qubit2: QubitSpecifier,
control_qubit3: QubitSpecifier,
target_qubit: QubitSpecifier,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RC3XGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit1: The qubit(s) used as the first control.
control_qubit2: The qubit(s) used as the second control.
control_qubit3: The qubit(s) used as the third control.
target_qubit: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.x import RC3XGate
return self.append(
RC3XGate(), [control_qubit1, control_qubit2, control_qubit3, target_qubit], []
)
def rx(
self, theta: ParameterValueType, qubit: QubitSpecifier, label: Optional[str] = None
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
label: The string label of the gate in the circuit.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rx import RXGate
return self.append(RXGate(theta, label=label), [qubit], [])
def crx(
self,
theta: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CRXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rx import CRXGate
return self.append(
CRXGate(theta, label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def rxx(
self, theta: ParameterValueType, qubit1: QubitSpecifier, qubit2: QubitSpecifier
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RXXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
qubit1: The qubit(s) to apply the gate to.
qubit2: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rxx import RXXGate
return self.append(RXXGate(theta), [qubit1, qubit2], [])
def ry(
self, theta: ParameterValueType, qubit: QubitSpecifier, label: Optional[str] = None
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RYGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
label: The string label of the gate in the circuit.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.ry import RYGate
return self.append(RYGate(theta, label=label), [qubit], [])
def cry(
self,
theta: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CRYGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.ry import CRYGate
return self.append(
CRYGate(theta, label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def ryy(
self, theta: ParameterValueType, qubit1: QubitSpecifier, qubit2: QubitSpecifier
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RYYGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The rotation angle of the gate.
qubit1: The qubit(s) to apply the gate to.
qubit2: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.ryy import RYYGate
return self.append(RYYGate(theta), [qubit1, qubit2], [])
def rz(self, phi: ParameterValueType, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RZGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
phi: The rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rz import RZGate
return self.append(RZGate(phi), [qubit], [])
def crz(
self,
theta: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CRZGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The angle of the rotation.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rz import CRZGate
return self.append(
CRZGate(theta, label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def rzx(
self, theta: ParameterValueType, qubit1: QubitSpecifier, qubit2: QubitSpecifier
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RZXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The rotation angle of the gate.
qubit1: The qubit(s) to apply the gate to.
qubit2: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rzx import RZXGate
return self.append(RZXGate(theta), [qubit1, qubit2], [])
def rzz(
self, theta: ParameterValueType, qubit1: QubitSpecifier, qubit2: QubitSpecifier
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.RZZGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The rotation angle of the gate.
qubit1: The qubit(s) to apply the gate to.
qubit2: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.rzz import RZZGate
return self.append(RZZGate(theta), [qubit1, qubit2], [])
def ecr(self, qubit1: QubitSpecifier, qubit2: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.ECRGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit1, qubit2: The qubits to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.ecr import ECRGate
return self.append(ECRGate(), [qubit1, qubit2], [])
def s(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.SGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.s import SGate
return self.append(SGate(), [qubit], [])
def sdg(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.SdgGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.s import SdgGate
return self.append(SdgGate(), [qubit], [])
def swap(self, qubit1: QubitSpecifier, qubit2: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.SwapGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit1, qubit2: The qubits to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.swap import SwapGate
return self.append(SwapGate(), [qubit1, qubit2], [])
def iswap(self, qubit1: QubitSpecifier, qubit2: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.iSwapGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit1, qubit2: The qubits to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.iswap import iSwapGate
return self.append(iSwapGate(), [qubit1, qubit2], [])
def cswap(
self,
control_qubit: QubitSpecifier,
target_qubit1: QubitSpecifier,
target_qubit2: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CSwapGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit1: The qubit(s) targeted by the gate.
target_qubit2: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.swap import CSwapGate
return self.append(
CSwapGate(label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit1, target_qubit2],
[],
)
def fredkin(
self,
control_qubit: QubitSpecifier,
target_qubit1: QubitSpecifier,
target_qubit2: QubitSpecifier,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CSwapGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit1: The qubit(s) targeted by the gate.
target_qubit2: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
See Also:
QuantumCircuit.cswap: the same function with a different name.
"""
return self.cswap(control_qubit, target_qubit1, target_qubit2)
def sx(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.SXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.sx import SXGate
return self.append(SXGate(), [qubit], [])
def sxdg(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.SXdgGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.sx import SXdgGate
return self.append(SXdgGate(), [qubit], [])
def csx(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.CSXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.sx import CSXGate
return self.append(
CSXGate(label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit],
[],
)
def t(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.TGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.t import TGate
return self.append(TGate(), [qubit], [])
def tdg(self, qubit: QubitSpecifier) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.TdgGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.t import TdgGate
return self.append(TdgGate(), [qubit], [])
def u(
self,
theta: ParameterValueType,
phi: ParameterValueType,
lam: ParameterValueType,
qubit: QubitSpecifier,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.UGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
phi: The :math:`\phi` rotation angle of the gate.
lam: The :math:`\lambda` rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u import UGate
return self.append(UGate(theta, phi, lam), [qubit], [])
def cu(
self,
theta: ParameterValueType,
phi: ParameterValueType,
lam: ParameterValueType,
gamma: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CUGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
phi: The :math:`\phi` rotation angle of the gate.
lam: The :math:`\lambda` rotation angle of the gate.
gamma: The global phase applied to the U gate, if applied.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u import CUGate
return self.append(
CUGate(theta, phi, lam, gamma, label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit],
[],
)
@deprecate_function(
"The QuantumCircuit.u1 method is deprecated as of "
"0.16.0. It will be removed no earlier than 3 months "
"after the release date. You should use the "
"QuantumCircuit.p method instead, which acts "
"identically."
)
def u1(self, theta: ParameterValueType, qubit: QubitSpecifier) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.U1Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u1 import U1Gate
return self.append(U1Gate(theta), [qubit], [])
@deprecate_function(
"The QuantumCircuit.cu1 method is deprecated as of "
"0.16.0. It will be removed no earlier than 3 months "
"after the release date. You should use the "
"QuantumCircuit.cp method instead, which acts "
"identically."
)
def cu1(
self,
theta: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CU1Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u1 import CU1Gate
return self.append(
CU1Gate(theta, label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
@deprecate_function(
"The QuantumCircuit.mcu1 method is deprecated as of "
"0.16.0. It will be removed no earlier than 3 months "
"after the release date. You should use the "
"QuantumCircuit.mcp method instead, which acts "
"identically."
)
def mcu1(
self,
lam: ParameterValueType,
control_qubits: Sequence[QubitSpecifier],
target_qubit: QubitSpecifier,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.MCU1Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
lam: The :math:`\lambda` rotation angle of the gate.
control_qubits: The qubits used as the controls.
target_qubit: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u1 import MCU1Gate
num_ctrl_qubits = len(control_qubits)
return self.append(MCU1Gate(lam, num_ctrl_qubits), control_qubits[:] + [target_qubit], [])
@deprecate_function(
"The QuantumCircuit.u2 method is deprecated as of "
"0.16.0. It will be removed no earlier than 3 months "
"after the release date. You can use the general 1-"
"qubit gate QuantumCircuit.u instead: u2(φ,λ) = "
"u(π/2, φ, λ). Alternatively, you can decompose it in"
"terms of QuantumCircuit.p and QuantumCircuit.sx: "
"u2(φ,λ) = p(π/2+φ) sx p(λ-π/2) (1 pulse on hardware)."
)
def u2(
self, phi: ParameterValueType, lam: ParameterValueType, qubit: QubitSpecifier
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.U2Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
phi: The :math:`\phi` rotation angle of the gate.
lam: The :math:`\lambda` rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u2 import U2Gate
return self.append(U2Gate(phi, lam), [qubit], [])
@deprecate_function(
"The QuantumCircuit.u3 method is deprecated as of 0.16.0. It will be "
"removed no earlier than 3 months after the release date. You should use "
"QuantumCircuit.u instead, which acts identically. Alternatively, you can "
"decompose u3 in terms of QuantumCircuit.p and QuantumCircuit.sx: "
"u3(ϴ,φ,λ) = p(φ+π) sx p(ϴ+π) sx p(λ) (2 pulses on hardware)."
)
def u3(
self,
theta: ParameterValueType,
phi: ParameterValueType,
lam: ParameterValueType,
qubit: QubitSpecifier,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.U3Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
phi: The :math:`\phi` rotation angle of the gate.
lam: The :math:`\lambda` rotation angle of the gate.
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u3 import U3Gate
return self.append(U3Gate(theta, phi, lam), [qubit], [])
@deprecate_function(
"The QuantumCircuit.cu3 method is deprecated as of 0.16.0. It will be "
"removed no earlier than 3 months after the release date. You should "
"use the QuantumCircuit.cu method instead, where "
"cu3(ϴ,φ,λ) = cu(ϴ,φ,λ,0)."
)
def cu3(
self,
theta: ParameterValueType,
phi: ParameterValueType,
lam: ParameterValueType,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CU3Gate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
theta: The :math:`\theta` rotation angle of the gate.
phi: The :math:`\phi` rotation angle of the gate.
lam: The :math:`\lambda` rotation angle of the gate.
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.u3 import CU3Gate
return self.append(
CU3Gate(theta, phi, lam, label=label, ctrl_state=ctrl_state),
[control_qubit, target_qubit],
[],
)
def x(self, qubit: QubitSpecifier, label: Optional[str] = None) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.XGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
label: The string label of the gate in the circuit.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.x import XGate
return self.append(XGate(label=label), [qubit], [])
def cx(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.x import CXGate
return self.append(
CXGate(label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def cnot(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the control.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
See Also:
QuantumCircuit.cx: the same function with a different name.
"""
return self.cx(control_qubit, target_qubit, label, ctrl_state)
def dcx(self, qubit1: QubitSpecifier, qubit2: QubitSpecifier) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.DCXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit1: The qubit(s) to apply the gate to.
qubit2: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.dcx import DCXGate
return self.append(DCXGate(), [qubit1, qubit2], [])
def ccx(
self,
control_qubit1: QubitSpecifier,
control_qubit2: QubitSpecifier,
target_qubit: QubitSpecifier,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CCXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit1: The qubit(s) used as the first control.
control_qubit2: The qubit(s) used as the second control.
target_qubit: The qubit(s) targeted by the gate.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.x import CCXGate
return self.append(
CCXGate(ctrl_state=ctrl_state),
[control_qubit1, control_qubit2, target_qubit],
[],
)
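    # Illustrative usage sketch (assumed three-qubit circuit): a Toffoli gate with
    # qubits 0 and 1 as controls and qubit 2 as target.
    #
    #     from qiskit import QuantumCircuit
    #
    #     qc = QuantumCircuit(3)
    #     qc.ccx(0, 1, 2)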
def toffoli(
self,
control_qubit1: QubitSpecifier,
control_qubit2: QubitSpecifier,
target_qubit: QubitSpecifier,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CCXGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit1: The qubit(s) used as the first control.
control_qubit2: The qubit(s) used as the second control.
target_qubit: The qubit(s) targeted by the gate.
Returns:
A handle to the instructions created.
See Also:
QuantumCircuit.ccx: the same gate with a different name.
"""
return self.ccx(control_qubit1, control_qubit2, target_qubit)
def mcx(
self,
control_qubits: Sequence[QubitSpecifier],
target_qubit: QubitSpecifier,
ancilla_qubits: Optional[Union[QubitSpecifier, Sequence[QubitSpecifier]]] = None,
mode: str = "noancilla",
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.MCXGate`.
The multi-cX gate can be implemented using different techniques, which use different numbers
of ancilla qubits and have varying circuit depth. These modes are:
- 'noancilla': Requires 0 ancilla qubits.
- 'recursion': Requires 1 ancilla qubit if more than 4 controls are used, otherwise 0.
        - 'v-chain': Requires 2 fewer ancilla qubits than the number of control qubits.
        - 'v-chain-dirty': Requires the same number of ancillas as 'v-chain', but the
          ancillas may be dirty (in an arbitrary initial state); the circuit will be longer.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubits: The qubits used as the controls.
target_qubit: The qubit(s) targeted by the gate.
ancilla_qubits: The qubits used as the ancillae, if the mode requires them.
mode: The choice of mode, explained further above.
Returns:
A handle to the instructions created.
Raises:
ValueError: if the given mode is not known, or if too few ancilla qubits are passed.
AttributeError: if no ancilla qubits are passed, but some are needed.
"""
from .library.standard_gates.x import MCXGrayCode, MCXRecursive, MCXVChain
num_ctrl_qubits = len(control_qubits)
available_implementations = {
"noancilla": MCXGrayCode(num_ctrl_qubits),
"recursion": MCXRecursive(num_ctrl_qubits),
"v-chain": MCXVChain(num_ctrl_qubits, False),
"v-chain-dirty": MCXVChain(num_ctrl_qubits, dirty_ancillas=True),
# outdated, previous names
"advanced": MCXRecursive(num_ctrl_qubits),
"basic": MCXVChain(num_ctrl_qubits, dirty_ancillas=False),
"basic-dirty-ancilla": MCXVChain(num_ctrl_qubits, dirty_ancillas=True),
}
# check ancilla input
if ancilla_qubits:
_ = self.qbit_argument_conversion(ancilla_qubits)
try:
gate = available_implementations[mode]
except KeyError as ex:
all_modes = list(available_implementations.keys())
raise ValueError(
f"Unsupported mode ({mode}) selected, choose one of {all_modes}"
) from ex
if hasattr(gate, "num_ancilla_qubits") and gate.num_ancilla_qubits > 0:
required = gate.num_ancilla_qubits
if ancilla_qubits is None:
raise AttributeError(f"No ancillas provided, but {required} are needed!")
# convert ancilla qubits to a list if they were passed as int or qubit
if not hasattr(ancilla_qubits, "__len__"):
ancilla_qubits = [ancilla_qubits]
if len(ancilla_qubits) < required:
actually = len(ancilla_qubits)
raise ValueError(f"At least {required} ancillas required, but {actually} given.")
# size down if too many ancillas were provided
ancilla_qubits = ancilla_qubits[:required]
else:
ancilla_qubits = []
return self.append(gate, control_qubits[:] + [target_qubit] + ancilla_qubits[:], [])
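    # Illustrative usage sketch (assumed register sizes): 'noancilla' needs no
    # ancillas, while 'v-chain' needs len(control_qubits) - 2 of them.
    #
    #     from qiskit import QuantumCircuit
    #
    #     qc = QuantumCircuit(7)
    #     qc.mcx([0, 1, 2, 3], 4)                          # 'noancilla' mode
    #     qc.mcx([0, 1, 2, 3], 4, [5, 6], mode="v-chain")  # 2 ancillas for 4 controls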
def mct(
self,
control_qubits: Sequence[QubitSpecifier],
target_qubit: QubitSpecifier,
ancilla_qubits: Optional[Union[QubitSpecifier, Sequence[QubitSpecifier]]] = None,
mode: str = "noancilla",
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.MCXGate`.
The multi-cX gate can be implemented using different techniques, which use different numbers
of ancilla qubits and have varying circuit depth. These modes are:
- 'noancilla': Requires 0 ancilla qubits.
- 'recursion': Requires 1 ancilla qubit if more than 4 controls are used, otherwise 0.
        - 'v-chain': Requires 2 fewer ancilla qubits than the number of control qubits.
        - 'v-chain-dirty': Requires the same number of ancillas as 'v-chain', but the
          ancillas may be dirty (in an arbitrary initial state); the circuit will be longer.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubits: The qubits used as the controls.
target_qubit: The qubit(s) targeted by the gate.
ancilla_qubits: The qubits used as the ancillae, if the mode requires them.
mode: The choice of mode, explained further above.
Returns:
A handle to the instructions created.
Raises:
ValueError: if the given mode is not known, or if too few ancilla qubits are passed.
AttributeError: if no ancilla qubits are passed, but some are needed.
See Also:
QuantumCircuit.mcx: the same gate with a different name.
"""
return self.mcx(control_qubits, target_qubit, ancilla_qubits, mode)
def y(self, qubit: QubitSpecifier) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.YGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.y import YGate
return self.append(YGate(), [qubit], [])
def cy(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CYGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the controls.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.y import CYGate
return self.append(
CYGate(label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def z(self, qubit: QubitSpecifier) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.ZGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
qubit: The qubit(s) to apply the gate to.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.z import ZGate
return self.append(ZGate(), [qubit], [])
def cz(
self,
control_qubit: QubitSpecifier,
target_qubit: QubitSpecifier,
label: Optional[str] = None,
ctrl_state: Optional[Union[str, int]] = None,
) -> InstructionSet:
r"""Apply :class:`~qiskit.circuit.library.CZGate`.
For the full matrix form of this gate, see the underlying gate documentation.
Args:
control_qubit: The qubit(s) used as the controls.
target_qubit: The qubit(s) targeted by the gate.
label: The string label of the gate in the circuit.
ctrl_state:
The control state in decimal, or as a bitstring (e.g. '1'). Defaults to controlling
on the '1' state.
Returns:
A handle to the instructions created.
"""
from .library.standard_gates.z import CZGate
return self.append(
CZGate(label=label, ctrl_state=ctrl_state), [control_qubit, target_qubit], []
)
def pauli(
self,
pauli_string: str,
qubits: Sequence[QubitSpecifier],
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.library.PauliGate`.
Args:
pauli_string: A string representing the Pauli operator to apply, e.g. 'XX'.
qubits: The qubits to apply this gate to.
Returns:
A handle to the instructions created.
"""
from qiskit.circuit.library.generalized_gates.pauli import PauliGate
return self.append(PauliGate(pauli_string), qubits, [])
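    # Illustrative usage sketch (assumed two-qubit circuit): applying the Pauli
    # operator 'XZ'; the string follows Qiskit's little-endian qubit ordering.
    #
    #     from qiskit import QuantumCircuit
    #
    #     qc = QuantumCircuit(2)
    #     qc.pauli("XZ", [0, 1])  # Z on qubit 0, X on qubit 1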
def _push_scope(
self,
qubits: Iterable[Qubit] = (),
clbits: Iterable[Clbit] = (),
registers: Iterable[Register] = (),
allow_jumps: bool = True,
):
"""Add a scope for collecting instructions into this circuit.
This should only be done by the control-flow context managers, which will handle cleaning up
after themselves at the end as well.
Args:
qubits: Any qubits that this scope should automatically use.
clbits: Any clbits that this scope should automatically use.
allow_jumps: Whether this scope allows jumps to be used within it.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.builder import ControlFlowBuilderBlock
# Chain resource requests so things like registers added to inner scopes via conditions are
# requested in the outer scope as well.
if self._control_flow_scopes:
resource_requester = self._control_flow_scopes[-1].request_classical_resource
else:
resource_requester = self._resolve_classical_resource
self._control_flow_scopes.append(
ControlFlowBuilderBlock(
qubits,
clbits,
resource_requester=resource_requester,
registers=registers,
allow_jumps=allow_jumps,
)
)
def _pop_scope(self) -> "qiskit.circuit.controlflow.builder.ControlFlowBuilderBlock":
"""Finish a scope used in the control-flow builder interface, and return it to the caller.
This should only be done by the control-flow context managers, since they naturally
synchronise the creation and deletion of stack elements."""
return self._control_flow_scopes.pop()
def _peek_previous_instruction_in_scope(
self,
) -> Tuple[Instruction, Sequence[Qubit], Sequence[Clbit]]:
"""Return the instruction 3-tuple of the most recent instruction in the current scope, even
if that scope is currently under construction.
This function is only intended for use by the control-flow ``if``-statement builders, which
may need to modify a previous instruction."""
if self._control_flow_scopes:
return self._control_flow_scopes[-1].peek()
if not self._data:
raise CircuitError("This circuit contains no instructions.")
return self._data[-1]
def _pop_previous_instruction_in_scope(
self,
) -> Tuple[Instruction, Sequence[Qubit], Sequence[Clbit]]:
"""Return the instruction 3-tuple of the most recent instruction in the current scope, even
if that scope is currently under construction, and remove it from that scope.
This function is only intended for use by the control-flow ``if``-statement builders, which
may need to replace a previous instruction with another.
"""
if self._control_flow_scopes:
return self._control_flow_scopes[-1].pop()
if not self._data:
raise CircuitError("This circuit contains no instructions.")
instruction, qubits, clbits = self._data.pop()
self._update_parameter_table_on_instruction_removal(instruction)
return instruction, qubits, clbits
def _update_parameter_table_on_instruction_removal(self, instruction: Instruction) -> None:
"""Update the :obj:`.ParameterTable` of this circuit given that an instance of the given
``instruction`` has just been removed from the circuit.
.. note::
This does not account for the possibility for the same instruction instance being added
more than once to the circuit. At the time of writing (2021-11-17, main commit 271a82f)
there is a defensive ``deepcopy`` of parameterised instructions inside
:meth:`.QuantumCircuit.append`, so this should be safe. Trying to account for it would
involve adding a potentially quadratic-scaling loop to check each entry in ``data``.
"""
atomic_parameters = set()
for parameter in instruction.params:
if isinstance(parameter, (ParameterExpression, QuantumCircuit)):
atomic_parameters.update(parameter.parameters)
for atomic_parameter in atomic_parameters:
entries = self._parameter_table[atomic_parameter]
new_entries = [
(entry_instruction, entry_index)
for entry_instruction, entry_index in entries
if entry_instruction is not instruction
]
if not new_entries:
del self._parameter_table[atomic_parameter]
# Invalidate cache.
self._parameters = None
else:
self._parameter_table[atomic_parameter] = new_entries
@typing.overload
def while_loop(
self,
condition: Tuple[Union[ClassicalRegister, Clbit], int],
body: None,
qubits: None,
clbits: None,
*,
label: Optional[str],
) -> "qiskit.circuit.controlflow.while_loop.WhileLoopContext":
...
@typing.overload
def while_loop(
self,
condition: Tuple[Union[ClassicalRegister, Clbit], int],
body: "QuantumCircuit",
qubits: Sequence[QubitSpecifier],
clbits: Sequence[ClbitSpecifier],
*,
label: Optional[str],
) -> InstructionSet:
...
def while_loop(self, condition, body=None, qubits=None, clbits=None, *, label=None):
"""Create a ``while`` loop on this circuit.
There are two forms for calling this function. If called with all its arguments (with the
possible exception of ``label``), it will create a
:obj:`~qiskit.circuit.controlflow.WhileLoopOp` with the given ``body``. If ``body`` (and
``qubits`` and ``clbits``) are *not* passed, then this acts as a context manager, which
will automatically build a :obj:`~qiskit.circuit.controlflow.WhileLoopOp` when the scope
finishes. In this form, you do not need to keep track of the qubits or clbits you are
using, because the scope will handle it for you.
Example usage::
from qiskit.circuit import QuantumCircuit, Clbit, Qubit
bits = [Qubit(), Qubit(), Clbit()]
qc = QuantumCircuit(bits)
with qc.while_loop((bits[2], 0)):
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
Args:
condition (Tuple[Union[ClassicalRegister, Clbit], int]): An equality condition to be
checked prior to executing ``body``. The left-hand side of the condition must be a
:obj:`~ClassicalRegister` or a :obj:`~Clbit`, and the right-hand side must be an
integer or boolean.
body (Optional[QuantumCircuit]): The loop body to be repeatedly executed. Omit this to
use the context-manager mode.
qubits (Optional[Sequence[Qubit]]): The circuit qubits over which the loop body should
be run. Omit this to use the context-manager mode.
clbits (Optional[Sequence[Clbit]]): The circuit clbits over which the loop body should
be run. Omit this to use the context-manager mode.
label (Optional[str]): The string label of the instruction in the circuit.
Returns:
InstructionSet or WhileLoopContext: If used in context-manager mode, then this should be
used as a ``with`` resource, which will infer the block content and operands on exit.
If the full form is used, then this returns a handle to the instructions created.
Raises:
CircuitError: if an incorrect calling convention is used.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.while_loop import WhileLoopOp, WhileLoopContext
if body is None:
if qubits is not None or clbits is not None:
raise CircuitError(
"When using 'while_loop' as a context manager,"
" you cannot pass qubits or clbits."
)
return WhileLoopContext(self, condition, label=label)
elif qubits is None or clbits is None:
raise CircuitError(
"When using 'while_loop' with a body, you must pass qubits and clbits."
)
return self.append(WhileLoopOp(condition, body, label), qubits, clbits)
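    # Illustrative usage sketch (assumed example circuit): the explicit-body form,
    # passing the loop body, qubits and clbits directly instead of using the
    # context manager shown in the docstring above.
    #
    #     from qiskit.circuit import QuantumCircuit
    #
    #     body = QuantumCircuit(2, 1)
    #     body.h(0)
    #     body.cx(0, 1)
    #     body.measure(0, 0)
    #
    #     qc = QuantumCircuit(2, 1)
    #     qc.while_loop((qc.clbits[0], 0), body, [0, 1], [0])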
@typing.overload
def for_loop(
self,
indexset: Iterable[int],
loop_parameter: Optional[Parameter],
body: None,
qubits: None,
clbits: None,
*,
label: Optional[str],
) -> "qiskit.circuit.controlflow.for_loop.ForLoopContext":
...
@typing.overload
def for_loop(
self,
indexset: Iterable[int],
loop_parameter: Union[Parameter, None],
body: "QuantumCircuit",
qubits: Sequence[QubitSpecifier],
clbits: Sequence[ClbitSpecifier],
*,
label: Optional[str],
) -> InstructionSet:
...
def for_loop(
self, indexset, loop_parameter=None, body=None, qubits=None, clbits=None, *, label=None
):
"""Create a ``for`` loop on this circuit.
There are two forms for calling this function. If called with all its arguments (with the
possible exception of ``label``), it will create a
:obj:`~qiskit.circuit.controlflow.ForLoopOp` with the given ``body``. If ``body`` (and
``qubits`` and ``clbits``) are *not* passed, then this acts as a context manager, which,
when entered, provides a loop variable (unless one is given, in which case it will be
reused) and will automatically build a :obj:`~qiskit.circuit.controlflow.ForLoopOp` when the
scope finishes. In this form, you do not need to keep track of the qubits or clbits you are
using, because the scope will handle it for you.
For example::
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 1)
with qc.for_loop(range(5)) as i:
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
qc.break_loop().c_if(0, True)
Args:
indexset (Iterable[int]): A collection of integers to loop over. Always necessary.
loop_parameter (Optional[Parameter]): The parameter used within ``body`` to which
the values from ``indexset`` will be assigned. In the context-manager form, if this
argument is not supplied, then a loop parameter will be allocated for you and
returned as the value of the ``with`` statement. This will only be bound into the
circuit if it is used within the body.
If this argument is ``None`` in the manual form of this method, ``body`` will be
repeated once for each of the items in ``indexset`` but their values will be
ignored.
body (Optional[QuantumCircuit]): The loop body to be repeatedly executed. Omit this to
use the context-manager mode.
qubits (Optional[Sequence[QubitSpecifier]]): The circuit qubits over which the loop body
should be run. Omit this to use the context-manager mode.
clbits (Optional[Sequence[ClbitSpecifier]]): The circuit clbits over which the loop body
should be run. Omit this to use the context-manager mode.
label (Optional[str]): The string label of the instruction in the circuit.
Returns:
InstructionSet or ForLoopContext: depending on the call signature, either a context
manager for creating the for loop (it will automatically be added to the circuit at the
end of the block), or an :obj:`~InstructionSet` handle to the appended loop operation.
Raises:
CircuitError: if an incorrect calling convention is used.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.for_loop import ForLoopOp, ForLoopContext
if body is None:
if qubits is not None or clbits is not None:
raise CircuitError(
"When using 'for_loop' as a context manager, you cannot pass qubits or clbits."
)
return ForLoopContext(self, indexset, loop_parameter, label=label)
elif qubits is None or clbits is None:
raise CircuitError(
"When using 'for_loop' with a body, you must pass qubits and clbits."
)
return self.append(ForLoopOp(indexset, loop_parameter, body, label), qubits, clbits)
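    # Illustrative usage sketch (assumed example circuit): the explicit-body form,
    # sweeping a Parameter over the values in ``indexset``.
    #
    #     from qiskit.circuit import QuantumCircuit, Parameter
    #
    #     theta = Parameter("theta")
    #     body = QuantumCircuit(1)
    #     body.rx(theta, 0)
    #
    #     qc = QuantumCircuit(1)
    #     qc.for_loop(range(3), theta, body, [0], [])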
@typing.overload
def if_test(
self,
condition: Tuple[Union[ClassicalRegister, Clbit], int],
true_body: None,
qubits: None,
clbits: None,
*,
label: Optional[str],
) -> "qiskit.circuit.controlflow.if_else.IfContext":
...
@typing.overload
def if_test(
self,
condition: Tuple[Union[ClassicalRegister, Clbit], int],
true_body: "QuantumCircuit",
qubits: Sequence[QubitSpecifier],
clbits: Sequence[ClbitSpecifier],
*,
label: Optional[str] = None,
) -> InstructionSet:
...
def if_test(
self,
condition,
true_body=None,
qubits=None,
clbits=None,
*,
label=None,
):
"""Create an ``if`` statement on this circuit.
There are two forms for calling this function. If called with all its arguments (with the
possible exception of ``label``), it will create a
:obj:`~qiskit.circuit.controlflow.IfElseOp` with the given ``true_body``, and there will be
no branch for the ``false`` condition (see also the :meth:`.if_else` method). However, if
``true_body`` (and ``qubits`` and ``clbits``) are *not* passed, then this acts as a context
manager, which can be used to build ``if`` statements. The return value of the ``with``
statement is a chainable context manager, which can be used to create subsequent ``else``
blocks. In this form, you do not need to keep track of the qubits or clbits you are using,
because the scope will handle it for you.
For example::
from qiskit.circuit import QuantumCircuit, Qubit, Clbit
bits = [Qubit(), Qubit(), Qubit(), Clbit(), Clbit()]
qc = QuantumCircuit(bits)
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 1)
with qc.if_test((bits[3], 0)) as else_:
qc.x(2)
with else_:
qc.h(2)
qc.z(2)
Args:
condition (Tuple[Union[ClassicalRegister, Clbit], int]): A condition to be evaluated at
circuit runtime which, if true, will trigger the evaluation of ``true_body``. Can be
specified as either a tuple of a ``ClassicalRegister`` to be tested for equality
with a given ``int``, or as a tuple of a ``Clbit`` to be compared to either a
``bool`` or an ``int``.
true_body (Optional[QuantumCircuit]): The circuit body to be run if ``condition`` is
true.
qubits (Optional[Sequence[QubitSpecifier]]): The circuit qubits over which the if/else
should be run.
clbits (Optional[Sequence[ClbitSpecifier]]): The circuit clbits over which the if/else
should be run.
label (Optional[str]): The string label of the instruction in the circuit.
Returns:
InstructionSet or IfContext: depending on the call signature, either a context
manager for creating the ``if`` block (it will automatically be added to the circuit at
the end of the block), or an :obj:`~InstructionSet` handle to the appended conditional
operation.
Raises:
CircuitError: If the provided condition references Clbits outside the
enclosing circuit.
CircuitError: if an incorrect calling convention is used.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.if_else import IfElseOp, IfContext
condition = (self._resolve_classical_resource(condition[0]), condition[1])
if true_body is None:
if qubits is not None or clbits is not None:
raise CircuitError(
"When using 'if_test' as a context manager, you cannot pass qubits or clbits."
)
# We can only allow jumps if we're in a loop block, but the default path (no scopes)
# also allows adding jumps to support the more verbose internal mode.
in_loop = bool(self._control_flow_scopes and self._control_flow_scopes[-1].allow_jumps)
return IfContext(self, condition, in_loop=in_loop, label=label)
elif qubits is None or clbits is None:
raise CircuitError("When using 'if_test' with a body, you must pass qubits and clbits.")
return self.append(IfElseOp(condition, true_body, None, label), qubits, clbits)
def if_else(
self,
condition: Union[
Tuple[ClassicalRegister, int],
Tuple[Clbit, int],
Tuple[Clbit, bool],
],
true_body: "QuantumCircuit",
false_body: "QuantumCircuit",
qubits: Sequence[QubitSpecifier],
clbits: Sequence[ClbitSpecifier],
label: Optional[str] = None,
) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.controlflow.IfElseOp`.
.. note::
This method does not have an associated context-manager form, because it is already
handled by the :meth:`.if_test` method. You can use the ``else`` part of that with
something such as::
from qiskit.circuit import QuantumCircuit, Qubit, Clbit
bits = [Qubit(), Qubit(), Clbit()]
qc = QuantumCircuit(bits)
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
with qc.if_test((bits[2], 0)) as else_:
qc.h(0)
with else_:
qc.x(0)
Args:
condition: A condition to be evaluated at circuit runtime which,
if true, will trigger the evaluation of ``true_body``. Can be
specified as either a tuple of a ``ClassicalRegister`` to be
tested for equality with a given ``int``, or as a tuple of a
``Clbit`` to be compared to either a ``bool`` or an ``int``.
true_body: The circuit body to be run if ``condition`` is true.
false_body: The circuit to be run if ``condition`` is false.
qubits: The circuit qubits over which the if/else should be run.
clbits: The circuit clbits over which the if/else should be run.
label: The string label of the instruction in the circuit.
Raises:
CircuitError: If the provided condition references Clbits outside the
enclosing circuit.
Returns:
A handle to the instruction created.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.if_else import IfElseOp
condition = (self._resolve_classical_resource(condition[0]), condition[1])
return self.append(IfElseOp(condition, true_body, false_body, label), qubits, clbits)
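    # Illustrative usage sketch (assumed example circuit): the direct form with
    # explicit true/false bodies; both bodies must act on the same numbers of
    # qubits and clbits.
    #
    #     from qiskit.circuit import QuantumCircuit
    #
    #     true_body = QuantumCircuit(1, 1)
    #     true_body.x(0)
    #     false_body = QuantumCircuit(1, 1)
    #     false_body.h(0)
    #
    #     qc = QuantumCircuit(2, 1)
    #     qc.measure(0, 0)
    #     qc.if_else((qc.clbits[0], 1), true_body, false_body, [1], [0])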
def break_loop(self) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.controlflow.BreakLoopOp`.
.. warning::
If you are using the context-manager "builder" forms of :meth:`.if_test`,
:meth:`.for_loop` or :meth:`.while_loop`, you can only call this method if you are
within a loop context, because otherwise the "resource width" of the operation cannot be
determined. This would quickly lead to invalid circuits, and so if you are trying to
construct a reusable loop body (without the context managers), you must also use the
non-context-manager form of :meth:`.if_test` and :meth:`.if_else`. Take care that the
:obj:`.BreakLoopOp` instruction must span all the resources of its containing loop, not
just the immediate scope.
Returns:
A handle to the instruction created.
Raises:
CircuitError: if this method was called within a builder context, but not contained
within a loop.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.break_loop import BreakLoopOp, BreakLoopPlaceholder
if self._control_flow_scopes:
operation = BreakLoopPlaceholder()
resources = operation.placeholder_resources()
return self.append(operation, resources.qubits, resources.clbits)
return self.append(BreakLoopOp(self.num_qubits, self.num_clbits), self.qubits, self.clbits)
def continue_loop(self) -> InstructionSet:
"""Apply :class:`~qiskit.circuit.controlflow.ContinueLoopOp`.
.. warning::
If you are using the context-manager "builder" forms of :meth:`.if_test`,
:meth:`.for_loop` or :meth:`.while_loop`, you can only call this method if you are
within a loop context, because otherwise the "resource width" of the operation cannot be
determined. This would quickly lead to invalid circuits, and so if you are trying to
construct a reusable loop body (without the context managers), you must also use the
non-context-manager form of :meth:`.if_test` and :meth:`.if_else`. Take care that the
:obj:`.ContinueLoopOp` instruction must span all the resources of its containing loop,
not just the immediate scope.
Returns:
A handle to the instruction created.
Raises:
CircuitError: if this method was called within a builder context, but not contained
within a loop.
"""
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.continue_loop import ContinueLoopOp, ContinueLoopPlaceholder
if self._control_flow_scopes:
operation = ContinueLoopPlaceholder()
resources = operation.placeholder_resources()
return self.append(operation, resources.qubits, resources.clbits)
return self.append(
ContinueLoopOp(self.num_qubits, self.num_clbits), self.qubits, self.clbits
)
def add_calibration(
self,
gate: Union[Gate, str],
qubits: Sequence[int],
# Schedule has the type `qiskit.pulse.Schedule`, but `qiskit.pulse` cannot be imported
# while this module is, and so Sphinx will not accept a forward reference to it. Sphinx
# needs the types available at runtime, whereas mypy will accept it, because it handles the
# type checking by static analysis.
schedule,
params: Optional[Sequence[ParameterValueType]] = None,
) -> None:
"""Register a low-level, custom pulse definition for the given gate.
Args:
gate (Union[Gate, str]): Gate information.
            qubits (Union[int, Tuple[int]]): List of qubits the calibration applies to.
schedule (Schedule): Schedule information.
params (Optional[List[Union[float, Parameter]]]): A list of parameters.
Raises:
Exception: if the gate is of type string and params is None.
"""
if isinstance(gate, Gate):
self._calibrations[gate.name][(tuple(qubits), tuple(gate.params))] = schedule
else:
self._calibrations[gate][(tuple(qubits), tuple(params or []))] = schedule
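    # Illustrative usage sketch (assumed, made-up pulse parameters, not a real
    # backend calibration): registering a custom schedule for ``h`` on qubit 0.
    #
    #     from qiskit import QuantumCircuit, pulse
    #
    #     with pulse.build(name="custom_h_q0") as h_sched:
    #         pulse.play(pulse.Gaussian(duration=160, amp=0.1, sigma=40),
    #                    pulse.DriveChannel(0))
    #
    #     qc = QuantumCircuit(1)
    #     qc.h(0)
    #     qc.add_calibration("h", [0], h_sched)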
# Functions only for scheduled circuits
def qubit_duration(self, *qubits: Union[Qubit, int]) -> float:
"""Return the duration between the start and stop time of the first and last instructions,
excluding delays, over the supplied qubits. Its time unit is ``self.unit``.
Args:
*qubits: Qubits within ``self`` to include.
Returns:
            The duration between the first start and last stop time of non-delay instructions.
"""
return self.qubit_stop_time(*qubits) - self.qubit_start_time(*qubits)
def qubit_start_time(self, *qubits: Union[Qubit, int]) -> float:
"""Return the start time of the first instruction, excluding delays,
over the supplied qubits. Its time unit is ``self.unit``.
Return 0 if there are no instructions over qubits
Args:
*qubits: Qubits within ``self`` to include. Integers are allowed for qubits, indicating
indices of ``self.qubits``.
Returns:
            The start time of the first instruction, excluding delays, over the qubits.
Raises:
CircuitError: if ``self`` is a not-yet scheduled circuit.
"""
if self.duration is None:
# circuit has only delays, this is kind of scheduled
for inst, _, _ in self.data:
if not isinstance(inst, Delay):
raise CircuitError(
"qubit_start_time undefined. Circuit must be scheduled first."
)
return 0
qubits = [self.qubits[q] if isinstance(q, int) else q for q in qubits]
starts = {q: 0 for q in qubits}
dones = {q: False for q in qubits}
for inst, qargs, _ in self.data:
for q in qubits:
if q in qargs:
if isinstance(inst, Delay):
if not dones[q]:
starts[q] += inst.duration
else:
dones[q] = True
if len(qubits) == len([done for done in dones.values() if done]): # all done
return min(start for start in starts.values())
return 0 # If there are no instructions over bits
def qubit_stop_time(self, *qubits: Union[Qubit, int]) -> float:
"""Return the stop time of the last instruction, excluding delays, over the supplied qubits.
Its time unit is ``self.unit``.
Return 0 if there are no instructions over qubits
Args:
*qubits: Qubits within ``self`` to include. Integers are allowed for qubits, indicating
indices of ``self.qubits``.
Returns:
            The stop time of the last instruction, excluding delays, over the qubits.
Raises:
CircuitError: if ``self`` is a not-yet scheduled circuit.
"""
if self.duration is None:
# circuit has only delays, this is kind of scheduled
for inst, _, _ in self.data:
if not isinstance(inst, Delay):
raise CircuitError(
"qubit_stop_time undefined. Circuit must be scheduled first."
)
return 0
qubits = [self.qubits[q] if isinstance(q, int) else q for q in qubits]
stops = {q: self.duration for q in qubits}
dones = {q: False for q in qubits}
for inst, qargs, _ in reversed(self.data):
for q in qubits:
if q in qargs:
if isinstance(inst, Delay):
if not dones[q]:
stops[q] -= inst.duration
else:
dones[q] = True
if len(qubits) == len([done for done in dones.values() if done]): # all done
return max(stop for stop in stops.values())
return 0 # If there are no instructions over bits
def _circuit_from_qasm(qasm: Qasm) -> "QuantumCircuit":
# pylint: disable=cyclic-import
from qiskit.converters import ast_to_dag
from qiskit.converters import dag_to_circuit
ast = qasm.parse()
dag = ast_to_dag(ast)
return dag_to_circuit(dag)
def _standard_compare(value1, value2):
if value1 < value2:
return -1
if value1 > value2:
return 1
return 0
def _compare_parameters(param1: Parameter, param2: Parameter) -> int:
if isinstance(param1, ParameterVectorElement) and isinstance(param2, ParameterVectorElement):
# if they belong to a vector with the same name, sort by index
if param1.vector.name == param2.vector.name:
return _standard_compare(param1.index, param2.index)
# else sort by name
return _standard_compare(param1.name, param2.name)
def _add_sub_instruction_to_existing_composite_circuits(
instruction: Instruction,
existing_gate_names: List[str],
existing_composite_circuits: List[Instruction],
) -> None:
"""Recursively add undefined sub-instructions in the definition of the given
    instruction to the ``existing_composite_circuits`` list.
"""
for sub_instruction, _, _ in instruction.definition:
# Check instructions names are valid
if not VALID_QASM2_IDENTIFIER.fullmatch(sub_instruction.name):
sub_instruction = sub_instruction.copy(
name=_qasm_escape_gate_name(sub_instruction.name)
)
if (
sub_instruction.name not in existing_gate_names
and sub_instruction not in existing_composite_circuits
):
existing_composite_circuits.insert(0, sub_instruction)
_add_sub_instruction_to_existing_composite_circuits(
sub_instruction, existing_gate_names, existing_composite_circuits
)
def _qasm_escape_gate_name(name: str) -> str:
"""Returns a valid OpenQASM gate identifier"""
# Replace all non-ASCII-word characters with the underscore.
escaped_name = re.sub(r"\W", "_", name, flags=re.ASCII)
if not escaped_name or escaped_name[0] not in string.ascii_lowercase:
# Add an arbitrary, guaranteed-to-be-valid prefix.
escaped_name = "gate_" + escaped_name
return escaped_name
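# Illustrative behaviour sketch (assumed inputs): non-word characters become
# underscores, and a "gate_" prefix is added when the name does not already start
# with a lowercase ASCII letter.
#
#     _qasm_escape_gate_name("rx(pi/2)")  # -> "rx_pi_2_"
#     _qasm_escape_gate_name("2QGate")    # -> "gate_2QGate"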
def _get_composite_circuit_qasm_from_instruction(instruction: Instruction) -> str:
"""Returns OpenQASM string composite circuit given an instruction.
The given instruction should be the result of composite_circuit.to_instruction()."""
if instruction.definition is None:
raise ValueError(f'Instruction "{instruction.name}" is not defined.')
gate_parameters = ",".join(["param%i" % num for num in range(len(instruction.params))])
qubit_parameters = ",".join(["q%i" % num for num in range(instruction.num_qubits)])
composite_circuit_gates = ""
definition = instruction.definition
definition_bit_labels = {
bit: idx for bits in (definition.qubits, definition.clbits) for idx, bit in enumerate(bits)
}
for sub_instruction, qargs, _ in definition:
if not VALID_QASM2_IDENTIFIER.fullmatch(sub_instruction.name):
sub_instruction = sub_instruction.copy(
name=_qasm_escape_gate_name(sub_instruction.name)
)
gate_qargs = ",".join(
["q%i" % index for index in [definition_bit_labels[qubit] for qubit in qargs]]
)
composite_circuit_gates += f"{sub_instruction.qasm()} {gate_qargs}; "
if composite_circuit_gates:
composite_circuit_gates = composite_circuit_gates.rstrip(" ")
if gate_parameters:
qasm_string = "gate {}({}) {} {{ {} }}".format(
instruction.name,
gate_parameters,
qubit_parameters,
composite_circuit_gates,
)
else:
qasm_string = "gate {} {} {{ {} }}".format(
instruction.name,
qubit_parameters,
composite_circuit_gates,
)
return qasm_string
def _insert_composite_gate_definition_qasm(
string_temp: str, existing_composite_circuits: List[Instruction], extension_lib: str
) -> str:
"""Insert composite gate definition QASM code right after extension library in the header"""
gate_definition_string = ""
# Generate gate definition string
for instruction in existing_composite_circuits:
if hasattr(instruction, "_qasm_definition"):
qasm_string = instruction._qasm_definition
else:
qasm_string = _get_composite_circuit_qasm_from_instruction(instruction)
gate_definition_string += "\n" + qasm_string
string_temp = string_temp.replace(extension_lib, f"{extension_lib}{gate_definition_string}")
return string_temp
def _bit_argument_conversion(specifier, bit_sequence, bit_set, type_):
"""Get the list of bits referred to by the specifier ``specifier``.
Valid types for ``specifier`` are integers, bits of the correct type (as given in ``type_``), or
iterables of one of those two scalar types. Integers are interpreted as indices into the
sequence ``bit_sequence``. All allowed bits must be in ``bit_set`` (which should implement
fast lookup), which is assumed to contain the same bits as ``bit_sequence``.
Returns:
List[Bit]: a list of the specified bits from ``bits``.
Raises:
CircuitError: if an incorrect type or index is encountered, if the same bit is specified
more than once, or if the specifier is to a bit not in the ``bit_set``.
"""
# The duplication between this function and `_bit_argument_conversion_scalar` is so that fast
# paths return as quickly as possible, and all valid specifiers will resolve without needing to
# try/catch exceptions (which is too slow for inner-loop code).
if isinstance(specifier, type_):
if specifier in bit_set:
return [specifier]
raise CircuitError(f"Bit '{specifier}' is not in the circuit.")
if isinstance(specifier, (int, np.integer)):
try:
return [bit_sequence[specifier]]
except IndexError as ex:
raise CircuitError(
f"Index {specifier} out of range for size {len(bit_sequence)}."
) from ex
# Slices can't raise IndexError - they just return an empty list.
if isinstance(specifier, slice):
return bit_sequence[specifier]
try:
return [
_bit_argument_conversion_scalar(index, bit_sequence, bit_set, type_)
for index in specifier
]
except TypeError as ex:
message = (
f"Incorrect bit type: expected '{type_.__name__}' but got '{type(specifier).__name__}'"
if isinstance(specifier, Bit)
else f"Invalid bit index: '{specifier}' of type '{type(specifier)}'"
)
raise CircuitError(message) from ex
def _bit_argument_conversion_scalar(specifier, bit_sequence, bit_set, type_):
if isinstance(specifier, type_):
if specifier in bit_set:
return specifier
raise CircuitError(f"Bit '{specifier}' is not in the circuit.")
if isinstance(specifier, (int, np.integer)):
try:
return bit_sequence[specifier]
except IndexError as ex:
raise CircuitError(
f"Index {specifier} out of range for size {len(bit_sequence)}."
) from ex
message = (
f"Incorrect bit type: expected '{type_.__name__}' but got '{type(specifier).__name__}'"
if isinstance(specifier, Bit)
else f"Invalid bit index: '{specifier}' of type '{type(specifier)}'"
)
raise CircuitError(message)
| 39.135764 | 100 | 0.597489 |
795128014c9de92d174d1f4345ed699b13341265 | 414 | py | Python | app/services/cli/settings.py | criticallycode/zima | cd38cac1c0c33b362d110ae28deba3828daa3f4a | ["Apache-2.0"] | null | null | null | app/services/cli/settings.py | criticallycode/zima | cd38cac1c0c33b362d110ae28deba3828daa3f4a | ["Apache-2.0"] | null | null | null | app/services/cli/settings.py | criticallycode/zima | cd38cac1c0c33b362d110ae28deba3828daa3f4a | ["Apache-2.0"] | null | null | null |
"""
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
#-------------------------------------------------------------------------------
# Global settings
#-------------------------------------------------------------------------------
# Core Django settings
#-------------------------------------------------------------------------------
# Django Addons
| 31.846154 | 80 | 0.301932 |
7951280dde0bd6c660a84ca4935acca684c0ed84 | 851 | py | Python | website/pagination_classes.py | edsegura30/BackEnd | 82669ce4f07fa54936f33ee08f0123f64d27a83a | ["MIT"] | 3 | 2019-04-05T17:39:28.000Z | 2019-05-22T15:38:08.000Z | website/pagination_classes.py | edsegura30/BackEnd | 82669ce4f07fa54936f33ee08f0123f64d27a83a | ["MIT"] | 1 | 2019-04-05T17:39:44.000Z | 2019-04-08T08:56:55.000Z | website/pagination_classes.py | edsegura30/BackEnd | 82669ce4f07fa54936f33ee08f0123f64d27a83a | ["MIT"] | null | null | null |
"""
Custom paginators for API responses
"""
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
_BASE_URL = 'http://localhost:8000/'
_EVENT_URL = 'events/'
class DefaultEdgarPagination(PageNumberPagination):
"""
This is the default REST API paginator
"""
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 200
def get_paginated_response(self, data):
return Response(
{
'next': self.get_next_link(),
'previous': self.get_previous_link(),
'total_pages': self.page.paginator.num_pages,
'page': self.page.number,
'all_events': _BASE_URL + _EVENT_URL,
'count': self.page.paginator.count,
'results': data,
})
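# Illustrative usage sketch (assumed project settings; the dotted path is a
# placeholder): enabling this paginator globally in Django REST Framework.
#
#     # settings.py
#     REST_FRAMEWORK = {
#         'DEFAULT_PAGINATION_CLASS':
#             'website.pagination_classes.DefaultEdgarPagination',
#     }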
| 28.366667 | 61 | 0.613396 |
795128174c98f5be3d2b79e43ab1d172dafe03fc | 169,255 | py | Python | nova/virt/libvirt/driver.py | bopopescu/novatest-1 | 16ed4510200308b8b710cac90aa032ec5eab7e9d | ["Apache-2.0"] | null | null | null | nova/virt/libvirt/driver.py | bopopescu/novatest-1 | 16ed4510200308b8b710cac90aa032ec5eab7e9d | ["Apache-2.0"] | null | null | null | nova/virt/libvirt/driver.py | bopopescu/novatest-1 | 16ed4510200308b8b710cac90aa032ec5eab7e9d | ["Apache-2.0"] | 1 | 2020-07-24T08:49:47.000Z | 2020-07-24T08:49:47.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in-project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import processutils
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
                help='Use a separate OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
default=None,
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
default=None,
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
cfg.StrOpt('vcpu_pin_set',
default=None,
help='Which pcpus can be used by vcpus of instance '
'e.g: "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 102400
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_CLOSE_CALLBACK_VERSION = (1, 0, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
def libvirt_error_handler(context, err):
    # Ignore errors instead of the default behaviour of writing them to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self._host_state = None
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"writethrough",
"unsafe",
]
for mode_str in CONF.disk_cachemodes:
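            # e.g. "file=directsync" splits into disk_type "file" and
            # cache_mode "directsync"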
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
            # Some filesystems (e.g. GlusterFS via FUSE) don't support
            # O_DIRECT though. For those we fall back to 'writethrough',
            # which gives host crash safety and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
            # typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
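            # e.g. (0, 9, 6) -> 9006 and (1, 0, 1) -> 1000001, so plain
            # integer comparison preserves (major, minor, micro) ordering.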
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
try:
if lv_ver is not None:
libvirt_version = self._conn.getLibVersion()
if libvirt_version < _munge_version(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = self._conn.getVersion()
if hypervisor_version < _munge_version(hv_ver):
return False
if hv_type is not None:
hypervisor_type = self._conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread.
"""
if self._event_queue is None:
LOG.debug("Event loop thread is not active, "
"discarding event %s" % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
def _get_connection(self):
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
if not CONF.libvirt_nonblocking:
wrapped_conn = self._connect(self.uri(), self.read_only)
else:
wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri(), self.read_only)
with self._wrapped_conn_lock:
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s" % str(self))
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception:
LOG.warn(_("URI %s does not support events"),
self.uri())
if self.has_min_version(MIN_LIBVIRT_CLOSE_CALLBACK_VERSION):
try:
LOG.debug("Registering for connection events: %s" %
str(self))
wrapped_conn.registerCloseCallback(
self._close_callback, None)
except libvirt.libvirtError:
LOG.debug(_("URI %s does not support connection events"),
self.uri())
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
LOG.info(_("Connection to libvirt lost: %s") % reason)
self._wrapped_conn = None
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.notify(nova_context.get_admin_context(),
notifier.publisher_id('compute'),
'compute.libvirt.error',
notifier.ERROR,
payload)
pass
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
def legacy_nwinfo(self):
return True
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
names.append(domain.name())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
uuids.add(domain.UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
for domain_name in self._conn.listDefinedDomains():
try:
uuids.add(self._lookup_by_name(domain_name).UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
return list(uuids)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_("Cannot destroy instance, operation time out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
            #                the domain here; if it changed and the instance is
            #                still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info, destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.NotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, for example
        #                 when a soft reboot operation boots it, it will
        #                 become "running deleted"; should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
                # the instance as a whole.
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e})
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
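            # e.g. an instance named "instance-0000002a" would match logical
            # volumes such as "instance-0000002a_disk" and
            # "instance-0000002a_disk.local"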
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to clean up volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
# NOTE(vish):
LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
@exception.wrap_exception()
def attach_interface(self, instance, image_meta, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
self.firewall_driver.setup_basic_filtering(instance,
[(network, mapping)])
cfg = self.vif_driver.get_config(instance, network, mapping,
image_meta)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
raise exception.InterfaceAttachFailed(instance)
@exception.wrap_exception()
def detach_interface(self, instance, network_info):
virt_dom = self._lookup_by_name(instance['name'])
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance, network, mapping, None)
try:
self.vif_driver.unplug(instance, (network, mapping))
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
image_format = 'raw'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
snapshot_name,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE (rmk): libvirt needs to be able to write to the
                    #             temp directory, which is owned by nova.
utils.execute('chmod', '777', tmpdir, run_as_root=True)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
if not live_snapshot:
snapshot_backend.snapshot_delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
def _wait_for_block_job(domain, disk_path):
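            # 'cur' and 'end' are the progress counters libvirt reports for
            # the active block job; keep polling until the copy completes,
            # i.e. cur == end with both non-zero.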
status = domain.blockJobInfo(disk_path, 0)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end and cur != 0 and end != 0:
return False
else:
return True
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while _wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
out_path, image_format)
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
if self._soft_reboot(instance):
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
            #              not recreating a domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
block_device_info)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
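        # Writing '1' to each bridge port's hairpin_mode lets the bridge
        # reflect a guest's own traffic back to it (e.g. an instance reaching
        # itself through its floating IP).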
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
        # Use a context manager so the file handle is always closed.
        with open(fpath, 'a+') as fp:
            fp.write(data)
        return fpath
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
        # If the guest has a console logging to a file, prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@exception.wrap_exception()
def get_spice_console(self, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
return (None, None)
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
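        # Probe support by opening a scratch file with O_DIRECT; an EINVAL
        # from the kernel indicates the filesystem cannot do direct I/O.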
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
if not CONF.libvirt_images_type == "lvm":
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
if fs_format:
utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
suffix = ''
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = instance['os_type']
if not os_type_with_default:
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(blockinfo.get_eph_disk(eph)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection
elif CONF.libvirt_inject_partition != -2:
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj, val in [('key', key),
('net', net),
('metadata', metadata),
('admin_pass', admin_pass),
('files', files)]:
if val:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'),
{'inj': inj, 'img_id': img_id},
instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match
"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self.get_hypervisor_version())
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
guest.cpuset = CONF.vcpu_pin_set
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping and disk_mapping['root']['dev'] is not None:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
cfg = self.vif_driver.get_config(instance,
network, mapping,
image_meta,
inst_type)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
return guest
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
LOG.debug(_('Start to_xml instance=%(instance)s '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s'
'block_device_info=%(block_device_info)s'),
{'instance': instance, 'network_info': network_info,
'disk_info': disk_info, 'image_meta': image_meta,
'rescue': rescue, 'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
{'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
if xml:
try:
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e
if power_on:
try:
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))
try:
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin mode on "
"domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None, power_on=True):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_cpuset_ids(self):
"""
Parse the vcpu_pin_set config option.
Returns a sorted list of pcpu ids that can be used by instances.
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in CONF.vcpu_pin_set.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available pcpu ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single PCPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
# This will convert the set to a sorted list for us
return sorted(cpuset_ids)
def get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = self._get_cpuset_ids()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
vcpus = dom.vcpus()
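# Descriptive note: dom.vcpus() returns a (per-vcpu info, per-vcpu
# pinning map) pair, so len(vcpus[1]) below counts the domain's vcpus.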
if vcpus is None:
LOG.debug(_("couldn't obtain the vpu count from domain id:"
" %s") % dom_id)
else:
total += len(vcpus[1])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
# NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
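# Descriptive note: /proc/meminfo reports sizes in kB as
# "Label: value kB" triples, so m[idx + 1] is the numeric field that
# follows each label and the divisions by 1024 below convert kB to MB.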
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s")
% domain_id)
continue
# skip dom0
if domain_id != 0:
used += dom_mem
else:
# the memory reported by dom0 is greater than what
# is actually being used
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to do be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
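# Illustrative example of the resulting JSON (values are made up):
# {"arch": "x86_64", "model": "SandyBridge", "vendor": "Intel",
#  "topology": {"sockets": 1, "cores": 4, "threads": 2},
#  "features": ["vmx", "aes"]}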
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilties XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" instance=%(instance)s, rd_req=%(rd_req)d,"
" rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d,"
" wr_bytes=%(wr_bytes)d")
% stats)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.get_disk_over_committed_size_total()
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': _get_disk_available_least()}
return dic
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.") % tmp_file)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and not is_volume_backed:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
# Libvirt supports the qcow2 disk format, which is usually compressed
# on compute nodes.
# The real (compressed) disk image may grow up to the "virtual disk
# size", which is its specified maximum size.
# (See qemu-img info path-to-disk)
# The destination host is considered to have enough disk space
# if real disk size < available disk size when disk_over_commit is
# True, and otherwise if virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large (available'
' on destination host: %(available)s '
'< need: %(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openReadonly().getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Returns False if the tmpfile does not exist.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
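# Illustrative example (the flag names here are an assumption, not a
# value taken from this file): live_migration_flag might be
# "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER"; each name is
# resolved on the libvirt module and logical_sum ORs them into the
# single bitmask handed to migrateToURI() below.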
dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.NotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance, block_device_info,
network_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_storage:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
# if the image has a kernel and ramdisk, just download
# them in the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
# Retrying is necessary because requests arrive continuously and
# concurrent requests to iptables make it complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
def pre_block_migration(self, context, instance, disk_info_json):
"""Preparation for block migration."""
# NOTE (rmk): When preparing for a block migration, the instance dir
# should not exist on the destination hypervisor.
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
self._create_images_and_backing(context, instance, disk_info_json)
def _create_images_and_backing(self, context, instance, disk_info_json):
"""
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
disk_info = jsonutils.loads(disk_info_json)
instance_dir = libvirt_utils.get_instance_path(instance)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
# Creating the backing file follows the same path as spawning an instance.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
# if the image has a kernel and ramdisk, just download
# them in the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance)
self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None,
block_device_info=None):
"""Preparation block migration.
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE (rmk): When block_device_info is provided, we will use it to
# filter out devices which are actually volumes.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
instance_name)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = 0
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
# Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
{'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
# NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
# rename the instance dir to <name>_resize first, to support using
# shared storage for the instance dir (e.g. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
# If we have a non-partitioned image that we can extend,
# then ensure we're in 'raw' format so we can extend the file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
disk.extend(info['path'], size)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
# assume _create_image does nothing if a target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
shutil.rmtree(inst_base)
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
if os.path.exists(inst_base):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, update the stats first.
"""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
data = {}
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
disk_info_dict = self.driver.get_local_gb_info()
data["disk_total"] = disk_info_dict['total']
data["disk_used"] = disk_info_dict['used']
data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["supported_instances"] = \
self.driver.get_instance_capabilities()
self._stats = data
return data
| 41.957115
| 79
| 0.560308
|
79512840cdc2dd833c98c35c8ab44be0a49cc05b
| 4,935
|
py
|
Python
|
pipenv/patched/crayons.py
|
craynic/pipenv
|
e137d4334a5d225a06bf41b21e2eef746c19c3cb
|
[
"MIT"
] | null | null | null |
pipenv/patched/crayons.py
|
craynic/pipenv
|
e137d4334a5d225a06bf41b21e2eef746c19c3cb
|
[
"MIT"
] | null | null | null |
pipenv/patched/crayons.py
|
craynic/pipenv
|
e137d4334a5d225a06bf41b21e2eef746c19c3cb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
clint.colored
~~~~~~~~~~~~~
This module provides a simple and elegant wrapper for colorama.
"""
import os
import re
import sys
import shellingham
import colorama
PY3 = sys.version_info[0] >= 3
__all__ = (
"red",
"green",
"yellow",
"blue",
"black",
"magenta",
"cyan",
"white",
"normal",
"clean",
"disable",
)
COLORS = __all__[:-2]
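# Note (added for clarity): __all__[:-2] drops "clean" and "disable",
# so COLORS contains only the color helper names.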
is_ipython = "get_ipython" in dir()
if (
os.environ.get("CMDER_ROOT")
or os.environ.get("VSCODE_PID")
or os.environ.get("TERM_PROGRAM") == "Hyper"
or "VSCODE_CWD" in os.environ
):
is_native_powershell = False
else:
is_native_powershell = True
try:
is_powershell = "powershell" in shellingham.detect_shell()[0]
except shellingham.ShellDetectionFailure:
is_powershell = False
if is_ipython or (is_powershell and is_native_powershell):
"""when ipython is fired lot of variables like _oh, etc are used.
There are so many ways to find current python interpreter is ipython.
get_ipython is easiest is most appealing for readers to understand.
"""
DISABLE_COLOR = True
else:
DISABLE_COLOR = False
class ColoredString(object):
"""Enhanced string for __len__ operations on Colored output."""
def __init__(self, color, s, always_color=False, bold=False):
super(ColoredString, self).__init__()
if not PY3 and isinstance(s, unicode):
self.s = s.encode("utf-8")
else:
self.s = s
self.color = color
self.always_color = always_color
self.bold = bold
if os.environ.get("PIPENV_FORCE_COLOR"):
self.always_color = True
def __getattr__(self, att):
def func_help(*args, **kwargs):
result = getattr(self.s, att)(*args, **kwargs)
try:
is_result_string = isinstance(result, basestring)
except NameError:
is_result_string = isinstance(result, str)
if is_result_string:
return self._new(result)
elif isinstance(result, list):
return [self._new(x) for x in result]
else:
return result
return func_help
@property
def color_str(self):
style = "BRIGHT" if self.bold else "NORMAL"
c = "%s%s%s%s%s" % (
getattr(colorama.Fore, self.color),
getattr(colorama.Style, style),
self.s,
colorama.Fore.RESET,
getattr(colorama.Style, "NORMAL"),
)
if self.always_color:
return c
elif sys.stdout.isatty() and not DISABLE_COLOR:
return c
else:
return self.s
def __len__(self):
return len(self.s)
def __repr__(self):
return "<%s-string: '%s'>" % (self.color, self.s)
def __unicode__(self):
value = self.color_str
if isinstance(value, bytes):
return value.decode("utf8")
return value
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return self.color_str
def __iter__(self):
return iter(self.color_str)
def __add__(self, other):
return str(self.color_str) + str(other)
def __radd__(self, other):
return str(other) + str(self.color_str)
def __mul__(self, other):
return self.color_str * other
def _new(self, s):
return ColoredString(self.color, s)
def clean(s):
strip = re.compile(
"([^-_a-zA-Z0-9!@#%&=,/'\";:~`\$\^\*\(\)\+\[\]\.\{\}\|\?\<\>\\]+|[^\s]+)"
)
txt = strip.sub("", str(s))
strip = re.compile(r"\[\d+m")
txt = strip.sub("", txt)
return txt
def normal(string, always=False, bold=False):
return ColoredString("RESET", string, always_color=always, bold=bold)
def black(string, always=False, bold=False):
return ColoredString("BLACK", string, always_color=always, bold=bold)
def red(string, always=False, bold=False):
return ColoredString("RED", string, always_color=always, bold=bold)
def green(string, always=False, bold=False):
return ColoredString("GREEN", string, always_color=always, bold=bold)
def yellow(string, always=False, bold=False):
return ColoredString("YELLOW", string, always_color=always, bold=bold)
def blue(string, always=False, bold=False):
return ColoredString("BLUE", string, always_color=always, bold=bold)
def magenta(string, always=False, bold=False):
return ColoredString("MAGENTA", string, always_color=always, bold=bold)
def cyan(string, always=False, bold=False):
return ColoredString("CYAN", string, always_color=always, bold=bold)
def white(string, always=False, bold=False):
# This upsets people...
return ColoredString("WHITE", string, always_color=always, bold=bold)
def disable():
"""Disables colors."""
global DISABLE_COLOR
DISABLE_COLOR = True
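# --- Illustrative usage sketch (appended for documentation; not part of the
# patched module).  Running the file directly shows the helpers in action;
# on a non-TTY stream the strings fall back to plain text unless always=True
# or PIPENV_FORCE_COLOR is set.
if __name__ == "__main__":
    print(red("error: ", bold=True) + "something went wrong")
    print(green("ok ") + normal("done"))
    disable()
    print(yellow("printed without colour once disabled"))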
| 24.552239
| 81
| 0.613982
|
795129c42bbde563f6643398aaa67a5593d36874
| 20,542
|
py
|
Python
|
qgitc/mergewidget.py
|
timxx/qgitc
|
514333c80ce79c795ebe7c70ef74cfa91a9f529b
|
[
"Apache-2.0"
] | 2
|
2020-05-06T19:56:15.000Z
|
2022-02-08T01:02:19.000Z
|
qgitc/mergewidget.py
|
timxx/qgitc
|
514333c80ce79c795ebe7c70ef74cfa91a9f529b
|
[
"Apache-2.0"
] | null | null | null |
qgitc/mergewidget.py
|
timxx/qgitc
|
514333c80ce79c795ebe7c70ef74cfa91a9f529b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from PySide6.QtCore import *
from .gitutils import Git, GitProcess
from .stylehelper import dpiScaled
from .conflictlog import (
ConflictLogExcel,
ConflictLogXlsx,
HAVE_EXCEL_API,
HAVE_XLSX_WRITER,
MergeInfo)
from .events import CopyConflictCommit
from .common import dataDirPath
from datetime import datetime
import shutil
STATE_CONFLICT = 0
STATE_RESOLVED = 1
RESOLVE_SUCCEEDED = 0
RESOLVE_FAILED = 1
StateRole = Qt.UserRole + 1
class MergeWidget(QWidget):
requestResolve = Signal(str)
resolveFinished = Signal(int)
def __init__(self, parent=None):
super(MergeWidget, self).__init__(parent)
self.setWindowFlags(Qt.WindowMinMaxButtonsHint)
self.setWindowTitle(self.tr("Conflict List"))
self.resolvedCount = 0
self.iconResolved = self.__makeTextIcon(chr(0x2714), Qt.green)
self.iconConflict = self.__makeTextIcon('!', Qt.red)
self.resolveIndex = -1
self.process = None
self._firstShown = True
self.log = None
self.__setupUi()
self.__setupSignals()
self._mergeInfo = None
def __setupUi(self):
self.view = QListView(self)
self.model = QStandardItemModel(self)
self.proxyModel = QSortFilterProxyModel(self)
self.proxyModel.setSourceModel(self.model)
self.view.setSelectionMode(QAbstractItemView.ExtendedSelection)
# for Ctrl+C
self.view.installEventFilter(self)
self.view.setModel(self.proxyModel)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.leFilter = QLineEdit(self)
self.lbFilter = QLabel("0", self)
filterLayout = QHBoxLayout()
filterLayout.addWidget(self.leFilter)
filterLayout.addWidget(self.lbFilter)
vlayout = QVBoxLayout(self)
vlayout.addLayout(filterLayout)
vlayout.addWidget(self.view)
hlayout = QHBoxLayout()
self.status = QLabel(self)
self.status.setToolTip(self.tr("Click to refresh the list"))
self.cbAutoNext = QCheckBox(self.tr("Continuous resolve"))
self.btnResolve = QPushButton(self.tr("Resolve"))
hlayout.addWidget(self.status)
hlayout.addSpacerItem(QSpacerItem(
20, 20, QSizePolicy.MinimumExpanding))
hlayout.addWidget(self.cbAutoNext)
hlayout.addWidget(self.btnResolve)
vlayout.addLayout(hlayout)
self.cbAutoLog = QCheckBox(self.tr("Log conflicts to"), self)
self.leLogFile = QLineEdit(self)
self.btnChooseLog = QPushButton(self.tr("Choose"), self)
hlayout = QHBoxLayout()
hlayout.addWidget(self.cbAutoLog)
hlayout.addWidget(self.leLogFile)
hlayout.addWidget(self.btnChooseLog)
vlayout.addLayout(hlayout)
self.cbAutoNext.setChecked(True)
if HAVE_EXCEL_API or HAVE_XLSX_WRITER:
self.cbAutoLog.setChecked(True)
self.__onAutoLogChanged(Qt.Checked)
else:
self.cbAutoLog.setChecked(False)
self.cbAutoLog.setEnabled(False)
self.cbAutoLog.setToolTip(
self.tr("No pywin32/pywpsrpc or openpyxl found, feature disabled."))
self.__onAutoLogChanged(Qt.Unchecked)
self.leLogFile.setText(self.__defaultLogFile())
self.__setupMenu()
def __defaultLogFile(self):
dir = QStandardPaths.writableLocation(QStandardPaths.DocumentsLocation)
dt = datetime.now()
fileName = "conflicts-{}.xlsx".format(dt.strftime("%Y%m%d%H%M%S"))
return dir + QDir.separator() + fileName
def __ensureLogWriter(self):
if self.log is not None:
return
logFile = self.leLogFile.text()
shutil.copy(dataDirPath() + "/templates/builtin.xlsx", logFile)
if HAVE_EXCEL_API:
self.log = ConflictLogExcel(logFile)
elif HAVE_XLSX_WRITER:
self.log = ConflictLogXlsx(logFile)
if self._mergeInfo is not None:
self.log.setMergeInfo(self._mergeInfo)
def __setupMenu(self):
self.menu = QMenu()
self.acResolve = self.menu.addAction(
self.tr("&Resolve"),
self.__onMenuResolve)
self.acUndoMerge = self.menu.addAction(
self.tr("&Undo merge"),
self.__onMenuUndoMerge)
self.menu.addSeparator()
self.acUseOurs = self.menu.addAction(
self.tr("Use &ours"),
self.__onMenuUseOurs)
self.acUseTheirs = self.menu.addAction(
self.tr("Use &theirs"),
self.__onMenuUseTheirs)
self.menu.addSeparator()
self.menu.addAction(self.tr("&Copy Path"),
self.__onMenuCopyPath,
QKeySequence("Ctrl+C"))
self.menu.addAction(self.tr("Copy &Windows Path"),
self.__onMenuCopyWinPath)
self.menu.addSeparator()
self.menu.addAction(self.tr("Select &All"),
self.__onMenuSelectAll,
QKeySequence("Ctrl+A"))
def __setupSignals(self):
self.btnResolve.clicked.connect(self.__onResolveClicked)
self.view.doubleClicked.connect(self.__onItemDoubleClicked)
self.status.linkActivated.connect(self.__onStatusRefresh)
self.leFilter.textChanged.connect(self.__onFilterChanged)
self.cbAutoLog.stateChanged.connect(self.__onAutoLogChanged)
self.btnChooseLog.clicked.connect(self.__onChooseLogFile)
def __makeTextIcon(self, text, color):
img = QPixmap(dpiScaled(QSize(32, 32)))
img.fill(Qt.transparent)
painter = QPainter(img)
painter.setPen(color)
font = QFont()
font.setPixelSize(dpiScaled(32))
painter.setFont(font)
painter.drawText(img.rect(), Qt.AlignCenter, text)
painter = None
return QIcon(img)
def __updateStatus(self):
# just don't wanna inherit a QLabel LoL
total = self.model.rowCount()
self.status.setText(
"<a href='#refresh'>{}/{}</a>".format(self.resolvedCount,
total))
def __updateFilterCount(self):
text = self.proxyModel.filterRegularExpression().pattern()
count = self.proxyModel.rowCount() if text else 0
self.lbFilter.setText("{}".format(count))
def __resolvedIndex(self, index):
index = self.proxyModel.mapToSource(index)
item = self.model.itemFromIndex(index)
item.setData(STATE_RESOLVED, StateRole)
item.setIcon(self.iconResolved)
self.resolvedCount += 1
self.__updateStatus()
def __checkCurrentResolve(self, index):
if self.resolveIndex == index.row():
text = self.tr(
"You are resolving this file, please close it first.")
QMessageBox.information(self,
qApp.applicationName(),
text)
return False
return True
def __onResolveClicked(self, checked=False):
index = self.view.currentIndex()
self.resolve(index)
def __onItemDoubleClicked(self, index):
self.resolve(index)
def __onStatusRefresh(self, link):
if self.process:
QMessageBox.information(self,
qApp.applicationName(),
self.tr("You can't refresh before close the merge window."))
return
self.updateList()
def __onFilterChanged(self, text):
self.proxyModel.setFilterRegularExpression(text)
self.__updateFilterCount()
def __onAutoLogChanged(self, state):
enabled = state == Qt.Checked
self.leLogFile.setEnabled(enabled)
self.btnChooseLog.setEnabled(enabled)
def __onChooseLogFile(self, checked):
f, _ = QFileDialog.getSaveFileName(
self,
self.tr("Choose file"),
dir=QStandardPaths.writableLocation(QStandardPaths.DocumentsLocation))
if f:
self.leLogFile.setText(f)
def __onMenuResolve(self):
self.__onResolveClicked()
def __onMenuUndoMerge(self):
index = self.view.currentIndex()
if index.data(StateRole) != STATE_RESOLVED:
return
if Git.undoMerge(index.data()):
index = self.proxyModel.mapToSource(index)
item = self.model.itemFromIndex(index)
item.setData(STATE_CONFLICT, StateRole)
item.setIcon(self.iconConflict)
self.resolvedCount -= 1
self.__updateStatus()
def __onMenuUseOurs(self):
index = self.view.currentIndex()
if not self.__checkCurrentResolve(index):
return
if index.data(StateRole) == STATE_CONFLICT:
if Git.resolveBy(True, index.data()):
self.__resolvedIndex(index)
if self.logEnabled():
self.__ensureLogWriter()
self.log.setResolveMethod(
index.data(),
self.tr("Local Branch"))
def __onMenuUseTheirs(self):
index = self.view.currentIndex()
if not self.__checkCurrentResolve(index):
return
if index.data(StateRole) == STATE_CONFLICT:
if Git.resolveBy(False, index.data()):
self.__resolvedIndex(index)
if self.logEnabled():
self.__ensureLogWriter()
self.log.setResolveMethod(
index.data(),
self.tr("Remote Branch"))
def __doCopyPath(self, asWin=False):
indexList = self.view.selectionModel().selectedRows()
paths = ""
for index in indexList:
path = index.data(Qt.DisplayRole)
if asWin:
path = path.replace('/', '\\')
paths += path + "\n"
if paths:
qApp.clipboard().setText(paths)
def __onMenuCopyPath(self):
self.__doCopyPath()
def __onMenuCopyWinPath(self):
self.__doCopyPath(True)
def __onMenuSelectAll(self):
self.view.selectAll()
def __onReadyRead(self):
# FIXME: git might not flush all of its output at once, so delay
# a little before reading, to catch the full "Deleted merge" prompt
QTimer.singleShot(50, self.__onResolveReadyRead)
def __onResolveReadyRead(self):
if not self.process or not self.process.bytesAvailable():
return
data = self.process.readAllStandardOutput().data()
# there seems to be no option to control this buggy prompt
if b'Continue merging other unresolved paths [y/n]?' in data:
self.process.write(b"n\n")
elif b'Deleted merge conflict for' in data:
text = data.decode("utf-8")
isCreated = "(c)reated" in text
if isCreated:
text = text.replace("(c)reated", "created")
else:
text = text.replace("(m)odified", "modified")
text = text.replace("(d)eleted", "deleted")
text = text.replace("(a)bort", "abort")
msgBox = QMessageBox(
QMessageBox.Question, qApp.applicationName(), text, QMessageBox.NoButton, self)
msgBox.addButton(self.tr("Use &created") if isCreated
else self.tr("Use &modified"),
QMessageBox.AcceptRole)
msgBox.addButton(self.tr("&Deleted file"), QMessageBox.RejectRole)
msgBox.addButton(QMessageBox.Abort)
r = msgBox.exec_()
if r == QMessageBox.AcceptRole:
if isCreated:
self.process.write(b"c\n")
else:
self.process.write(b"m\n")
elif r == QMessageBox.RejectRole:
self.process.write(b"d\n")
else: # r == QMessageBox.Abort:
self.process.write(b"a\n")
elif b'Symbolic link merge conflict for' in data:
text = data.decode("utf-8")
text = text.replace("(l)ocal", "local")
text = text.replace("(r)emote", "remote")
text = text.replace("(a)bort", "abort")
msgBox = QMessageBox(
QMessageBox.Question, qApp.applicationName(), text, QMessageBox.NoButton, self)
msgBox.addButton(self.tr("Use &local"), QMessageBox.AcceptRole)
msgBox.addButton(self.tr("Use &remote"), QMessageBox.RejectRole)
msgBox.addButton(QMessageBox.Abort)
r = msgBox.exec_()
if r == QMessageBox.AcceptRole:
self.process.write(b"l\n")
elif r == QMessageBox.RejectRole:
self.process.write(b"r\n")
else:
self.process.write(b"a\n")
elif b'Was the merge successful [y/n]?' in data:
# TODO:
self.process.write(b"n\n")
elif b'?' in data:
# TODO: there might be other prompts that need a yes/no answer
print("unhandled prompt", data)
def __onResolveFinished(self, exitCode, exitStatus):
errorData = None
if exitCode == 0:
index = self.proxyModel.index(self.resolveIndex, 0)
self.__resolvedIndex(index)
else:
errorData = self.process.readAllStandardError()
self.process = None
curRow = self.resolveIndex
self.resolveIndex = -1
self.resolveFinished.emit(RESOLVE_SUCCEEDED if exitCode == 0
else RESOLVE_FAILED)
self.leFilter.setEnabled(True)
self.cbAutoLog.setEnabled(True)
self.__onAutoLogChanged(self.cbAutoLog.checkState())
# auto next only when success
if exitCode != 0:
if errorData:
QMessageBox.critical(
self, self.window().windowTitle(),
errorData.data().decode("utf-8"))
return
if not self.cbAutoNext.isChecked():
return
if self.resolvedCount == self.model.rowCount():
QMessageBox.information(self, qApp.applicationName(),
self.tr("All resolved!"))
return
index = None
allFilterResolved = True
noEndConflicts = True
# search to the end
for i in range(curRow + 1, self.proxyModel.rowCount()):
index = self.proxyModel.index(i, 0)
if index.data(StateRole) == STATE_CONFLICT:
allFilterResolved = False
noEndConflicts = False
break
index = None
# search from beginning
if not index:
for i in range(curRow):
index = self.proxyModel.index(i, 0)
if index.data(StateRole) == STATE_CONFLICT:
allFilterResolved = False
break
index = None
# avoid showing two message boxes when we reach the end
if allFilterResolved:
text = self.tr(
"All filter conflicts are resolved, please clear the filter to resolve the rest.")
QMessageBox.information(self, qApp.applicationName(), text)
return
elif noEndConflicts:
text = self.tr(
"Resolve reach to the end of list, do you want to resolve from beginning?")
r = QMessageBox.question(
self, qApp.applicationName(), text, QMessageBox.Yes, QMessageBox.No)
if r == QMessageBox.No:
return
self.view.setCurrentIndex(index)
self.resolve(index)
def __onFirstShow(self):
self.updateList()
if self.model.rowCount() == 0:
QMessageBox.information(
self,
self.window().windowTitle(),
self.tr("No conflict files to resolve!"),
QMessageBox.Ok)
def contextMenuEvent(self, event):
index = self.view.currentIndex()
enabled = index.data(StateRole) == STATE_RESOLVED
self.acResolve.setEnabled(not enabled)
# TODO: handle multiple files
if len(self.view.selectionModel().selectedRows()) > 1:
self.acUndoMerge.setEnabled(False)
self.acUseOurs.setEnabled(False)
self.acUseTheirs.setEnabled(False)
else:
self.acUndoMerge.setEnabled(enabled)
self.acUseOurs.setEnabled(not self.isResolving())
self.acUseTheirs.setEnabled(not self.isResolving())
self.menu.exec_(event.globalPos())
def paintEvent(self, event):
super().paintEvent(event)
if self._firstShown:
self._firstShown = False
QTimer.singleShot(0, self.__onFirstShow)
def sizeHint(self):
return dpiScaled(QSize(500, 700))
def updateList(self):
if not Git.available():
return
files = Git.conflictFiles()
self.model.clear()
if files:
for f in files:
item = QStandardItem(self.iconConflict, f)
item.setData(STATE_CONFLICT, StateRole)
self.model.appendRow(item)
index = self.proxyModel.index(0, 0)
self.view.setCurrentIndex(index)
self.resolvedCount = 0
self.__updateStatus()
self.__updateFilterCount()
for action in self.menu.actions():
action.setEnabled(files is not None)
self.btnResolve.setEnabled(files is not None)
def resolve(self, index):
if not index.isValid():
return
if index.data(StateRole) == STATE_RESOLVED:
QMessageBox.information(self, qApp.applicationName(),
self.tr("This file is already resolved."))
return
if self.process:
QMessageBox.information(self, qApp.applicationName(),
self.tr("Please resolve current conflicts before start a new one."))
return
# the row index is saved while resolving, so disable these controls for now
self.leFilter.setEnabled(False)
self.cbAutoLog.setEnabled(False)
self.__onAutoLogChanged(Qt.Unchecked)
self.resolveIndex = index.row()
file = index.data()
args = ["mergetool", "--no-prompt"]
toolName = None
tools = qApp.settings().mergeToolList()
# ignore case even on Unix platforms
lowercase_file = file.lower()
for tool in tools:
if tool.canMerge() and tool.isValid():
if lowercase_file.endswith(tool.suffix.lower()):
toolName = tool.command
break
if not toolName:
toolName = qApp.settings().mergeToolName()
if toolName:
args.append("--tool=%s" % toolName)
args.append(file)
# subprocess is not suitable here
self.process = QProcess(self)
self.process.readyReadStandardOutput.connect(self.__onReadyRead)
self.process.finished.connect(self.__onResolveFinished)
self.process.setWorkingDirectory(Git.REPO_DIR)
self.process.start(GitProcess.GIT_BIN, args)
self.requestResolve.emit(file)
if self.logEnabled():
self.__ensureLogWriter()
self.log.addFile(file)
def event(self, e):
if e.type() == CopyConflictCommit.Type:
if self.isResolving() and self.logEnabled():
self.log.addCommit(e.commit)
return True
return super().event(e)
def eventFilter(self, obj, event):
if obj == self.view and event.type() == QEvent.KeyPress:
if event.matches(QKeySequence.Copy):
self.__doCopyPath()
return True
return super().eventFilter(obj, event)
def isResolving(self):
return self.process is not None
def logEnabled(self):
return self.cbAutoLog.isChecked()
def queryClose(self):
if self.log:
self.log.save()
self.log = None
return True
def setBranches(self, localBranch, remoteBranch):
self._mergeInfo = MergeInfo(
localBranch,
remoteBranch.replace("remotes/origin/", ""),
Git.getConfigValue("user.name", False)
)
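# --- Illustrative note (not part of this module) ---
# MergeWidget is designed to be hosted by qgitc's main application, which
# provides qApp.settings(), the Git helpers and the CopyConflictCommit event.
# A host window would typically wire it up along these lines (variable names
# here are hypothetical):
#
#     widget = MergeWidget(parent=mainWindow)
#     widget.setBranches("feature/x", "remotes/origin/master")
#     widget.requestResolve.connect(onResolveRequested)
#     widget.show()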
| 34.582492
| 104
| 0.587285
|
79512bd8dcda8ddf1e1029dfa070c2e791fce916
| 3,817
|
py
|
Python
|
app/equipment_type/routes.py
|
kid-kodi/BioBank
|
27c7cb7286dcae737fa53c245456d60857fe949f
|
[
"MIT"
] | null | null | null |
app/equipment_type/routes.py
|
kid-kodi/BioBank
|
27c7cb7286dcae737fa53c245456d60857fe949f
|
[
"MIT"
] | null | null | null |
app/equipment_type/routes.py
|
kid-kodi/BioBank
|
27c7cb7286dcae737fa53c245456d60857fe949f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from guess_language import guess_language
from app import db
from app.equipment_type.forms import EquipmentTypeForm, SearchForm
from app.models import Customer, EquipmentType
from app.translate import translate
from app.equipment_type import bp
@bp.route('/equipment_type', methods=['GET', 'POST'])
@login_required
def index():
pagination = []
search_form = SearchForm()
page = request.args.get('page', 1, type=int)
if search_form.validate_on_submit():
name = search_form.name.data
if name != '':
pagination = EquipmentType.query.filter_by(name=name) \
.order_by(EquipmentType.created_at.desc()).paginate(
page, per_page=current_app.config['FLASK_PER_PAGE'],
error_out=False)
else:
pagination = EquipmentType.query \
.order_by(EquipmentType.created_at.desc()).paginate(
page, per_page=current_app.config['FLASK_PER_PAGE'],
error_out=False)
else:
pagination = EquipmentType.query \
.order_by(EquipmentType.created_at.desc()).paginate(
page, per_page=current_app.config['FLASK_PER_PAGE'],
error_out=False)
list = pagination.items
return render_template('equipment_type/list.html',
list=list, pagination=pagination,
title="equipment", search_form=search_form)
@bp.route('/equipment_type/add', methods=['GET', 'POST'])
@login_required
def add():
add = True
form = EquipmentTypeForm()
if form.validate_on_submit():
equipementType = EquipmentType(name=form.name.data,
description=form.description.data,
created_at=datetime.utcnow(),
created_by=current_user.id)
db.session.add(equipementType)
db.session.commit()
flash(_('Data saved!'))
return redirect(url_for('equipment_type.index'))
return render_template('equipment_type/form.html', action="Add",
add=add, form=form,
title="Add equipment_type")
@bp.route('/equipment_type/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
add = False
equipementType = EquipmentType.query.get_or_404(id)
form = EquipmentTypeForm(obj=equipementType)
if form.validate_on_submit():
equipementType.name = form.name.data
equipementType.description = form.description.data
db.session.commit()
flash('You have successfully edited the equipment_type.')
# redirect to the bps page
return redirect(url_for('equipment_type.index'))
form.name.data = equipementType.name
return render_template('equipment_type/form.html', action="Edit",
add=add, form=form,
equipementType=equipementType, title="Edit equipment_type")
@bp.route('/equipment_type/<int:id>', methods=['GET', 'POST'])
@login_required
def detail(id):
equipementType = EquipmentType.query.get_or_404(id)
return render_template('equipment_type/detail.html', equipementType=equipementType)
@bp.route('/equipment_type/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete(id):
equipementType = EquipmentType.query.get_or_404(id)
db.session.delete(equipementType)
db.session.commit()
flash('You have successfully deleted the equipment_type.')
# redirect to the bps page
return redirect(url_for('equipment_type.index'))
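# --- Illustrative note (not part of this module) ---
# The views above hang off the "bp" blueprint imported from
# app.equipment_type, so the application factory is expected to register it
# roughly like this (the factory itself lives elsewhere in the project):
#
#     from app.equipment_type import bp as equipment_type_bp
#     app.register_blueprint(equipment_type_bp)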
| 38.17
| 87
| 0.653655
|
79512cb9149491e8e325c294dc06fc126ca13e56
| 4,077
|
py
|
Python
|
vint/ast/parsing.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 538
|
2015-01-03T18:54:53.000Z
|
2020-01-11T01:34:51.000Z
|
vint/ast/parsing.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 235
|
2015-01-01T06:20:01.000Z
|
2020-01-17T11:32:39.000Z
|
vint/ast/parsing.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 43
|
2015-01-23T16:59:49.000Z
|
2019-12-27T10:56:12.000Z
|
from typing import Dict, Any # noqa: F401
import re
from vint._bundles import vimlparser
from vint.ast.traversing import traverse
from vint.encodings.decoder import Decoder
from vint.encodings.decoding_strategy import default_decoding_strategy
from vint.linting.lint_target import AbstractLintTarget
class Parser(object):
def __init__(self, plugins=None, enable_neovim=False):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins if plugins else []
self._enable_neovim = enable_neovim
def parse(self, lint_target): # type: (AbstractLintTarget) -> Dict[str, Any]
""" Parse vim script file and return the AST. """
decoder = Decoder(default_decoding_strategy)
decoded = decoder.decode(lint_target.read())
decoded_and_lf_normalized = decoded.replace('\r\n', '\n')
return self.parse_string(decoded_and_lf_normalized)
def parse_string(self, string): # type: (str) -> Dict[str, Any]
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = vimlparser.StringReader(lines)
parser = vimlparser.VimLParser(self._enable_neovim)
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_redir(self, redir_cmd):
""" Parse a command :redir content. """
redir_cmd_str = redir_cmd['str']
matched = re.match(r'redir?!?\s*(=>>?\s*)(\S+)', redir_cmd_str)
if matched:
redir_cmd_op = matched.group(1)
redir_cmd_body = matched.group(2)
arg_pos = redir_cmd['ea']['argpos']
# Position of the "redir_cmd_body"
start_pos = {
'col': arg_pos['col'] + len(redir_cmd_op),
'i': arg_pos['i'] + len(redir_cmd_op),
'lnum': arg_pos['lnum'],
}
# NOTE: This is a hack to parse variable node.
raw_ast = self.parse_string('echo ' + redir_cmd_body)
# We need the left node of ECHO node
redir_cmd_ast = raw_ast['body'][0]['list'][0]
def adjust_position(node):
pos = node['pos']
# Care 1-based index and the length of "echo ".
pos['col'] += start_pos['col'] - 1 - 5
# Care the length of "echo ".
pos['i'] += start_pos['i'] - 5
# Care 1-based index
pos['lnum'] += start_pos['lnum'] - 1
traverse(redir_cmd_ast, on_enter=adjust_position)
return redir_cmd_ast
return None
def parse_string_expr(self, string_expr_node):
""" Parse a string node content. """
string_expr_node_value = string_expr_node['value']
string_expr_str = string_expr_node_value[1:-1]
# Care escaped string literals
if string_expr_node_value[0] == "'":
string_expr_str = string_expr_str.replace("''", "'")
else:
string_expr_str = string_expr_str.replace('\\"', '"')
# NOTE: This is a hack to parse expr1. See :help expr1
raw_ast = self.parse_string('echo ' + string_expr_str)
# We need the left node of ECHO node
parsed_string_expr_nodes = raw_ast['body'][0]['list']
start_pos = string_expr_node['pos']
def adjust_position(node):
pos = node['pos']
# Care 1-based index and the length of "echo ".
pos['col'] += start_pos['col'] - 1 - 5
# Care the length of "echo ".
pos['i'] += start_pos['i'] - 5
# Care 1-based index
pos['lnum'] += start_pos['lnum'] - 1
for parsed_string_expr_node in parsed_string_expr_nodes:
traverse(parsed_string_expr_node, on_enter=adjust_position)
return parsed_string_expr_nodes
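# --- Illustrative usage sketch (not part of the original module) ---
# parse_string() only needs a Vim script string, so the parser can be tried
# out without a file-backed lint target:
if __name__ == '__main__':
    ast = Parser().parse_string('let g:greeting = "hello"')
    # The AST is a plain dict; TOPLEVEL carries the synthetic position that
    # parse_string() adds above.
    print(ast['pos'])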
| 33.418033
| 81
| 0.590385
|
79512e3fdd0749567f943a81625f02d4b71c9138
| 3,240
|
py
|
Python
|
main.py
|
n0vuh/antizon
|
bc9b8765add79a5a4f9f225f8e14a79aa3e10291
|
[
"BSD-3-Clause"
] | 3
|
2021-08-13T21:30:47.000Z
|
2021-11-21T23:40:06.000Z
|
main.py
|
n0vuh/antizon
|
bc9b8765add79a5a4f9f225f8e14a79aa3e10291
|
[
"BSD-3-Clause"
] | 1
|
2021-08-12T21:53:50.000Z
|
2021-08-12T21:56:24.000Z
|
main.py
|
n0vuh/antizon
|
bc9b8765add79a5a4f9f225f8e14a79aa3e10291
|
[
"BSD-3-Clause"
] | null | null | null |
# developed by novuh (github.com/n0vuh)
# AntiZon is intended for the purpose of getting a consumer a better price for a product.
# AntiZon is not intended to be used as financial advice.
# source code, p.s: ignore lazy code, it'll be fixed
import json
import os
from colorama import Back, Fore, Style
from src.utils.console import clear, title, tags
from src.amzn.item import AMZN
from src.gogl.find import SERP, process
def query_loop(amzn: AMZN, serp: SERP):
query = input(tags.default + "Amazon Product ASIN >> ")
print(tags.default + f"Searching Amazon for '{query}'...")
amazon_data = amzn.get_item(query)
try:
amzn_value = amazon_data.value
amzn_title = amazon_data.name
print(tags.okay + "Found a result!")
except AttributeError:
print(tags.error + str(amazon_data["message"]))
return query_loop(amzn, serp)
print(tags.default + f"Searching Google for '{amzn_title}'...")
try:
r = process(serp.get(amzn_title))
print(tags.okay + f"Found {len(r['extracted'])} result(s)!")
except Exception as e:
print(tags.error + f"Error occured while searching Google: {e}")
query_loop(amzn, serp)
print(tags.default + "Processing retrieved data...")
cheaper = []
for res in r["extracted"]:
if res["price"] < amzn_value:
cheaper.append(
{
"price": res["price"],
"source": res["supplier"],
"link": res["link"]
}
)
print(tags.okay + f"Found {len(cheaper)} cheaper listings!")
print(tags.default + "Writing...")
with open("output.txt", "a") as fp:
for res in cheaper:
fp.write(f"${res['price']} | {res['link']} | {res['source']}\n")
print(tags.okay + "Done! You can view the results inside output.txt.")
input(Fore.BLACK + Style.BRIGHT + "Press ENTER to quit.")
exit()
def main():
clear()
# title stuff
title("AntiZon by novuh")
length = os.get_terminal_size().columns
print(Style.BRIGHT + Fore.WHITE + Back.YELLOW + "AntiZon by github.com/n0vuh".center(length, " ") + Back.RESET + "\n")
# load config file
print(Back.RESET + tags.default + "Loading config file...")
cfg = json.load(open("src/resources/config.json", "r"))
# check to see if config has any data, if not, require user to give required data
dump = False
serp_key = cfg["serpkey"]
amzn_key = cfg["amznkey"]
if cfg["serpkey"] == "":
print(tags.error + "You do not have a SerpAPI key! Get one @ serpapi.com")
serp_key = input(tags.default + "SerpAPI Key >> ")
dump = True
if cfg["amznkey"] == "":
print(tags.error + "You do not have a Rainforest key! Get one @ rainforestapi.com")
amzn_key = input(tags.default + "Rainforest Key >> ")
dump = True
if dump:
json.dump({"serpkey": serp_key, "amznkey": amzn_key}, open("src/resources/config.json", "w"))
main()
# setup api classes
amzn = AMZN(cfg["amznkey"])
serp = SERP(cfg["serpkey"])
# search loop
query_loop(amzn, serp)
if __name__ == "__main__":
main()
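# --- Illustrative note (not part of the script) ---
# src/resources/config.json is expected to hold the two API keys written by
# the setup branch above, e.g.:
#
#     {"serpkey": "<your SerpAPI key>", "amznkey": "<your Rainforest key>"}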
| 34.105263
| 123
| 0.587346
|
79512f31d9a702aa7d3cacd949fcd92b5eaa25ef
| 1,277
|
py
|
Python
|
jsonfmt.py
|
r15ch13/scoops
|
82da457b00f583627ad83512c0de68f54c615083
|
[
"MIT"
] | null | null | null |
jsonfmt.py
|
r15ch13/scoops
|
82da457b00f583627ad83512c0de68f54c615083
|
[
"MIT"
] | null | null | null |
jsonfmt.py
|
r15ch13/scoops
|
82da457b00f583627ad83512c0de68f54c615083
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" @todo add docstring """
# ### imports ###
from __future__ import (
absolute_import,
division,
print_function # ,
# unicode_literals
)
from jsoncomment import JsonComment
# from jsonschema import validate
import json
import os
import sys
def decode(s):
if sys.version_info >= (3, 0):
return s
for encoding in 'utf-8-sig', 'utf-16':
try:
return s.decode(encoding)
except UnicodeDecodeError:
continue
return s.decode('latin-1')
def touch(filename, mtime):
with open(filename, 'a+'):
pass
os.utime(filename, (mtime, mtime))
return 0
file = sys.argv[1]
print('Updating', file)
mtime = os.path.getmtime(file)
with open(file, 'r') as f:
jstr = f.read()
jstr_no_bom = decode(jstr)
parser = JsonComment(json)
json_data = parser.loads(jstr_no_bom)
new_data = json.dumps(
json_data, sort_keys=True, indent=4, separators=(',', ': '))
with open(file + '.tmp', 'wb') as f:
new_data = new_data.encode('utf-8')
new_data += b"\n"
f.write(new_data)
if os.path.isfile(file + '.bak'):
os.remove(file + '.bak')
os.rename(file, file + '.bak')
os.rename(file + '.tmp', file)
touch(file, mtime)
sys.exit(0)
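# --- Illustrative invocation (not part of the script) ---
# The script takes the manifest path as its only argument, rewrites it with
# sorted keys and 4-space indentation, keeps a ".bak" copy and restores the
# original modification time.  The file name below is hypothetical:
#
#     python jsonfmt.py bucket/example.json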
| 18.779412
| 64
| 0.618637
|
79512fe05d993ffaabcea1e53f67931b5550932e
| 17,292
|
py
|
Python
|
cube_algorithm/cube.py
|
Khja/CubeAlgAddon
|
84961f3dadbccfeed208169776dee35e0a7e969d
|
[
"MIT"
] | 4
|
2020-07-23T19:39:12.000Z
|
2022-01-16T15:52:01.000Z
|
cube_algorithm/cube.py
|
Khja/CubeAlgAddon
|
84961f3dadbccfeed208169776dee35e0a7e969d
|
[
"MIT"
] | null | null | null |
cube_algorithm/cube.py
|
Khja/CubeAlgAddon
|
84961f3dadbccfeed208169776dee35e0a7e969d
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2020 Khja
# Please read before using this piece of software.
css = """\
.card {
font-family: arial;
font-size: 30px;
text-align: center;
color: black;
background-color: white;
}
:root {
--thickness: 4;
--size: 20;
--G: #39EA5C;
--W: #FFFFFF;
--R: #DC0010;
--O: #FF9E00;
--Y: #FFFF00;
--B: #0027D7;
}
\
"""
n_back = '''{{FrontSide}}
<hr id=answer>
<b>{{Case}}</b>
<br><br>
<i>{{Algorithm}}</i>
'''
_front = """<br>
<canvas id="cube" width=150 height=150></canvas>
<br>
<br>
<script>
//Colors. You may change the RGB codes, don't touch the names
var colors = {
G: getComputedStyle(document.documentElement).getPropertyValue('--G'),
W: getComputedStyle(document.documentElement).getPropertyValue('--W'),
R: getComputedStyle(document.documentElement).getPropertyValue('--R'),
O: getComputedStyle(document.documentElement).getPropertyValue('--O'),
Y: getComputedStyle(document.documentElement).getPropertyValue('--Y'),
B: getComputedStyle(document.documentElement).getPropertyValue('--B')
};
var border = getComputedStyle(document.documentElement).getPropertyValue('--Border')
//Size of the cube
var size = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--size'))
//Thickness of the border
var thickness = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--thickness'))
//*****DON'T TOUCH FURTHER!!!!*****//
//Faces
var F = Array(9).fill("G"),
U = Array(9).fill("W"),
R = Array(9).fill("R"),
B = Array(9).fill("B")
D = Array(9).fill("Y"),
L = Array(9).fill("O"),
cube = [U,F,R,D,B,L];
function Random(max) {
return Math.round(Math.random() * (max-1));
}
/*Rotation functions*/
//Rotate_s the face colors clockwise. Center stays in center
function Rotate_Face_1(face) {
var top = Array()
top.push(face[6])
top.push(face[3])
top.push(face[0])
top.push(face[7])
top.push(face[4])
top.push(face[1])
top.push(face[8])
top.push(face[5])
top.push(face[2])
return top
}
//Rotate_s the face colors counterclockwise. Center stays in center
function Rotate_Face_2(face) {
return Rotate_Face_1(Rotate_Face_1(Rotate_Face_1(face)))
}
//Rotate_s all objects in the Arrays clockwise
function Rotate_Layer(U,F,L,B,R) {
var u = Rotate_Face_1(U)
var f = Array()
var l = Array()
var b = Array()
var r = Array()
//Rotate_s the lateral objects
f.push(R[0])
f.push(R[1])
f.push(R[2])
f.push(F[3])
f.push(F[4])
f.push(F[5])
f.push(F[6])
f.push(F[7])
f.push(F[8])
l.push(F[0])
l.push(F[1])
l.push(F[2])
l.push(L[3])
l.push(L[4])
l.push(L[5])
l.push(L[6])
l.push(L[7])
l.push(L[8])
b.push(L[0])
b.push(L[1])
b.push(L[2])
b.push(B[3])
b.push(B[4])
b.push(B[5])
b.push(B[6])
b.push(B[7])
b.push(B[8])
r.push(B[0])
r.push(B[1])
r.push(B[2])
r.push(R[3])
r.push(R[4])
r.push(R[5])
r.push(R[6])
r.push(R[7])
r.push(R[8])
return [u, f, l, b, r]
}
/*Moves*/
//Rotations
function Rotate_X() {
var u = cube[0], f = cube[1], r = cube[2], d = cube[3], b = cube[4], l = cube[5]
cube[0] = f
cube[1] = d
cube[2] = Rotate_Face_1(r)
cube[3] = Rotate_Face_1(Rotate_Face_1(b))
cube[4] = Rotate_Face_1(Rotate_Face_1(u))
cube[5] = Rotate_Face_2(l)
}
function Rotate_Xi() {Rotate_X(); Rotate_X(); Rotate_X()};
function Rotate_Y() {Rotate_Yi(); Rotate_Yi(); Rotate_Yi()};
function Rotate_Yi() {
var u = cube[0], f = cube[1], r = cube[2], d = cube[3], b = cube[4], l = cube[5]
cube[0] = Rotate_Face_2(u)
cube[1] = l
cube[2] = f
cube[3] = Rotate_Face_1(d)
cube[4] = r
cube[5] = b
}
function Rotate_Zi() {Rotate_Yi(); Rotate_Xi(); Rotate_Y()};
function Rotate_Z() {Rotate_Zi(); Rotate_Zi(); Rotate_Zi()};
//Layers
function Rotate_U() {
var all = Rotate_Layer(cube[0],cube[1],cube[5],cube[4],cube[2])
cube[0] = all[0]
cube[1] = all[1]
cube[5] = all[2]
cube[4] = all[3]
cube[2] = all[4]
}
function Rotate_Ui() {Rotate_U(); Rotate_U(); Rotate_U()};
function Rotate_F() {Rotate_X(); Rotate_U(); Rotate_Xi()};
function Rotate_Fi() {Rotate_X(); Rotate_Ui(); Rotate_Xi()};
function Rotate_R() {Rotate_Zi(); Rotate_U(); Rotate_Z()};
function Rotate_Ri() {Rotate_R(); Rotate_R(); Rotate_R()};
function Rotate_D() {Rotate_X(); Rotate_X(); Rotate_U(); Rotate_X(); Rotate_X()};
function Rotate_Di() {Rotate_D(); Rotate_D(); Rotate_D()};
function Rotate_B() {Rotate_Xi(); Rotate_U(); Rotate_X()};
function Rotate_Bi() {Rotate_Xi(); Rotate_Ui(); Rotate_X()};
function Rotate_L() {Rotate_Z(); Rotate_U(); Rotate_Zi()};
function Rotate_Li() {Rotate_Z(); Rotate_Ui(); Rotate_Zi()};
function Rotate_M() {Rotate_X(); Rotate_Ri(); Rotate_L()};
function Rotate_Mi() {Rotate_Xi(); Rotate_R(); Rotate_Li()};
function Rotate_r() {Rotate_L(); Rotate_X()}
function Rotate_ri() {Rotate_Li(); Rotate_Xi()}
function Rotate_l() {Rotate_R(); Rotate_X()}
function Rotate_li() {Rotate_Ri(); Rotate_Xi()}
function Rotate_u() {Rotate_Di(); Rotate_Yi()}
function Rotate_ui() {Rotate_D(); Rotate_Y()}
function Rotate_b() {Rotate_Fi(); Rotate_Z()}
function Rotate_bi() {Rotate_F(); Rotate_Zi()}
function Rotate_d() {Rotate_Ui(); Rotate_Y()}
function Rotate_di() {Rotate_U(); Rotate_Yi()}
function Rotate_f() {Rotate_B(); Rotate_Z()}
function Rotate_fi() {Rotate_Bi(); Rotate_Zi()}
/*Drawing functions*/
function rect(ctx,l,x,y,a,b,color) {
ctx.fillStyle = color;
ctx.beginPath();
ctx.moveTo(x*l, y*l);
ctx.lineTo(x*l, y*l+a*l);
ctx.lineTo(x*l+b*l, y*l+a*l);
ctx.lineTo(x*l+b*l, y*l);
ctx.lineTo(x*l, y*l);
ctx.fill();
ctx.stroke();
}
function Display(cube) {
var u = cube[0],
f = cube[1],
r = cube[2],
b = cube[4],
l = cube[5];
var canvas = document.getElementById('cube');
if (canvas.getContext) {
var ctx = canvas.getContext('2d');
ctx.canvas.width = 8 * size
ctx.canvas.height = 8 * size
ctx.strokeStyle = border;
ctx.lineWidth = thickness;
//U Face
rect(ctx, size, 1, 5, 2, 2, colors[u[6]])
rect(ctx, size, 3, 5, 2, 2, colors[u[7]])
rect(ctx, size, 5, 5, 2, 2, colors[u[8]])
rect(ctx, size, 1, 3, 2, 2, colors[u[3]])
rect(ctx, size, 3, 3, 2, 2, colors[u[4]])
rect(ctx, size, 5, 3, 2, 2, colors[u[5]])
rect(ctx, size, 1, 1, 2, 2, colors[u[0]])
rect(ctx, size, 3, 1, 2, 2, colors[u[1]])
rect(ctx, size, 5, 1, 2, 2, colors[u[2]])
//F Face
rect(ctx, size, 1, 7, 1, 2, colors[f[0]])
rect(ctx, size, 3, 7, 1, 2, colors[f[1]])
rect(ctx, size, 5, 7, 1, 2, colors[f[2]])
/* //B Face
rect(ctx, size, 1, -1, colors[b[2]])
rect(ctx, size, 3, -1, colors[b[1]])
rect(ctx, size, 5, -1, colors[b[0]]) */
//R Face
rect(ctx, size, 7, 1, 2, 1, colors[r[2]])
rect(ctx, size, 7, 3, 2, 1, colors[r[1]])
rect(ctx, size, 7, 5, 2, 1, colors[r[0]])
//L Face
rect(ctx, size, 0, 1, 2, 1, colors[l[0]])
rect(ctx, size, 0, 3, 2, 1, colors[l[1]])
rect(ctx, size, 0, 5, 2, 1, colors[l[2]])
}
}
function Reverse(alg) {
var reverse = Array()
var alg = alg.reverse()
for (i = 0; i < alg.length; i++) {
//x
if (alg[i] == "x") {reverse.push("x'")}
if (alg[i] == "x'") {reverse.push("x")}
if (alg[i] == "x2" || alg[i] == "x2'") {reverse.push("x2")}
//y
if (alg[i] == "y") {reverse.push("y'")}
if (alg[i] == "y'") {reverse.push("y")}
if (alg[i] == "y2" || alg[i] == "y2'") {reverse.push("y2")}
//z
if (alg[i] == "z") {reverse.push("z'")}
if (alg[i] == "z'") {reverse.push("z")}
if (alg[i] == "z2" || alg[i] == "z2'") {reverse.push("z2")}
//U
if (alg[i] == "U") {reverse.push("U'")}
if (alg[i] == "U'") {reverse.push("U")}
if (alg[i] == "U2" || alg[i] == "U2'") {reverse.push("U2")}
//F
if (alg[i] == "F") {reverse.push("F'")}
if (alg[i] == "F'") {reverse.push("F")}
if (alg[i] == "F2" || alg[i] == "F2'") {reverse.push("F2")}
//R
if (alg[i] == "R") {reverse.push("R'")}
if (alg[i] == "R'") {reverse.push("R")}
if (alg[i] == "R2" || alg[i] == "R2'") {reverse.push("R2")}
//D
if (alg[i] == "D") {reverse.push("D'")}
if (alg[i] == "D'") {reverse.push("D")}
if (alg[i] == "D2" || alg[i] == "D2'") {reverse.push("D2")}
//B
if (alg[i] == "B") {reverse.push("B'")}
if (alg[i] == "B'") {reverse.push("B")}
if (alg[i] == "B2" || alg[i] == "B2'") {reverse.push("B2")}
//L
if (alg[i] == "L") {reverse.push("L'")}
if (alg[i] == "L'") {reverse.push("L")}
if (alg[i] == "L2" || alg[i] == "L2'") {reverse.push("L2")}
//M
if (alg[i] == "M") {reverse.push("M'")}
if (alg[i] == "M'") {reverse.push("M")}
if (alg[i] == "M2" || alg[i] == "M2'") {reverse.push("M2")}
//u
if (alg[i] == "u") {reverse.push("u'")}
if (alg[i] == "u'") {reverse.push("u")}
if (alg[i] == "u2" || alg[i] == "u2'") {reverse.push("u2")}
//f
if (alg[i] == "f") {reverse.push("f'")}
if (alg[i] == "f'") {reverse.push("f")}
if (alg[i] == "f2" || alg[i] == "f2'") {reverse.push("f2")}
//r
if (alg[i] == "r") {reverse.push("r'")}
if (alg[i] == "r'") {reverse.push("r")}
if (alg[i] == "r2" || alg[i] == "r2'") {reverse.push("r2")}
//d
if (alg[i] == "d") {reverse.push("d'")}
if (alg[i] == "d'") {reverse.push("d")}
if (alg[i] == "d2" || alg[i] == "d2'") {reverse.push("d2")}
//b
if (alg[i] == "b") {reverse.push("b'")}
if (alg[i] == "b'") {reverse.push("b")}
if (alg[i] == "b2" || alg[i] == "b2'") {reverse.push("b2")}
//l
if (alg[i] == "l") {reverse.push("l'")}
if (alg[i] == "l'") {reverse.push("l")}
if (alg[i] == "l2" || alg[i] == "l2'") {reverse.push("l2")}
}
return reverse
}
function Perform(alg) {
for (i = 0; i < alg.length; i++) {
//x
if (alg[i] == "x") {Rotate_X()}
if (alg[i] == "x'") {Rotate_Xi()}
if (alg[i] == "x2" || alg[i] == "x2'") {Rotate_X(); Rotate_X()}
//y
if (alg[i] == "y") {Rotate_Y()}
if (alg[i] == "y'") {Rotate_Yi()}
if (alg[i] == "y2" || alg[i] == "y2'") {Rotate_Y(); Rotate_Y();}
//z
if (alg[i] == "z") {Rotate_Z()}
if (alg[i] == "z'") {Rotate_Zi()}
if (alg[i] == "z2" || alg[i] == "z2'") {Rotate_Z(); Rotate_Z();}
//U
if (alg[i] == "U") {Rotate_U()}
if (alg[i] == "U'") {Rotate_Ui()}
if (alg[i] == "U2" || alg[i] == "U2'") {Rotate_U(); Rotate_U()}
//F
if (alg[i] == "F") {Rotate_F()}
if (alg[i] == "F'") {Rotate_Fi()}
if (alg[i] == "F2" || alg[i] == "F2'") {Rotate_F(); Rotate_F()}
//R
if (alg[i] == "R") {Rotate_R()}
if (alg[i] == "R'") {Rotate_Ri()}
if (alg[i] == "R2" || alg[i] == "R2'") {Rotate_R();Rotate_R();}
//D
if (alg[i] == "D") {Rotate_D();}
if (alg[i] == "D'") {Rotate_Di();}
if (alg[i] == "D2" || alg[i] == "D2'") {Rotate_D(); Rotate_D()}
//B
if (alg[i] == "B") {Rotate_B()}
if (alg[i] == "B'") {Rotate_Bi()}
if (alg[i] == "B2" || alg[i] == "B2'") {Rotate_B(); Rotate_B()}
//L
if (alg[i] == "L") {Rotate_L()}
if (alg[i] == "L'") {Rotate_Li()}
if (alg[i] == "L2" || alg[i] == "L2'") {Rotate_L(); Rotate_L()}
//M
if (alg[i] == "M") {Rotate_M()}
if (alg[i] == "M'") {Rotate_Mi()}
if (alg[i] == "M2" || alg[i] == "M2'") {Rotate_M(); Rotate_M()}
//u
if (alg[i] == "u") { Rotate_u()}
if (alg[i] == "u'") {Rotate_ui()}
if (alg[i] == "u2" || alg[i] == "u2'") {Rotate_u(); Rotate_u()}
//f
if (alg[i] == "f") {Rotate_f()}
if (alg[i] == "f'") {Rotate_fi()}
if (alg[i] == "f2" || alg[i] == "f2'") {Rotate_f(); Rotate_f()}
//r
if (alg[i] == "r") {Rotate_r()}
if (alg[i] == "r'") {Rotate_ri()}
if (alg[i] == "r2" || alg[i] == "r2'") {Rotate_r(); Rotate_r()}
//d
if (alg[i] == "d") {Rotate_d()}
if (alg[i] == "d'") {Rotate_di()}
if (alg[i] == "d2" || alg[i] == "d2'") {Rotate_d(); Rotate_d()}
//b
if (alg[i] == "b") {Rotate_b()}
if (alg[i] == "b'") {Rotate_bi()}
if (alg[i] == "b2" || alg[i] == "b2'") {Rotate_b(); Rotate_b()}
//l
if (alg[i] == "l") {Rotate_R(); Rotate_Xi()}
if (alg[i] == "l'") {Rotate_li()}
if (alg[i] == "l2" || alg[i] == "l2'") {Rotate_l(); Rotate_l()}
}
}
var neutrality = "{{Neutrality}}".trim().split(' // ');
function getNeutral(neutrality) {
var colors = Array();
for (i = 0; i < neutrality.length; i ++) {
color = neutrality[i];
for (k = 0; k < 10; k ++) {
if (color == "W") {colors.push("")};
if (color == "Y") {colors.push("x2")};
if (color == "G") {colors.push("x")};
if (color == "B") {colors.push("x'")};
if (color == "R") {colors.push("z'")};
if (color == "O") {colors.push("z")};
}
}
return colors
}
function Scramble(moves,number) {
var scramble = Array();
var i = 0;
while (i < number) {
var k = Random(moves.length)
var move = moves[k]
if (move.length > 2) {
move = move.split(' ')
}
else {
move = [move]
}
Perform(move)
i ++
}
}
function getAlg(alg,s) {
alg = alg.replace(/\(/g, "")
alg = alg.replace(/\)/g, "")
return alg.trim().split(s)
}
var moves = getAlg("{{Algorithm}}",' ');
var scramble_size = {{ScrambleSize}};
var scramble = getAlg("{{Scramble}}"," // ");
var aufmoves = ["U","U'","U2"];
var reverse = Reverse(moves);
//Apply algorithm and scramble and AUF
var ufaces = getNeutral(neutrality)
Perform([ufaces[Random(ufaces.length)]])
Scramble(['y','y2'],6)
Scramble(scramble,scramble_size);
Perform(reverse);
Perform([aufmoves[0]])
//Draw Cube
Display(cube);
</script>
"""
u_back = '''\
{{FrontSide}}
<hr id=answer>
<b>{{Case}}</b>
<br><br>
<i><div id='alg'></div></i>
<script>
if ("{{U Algorithm}}".length > 0) {
document.getElementById('alg').innerHTML = "{{U Algorithm}}"
}
else {
document.getElementById('alg').innerHTML = "(U) "+"{{Algorithm}}"
}
</script>
\
'''
n_front = _front.replace("Perform([aufmoves[0]])","")
n_back = '''\
{{FrontSide}}
<hr id=answer>
<b>{{Case}}</b>
<br><br>
<i>{{Algorithm}}</i>
\
'''
u_front = "{{#U}}"+_front+"{{/U}}"
ui_front = "{{#U'}}"+_front.replace("[aufmoves[0]]","[aufmoves[1]]")+"{{/U'}}"
ui_back = u_back.replace("U Algorithm","U' Algorithm")
ui_back = ui_back.replace('"(U) "+"{{Algorithm}}"', '''"(U') "+"{{Algorithm}}"''')
u2_front = "{{#U2}}"+_front.replace("[aufmoves[0]]","[aufmoves[2]]")+"{{/U2}}"
u2_back = u_back.replace("U Algorithm","U2 Algorithm")
u2_back = u2_back.replace('"(U) "+"{{Algorithm}}"', '''"(U2) "+"{{Algorithm}}"''')
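# --- Illustrative sketch (not part of the add-on) ---
# The strings above are Anki card templates.  Outside Anki they can be
# previewed by wrapping each one in the shared CSS and writing it to an HTML
# file; the file names below are hypothetical, and Anki field placeholders
# such as {{Algorithm}} simply stay unexpanded in a plain browser.
if __name__ == "__main__":
    for name, template in (("preview_n_front.html", n_front),
                           ("preview_u_front.html", u_front),
                           ("preview_u2_front.html", u2_front)):
        with open(name, "w") as handle:
            handle.write("<style>\n" + css + "</style>\n" + template)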
| 29.558974
| 104
| 0.446623
|
7951300735316b86476229d4a28cbf569ac524f6
| 397
|
py
|
Python
|
rimproject/wsgi.py
|
mostateresnet/rimproject
|
ef1952ee386b62ea7b49c139a3c66c718fb310d0
|
[
"MIT"
] | null | null | null |
rimproject/wsgi.py
|
mostateresnet/rimproject
|
ef1952ee386b62ea7b49c139a3c66c718fb310d0
|
[
"MIT"
] | 55
|
2018-04-18T19:57:56.000Z
|
2021-09-22T18:51:16.000Z
|
rimproject/wsgi.py
|
mostateresnet/rimproject
|
ef1952ee386b62ea7b49c139a3c66c718fb310d0
|
[
"MIT"
] | null | null | null |
"""
WSGI config for rimproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rimproject.settings")
application = get_wsgi_application()
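# --- Illustrative deployment note (not part of the generated file) ---
# Any WSGI server can serve the "application" callable above, for example:
#
#     gunicorn rimproject.wsgi:application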
| 23.352941
| 78
| 0.788413
|
79513118d1365989722c33e13c769768443be583
| 344
|
py
|
Python
|
src/utils/outliers.py
|
Hydrapse/pytorch-template
|
d7ea2f19bbdc032b8663ca432c1ef9012fe6180b
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/utils/outliers.py
|
Hydrapse/pytorch-template
|
d7ea2f19bbdc032b8663ca432c1ef9012fe6180b
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/utils/outliers.py
|
Hydrapse/pytorch-template
|
d7ea2f19bbdc032b8663ca432c1ef9012fe6180b
|
[
"MIT",
"Unlicense"
] | null | null | null |
import math
import torch
def replace_nan(val, nan_to=0., inf_to=1.):
# NaNs become nan_to, infinities become inf_to.
val = torch.where(torch.isnan(val), torch.full_like(val, nan_to), val)
return torch.where(torch.isinf(val), torch.full_like(val, inf_to), val)
def clip_int(val, lower, upper):
val = 0 if math.isnan(val) else round(val)
return max(lower, min(val, upper))
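# --- Illustrative usage sketch (not part of the original module) ---
# replace_nan() sanitises NaN/Inf entries of a tensor, clip_int() rounds a
# scalar and clamps it to a range.  Expected values assume the semantics
# implied by the parameter names (nan_to for NaN, inf_to for Inf):
if __name__ == '__main__':
    t = torch.tensor([1.0, float('nan'), float('inf')])
    print(replace_nan(t))                 # tensor([1., 0., 1.])
    print(clip_int(7.6, 0, 5))            # 5
    print(clip_int(float('nan'), 0, 5))   # 0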
| 24.571429
| 75
| 0.69186
|
795131ae170c11d294ae427dbeebeaf1d4b452a8
| 346
|
py
|
Python
|
hammock_example.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 2
|
2015-06-01T18:33:38.000Z
|
2018-11-21T19:40:37.000Z
|
hammock_example.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 102
|
2015-01-20T17:26:52.000Z
|
2017-12-28T17:32:51.000Z
|
hammock_example.py
|
voidabhi/python-scripts
|
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
|
[
"MIT"
] | 3
|
2020-03-02T06:54:18.000Z
|
2021-01-07T16:36:35.000Z
|
from hammock import Hammock as Github
github = Github('https://api.github.com')
headers = {'Accept': 'application/vnd.github.preview'}
resp = github.search.repositories.GET(params={'q': 'language:python', 'sort': 'stars', 'per_page': 10, 'page': 1}, headers=headers)
res = resp.json()
print(res['items'][0]['full_name'])
print(len(res['items']))
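# --- Illustrative note (not part of the example) ---
# Hammock turns chained attribute access into URL path segments, so the call
# above issues roughly:
#
#     GET https://api.github.com/search/repositories?q=language:python&sort=stars&per_page=10&page=1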
| 34.6
| 131
| 0.696532
|
7951320e962063cabd7ef45ebee81d8768cc2366
| 22,908
|
py
|
Python
|
core/tests/tests_forms.py
|
mbaitelman/babybuddy
|
ea6476f9cac83959470a2fdfdd2d40441bfde061
|
[
"BSD-2-Clause"
] | null | null | null |
core/tests/tests_forms.py
|
mbaitelman/babybuddy
|
ea6476f9cac83959470a2fdfdd2d40441bfde061
|
[
"BSD-2-Clause"
] | null | null | null |
core/tests/tests_forms.py
|
mbaitelman/babybuddy
|
ea6476f9cac83959470a2fdfdd2d40441bfde061
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from django.test import Client as HttpClient
from django.utils import timezone
from django.utils.formats import get_format
from faker import Factory
from core import models
class FormsTestCaseBase(TestCase):
c = None
child = None
user = None
@classmethod
def setUpClass(cls):
super(FormsTestCaseBase, cls).setUpClass()
fake = Factory.create()
call_command('migrate', verbosity=0)
cls.c = HttpClient()
fake_user = fake.simple_profile()
credentials = {
'username': fake_user['username'],
'password': fake.password()
}
cls.user = User.objects.create_user(
is_superuser=True, **credentials)
cls.c.login(**credentials)
cls.child = models.Child.objects.create(
first_name='Child',
last_name='One',
birth_date=timezone.localdate()
)
@staticmethod
def localdate_string(datetime=None):
""" Converts an object to a local date string for form input. """
date_format = get_format('DATE_INPUT_FORMATS')[0]
return timezone.localdate(datetime).strftime(date_format)
@staticmethod
def localtime_string(datetime=None):
""" Converts an object to a local time string for form input. """
datetime_format = get_format('DATETIME_INPUT_FORMATS')[0]
return timezone.localtime(datetime).strftime(datetime_format)
class InitialValuesTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(InitialValuesTestCase, cls).setUpClass()
cls.timer = models.Timer.objects.create(
user=cls.user,
start=timezone.localtime() - timezone.timedelta(minutes=30)
)
def test_child_with_one_child(self):
page = self.c.get('/sleep/add/')
self.assertEqual(page.context['form'].initial['child'], self.child)
def test_child_with_parameter(self):
child_two = models.Child.objects.create(
first_name='Child',
last_name='Two',
birth_date=timezone.localdate()
)
page = self.c.get('/sleep/add/')
self.assertTrue('child' not in page.context['form'].initial)
page = self.c.get('/sleep/add/?child={}'.format(self.child.slug))
self.assertEqual(page.context['form'].initial['child'], self.child)
page = self.c.get('/sleep/add/?child={}'.format(child_two.slug))
self.assertEqual(page.context['form'].initial['child'], child_two)
def test_feeding_type(self):
child_two = models.Child.objects.create(
first_name='Child',
last_name='Two',
birth_date=timezone.localdate()
)
f_one = models.Feeding.objects.create(
child=self.child,
start=timezone.localtime() - timezone.timedelta(hours=4),
end=timezone.localtime() - timezone.timedelta(hours=3, minutes=30),
type='breast milk',
method='left breast'
)
f_two = models.Feeding.objects.create(
child=child_two,
start=timezone.localtime() - timezone.timedelta(hours=4),
end=timezone.localtime() - timezone.timedelta(hours=3, minutes=30),
type='formula',
method='bottle'
)
page = self.c.get('/feedings/add/')
self.assertTrue('type' not in page.context['form'].initial)
page = self.c.get('/feedings/add/?child={}'.format(self.child.slug))
self.assertEqual(page.context['form'].initial['type'], f_one.type)
page = self.c.get('/feedings/add/?child={}'.format(child_two.slug))
self.assertEqual(page.context['form'].initial['type'], f_two.type)
def test_timer_set(self):
self.timer.stop()
page = self.c.get('/sleep/add/')
self.assertTrue('start' not in page.context['form'].initial)
self.assertTrue('end' not in page.context['form'].initial)
page = self.c.get('/sleep/add/?timer={}'.format(self.timer.id))
self.assertEqual(page.context['form'].initial['start'],
self.timer.start)
self.assertEqual(page.context['form'].initial['end'], self.timer.end)
def test_timer_stop_on_save(self):
end = timezone.localtime()
params = {
'child': self.child.id,
'start': self.localtime_string(self.timer.start),
'end': self.localtime_string(end)
}
page = self.c.post('/sleep/add/?timer={}'.format(self.timer.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.timer.refresh_from_db()
self.assertFalse(self.timer.active)
self.assertEqual(self.localtime_string(self.timer.end), params['end'])
class ChildFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(ChildFormsTestCase, cls).setUpClass()
cls.child = models.Child.objects.first()
def test_add(self):
params = {
'first_name': 'Child',
'last_name': 'Two',
'birth_date': timezone.localdate()
}
page = self.c.post('/children/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Child entry added')
def test_edit(self):
params = {
'first_name': 'Name',
'last_name': 'Changed',
'birth_date': self.child.birth_date
}
page = self.c.post('/children/{}/edit/'.format(self.child.slug),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.child.refresh_from_db()
self.assertEqual(self.child.last_name, params['last_name'])
self.assertContains(page, 'Child entry updated')
def test_delete(self):
params = {'confirm_name': 'Incorrect'}
page = self.c.post('/children/{}/delete/'.format(self.child.slug),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', 'confirm_name',
'Name does not match child name.')
params['confirm_name'] = str(self.child)
page = self.c.post('/children/{}/delete/'.format(self.child.slug),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Child entry deleted')
class DiaperChangeFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(DiaperChangeFormsTestCase, cls).setUpClass()
cls.change = models.DiaperChange.objects.create(
child=cls.child,
time=timezone.localtime(),
wet=True,
solid=True,
color='black',
amount=0.45
)
def test_add(self):
child = models.Child.objects.first()
params = {
'child': child.id,
'time': self.localtime_string(),
'color': 'black',
'amount': 0.45
}
page = self.c.post('/changes/add/', params)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', None,
'Wet and/or solid is required.')
params.update({'wet': 1, 'solid': 1, 'color': 'black'})
page = self.c.post('/changes/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Diaper Change entry for {} added'.format(str(child))
)
def test_edit(self):
params = {
'child': self.change.child.id,
'time': self.localtime_string(),
'wet': self.change.wet,
'solid': self.change.solid,
'color': self.change.color,
'amount': 1.23
}
page = self.c.post('/changes/{}/'.format(self.change.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.change.refresh_from_db()
self.assertEqual(self.change.amount, params['amount'])
self.assertContains(
page,
'Diaper Change entry for {} updated'.format(str(self.change.child))
)
def test_delete(self):
page = self.c.post('/changes/{}/delete/'.format(self.change.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Diaper Change entry deleted')
class FeedingFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(FeedingFormsTestCase, cls).setUpClass()
cls.feeding = models.Feeding.objects.create(
child=cls.child,
start=timezone.localtime() - timezone.timedelta(hours=2),
end=timezone.localtime() - timezone.timedelta(hours=1, minutes=30),
type='breast milk',
method='left breast',
amount=2.5
)
def test_add(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=30)
params = {
'child': self.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'type': 'formula',
'method': 'left breast',
'amount': 0
}
page = self.c.post('/feedings/add/', params)
self.assertEqual(page.status_code, 200)
self.assertFormError(
page, 'form', 'method',
'Only "Bottle" method is allowed with "Formula" type.')
params.update({'method': 'bottle'})
page = self.c.post('/feedings/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Feeding entry for {} added'.format(str(self.child))
)
def test_edit(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=30)
params = {
'child': self.feeding.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'type': self.feeding.type,
'method': self.feeding.method,
'amount': 100
}
page = self.c.post('/feedings/{}/'.format(self.feeding.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.feeding.refresh_from_db()
self.assertEqual(self.feeding.amount, params['amount'])
self.assertContains(
page,
'Feeding entry for {} updated'.format(str(self.feeding.child))
)
def test_delete(self):
page = self.c.post('/feedings/{}/delete/'.format(self.feeding.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Feeding entry deleted')
class SleepFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(SleepFormsTestCase, cls).setUpClass()
cls.sleep = models.Sleep.objects.create(
child=cls.child,
start=timezone.localtime() - timezone.timedelta(hours=6),
end=timezone.localtime() - timezone.timedelta(hours=4)
)
def test_add(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=2)
params = {
'child': self.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
}
page = self.c.post('/sleep/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Sleep entry for {} added'.format(str(self.child))
)
def test_edit(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=2)
params = {
'child': self.sleep.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
}
page = self.c.post('/sleep/{}/'.format(self.sleep.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.sleep.refresh_from_db()
self.assertEqual(
self.localtime_string(self.sleep.end),
params['end']
)
self.assertContains(
page,
'Sleep entry for {} updated'.format(str(self.sleep.child))
)
def test_delete(self):
page = self.c.post('/sleep/{}/delete/'.format(self.sleep.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Sleep entry deleted')
class TemperatureFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(TemperatureFormsTestCase, cls).setUpClass()
cls.temp = models.Temperature.objects.create(
child=cls.child,
temperature=98.6,
time=timezone.localtime() - timezone.timedelta(days=1)
)
def test_add(self):
params = {
'child': self.child.id,
'temperature': '98.6',
'time': self.localtime_string()
}
page = self.c.post('/temperature/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Temperature entry for {} added'.format(str(self.child))
)
def test_edit(self):
params = {
'child': self.temp.child.id,
'temperature': self.temp.temperature + 2,
'time': self.localtime_string()
}
page = self.c.post('/temperature/{}/'.format(self.temp.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.temp.refresh_from_db()
self.assertEqual(self.temp.temperature, params['temperature'])
self.assertContains(
page,
'Temperature entry for {} updated'.format(str(self.temp.child))
)
def test_delete(self):
page = self.c.post('/temperature/{}/delete/'.format(self.temp.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Temperature entry deleted')
class TummyTimeFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(TummyTimeFormsTestCase, cls).setUpClass()
cls.tt = models.TummyTime.objects.create(
child=cls.child,
start=timezone.localtime() - timezone.timedelta(hours=2),
end=timezone.localtime() - timezone.timedelta(hours=1, minutes=50)
)
def test_add(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=2)
params = {
'child': self.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'milestone': ''
}
page = self.c.post('/tummy-time/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Tummy Time entry for {} added'.format(str(self.child))
)
def test_edit(self):
end = timezone.localtime()
start = end - timezone.timedelta(minutes=1, seconds=32)
params = {
'child': self.tt.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'milestone': 'Moved head!'
}
page = self.c.post('/tummy-time/{}/'.format(self.tt.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.tt.refresh_from_db()
self.assertEqual(self.tt.milestone, params['milestone'])
self.assertContains(
page,
'Tummy Time entry for {} updated'.format(str(self.tt.child))
)
def test_delete(self):
page = self.c.post('/tummy-time/{}/delete/'.format(self.tt.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Tummy Time entry deleted')
class TimerFormsTestCase(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(TimerFormsTestCase, cls).setUpClass()
cls.timer = models.Timer.objects.create(user=cls.user)
def test_add(self):
params = {
'child': self.child.id,
'name': 'Test Timer',
'start': self.localtime_string()
}
page = self.c.post('/timers/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, params['name'])
self.assertContains(page, params['child'])
def test_edit(self):
start_time = self.timer.start - timezone.timedelta(hours=1)
params = {
'name': 'New Timer Name',
'start': self.localtime_string(start_time)
}
page = self.c.post('/timers/{}/edit/'.format(self.timer.id), params,
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, params['name'])
self.timer.refresh_from_db()
self.assertEqual(
self.localtime_string(self.timer.start), params['start'])
def test_edit_stopped(self):
self.timer.stop()
params = {
'name': 'Edit stopped timer',
'start': self.localtime_string(self.timer.start),
'end': self.localtime_string(self.timer.end),
}
page = self.c.post('/timers/{}/edit/'.format(self.timer.id), params,
follow=True)
self.assertEqual(page.status_code, 200)
def test_delete_inactive(self):
models.Timer.objects.create(user=self.user)
self.assertEqual(models.Timer.objects.count(), 2)
self.timer.stop()
page = self.c.post('/timers/delete-inactive/', follow=True)
self.assertEqual(page.status_code, 200)
messages = list(page.context['messages'])
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), 'All inactive timers deleted.')
self.assertEqual(models.Timer.objects.count(), 1)
class ValidationsTestCase(FormsTestCaseBase):
def test_validate_date(self):
future = timezone.localtime() + timezone.timedelta(days=1)
params = {
'child': self.child,
'weight': '8.5',
'date': self.localdate_string(future)
}
entry = models.Weight.objects.create(**params)
page = self.c.post('/weight/{}/'.format(entry.id), params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', 'date',
'Date can not be in the future.')
def test_validate_duration(self):
end = timezone.localtime() - timezone.timedelta(minutes=10)
start = end + timezone.timedelta(minutes=5)
params = {
'child': self.child,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'milestone': ''
}
page = self.c.post('/tummy-time/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', None,
'Start time must come before end time.')
start = end - timezone.timedelta(weeks=53)
params['start'] = self.localtime_string(start)
page = self.c.post('/tummy-time/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', None, 'Duration too long.')
def test_validate_time(self):
future = timezone.localtime() + timezone.timedelta(hours=1)
params = {
'child': self.child,
'start': self.localtime_string(),
'end': self.localtime_string(future),
'milestone': ''
}
page = self.c.post('/tummy-time/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(page, 'form', 'end',
'Date/time can not be in the future.')
def test_validate_unique_period(self):
entry = models.TummyTime.objects.create(
child=self.child,
start=timezone.localtime() - timezone.timedelta(minutes=10),
end=timezone.localtime() - timezone.timedelta(minutes=5),
)
start = entry.start - timezone.timedelta(minutes=2)
end = entry.end + timezone.timedelta(minutes=2)
params = {
'child': entry.child.id,
'start': self.localtime_string(start),
'end': self.localtime_string(end),
'milestone': ''
}
page = self.c.post('/tummy-time/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertFormError(
page,
'form',
None,
'Another entry intersects the specified time period.')
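# Most of the validations above reduce to a handful of time/date rules. Below
# is a minimal, framework-free sketch of the non-database rules (a
# hypothetical helper, not part of this project; the 52-week limit is an
# assumption consistent with the 53-week failure case above):
from datetime import datetime, timedelta
def _validate_period(start, end, now=None, max_duration=timedelta(weeks=52)):
    """Return the error messages the forms above report, if any."""
    now = now or datetime.now()
    errors = []
    if end > now:
        errors.append('Date/time can not be in the future.')
    if start >= end:
        errors.append('Start time must come before end time.')
    elif end - start > max_duration:
        errors.append('Duration too long.')
    return errors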
class WeightFormsTest(FormsTestCaseBase):
@classmethod
def setUpClass(cls):
super(WeightFormsTest, cls).setUpClass()
cls.weight = models.Weight.objects.create(
child=cls.child,
weight=8,
date=timezone.localdate() - timezone.timedelta(days=2)
)
def test_add(self):
params = {
'child': self.child.id,
'weight': 8.5,
'date': self.localdate_string()
}
page = self.c.post('/weight/add/', params, follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(
page,
'Weight entry for {} added'.format(str(self.child))
)
def test_edit(self):
params = {
'child': self.weight.child.id,
'weight': self.weight.weight + 1,
'date': self.localdate_string()
}
page = self.c.post('/weight/{}/'.format(self.weight.id),
params, follow=True)
self.assertEqual(page.status_code, 200)
self.weight.refresh_from_db()
self.assertEqual(self.weight.weight, params['weight'])
self.assertContains(
page,
'Weight entry for {} updated'.format(str(self.weight.child))
)
def test_delete(self):
page = self.c.post('/weight/{}/delete/'.format(self.weight.id),
follow=True)
self.assertEqual(page.status_code, 200)
self.assertContains(page, 'Weight entry deleted')
| 35.849765
| 79
| 0.579492
|
795133ac7c51cc9421f5dbe95cf8b7c9b0e242fc
| 1,766
|
py
|
Python
|
test/test_feature_extraction.py
|
kyledecker/project_ksd15_wjl11
|
f17b62ce1affaaaec65cdb0ea16c376ae00027d0
|
[
"MIT"
] | 2
|
2017-03-29T06:06:49.000Z
|
2017-06-11T22:04:59.000Z
|
test/test_feature_extraction.py
|
kyledecker/project_ksd15_wjl11
|
f17b62ce1affaaaec65cdb0ea16c376ae00027d0
|
[
"MIT"
] | null | null | null |
test/test_feature_extraction.py
|
kyledecker/project_ksd15_wjl11
|
f17b62ce1affaaaec65cdb0ea16c376ae00027d0
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.insert(0, os.path.abspath('./src/'))
def test_calc_mode():
from feature_extraction import calc_mode
import numpy as np
hist = np.zeros(256)
hist[5] = 0.4
hist[20] = 0.5
hist[100] = 0.2
actual = calc_mode(hist)
expected = 20
assert actual == expected
def test_calc_median():
from feature_extraction import calc_median
import numpy as np
data = [ii for ii in range(0, 3)]
actual = calc_median(data)
expected = 1.
assert actual == expected
def test_calc_variance():
from feature_extraction import calc_variance
import numpy as np
data = [ii for ii in range(0, 4)]
actual = calc_variance(data, omit=(0,))
expected = np.std([1, 2, 3])
assert actual == expected
def test_extract_features():
from feature_extraction import extract_features
import numpy as np
r = 5*np.ones([3, 3])
r[1, 1] = 0
g = np.zeros([3, 3])
b = [[ii for ii in range(0, 3)] for _ in range(0, 3)]
img = np.zeros([3, 3, 3])
img[:, :, 0] = r
img[:, :, 1] = g
img[:, :, 2] = b
actual, labels = extract_features(img, 'gb', 'r', 'rg', pct_yellow=True)
expected = [np.median(g), np.median(b), np.std(r), 5, 0, 0]
assert np.array_equal(actual, expected)
actual, labels = extract_features(img, 'r', 'r', 'r', omit=0)
expected = [5, 0, 5]
assert np.array_equal(actual, expected)
def test_calc_pct_yellow():
from feature_extraction import calc_pct_yellow
import numpy as np
rgb = np.ones((10, 10, 3))
rgb[1, 1, :] = [255, 255, 40]
rgb[0, 1, :] = [0, 0, 0]
rgb[0, 0, :] = [np.nan, np.nan, np.nan]
actual = calc_pct_yellow(rgb)
expected = 100/98
assert actual == expected
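# For reference, a behaviour consistent with test_calc_mode above is simply
# taking the index of the largest histogram bin. This is a hypothetical
# sketch (the real implementation lives in src/feature_extraction.py, which
# is not shown here):
def _reference_calc_mode(hist):
    """Return the bin index with the highest count (hypothetical helper)."""
    import numpy as np
    return int(np.argmax(hist))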
| 21.277108
| 76
| 0.603624
|
7951341f439b8f790edb531621dce45c8df11cad
| 637
|
py
|
Python
|
server/ui/admin/pces/urls.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 2
|
2016-09-09T04:19:01.000Z
|
2019-02-15T20:28:13.000Z
|
server/ui/admin/pces/urls.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 67
|
2016-06-02T19:37:56.000Z
|
2018-02-22T05:23:45.000Z
|
server/ui/admin/pces/urls.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 9
|
2015-06-22T22:10:22.000Z
|
2016-04-26T15:35:45.000Z
|
from django.conf.urls import url
import views
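# Note: "import views" is the Python 2 implicit relative import; under
# Python 3 the equivalent inside this package would be "from . import views".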
urlpatterns = [
url(r'GetAll/$', views.get_all_pces),
url(r'Modules/$', views.get_pce_modules),
url(r'Module/Add/$', views.add_pce_module),
url(r'Module/Edit/$', views.edit_pce_module),
url(r'Module/Delete/$', views.delete_pce_module),
url(r'Module/State/$', views.get_module_state),
url(r'Module/Deploy/$', views.deploy_module),
url(r'Add/$', views.add_pce),
url(r'Edit/$', views.edit_pce),
url(r'Delete/$', views.delete_pce),
url(r'Workspaces/$', views.get_pce_workspaces),
url(r'Jobs/$', views.get_pce_jobs),
url(r'^$', views.main),
]
| 33.526316
| 53
| 0.660911
|
7951347e2f67a50c020dfa873b5f3d4cd20285da
| 927
|
py
|
Python
|
ioflo/base/test/_testServing.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 128
|
2015-01-14T12:26:56.000Z
|
2021-11-06T07:09:29.000Z
|
ioflo/base/test/_testServing.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2015-01-28T18:26:50.000Z
|
2020-11-19T22:08:06.000Z
|
ioflo/base/test/_testServing.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2015-01-27T23:28:31.000Z
|
2021-05-04T16:37:30.000Z
|
def TestOpenStuff():
""" """
import ioflo.base.storing as storing
storing.Store.Clear() #clear registry
s1 = ServerTask(store = storing.Store())
s2 = ServerTask(store = storing.Store())
print(s1.server.reopen())
print(s2.server.reopen())
def Test(verbose = False):
"""Module self test
"""
import ioflo.base.storing as storing
import ioflo.base.tasking as tasking
storing.Store.Clear() #clear registry
tasking.Tasker.Clear()
s = Server(store = storing.Store())
s.store.expose()
print("ready to go")
status = s.start()
while (not (status == STOPPED or status == ABORTED)):
try:
status = s.run()
except KeyboardInterrupt: #CNTL-C shutdown skedder
print(" Keyboard Interrupt manual shutdown of taskers ...")
s.server.close()
break
if __name__ == "__main__":
Test()
| 18.918367
| 74
| 0.597627
|
795134d7fb28a97c4dbc051effb6f36b34559e9d
| 1,028
|
py
|
Python
|
test/typedlist_test.py
|
DamavandiKamali/TinCanPython
|
a3fb50babcb09eb87999aea67b4a266a58b02949
|
[
"Apache-2.0"
] | 2
|
2018-08-01T18:50:36.000Z
|
2019-04-17T15:24:16.000Z
|
test/typedlist_test.py
|
DamavandiKamali/TinCanPython
|
a3fb50babcb09eb87999aea67b4a266a58b02949
|
[
"Apache-2.0"
] | 5
|
2019-03-22T23:14:19.000Z
|
2020-07-02T14:43:49.000Z
|
test/typedlist_test.py
|
DamavandiKamali/TinCanPython
|
a3fb50babcb09eb87999aea67b4a266a58b02949
|
[
"Apache-2.0"
] | 2
|
2018-07-19T09:54:24.000Z
|
2020-02-12T12:03:31.000Z
|
# Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
if __name__ == '__main__':
from main import setup_tincan_path
setup_tincan_path()
from tincan import TypedList
class TypedListTest(unittest.TestCase):
def test_Init(self):
with self.assertRaises(ValueError):
TypedList()
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TypedListTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| 31.151515
| 77
| 0.73249
|
795135b09cfbea7fc12ba338e77551aa658829b4
| 1,324
|
py
|
Python
|
tensorflow_datasets/text/pg19_test.py
|
jvishnuvardhan/datasets
|
b8e38187058f1221e67c6291b3f29385ebb35fa2
|
[
"Apache-2.0"
] | 3,380
|
2018-09-11T05:03:31.000Z
|
2022-03-31T20:04:57.000Z
|
tensorflow_datasets/text/pg19_test.py
|
jvishnuvardhan/datasets
|
b8e38187058f1221e67c6291b3f29385ebb35fa2
|
[
"Apache-2.0"
] | 3,142
|
2018-09-14T10:09:00.000Z
|
2022-03-31T18:25:44.000Z
|
tensorflow_datasets/text/pg19_test.py
|
jvishnuvardhan/datasets
|
b8e38187058f1221e67c6291b3f29385ebb35fa2
|
[
"Apache-2.0"
] | 1,438
|
2018-09-16T13:58:22.000Z
|
2022-03-31T11:19:54.000Z
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for PG-19 dataset module."""
import os
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text import pg19
class Pg19Test(tfds.testing.DatasetBuilderTestCase):
@classmethod
def setUpClass(cls):
super(Pg19Test, cls).setUpClass()
pg19._DATA_DIR = os.path.join(
os.path.normpath(os.path.dirname(__file__) + "/../"),
"testing",
"test_data",
"fake_examples",
"pg19",
)
DATASET_CLASS = pg19.Pg19
SPLITS = {
"train": 3, # Number of fake train example
"test": 1, # Number of fake test example
"validation": 1 # Number of fake validation example
}
if __name__ == "__main__":
tfds.testing.test_main()
| 28.170213
| 74
| 0.702417
|
795137205fa5e1e34c71b16dce76bf7de63de1c7
| 886
|
py
|
Python
|
unreleased/azure-mgmt-intune/azure/mgmt/intune/models/application_paged.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:16:34.000Z
|
2018-11-09T06:16:34.000Z
|
unreleased/azure-mgmt-intune/azure/mgmt/intune/models/application_paged.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | null | null | null |
unreleased/azure-mgmt-intune/azure/mgmt/intune/models/application_paged.py
|
azuresdkci1x/azure-sdk-for-python-1722
|
e08fa6606543ce0f35b93133dbb78490f8e6bcc9
|
[
"MIT"
] | 1
|
2018-11-09T06:17:41.000Z
|
2018-11-09T06:17:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ApplicationPaged(Paged):
"""
    A paging container for iterating over a list of Application objects
"""
_attribute_map = {
'next_link': {'key': 'nextlink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Application]'}
}
def __init__(self, *args, **kwargs):
super(ApplicationPaged, self).__init__(*args, **kwargs)
| 31.642857
| 76
| 0.563205
|
795138ea891126576d3124ad0c9655f5fe1b33cd
| 1,764
|
py
|
Python
|
nipype/interfaces/camino/tests/test_auto_SFLUTGen.py
|
lucindasisk/nipype
|
d037ed577615feb11cab20d66a06a8ecd63e5034
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/camino/tests/test_auto_SFLUTGen.py
|
lucindasisk/nipype
|
d037ed577615feb11cab20d66a06a8ecd63e5034
|
[
"Apache-2.0"
] | 2
|
2018-04-17T19:18:16.000Z
|
2020-03-04T22:05:02.000Z
|
nipype/interfaces/camino/tests/test_auto_SFLUTGen.py
|
oesteban/nipype
|
c14f24eba1da08711bbb894e049ee858ed740096
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..calib import SFLUTGen
def test_SFLUTGen_inputs():
input_map = dict(
args=dict(argstr='%s', ),
binincsize=dict(
argstr='-binincsize %d',
units='NA',
),
directmap=dict(argstr='-directmap', ),
environ=dict(
nohash=True,
usedefault=True,
),
in_file=dict(
argstr='-inputfile %s',
extensions=None,
mandatory=True,
),
info_file=dict(
argstr='-infofile %s',
extensions=None,
mandatory=True,
),
minvectsperbin=dict(
argstr='-minvectsperbin %d',
units='NA',
),
order=dict(
argstr='-order %d',
units='NA',
),
out_file=dict(
argstr='> %s',
genfile=True,
position=-1,
),
outputstem=dict(
argstr='-outputstem %s',
usedefault=True,
),
pdf=dict(
argstr='-pdf %s',
usedefault=True,
),
)
inputs = SFLUTGen.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_SFLUTGen_outputs():
output_map = dict(
lut_one_fibre=dict(extensions=None, ),
lut_two_fibres=dict(extensions=None, ),
)
outputs = SFLUTGen.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 27.138462
| 67
| 0.520975
|
795139a349552a4b3b1dae5abc3ac265e6a518fc
| 1,833
|
py
|
Python
|
python/281_zigzag_iterator.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | null | null | null |
python/281_zigzag_iterator.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | null | null | null |
python/281_zigzag_iterator.py
|
ufjfeng/leetcode-soln
|
cbf2db0d81d5ef98f48c8d1df486559f89142bfd
|
[
"MIT"
] | 1
|
2019-11-22T19:28:11.000Z
|
2019-11-22T19:28:11.000Z
|
"""
Given two 1d vectors, implement an iterator to return their elements
alternately.
For example, given two 1d vectors:
v1 = [1, 2]
v2 = [3, 4, 5, 6]
By calling next repeatedly until hasNext returns false, the order of
elements returned by next should be: [1, 3, 2, 4, 5, 6].
Follow up: What if you are given k 1d vectors? How well can your code be
extended to such cases?
Clarification for the follow up question - Update (2015-09-18):
The "Zigzag" order is not clearly defined and is ambiguous for k > 2
cases. If "Zigzag" does not look right to you, replace "Zigzag" with
"Cyclic". For example, given the following input:
[1,2,3]
[4,5,6,7]
[8,9]
It should return [1,4,8,2,5,9,3,6,7].
"""
class ZigzagIterator(object):
def __init__(self, v1, v2):
"""
Initialize your data structure here.
:type v1: List[int]
:type v2: List[int]
"""
self.v1 = [c for c in reversed(v1)]
self.v2 = [c for c in reversed(v2)]
self.loc = 0
def next(self):
"""
:rtype: int
"""
if self.loc:
self.loc = not self.loc
if len(self.v2) > 0:
return self.v2.pop()
else:
return self.next()
else:
self.loc = not self.loc
if len(self.v1) > 0:
return self.v1.pop()
else:
return self.next()
def hasNext(self):
"""
:rtype: bool
"""
return len(self.v1) > 0 or len(self.v2) > 0
a = ZigzagIterator([1,2],[3,4,5,6])
while a.hasNext():
print(a.next())
# Your ZigzagIterator object will be instantiated and called as such:
# i, v = ZigzagIterator(v1, v2), []
# while i.hasNext(): v.append(i.next())
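# Addressing the follow-up in the docstring above: for k vectors the same
# "cyclic" order falls out of a queue of (vector, index) pairs. A minimal
# sketch, not part of the original solution:
from collections import deque
class CyclicIterator(object):
    def __init__(self, vectors):
        # Keep (list, next_index) pairs for every non-empty input vector.
        self.queue = deque((v, 0) for v in vectors if v)
    def next(self):
        v, i = self.queue.popleft()
        if i + 1 < len(v):
            self.queue.append((v, i + 1))  # rotate back if elements remain
        return v[i]
    def hasNext(self):
        return len(self.queue) > 0
# Reproduces the k=3 example from the docstring:
# i, v = CyclicIterator([[1, 2, 3], [4, 5, 6, 7], [8, 9]]), []
# while i.hasNext(): v.append(i.next())
# v == [1, 4, 8, 2, 5, 9, 3, 6, 7]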
| 26.955882
| 72
| 0.546645
|
79513af29d2839e0633dd44a8917cb93a1a6ea0e
| 49,467
|
py
|
Python
|
tests/api/v2_1_1/test_discovery.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
tests/api/v2_1_1/test_discovery.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
tests/api/v2_1_1/test_discovery.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNACenterAPI discovery API fixtures and tests.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '2.1.1', reason='version does not match')
def is_valid_get_count_of_all_discovery_jobs(json_schema_validate, obj):
json_schema_validate('jsd_069d9823451b892d_v2_1_1').validate(obj)
return True
def get_count_of_all_discovery_jobs(api):
endpoint_result = api.discovery.get_count_of_all_discovery_jobs(
)
return endpoint_result
@pytest.mark.discovery
def test_get_count_of_all_discovery_jobs(api, validator):
assert is_valid_get_count_of_all_discovery_jobs(
validator,
get_count_of_all_discovery_jobs(api)
)
def get_count_of_all_discovery_jobs_default(api):
endpoint_result = api.discovery.get_count_of_all_discovery_jobs(
)
return endpoint_result
@pytest.mark.discovery
def test_get_count_of_all_discovery_jobs_default(api, validator):
try:
assert is_valid_get_count_of_all_discovery_jobs(
validator,
get_count_of_all_discovery_jobs_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
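# The rest of this module repeats the same auto-generated pattern for every
# discovery endpoint: an is_valid_* helper that validates the response against
# its JSON schema, a wrapper that calls the SDK method with placeholder
# arguments, a pytest test tying the two together, and a *_default variant
# that passes None/minimal arguments and tolerates a TypeError matching
# "but instead we received None" when the API returns nothing.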
def is_valid_create_netconf_credentials(json_schema_validate, obj):
json_schema_validate('jsd_17929bc7465bb564_v2_1_1').validate(obj)
return True
def create_netconf_credentials(api):
endpoint_result = api.discovery.create_netconf_credentials(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'netconfPort': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_netconf_credentials(api, validator):
assert is_valid_create_netconf_credentials(
validator,
create_netconf_credentials(api)
)
def create_netconf_credentials_default(api):
endpoint_result = api.discovery.create_netconf_credentials(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_netconf_credentials_default(api, validator):
try:
assert is_valid_create_netconf_credentials(
validator,
create_netconf_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_snmp_write_community(json_schema_validate, obj):
json_schema_validate('jsd_10b06a6a4f7bb3cb_v2_1_1').validate(obj)
return True
def update_snmp_write_community(api):
endpoint_result = api.discovery.update_snmp_write_community(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
payload=None,
writeCommunity='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmp_write_community(api, validator):
assert is_valid_update_snmp_write_community(
validator,
update_snmp_write_community(api)
)
def update_snmp_write_community_default(api):
endpoint_result = api.discovery.update_snmp_write_community(
active_validation=True,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
payload=None,
writeCommunity=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmp_write_community_default(api, validator):
try:
assert is_valid_update_snmp_write_community(
validator,
update_snmp_write_community_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_snmpv3_credentials(json_schema_validate, obj):
json_schema_validate('jsd_1da5ebdd434aacfe_v2_1_1').validate(obj)
return True
def update_snmpv3_credentials(api):
endpoint_result = api.discovery.update_snmpv3_credentials(
active_validation=True,
authPassword='string',
authType='SHA',
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
payload=None,
privacyPassword='string',
privacyType='DES',
snmpMode='AUTHPRIV',
username='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmpv3_credentials(api, validator):
assert is_valid_update_snmpv3_credentials(
validator,
update_snmpv3_credentials(api)
)
def update_snmpv3_credentials_default(api):
endpoint_result = api.discovery.update_snmpv3_credentials(
active_validation=True,
authPassword=None,
authType=None,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
payload=None,
privacyPassword=None,
privacyType=None,
snmpMode=None,
username=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmpv3_credentials_default(api, validator):
try:
assert is_valid_update_snmpv3_credentials(
validator,
update_snmpv3_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_discoveries_by_range(json_schema_validate, obj):
json_schema_validate('jsd_33b799d04d0a8907_v2_1_1').validate(obj)
return True
def get_discoveries_by_range(api):
endpoint_result = api.discovery.get_discoveries_by_range(
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.discovery
def test_get_discoveries_by_range(api, validator):
assert is_valid_get_discoveries_by_range(
validator,
get_discoveries_by_range(api)
)
def get_discoveries_by_range_default(api):
endpoint_result = api.discovery.get_discoveries_by_range(
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.discovery
def test_get_discoveries_by_range_default(api, validator):
try:
assert is_valid_get_discoveries_by_range(
validator,
get_discoveries_by_range_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_network_devices_from_discovery(json_schema_validate, obj):
json_schema_validate('jsd_3d9b99c343398a27_v2_1_1').validate(obj)
return True
def get_network_devices_from_discovery(api):
endpoint_result = api.discovery.get_network_devices_from_discovery(
cli_status='value1,value2',
http_status='value1,value2',
id='string',
ip_address='value1,value2',
netconf_status='value1,value2',
ping_status='value1,value2',
snmp_status='value1,value2',
sort_by='string',
sort_order='string',
task_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_network_devices_from_discovery(api, validator):
assert is_valid_get_network_devices_from_discovery(
validator,
get_network_devices_from_discovery(api)
)
def get_network_devices_from_discovery_default(api):
endpoint_result = api.discovery.get_network_devices_from_discovery(
cli_status=None,
http_status=None,
id='string',
ip_address=None,
netconf_status=None,
ping_status=None,
snmp_status=None,
sort_by=None,
sort_order=None,
task_id=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_network_devices_from_discovery_default(api, validator):
try:
assert is_valid_get_network_devices_from_discovery(
validator,
get_network_devices_from_discovery_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_snmp_properties(json_schema_validate, obj):
json_schema_validate('jsd_44974ba5435a801d_v2_1_1').validate(obj)
return True
def get_snmp_properties(api):
endpoint_result = api.discovery.get_snmp_properties(
)
return endpoint_result
@pytest.mark.discovery
def test_get_snmp_properties(api, validator):
assert is_valid_get_snmp_properties(
validator,
get_snmp_properties(api)
)
def get_snmp_properties_default(api):
endpoint_result = api.discovery.get_snmp_properties(
)
return endpoint_result
@pytest.mark.discovery
def test_get_snmp_properties_default(api, validator):
try:
assert is_valid_get_snmp_properties(
validator,
get_snmp_properties_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_snmp_read_community(json_schema_validate, obj):
json_schema_validate('jsd_47a1b84b4e1b8044_v2_1_1').validate(obj)
return True
def update_snmp_read_community(api):
endpoint_result = api.discovery.update_snmp_read_community(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
payload=None,
readCommunity='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmp_read_community(api, validator):
assert is_valid_update_snmp_read_community(
validator,
update_snmp_read_community(api)
)
def update_snmp_read_community_default(api):
endpoint_result = api.discovery.update_snmp_read_community(
active_validation=True,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
payload=None,
readCommunity=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_snmp_read_community_default(api, validator):
try:
assert is_valid_update_snmp_read_community(
validator,
update_snmp_read_community_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_http_write_credentials(json_schema_validate, obj):
json_schema_validate('jsd_4d9ca8e2431a8a24_v2_1_1').validate(obj)
return True
def create_http_write_credentials(api):
endpoint_result = api.discovery.create_http_write_credentials(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_http_write_credentials(api, validator):
assert is_valid_create_http_write_credentials(
validator,
create_http_write_credentials(api)
)
def create_http_write_credentials_default(api):
endpoint_result = api.discovery.create_http_write_credentials(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_http_write_credentials_default(api, validator):
try:
assert is_valid_create_http_write_credentials(
validator,
create_http_write_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_discovery_by_id(json_schema_validate, obj):
json_schema_validate('jsd_4c8cab5f435a80f4_v2_1_1').validate(obj)
return True
def delete_discovery_by_id(api):
endpoint_result = api.discovery.delete_discovery_by_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_delete_discovery_by_id(api, validator):
assert is_valid_delete_discovery_by_id(
validator,
delete_discovery_by_id(api)
)
def delete_discovery_by_id_default(api):
endpoint_result = api.discovery.delete_discovery_by_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_delete_discovery_by_id_default(api, validator):
try:
assert is_valid_delete_discovery_by_id(
validator,
delete_discovery_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_start_discovery(json_schema_validate, obj):
json_schema_validate('jsd_55b439dc4239b140_v2_1_1').validate(obj)
return True
def start_discovery(api):
endpoint_result = api.discovery.start_discovery(
active_validation=True,
cdpLevel=0,
discoveryType='string',
enablePasswordList=['string'],
globalCredentialIdList=['string'],
httpReadCredential={'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'},
httpWriteCredential={'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'},
ipAddressList='string',
ipFilterList=['string'],
lldpLevel=0,
name='string',
netconfPort='string',
noAddNewDevice=True,
parentDiscoveryId='string',
passwordList=['string'],
payload=None,
preferredMgmtIPMethod='string',
protocolOrder='string',
reDiscovery=True,
retry=0,
snmpAuthPassphrase='string',
snmpAuthProtocol='string',
snmpMode='string',
snmpPrivPassphrase='string',
snmpPrivProtocol='string',
snmpROCommunity='string',
snmpROCommunityDesc='string',
snmpRWCommunity='string',
snmpRWCommunityDesc='string',
snmpUserName='string',
snmpVersion='string',
timeout=0,
updateMgmtIp=True,
userNameList=['string']
)
return endpoint_result
@pytest.mark.discovery
def test_start_discovery(api, validator):
assert is_valid_start_discovery(
validator,
start_discovery(api)
)
def start_discovery_default(api):
endpoint_result = api.discovery.start_discovery(
active_validation=True,
cdpLevel=None,
discoveryType=None,
enablePasswordList=None,
globalCredentialIdList=None,
httpReadCredential=None,
httpWriteCredential=None,
ipAddressList=None,
ipFilterList=None,
lldpLevel=None,
name=None,
netconfPort=None,
noAddNewDevice=None,
parentDiscoveryId=None,
passwordList=None,
payload=None,
preferredMgmtIPMethod=None,
protocolOrder=None,
reDiscovery=None,
retry=None,
snmpAuthPassphrase=None,
snmpAuthProtocol=None,
snmpMode=None,
snmpPrivPassphrase=None,
snmpPrivProtocol=None,
snmpROCommunity=None,
snmpROCommunityDesc=None,
snmpRWCommunity=None,
snmpRWCommunityDesc=None,
snmpUserName=None,
snmpVersion=None,
timeout=None,
updateMgmtIp=None,
userNameList=None
)
return endpoint_result
@pytest.mark.discovery
def test_start_discovery_default(api, validator):
try:
assert is_valid_start_discovery(
validator,
start_discovery_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_credential_sub_type_by_credential_id(json_schema_validate, obj):
json_schema_validate('jsd_58a3699e489b9529_v2_1_1').validate(obj)
return True
def get_credential_sub_type_by_credential_id(api):
endpoint_result = api.discovery.get_credential_sub_type_by_credential_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_credential_sub_type_by_credential_id(api, validator):
assert is_valid_get_credential_sub_type_by_credential_id(
validator,
get_credential_sub_type_by_credential_id(api)
)
def get_credential_sub_type_by_credential_id_default(api):
endpoint_result = api.discovery.get_credential_sub_type_by_credential_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_credential_sub_type_by_credential_id_default(api, validator):
try:
assert is_valid_get_credential_sub_type_by_credential_id(
validator,
get_credential_sub_type_by_credential_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_snmp_write_community(json_schema_validate, obj):
json_schema_validate('jsd_6bacb8d14639bdc7_v2_1_1').validate(obj)
return True
def create_snmp_write_community(api):
endpoint_result = api.discovery.create_snmp_write_community(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'writeCommunity': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmp_write_community(api, validator):
assert is_valid_create_snmp_write_community(
validator,
create_snmp_write_community(api)
)
def create_snmp_write_community_default(api):
endpoint_result = api.discovery.create_snmp_write_community(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmp_write_community_default(api, validator):
try:
assert is_valid_create_snmp_write_community(
validator,
create_snmp_write_community_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_discovery_by_id(json_schema_validate, obj):
json_schema_validate('jsd_63bb88b74f59aa17_v2_1_1').validate(obj)
return True
def get_discovery_by_id(api):
endpoint_result = api.discovery.get_discovery_by_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovery_by_id(api, validator):
assert is_valid_get_discovery_by_id(
validator,
get_discovery_by_id(api)
)
def get_discovery_by_id_default(api):
endpoint_result = api.discovery.get_discovery_by_id(
id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovery_by_id_default(api, validator):
try:
assert is_valid_get_discovery_by_id(
validator,
get_discovery_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_global_credentials(json_schema_validate, obj):
json_schema_validate('jsd_709fda3c42b8877a_v2_1_1').validate(obj)
return True
def update_global_credentials(api):
endpoint_result = api.discovery.update_global_credentials(
active_validation=True,
global_credential_id='string',
payload=None,
siteUuids=['string']
)
return endpoint_result
@pytest.mark.discovery
def test_update_global_credentials(api, validator):
assert is_valid_update_global_credentials(
validator,
update_global_credentials(api)
)
def update_global_credentials_default(api):
endpoint_result = api.discovery.update_global_credentials(
active_validation=True,
global_credential_id='string',
payload=None,
siteUuids=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_global_credentials_default(api, validator):
try:
assert is_valid_update_global_credentials(
validator,
update_global_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_snmp_read_community(json_schema_validate, obj):
json_schema_validate('jsd_7aa3da9d4e098ef2_v2_1_1').validate(obj)
return True
def create_snmp_read_community(api):
endpoint_result = api.discovery.create_snmp_read_community(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'readCommunity': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmp_read_community(api, validator):
assert is_valid_create_snmp_read_community(
validator,
create_snmp_read_community(api)
)
def create_snmp_read_community_default(api):
endpoint_result = api.discovery.create_snmp_read_community(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmp_read_community_default(api, validator):
try:
assert is_valid_create_snmp_read_community(
validator,
create_snmp_read_community_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_snmpv3_credentials(json_schema_validate, obj):
json_schema_validate('jsd_979688084b7ba60d_v2_1_1').validate(obj)
return True
def create_snmpv3_credentials(api):
endpoint_result = api.discovery.create_snmpv3_credentials(
active_validation=True,
payload=[{'authPassword': 'string', 'authType': 'SHA', 'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'privacyPassword': 'string', 'privacyType': 'DES', 'snmpMode': 'AUTHPRIV', 'username': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmpv3_credentials(api, validator):
assert is_valid_create_snmpv3_credentials(
validator,
create_snmpv3_credentials(api)
)
def create_snmpv3_credentials_default(api):
endpoint_result = api.discovery.create_snmpv3_credentials(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_snmpv3_credentials_default(api, validator):
try:
assert is_valid_create_snmpv3_credentials(
validator,
create_snmpv3_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_http_read_credential(json_schema_validate, obj):
json_schema_validate('jsd_89b36b4649999d81_v2_1_1').validate(obj)
return True
def update_http_read_credential(api):
endpoint_result = api.discovery.update_http_read_credential(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
password='string',
payload=None,
port=0,
secure=True,
username='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_http_read_credential(api, validator):
assert is_valid_update_http_read_credential(
validator,
update_http_read_credential(api)
)
def update_http_read_credential_default(api):
endpoint_result = api.discovery.update_http_read_credential(
active_validation=True,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
password=None,
payload=None,
port=None,
secure=None,
username=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_http_read_credential_default(api, validator):
try:
assert is_valid_update_http_read_credential(
validator,
update_http_read_credential_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_list_of_discoveries_by_discovery_id(json_schema_validate, obj):
json_schema_validate('jsd_99872a134d0a9fb4_v2_1_1').validate(obj)
return True
def get_list_of_discoveries_by_discovery_id(api):
endpoint_result = api.discovery.get_list_of_discoveries_by_discovery_id(
id='string',
ip_address='string',
limit=0,
offset=0
)
return endpoint_result
@pytest.mark.discovery
def test_get_list_of_discoveries_by_discovery_id(api, validator):
assert is_valid_get_list_of_discoveries_by_discovery_id(
validator,
get_list_of_discoveries_by_discovery_id(api)
)
def get_list_of_discoveries_by_discovery_id_default(api):
endpoint_result = api.discovery.get_list_of_discoveries_by_discovery_id(
id='string',
ip_address=None,
limit=None,
offset=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_list_of_discoveries_by_discovery_id_default(api, validator):
try:
assert is_valid_get_list_of_discoveries_by_discovery_id(
validator,
get_list_of_discoveries_by_discovery_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_cli_credentials(json_schema_validate, obj):
json_schema_validate('jsd_948ea8194348bc0b_v2_1_1').validate(obj)
return True
def create_cli_credentials(api):
endpoint_result = api.discovery.create_cli_credentials(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'enablePassword': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'username': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_cli_credentials(api, validator):
assert is_valid_create_cli_credentials(
validator,
create_cli_credentials(api)
)
def create_cli_credentials_default(api):
endpoint_result = api.discovery.create_cli_credentials(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_cli_credentials_default(api, validator):
try:
assert is_valid_create_cli_credentials(
validator,
create_cli_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_update_snmp_properties(json_schema_validate, obj):
json_schema_validate('jsd_a5ac99774c6bb541_v2_1_1').validate(obj)
return True
def create_update_snmp_properties(api):
endpoint_result = api.discovery.create_update_snmp_properties(
active_validation=True,
payload=[{'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'intValue': 0, 'systemPropertyName': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_update_snmp_properties(api, validator):
assert is_valid_create_update_snmp_properties(
validator,
create_update_snmp_properties(api)
)
def create_update_snmp_properties_default(api):
endpoint_result = api.discovery.create_update_snmp_properties(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_update_snmp_properties_default(api, validator):
try:
assert is_valid_create_update_snmp_properties(
validator,
create_update_snmp_properties_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_devices_discovered_by_id(json_schema_validate, obj):
json_schema_validate('jsd_a6965b454c9a8663_v2_1_1').validate(obj)
return True
def get_devices_discovered_by_id(api):
endpoint_result = api.discovery.get_devices_discovered_by_id(
id='string',
task_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_devices_discovered_by_id(api, validator):
assert is_valid_get_devices_discovered_by_id(
validator,
get_devices_discovered_by_id(api)
)
def get_devices_discovered_by_id_default(api):
endpoint_result = api.discovery.get_devices_discovered_by_id(
id='string',
task_id=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_devices_discovered_by_id_default(api, validator):
try:
assert is_valid_get_devices_discovered_by_id(
validator,
get_devices_discovered_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_updates_discovery_by_id(json_schema_validate, obj):
json_schema_validate('jsd_9788b8fc4418831d_v2_1_1').validate(obj)
return True
def updates_discovery_by_id(api):
endpoint_result = api.discovery.updates_discovery_by_id(
active_validation=True,
attributeInfo={},
cdpLevel=0,
deviceIds='string',
discoveryCondition='string',
discoveryStatus='string',
discoveryType='string',
enablePasswordList='string',
globalCredentialIdList=['string'],
httpReadCredential={'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'},
httpWriteCredential={'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'},
id='string',
ipAddressList='string',
ipFilterList='string',
isAutoCdp=True,
lldpLevel=0,
name='string',
netconfPort='string',
numDevices=0,
parentDiscoveryId='string',
passwordList='string',
payload=None,
preferredMgmtIPMethod='string',
protocolOrder='string',
retryCount=0,
snmpAuthPassphrase='string',
snmpAuthProtocol='string',
snmpMode='string',
snmpPrivPassphrase='string',
snmpPrivProtocol='string',
snmpRoCommunity='string',
snmpRoCommunityDesc='string',
snmpRwCommunity='string',
snmpRwCommunityDesc='string',
snmpUserName='string',
timeOut=0,
updateMgmtIp=True,
userNameList='string'
)
return endpoint_result
@pytest.mark.discovery
def test_updates_discovery_by_id(api, validator):
assert is_valid_updates_discovery_by_id(
validator,
updates_discovery_by_id(api)
)
def updates_discovery_by_id_default(api):
endpoint_result = api.discovery.updates_discovery_by_id(
active_validation=True,
attributeInfo=None,
cdpLevel=None,
deviceIds=None,
discoveryCondition=None,
discoveryStatus=None,
discoveryType=None,
enablePasswordList=None,
globalCredentialIdList=None,
httpReadCredential=None,
httpWriteCredential=None,
id=None,
ipAddressList=None,
ipFilterList=None,
isAutoCdp=None,
lldpLevel=None,
name=None,
netconfPort=None,
numDevices=None,
parentDiscoveryId=None,
passwordList=None,
payload=None,
preferredMgmtIPMethod=None,
protocolOrder=None,
retryCount=None,
snmpAuthPassphrase=None,
snmpAuthProtocol=None,
snmpMode=None,
snmpPrivPassphrase=None,
snmpPrivProtocol=None,
snmpRoCommunity=None,
snmpRoCommunityDesc=None,
snmpRwCommunity=None,
snmpRwCommunityDesc=None,
snmpUserName=None,
timeOut=None,
updateMgmtIp=None,
userNameList=None
)
return endpoint_result
@pytest.mark.discovery
def test_updates_discovery_by_id_default(api, validator):
try:
assert is_valid_updates_discovery_by_id(
validator,
updates_discovery_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_discovery_jobs_by_ip(json_schema_validate, obj):
json_schema_validate('jsd_a4967be64dfaaa1a_v2_1_1').validate(obj)
return True
def get_discovery_jobs_by_ip(api):
endpoint_result = api.discovery.get_discovery_jobs_by_ip(
ip_address='string',
limit=0,
name='string',
offset=0
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovery_jobs_by_ip(api, validator):
assert is_valid_get_discovery_jobs_by_ip(
validator,
get_discovery_jobs_by_ip(api)
)
def get_discovery_jobs_by_ip_default(api):
endpoint_result = api.discovery.get_discovery_jobs_by_ip(
ip_address=None,
limit=None,
name=None,
offset=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovery_jobs_by_ip_default(api, validator):
try:
assert is_valid_get_discovery_jobs_by_ip(
validator,
get_discovery_jobs_by_ip_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_discovered_devices_by_range(json_schema_validate, obj):
json_schema_validate('jsd_a6b798ab4acaa34e_v2_1_1').validate(obj)
return True
def get_discovered_devices_by_range(api):
endpoint_result = api.discovery.get_discovered_devices_by_range(
id='string',
records_to_return=0,
start_index=0,
task_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovered_devices_by_range(api, validator):
assert is_valid_get_discovered_devices_by_range(
validator,
get_discovered_devices_by_range(api)
)
def get_discovered_devices_by_range_default(api):
endpoint_result = api.discovery.get_discovered_devices_by_range(
id='string',
records_to_return=0,
start_index=0,
task_id=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovered_devices_by_range_default(api, validator):
try:
assert is_valid_get_discovered_devices_by_range(
validator,
get_discovered_devices_by_range_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_http_read_credentials(json_schema_validate, obj):
json_schema_validate('jsd_bf859ac64a0ba19c_v2_1_1').validate(obj)
return True
def create_http_read_credentials(api):
endpoint_result = api.discovery.create_http_read_credentials(
active_validation=True,
payload=[{'comments': 'string', 'credentialType': 'GLOBAL', 'description': 'string', 'id': 'string', 'instanceTenantId': 'string', 'instanceUuid': 'string', 'password': 'string', 'port': 0, 'secure': True, 'username': 'string'}]
)
return endpoint_result
@pytest.mark.discovery
def test_create_http_read_credentials(api, validator):
assert is_valid_create_http_read_credentials(
validator,
create_http_read_credentials(api)
)
def create_http_read_credentials_default(api):
endpoint_result = api.discovery.create_http_read_credentials(
active_validation=True,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_create_http_read_credentials_default(api, validator):
try:
assert is_valid_create_http_read_credentials(
validator,
create_http_read_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_http_write_credentials(json_schema_validate, obj):
json_schema_validate('jsd_b68a6bd8473a9a25_v2_1_1').validate(obj)
return True
def update_http_write_credentials(api):
endpoint_result = api.discovery.update_http_write_credentials(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
password='string',
payload=None,
port=0,
secure=True,
username='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_http_write_credentials(api, validator):
assert is_valid_update_http_write_credentials(
validator,
update_http_write_credentials(api)
)
def update_http_write_credentials_default(api):
endpoint_result = api.discovery.update_http_write_credentials(
active_validation=True,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
password=None,
payload=None,
port=None,
secure=None,
username=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_http_write_credentials_default(api, validator):
try:
assert is_valid_update_http_write_credentials(
validator,
update_http_write_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_netconf_credentials(json_schema_validate, obj):
json_schema_validate('jsd_c5acd9fa4c1a8abc_v2_1_1').validate(obj)
return True
def update_netconf_credentials(api):
endpoint_result = api.discovery.update_netconf_credentials(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
netconfPort='string',
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_netconf_credentials(api, validator):
assert is_valid_update_netconf_credentials(
validator,
update_netconf_credentials(api)
)
def update_netconf_credentials_default(api):
endpoint_result = api.discovery.update_netconf_credentials(
active_validation=True,
comments=None,
credentialType=None,
description=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
netconfPort=None,
payload=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_netconf_credentials_default(api, validator):
try:
assert is_valid_update_netconf_credentials(
validator,
update_netconf_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_all_discovery(json_schema_validate, obj):
json_schema_validate('jsd_db8e09234a988bab_v2_1_1').validate(obj)
return True
def delete_all_discovery(api):
endpoint_result = api.discovery.delete_all_discovery(
)
return endpoint_result
@pytest.mark.discovery
def test_delete_all_discovery(api, validator):
assert is_valid_delete_all_discovery(
validator,
delete_all_discovery(api)
)
def delete_all_discovery_default(api):
endpoint_result = api.discovery.delete_all_discovery(
)
return endpoint_result
@pytest.mark.discovery
def test_delete_all_discovery_default(api, validator):
try:
assert is_valid_delete_all_discovery(
validator,
delete_all_discovery_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_discovery_by_specified_range(json_schema_validate, obj):
json_schema_validate('jsd_c1ba9a424c08a01b_v2_1_1').validate(obj)
return True
def delete_discovery_by_specified_range(api):
endpoint_result = api.discovery.delete_discovery_by_specified_range(
records_to_delete=0,
start_index=0
)
return endpoint_result
@pytest.mark.discovery
def test_delete_discovery_by_specified_range(api, validator):
assert is_valid_delete_discovery_by_specified_range(
validator,
delete_discovery_by_specified_range(api)
)
def delete_discovery_by_specified_range_default(api):
endpoint_result = api.discovery.delete_discovery_by_specified_range(
records_to_delete=0,
start_index=0
)
return endpoint_result
@pytest.mark.discovery
def test_delete_discovery_by_specified_range_default(api, validator):
try:
assert is_valid_delete_discovery_by_specified_range(
validator,
delete_discovery_by_specified_range_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_global_credentials_by_id(json_schema_validate, obj):
json_schema_validate('jsd_f5ac590c4ca9975a_v2_1_1').validate(obj)
return True
def delete_global_credentials_by_id(api):
endpoint_result = api.discovery.delete_global_credentials_by_id(
global_credential_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_delete_global_credentials_by_id(api, validator):
assert is_valid_delete_global_credentials_by_id(
validator,
delete_global_credentials_by_id(api)
)
def delete_global_credentials_by_id_default(api):
endpoint_result = api.discovery.delete_global_credentials_by_id(
global_credential_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_delete_global_credentials_by_id_default(api, validator):
try:
assert is_valid_delete_global_credentials_by_id(
validator,
delete_global_credentials_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_cli_credentials(json_schema_validate, obj):
json_schema_validate('jsd_fba0d80747eb82e8_v2_1_1').validate(obj)
return True
def update_cli_credentials(api):
endpoint_result = api.discovery.update_cli_credentials(
active_validation=True,
comments='string',
credentialType='GLOBAL',
description='string',
enablePassword='string',
id='string',
instanceTenantId='string',
instanceUuid='string',
password='string',
payload=None,
username='string'
)
return endpoint_result
@pytest.mark.discovery
def test_update_cli_credentials(api, validator):
assert is_valid_update_cli_credentials(
validator,
update_cli_credentials(api)
)
def update_cli_credentials_default(api):
endpoint_result = api.discovery.update_cli_credentials(
active_validation=True,
comments=None,
credentialType=None,
description=None,
enablePassword=None,
id=None,
instanceTenantId=None,
instanceUuid=None,
password=None,
payload=None,
username=None
)
return endpoint_result
@pytest.mark.discovery
def test_update_cli_credentials_default(api, validator):
try:
assert is_valid_update_cli_credentials(
validator,
update_cli_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_discovered_network_devices_by_discovery_id(json_schema_validate, obj):
json_schema_validate('jsd_f6ac994f451ba011_v2_1_1').validate(obj)
return True
def get_discovered_network_devices_by_discovery_id(api):
endpoint_result = api.discovery.get_discovered_network_devices_by_discovery_id(
id='string',
task_id='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovered_network_devices_by_discovery_id(api, validator):
assert is_valid_get_discovered_network_devices_by_discovery_id(
validator,
get_discovered_network_devices_by_discovery_id(api)
)
def get_discovered_network_devices_by_discovery_id_default(api):
endpoint_result = api.discovery.get_discovered_network_devices_by_discovery_id(
id='string',
task_id=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_discovered_network_devices_by_discovery_id_default(api, validator):
try:
assert is_valid_get_discovered_network_devices_by_discovery_id(
validator,
get_discovered_network_devices_by_discovery_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_global_credentials(json_schema_validate, obj):
json_schema_validate('jsd_ff816b8e435897eb_v2_1_1').validate(obj)
return True
def get_global_credentials(api):
endpoint_result = api.discovery.get_global_credentials(
credential_sub_type='string',
order='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.discovery
def test_get_global_credentials(api, validator):
assert is_valid_get_global_credentials(
validator,
get_global_credentials(api)
)
def get_global_credentials_default(api):
endpoint_result = api.discovery.get_global_credentials(
credential_sub_type=None,
order=None,
sort_by=None
)
return endpoint_result
@pytest.mark.discovery
def test_get_global_credentials_default(api, validator):
try:
assert is_valid_get_global_credentials(
validator,
get_global_credentials_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
| 29.064042
| 307
| 0.709887
|
79513d233eab518d227b47f1a7b12d4b8aafee9a
| 2,677
|
py
|
Python
|
sip/execution_control/processing_block_controller/tests/test_processing_block_controller.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 3
|
2016-11-08T02:27:05.000Z
|
2018-01-22T13:26:11.000Z
|
sip/execution_control/processing_block_controller/tests/test_processing_block_controller.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 87
|
2016-11-24T11:09:01.000Z
|
2021-03-25T22:23:59.000Z
|
sip/execution_control/processing_block_controller/tests/test_processing_block_controller.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 10
|
2016-05-18T09:41:36.000Z
|
2019-07-04T10:19:24.000Z
|
# coding=utf-8
"""Unit tests of the Processing Block Controller.
http://docs.celeryproject.org/en/latest/userguide/testing.html
FIXME(BMo) At the moment these tests require that the PBC has started.
It's possible that this requirement is not needed using some
Celery testing magic.
"""
import json
from os.path import dirname, join
import redis
import celery
import pytest
from sip_config_db import ConfigDb
from sip_config_db.scheduling import SchedulingBlockInstance
from sip_pbc import echo, execute_processing_block, version
from sip_pbc.release import __version__
from ._test_utils import add_workflow_definitions
DB = ConfigDb()
def test_pbc_inspect_workers():
"""Test the Celery API for inspecting the Celery workers."""
try:
celery.current_app.broker_connection().connect()
except redis.exceptions.ConnectionError:
pytest.fail('Failed to connect to broker: {}'
.format(celery.current_app.broker_connection().as_uri()),
pytrace=False)
inspect = celery.current_app.control.inspect()
workers = inspect.ping()
if workers is None:
pytest.skip('Unable to find any celery workers!')
for worker in workers:
assert not inspect.scheduled()[worker]
assert not inspect.active()[worker]
registered_tasks = inspect.registered_tasks()[worker]
assert 'sip_pbc.tasks.execute_processing_block' in registered_tasks
def test_pbc_echo():
"""Test the SIP PBC echo method."""
message = "Hello there!"
result = echo.apply(args=(message,))
assert result.get(timeout=3) == message
def test_pbc_version():
"""Test the SIP PBC version method."""
result = version.apply()
assert result.get(timeout=3) == __version__
def test_pbc_execute_workflow():
"""Test the SIP PBC execute_processing_block method."""
try:
DB.flush_db()
except ConnectionError:
        pytest.fail('Failed to connect to a configuration database (Redis) '
'instance!', pytrace=False)
data_dir = join(dirname(__file__), 'data')
add_workflow_definitions(join(data_dir, 'workflow_definitions'))
with open(join(data_dir, 'sbi_config_3.json')) as _file:
sbi_config = json.load(_file)
sbi = SchedulingBlockInstance.from_config(sbi_config)
pb_ids = sbi.processing_block_ids
assert len(pb_ids) == 1
assert pb_ids[0] == 'PB-20181116-sip-001'
assert isinstance(pb_ids[0], str)
result = execute_processing_block.apply(args=(pb_ids[0],),
kwargs=dict(log_level='WARNING'))
assert result.get(timeout=10.0) == 'completed'
| 32.646341
| 77
| 0.692193
|
79513d3e73de790c57df097eafeae26964ff282c
| 1,451
|
py
|
Python
|
setup.py
|
MihaiBojin/python-cli-project-template
|
0caef1d48e6d428f793ccbe91a30670a8c2620d9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
MihaiBojin/python-cli-project-template
|
0caef1d48e6d428f793ccbe91a30670a8c2620d9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
MihaiBojin/python-cli-project-template
|
0caef1d48e6d428f793ccbe91a30670a8c2620d9
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
import setuptools
from src.pkg.version import __version__, __title__
with open("README.md", "r", encoding='utf8') as fh:
long_description = fh.read()
# Parse requirements.txt
packages = list()
with open('requirements/prod.txt') as fh:
for line in fh.readlines():
dep = line.strip()
# skip empty lines
if len(dep) == 0:
continue
# skip comments
if dep[0] == '#':
continue
# Extract any comments
parts = dep.split('#', 1)
packages += [parts[0]]
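# Illustrative note, not part of the original template: with a hypothetical requirements
# line such as "click>=7.0  # command line framework", the loop above appends
# "click>=7.0  " to `packages` -- the inline comment is dropped, while blank lines and
# full-line comments are skipped entirely.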
setup(
name=__title__,
version=__version__,
author='John Doe',
author_email='',
packages=setuptools.find_packages(where='src'),
package_dir={
'': 'src',
},
# https://setuptools.readthedocs.io/en/latest/setuptools.html
package_data={
'': ['*.txt', '*.yml', '*.json'],
},
scripts=[],
entry_points={
'console_scripts': [
'tool = pkg.main:main',
]
},
url='http://pypi.python.org/pypi/[your-project-name-here]/',
license='LICENSE',
description='Python CLI tool template',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=packages,
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache License Version 2.0",
"Operating System :: POSIX",
],
)
| 25.910714
| 65
| 0.592006
|
79513d5c4a03ddefdd0c36260d0e50ecb46d4eef
| 10,105
|
py
|
Python
|
scripts/rev_version.py
|
skallaher/RevEng_PAJ7620
|
3165e9953f4845e050d0daad023529c2d410bc02
|
[
"MIT"
] | 8
|
2021-02-16T16:39:09.000Z
|
2021-09-17T01:30:56.000Z
|
scripts/rev_version.py
|
skallaher/RevEng_PAJ7620
|
3165e9953f4845e050d0daad023529c2d410bc02
|
[
"MIT"
] | 38
|
2020-12-21T08:13:14.000Z
|
2021-02-16T08:54:50.000Z
|
scripts/rev_version.py
|
skallaher/RevEng_PAJ7620
|
3165e9953f4845e050d0daad023529c2d410bc02
|
[
"MIT"
] | 5
|
2020-12-22T05:25:32.000Z
|
2021-09-30T15:35:21.000Z
|
#!/usr/bin/env python
"""Rev Version
Updates the version of the library in the following files
- Doxyfile # Doxygen
- library.properties # Arduino
Also updates the version of any examples which have been updated
since the last version (in <path to examples>/examples.doc)
"""
import os
import re
import subprocess
from copy import deepcopy
from typing import List
from packaging import version
import click
def get_updated_version_copy(existing_version: version.Version,
major: int = None, minor: int = None, micro: int = None) -> version.Version:
"""get_updated_version_copy
Generates a copy of a version.Version with the specified major, minor, micro version.
None values leaves the version components unchanged
inputs:
existing_version (version.Version): The existing version to base the changes on
major (int): The major version to write to the version copy. Unchanged if None
minor (int): The minor version to write to the version copy. Unchanged if None
micro (int): The micro version to write to the version copy. Unchanged if None
outputs:
version.Version: The version copy with modified values
"""
# Unroll version data
new_version = deepcopy(existing_version)
new_version_data = new_version._version # pylint: disable=W0212
new_release = list(new_version_data.release)
new_version_data = list(new_version_data)
    if major is not None:
        new_release[0] = major
    if minor is not None:
        new_release[1] = minor
    if micro is not None:
        new_release[2] = micro
new_version_data[1] = tuple(new_release)
new_version_data = version._Version(*new_version_data) # pylint: disable=W0212
new_version._version = new_version_data # pylint: disable=W0212
return new_version
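# Illustrative sketch (hypothetical values, not taken from the repository): with the
# None-checks above, get_updated_version_copy(version.parse("1.3.2"), minor=4, micro=0)
# yields a Version whose base_version is "1.4.0"; leaving an argument as None keeps that
# component unchanged.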
def rev_doxygen_project_number(current_version: version.Version, next_version: version.Version):
"""rev_doxygen_project_number
Updates any version references within Doxyfile
inputs:
current_version (version.Version): The current version of the library as it
appears in Doxyfile
next_version (version.Version): The next version of the library to update Doxyfile to
"""
with open("Doxyfile", 'r+') as doxyfile:
content = doxyfile.read()
new_content, num_replaced = re.subn(current_version.base_version, next_version.base_version,
content, flags=re.M)
if not num_replaced:
print("Failed to find {} in Doxyfile to update version.".format(current_version))
return
with open("Doxyfile", 'w') as doxyfile:
doxyfile.write(new_content)
def rev_library_properties_version(current_version: version.Version, next_version: version.Version):
"""rev_library_properties_version
Updates any version references within library.properties
inputs:
current_version (version.Version): The current version of the library as it
appears in library.properties
next_version (version.Version): The next version of the library to update library.properties to
"""
with open("library.properties", "r+") as props:
content = props.read()
new_content, num_replaced = re.subn(current_version.base_version,
next_version.base_version,
content, flags=re.M)
if not num_replaced:
print("Failed to find {} in library.properties to update version.".format(current_version))
return
with open("library.properties", "w") as props:
props.write(new_content)
def get_examples_changed(example_dir: str, previous_version: version.Version) -> List[str]:
"""get_examples_changed
Fetch all of the example files which have changed since the last version
inputs:
example_dir (str): The directory which contains the examples.
previous_version (version.Version): The previous version of the library, used
to check against for example changes.
outputs:
List[str]: The filenames (without paths) of each file in the example_dir which has changed
"""
# Get all example files changed since current version tag
changed_examples = subprocess.run(args=["git", "diff", "--name-only",
"v{}".format(previous_version.base_version), "--", example_dir],
capture_output=True,
check=True)
changed_examples_str = changed_examples.stdout.decode()
return [os.path.basename(f) for f in changed_examples_str.split('\n') if os.path.basename(f)]
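# Illustrative note: for a previous_version of 1.3.0 and example_dir "examples" (both
# hypothetical), the call above is roughly equivalent to running
#   git diff --name-only v1.3.0 -- examples
# and keeping only the base file names of the changed paths.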
def rev_example_versions(example_dir: str, previous_version: version.Version):
"""rev_example_versions
Update the version of each example iff the example file has changed
inputs:
example_dir (str): The directory which contains the examples.
previous_version (version.Version): The previous version of the library, used
to check against for example changes.
"""
with open(os.path.join(example_dir, "examples.doc"), 'r+') as ex_file:
examples = re.findall(r'/\*\*(.*?)\*/', ex_file.read(), flags=re.M | re.DOTALL)
new_examples = []
changed_files = get_examples_changed(example_dir, previous_version)
print("Changed: {}".format(changed_files))
for example in examples:
example_filename = re.search(r'example\s+(.*\..*)\n', example)[1]
if example_filename in changed_files:
ex_version = version.parse(re.search(r'version\s+({})\n'.format(version.VERSION_PATTERN),
example, flags=re.VERBOSE | re.IGNORECASE | re.M)[1])
ex_version = get_updated_version_copy(ex_version, minor=ex_version.minor + 1)
new_example = re.sub(r'(version\s+)(.*)\n', r'\g<1>{}\n'.format(ex_version.base_version),
example)
print("NEW: {}".format(example_filename))
new_examples.append(new_example)
else:
print("OLD: {}".format(example_filename))
new_examples.append(example)
output = '\n\n'.join([r'/**{}*/'.format(e) for e in new_examples])
with open(os.path.join(example_dir, "examples.doc"), 'w') as ex_file:
ex_file.write(output)
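# Illustrative note, inferred from the regexes above rather than from a documented format:
# examples.doc is assumed to hold Doxygen-style blocks that each contain an
# "example <file.ext>" line and a "version <x.y.z>" line, e.g. (hypothetical)
#   /**
#    example Blink.ino
#    version 1.2.0
#   */
# Only blocks whose example file shows up in the git diff get their minor version bumped.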
def get_current_version() -> version.Version:
"""get_current_version
Gets the current version as it exists in library.properties
outputs:
version.Version: The version as it exists in library.properties
"""
with open("library.properties", 'r') as props:
content = props.read()
existing_version = version.parse(re.search("version=({})".format(version.VERSION_PATTERN),
content, flags=re.VERBOSE | re.IGNORECASE)[1])
return existing_version
def calculate_next_version(current_version: version.Version) -> version.Version:
"""calculate_next_version
Get the next version based upon the current one by incrementing the minor version
and setting the micro version to 0
inputs:
current_version (version.Version): The current version of the library
outputs:
version.Version: The next version of the library to rev to
"""
return get_updated_version_copy(current_version, minor=current_version.minor + 1, micro=0)
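# Illustrative example: 1.3.2 -> 1.4.0 (minor bumped, micro reset to 0).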
def calculate_prev_version(current_version: version.Version) -> version.Version:
"""calculate_prev_version
Get the previous version based upon the current one by decrementing the minor version
if micro version is 0, else by decrementing the micro version
inputs:
current_version (version.Version): The current version of the library
outputs:
version.Version: The previous version of the library
"""
if current_version.micro == 0:
return get_updated_version_copy(current_version, minor=current_version.minor - 1)
return get_updated_version_copy(current_version, micro=current_version.micro - 1)
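# Illustrative examples: 1.4.0 -> 1.3.0 (micro is 0), 1.4.2 -> 1.4.1 otherwise.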
@click.command()
@click.option("--current-version", "--current",
help="Current version of the library. Fetched from library.properties by default")
@click.option("--previous-version", "--previous",
help="Previous version of the library. Defaults to current version with minor version - 1 "
"if micro version is 0 (e.g. 1.4.0 -> 1.3.0), or micro version - 1 "
"(e.g. 1.4.2 -> 1.4.1)")
@click.option("--next-version", "--next",
help="Version to rev the library to. Defaults to current version with minor version + 1 "
"and micro version set to 0 (e.g. 1.3.2 -> 1.4.0).")
@click.option("--example-dir", help="Path to the examples directory", default="examples")
def cmd(current_version, previous_version, next_version, example_dir):
"""Run the rev version
"""
# Move to project root to find files relative to
project_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
os.chdir(project_root)
if not current_version:
current_version = get_current_version()
print("Found current version {}".format(current_version.base_version))
if not previous_version:
previous_version = calculate_prev_version(current_version=current_version)
print("Found previous version {}".format(previous_version.base_version))
if not next_version:
next_version = calculate_next_version(current_version=current_version)
print("Found next version {}".format(next_version.base_version))
rev_doxygen_project_number(current_version=current_version, next_version=next_version)
rev_library_properties_version(current_version=current_version, next_version=next_version)
rev_example_versions(example_dir=example_dir, previous_version=previous_version)
if __name__ == "__main__":
cmd() # pylint: disable=E1120
| 41.929461
| 108
| 0.66858
|
79513dee3364d4b596b7a4fe17f4f9211297bbb2
| 3,283
|
py
|
Python
|
torchFI/modules/linear.py
|
bfgoldstein/torchfi
|
6a735ecf81c39bfdfa770e6a41a66e4f88ea808b
|
[
"Apache-2.0"
] | 6
|
2019-10-17T17:52:43.000Z
|
2020-08-12T09:08:45.000Z
|
torchFI/modules/linear.py
|
bfgoldstein/torchfi
|
6a735ecf81c39bfdfa770e6a41a66e4f88ea808b
|
[
"Apache-2.0"
] | 1
|
2021-05-04T02:57:38.000Z
|
2021-05-04T02:57:38.000Z
|
torchFI/modules/linear.py
|
bfgoldstein/torchfi
|
6a735ecf81c39bfdfa770e6a41a66e4f88ea808b
|
[
"Apache-2.0"
] | 1
|
2020-08-12T09:08:47.000Z
|
2020-08-12T09:08:47.000Z
|
###############################################################
# This file was created using part of Distiller project developed by:
# NervanaSystems https://github.com/NervanaSystems/distiller
#
# Changes were applied to satisfy torchFI project needs
###############################################################
import math
import numpy as np
from enum import Enum
from collections import OrderedDict
import torch
import torch.nn as nn
from util.log import *
class FILinear(nn.Linear):
def __init__(self, fi, name, in_features, out_features, weight=None, bias=None):
self.fi = fi
self.name = name
self.id = fi.addNewLayer(name, FILinear)
super(FILinear, self).__init__(in_features, out_features,
True if bias is not None else False)
if weight is not None:
self.weight = weight
if bias is not None:
self.bias = bias
def forward(self, input):
if self.fi.injectionMode and self.id == self.fi.injectionLayer:
# XNOR Operation
# True only if both injectionFeatures and injectionWeights are True or False
            # False if only one of them is True
if not(self.fi.injectionFeatures ^ self.fi.injectionWeights):
# decide where to apply injection
# weights = 0, activations = 1
# locInjection = np.random.randint(0, 2)
locInjection = np.random.binomial(1, .5)
else:
locInjection = self.fi.injectionFeatures
if locInjection:
if self.fi.log:
logWarning("\tInjecting Fault into feature data of Linear "
+ self.name + " layer.")
faulty_res = self.fi.injectFeatures(input.data)
for idx, (indices, faulty_val) in enumerate(faulty_res):
# add idx as batch index to indices array
input.data[tuple([idx] + indices)] = faulty_val
return nn.functional.linear(input, self.weight, self.bias)
else:
# create new tensor to apply FI
weightFI = self.weight.clone()
if self.fi.log:
logWarning("\tInjecting Fault into weight data of Linear "
+ self.name + " layer.")
indices, faulty_val = self.fi.inject(weightFI.data)
weightFI.data[tuple(indices)] = faulty_val
return nn.functional.linear(input, weightFI, self.bias)
else:
return super(FILinear, self).forward(input)
@staticmethod
def from_pytorch_impl(fi, name, linear: nn.Linear):
return FILinear(fi, name, linear.in_features, linear.out_features,
linear.weight, linear.bias)
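    # Hypothetical usage sketch (the layer name and sizes are illustrative only):
    #   fi_linear = FILinear.from_pytorch_impl(fi, "fc1", nn.Linear(512, 10))
    # wraps an existing nn.Linear so faults can later be injected into its weights or inputs.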
def __repr__(self):
return "%s(in_features=%d, out_features=%d, bias=%s, id=%d)" % (
self.__class__.__name__,
self.in_features,
self.out_features,
str(True if self.bias is not None else False),
self.id)
| 37.735632
| 88
| 0.531831
|
79513e55f7625ed7448cfb04b651a6129c72aa02
| 9,927
|
py
|
Python
|
mnist_4.0_batchnorm_five_layers_sigmoid.py
|
hughkong/asstarer
|
ed331aa2b4c7665f10214117510cfee099216ede
|
[
"Apache-2.0"
] | 1
|
2021-06-20T11:44:05.000Z
|
2021-06-20T11:44:05.000Z
|
mnist_4.0_batchnorm_five_layers_sigmoid.py
|
hughkong/asstarer
|
ed331aa2b4c7665f10214117510cfee099216ede
|
[
"Apache-2.0"
] | null | null | null |
mnist_4.0_batchnorm_five_layers_sigmoid.py
|
hughkong/asstarer
|
ed331aa2b4c7665f10214117510cfee099216ede
|
[
"Apache-2.0"
] | 1
|
2020-05-05T17:21:15.000Z
|
2020-05-05T17:21:15.000Z
|
# encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import math
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
tf.set_random_seed(0)
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (sigmoid+BN) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax+BN) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# variable learning rate
lr = tf.placeholder(tf.float32)
# train/test selector for batch normalisation
tst = tf.placeholder(tf.bool)
# training iteration
iter = tf.placeholder(tf.int32)
# five layers and their number of neurons (the last layer has 10 softmax neurons)
L = 200
M = 100
N = 60
P = 30
Q = 10
# Weights initialised with small random values between -0.2 and +0.2
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1)) # 784 = 28 * 28
S1 = tf.Variable(tf.ones([L]))
O1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
S2 = tf.Variable(tf.ones([M]))
O2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
S3 = tf.Variable(tf.ones([N]))
O3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, P], stddev=0.1))
S4 = tf.Variable(tf.ones([P]))
O4 = tf.Variable(tf.zeros([P]))
W5 = tf.Variable(tf.truncated_normal([P, Q], stddev=0.1))
B5 = tf.Variable(tf.zeros([Q]))
## Batch normalisation conclusions with sigmoid activation function:
# BN is applied between logits and the activation function
# On Sigmoids it is very clear that without BN, the sigmoids saturate, with BN, they output
# a clean gaussian distribution of values, especially with high initial learning rates.
# sigmoid, no batch-norm, lr(0.003, 0.0001, 2000) => 97.5%
# sigmoid, batch-norm lr(0.03, 0.0001, 1000) => 98%
# sigmoid, batch-norm, no offsets => 97.3%
# sigmoid, batch-norm, no scales => 98.1% but cannot hold fast learning rate at start
# sigmoid, batch-norm, no scales, no offsets => 96%
# Both scales and offsets are useful with sigmoids.
# With RELUs, the scale variables can be omitted.
# Biases are not useful with batch norm, offsets are to be used instead
# Steady 98.5% accuracy using these parameters:
# moving average decay: 0.998 (equivalent to averaging over two epochs)
# learning rate decay from 0.03 to 0.0001 speed 1000 => max 98.59 at 6500 iterations, 98.54 at 10K it, 98% at 1300it, 98.5% at 3200it
def batchnorm(Ylogits, Offset, Scale, is_test, iteration):
    exp_moving_avg = tf.train.ExponentialMovingAverage(0.998, iteration) # adding the iteration prevents averaging across non-existing iterations
    bnepsilon = 1e-5
    mean, variance = tf.nn.moments(Ylogits, [0])
    update_moving_averages = exp_moving_avg.apply([mean, variance])
    m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
    v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)
    Ybn = tf.nn.batch_normalization(Ylogits, m, v, Offset, Scale, bnepsilon)
    return Ybn, update_moving_averages
def no_batchnorm(Ylogits, Offset, Scale, is_test, iteration):
return Ylogits, tf.no_op()
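# Illustrative note: to run the same model without batch normalisation, swap `batchnorm`
# for `no_batchnorm` in the layer definitions below, e.g.
#   Y1bn, update_ema1 = no_batchnorm(Y1l, O1, S1, tst, iter)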
# The model
XX = tf.reshape(X, [-1, 784])
Y1l = tf.matmul(XX, W1)
Y1bn, update_ema1 = batchnorm(Y1l, O1, S1, tst, iter)
Y1 = tf.nn.sigmoid(Y1bn)
Y2l = tf.matmul(Y1, W2)
Y2bn, update_ema2 = batchnorm(Y2l, O2, S2, tst, iter)
Y2 = tf.nn.sigmoid(Y2bn)
Y3l = tf.matmul(Y2, W3)
Y3bn, update_ema3 = batchnorm(Y3l, O3, S3, tst, iter)
Y3 = tf.nn.sigmoid(Y3bn)
Y4l = tf.matmul(Y3, W4)
Y4bn, update_ema4 = batchnorm(Y4l, O4, S4, tst, iter)
Y4 = tf.nn.sigmoid(Y4bn)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
update_ema = tf.group(update_ema1, update_ema2, update_ema3, update_ema4)
# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(O1, [-1]), tf.reshape(O2, [-1]), tf.reshape(O3, [-1]), tf.reshape(O4, [-1]), tf.reshape(B5, [-1])], 0)
# to use for sigmoid
allactivations = tf.concat([tf.reshape(Y1, [-1]), tf.reshape(Y2, [-1]), tf.reshape(Y3, [-1]), tf.reshape(Y4, [-1])], 0)
# to use for RELU
#allactivations = tf.concat([tf.reduce_max(Y1, [0]), tf.reduce_max(Y2, [0]), tf.reduce_max(Y3, [0]), tf.reduce_max(Y4, [0])], 0)
alllogits = tf.concat([tf.reshape(Y1l, [-1]), tf.reshape(Y2l, [-1]), tf.reshape(Y3l, [-1]), tf.reshape(Y4l, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits", title5="activations", histogram4colornum=2, histogram5colornum=2)
# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# learning rate decay (without batch norm)
#max_learning_rate = 0.003
#min_learning_rate = 0.0001
#decay_speed = 2000
# learning rate decay (with batch norm)
max_learning_rate = 0.03
min_learning_rate = 0.0001
decay_speed = 1000.0
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)
# compute training values for visualisation
if update_train_data:
a, c, im, al, ac = sess.run([accuracy, cross_entropy, I, alllogits, allactivations], {X: batch_X, Y_: batch_Y, tst: False})
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")
datavis.append_training_curves_data(i, a, c)
datavis.update_image1(im)
datavis.append_data_histograms(i, al, ac)
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], {X: mnist.test.images, Y_: mnist.test.labels, tst: True})
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
# the backpropagation training step
sess.run(train_step, {X: batch_X, Y_: batch_Y, lr: learning_rate, tst: False})
sess.run(update_ema, {X: batch_X, Y_: batch_Y, tst: False, iter: i})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# Some results to expect:
# (In all runs, if sigmoids are used, all biases are initialised at 0, if RELUs are used,
# all biases are initialised at 0.1 apart from the last one which is initialised at 0.)
## decaying learning rate from 0.003 to 0.0001 decay_speed 2000, 10K iterations
# final test accuracy = 0.9813 (sigmoid - training cross-entropy not stabilised)
# final test accuracy = 0.9842 (relu - training set fully learned, test accuracy stable)
| 46.825472
| 153
| 0.676639
|
79513e5a37a266822c68841af92a103880eed399
| 1,252
|
py
|
Python
|
main.py
|
itsnikhil/tnb-analysis
|
c14bff7b0b17ae0cb49a3d23720632d1329d2bdc
|
[
"MIT"
] | 25
|
2020-12-04T01:40:14.000Z
|
2021-12-16T13:10:20.000Z
|
main.py
|
itsnikhil/tnb-analysis
|
c14bff7b0b17ae0cb49a3d23720632d1329d2bdc
|
[
"MIT"
] | 3
|
2021-06-06T12:40:03.000Z
|
2022-01-10T14:20:50.000Z
|
main.py
|
itsnikhil/tnb-analysis
|
c14bff7b0b17ae0cb49a3d23720632d1329d2bdc
|
[
"MIT"
] | 6
|
2021-03-28T16:34:09.000Z
|
2021-07-31T22:27:28.000Z
|
import os
from datetime import datetime
from thenewboston.constants.network import MAX_POINT_VALUE
from thenewboston.utils.network import fetch
from utils.files import write_json
from utils.format_results import format_results
PRIMARY_VALIDATOR_IP = '54.219.234.129'
def fetch_account_data():
"""
Fetch all account data from primary validator
Return list of accounts
"""
results = []
next_url = f'http://{PRIMARY_VALIDATOR_IP}/accounts'
while next_url:
print(next_url)
data = fetch(url=next_url, headers={})
accounts = data['results']
results += accounts
next_url = data['next']
return results
def run():
"""
Run main application
"""
now = datetime.utcnow()
date_time = now.strftime('%Y-%m-%d-%H_%M_%S')
data = format_results(fetch_account_data())
verify_results(data=data)
write_json(
file=f'./account_backups/{date_time}.json',
data=data
)
def verify_results(*, data):
"""
    Ensure total coins is equal to MAX_POINT_VALUE
"""
total = sum(v['balance'] for k, v in data.items())
if total == MAX_POINT_VALUE:
print('\nValid')
else:
print('\nInvalid')
if __name__ == '__main__':
run()
| 19.5625
| 58
| 0.639776
|
79513e949943ac9e7656cca2c5431b1415205ecb
| 780
|
py
|
Python
|
src/models/model.py
|
evanaze/captcha
|
62d226742be7f4091e54a7ea960703812bd44fd5
|
[
"MIT"
] | null | null | null |
src/models/model.py
|
evanaze/captcha
|
62d226742be7f4091e54a7ea960703812bd44fd5
|
[
"MIT"
] | 3
|
2021-03-26T18:14:39.000Z
|
2021-09-20T14:09:05.000Z
|
src/models/model.py
|
evanaze/captcha
|
62d226742be7f4091e54a7ea960703812bd44fd5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5, 2)
self.conv2 = nn.Conv2d(32, 64, 7, 3)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(36864, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
| 26.896552
| 44
| 0.528205
|
79513e961b4549f94402760f10315c38cbfc605b
| 17,069
|
py
|
Python
|
qiskit_experiments/library/characterization/fine_amplitude.py
|
yoshida-ryuhei/qiskit-experiments
|
82561acf86b407dcda0a9ec69fe18de2b0a592a2
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/characterization/fine_amplitude.py
|
yoshida-ryuhei/qiskit-experiments
|
82561acf86b407dcda0a9ec69fe18de2b0a592a2
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/characterization/fine_amplitude.py
|
yoshida-ryuhei/qiskit-experiments
|
82561acf86b407dcda0a9ec69fe18de2b0a592a2
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Fine amplitude characterization experiment."""
from typing import List, Optional, Sequence
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Gate
from qiskit.circuit.library import XGate, SXGate
from qiskit.providers.backend import Backend
from qiskit_experiments.data_processing import DataProcessor, nodes
from qiskit_experiments.framework import BaseExperiment, Options
from qiskit_experiments.framework.restless_mixin import RestlessMixin
from qiskit_experiments.library.characterization.analysis import FineAmplitudeAnalysis
class FineAmplitude(BaseExperiment, RestlessMixin):
r"""Error amplifying fine amplitude calibration experiment.
# section: overview
The :class:`FineAmplitude` calibration experiment repeats N times a gate with a pulse
to amplify the under-/over-rotations in the gate to determine the optimal amplitude.
The circuits are therefore of the form:
.. parsed-literal::
┌──────┐ ┌──────┐ ░ ┌─┐
q_0: ┤ Gate ├─ ... ─┤ Gate ├─░─┤M├
└──────┘ └──────┘ ░ └╥┘
measure: 1/═════════ ... ═════════════╩═
0
Here, Gate is the name of the gate which will be repeated. The user can optionally add a
square-root of X pulse before the gates are repeated. This square-root of X pulse allows
the analysis to differentiate between over rotations and under rotations in the case of
pi-pulses. Importantly, the resulting data is analyzed by a fit to a cosine function in
which we try to determine the over/under rotation given an intended rotation angle per
gate which must also be specified by the user.
Error amplifying experiments are most sensitive to angle errors when we measure points along
the equator of the Bloch sphere. This is why users should insert a square-root of X pulse
before running calibrations for :math:`\pm\pi` rotations. When all data points are close to
the equator, it is difficult for a fitter to infer the overall scale of the error. When
        calibrating a :math:`\pi` rotation, one can use ``add_xp_circuit = True`` to insert one
circuit that puts the qubit in the excited state to set the scale for the other circuits.
Furthermore, when running calibrations for :math:`\pm\pi/2` rotations users are advised
to use an odd number of repetitions, e.g. [1, 2, 3, 5, 7, ...] to ensure that the ideal
points are on the equator of the Bloch sphere. Note the presence of two repetitions which
allows us to prepare the excited state. Therefore, ``add_xp_circuit = True`` is not needed
in this case.
# section: example
The steps to run a fine amplitude experiment are
.. code-block:: python
qubit = 3
amp_cal = FineAmplitude(qubit, SXGate())
amp_cal.set_experiment_options(
angle_per_gate=np.pi/2,
add_xp_circuit=False,
add_sx=False
)
amp_cal.run(backend)
Note that there are subclasses of :class:`FineAmplitude` such as :class:`FineSXAmplitude`
that set the appropriate options by default.
# section: analysis_ref
:py:class:`FineAmplitudeAnalysis`
# section: reference
.. ref_arxiv:: 1 1504.06597
# section: tutorial
:doc:`/tutorials/fine_calibrations`
"""
@classmethod
def _default_experiment_options(cls) -> Options:
r"""Default values for the fine amplitude experiment.
Experiment Options:
repetitions (List[int]): A list of the number of times that the gate is repeated.
            gate (Gate): The gate instance that is repeated, for example ``XGate()``.
            normalization (bool): If set to True, the DataProcessor will normalize the
measured signal to the interval [0, 1]. Defaults to True.
add_cal_circuits (bool): If set to True then two circuits to calibrate 0 and 1 points
will be added. These circuits are often needed to properly calibrate the amplitude
of the ping-pong oscillation that encodes the errors. This helps account for
state preparation and measurement errors.
"""
options = super()._default_experiment_options()
options.repetitions = list(range(1, 15))
options.gate = None
options.normalization = True
options.add_cal_circuits = True
return options
def __init__(
self,
qubits: Sequence[int],
gate: Gate,
backend: Optional[Backend] = None,
measurement_qubits: Sequence[int] = None,
):
"""Setup a fine amplitude experiment on the given qubit.
Args:
qubits: The qubit(s) on which to run the fine amplitude calibration experiment.
gate: The gate that will be repeated.
backend: Optional, the backend to run the experiment on.
measurement_qubits: The qubits in the given physical qubits that need to
be measured.
"""
super().__init__(qubits, analysis=FineAmplitudeAnalysis(), backend=backend)
self.set_experiment_options(gate=gate)
if measurement_qubits is not None:
self._measurement_qubits = [self.physical_qubits.index(q) for q in measurement_qubits]
else:
self._measurement_qubits = range(self.num_qubits)
def _spam_cal_circuits(self, meas_circuit: QuantumCircuit) -> List[QuantumCircuit]:
"""This method returns the calibration circuits.
Calibration circuits allow the experiment to overcome state preparation and
measurement errors which cause ideal probabilities to be below 1.
Args:
meas_circuit: The measurement circuit, so that we only apply x gates to the
measured qubits.
Returns:
Two circuits that calibrate the spam errors for the 0 and 1 state.
"""
cal_circuits = []
for add_x in [0, 1]:
circ = QuantumCircuit(self.num_qubits, meas_circuit.num_clbits)
if add_x:
qubits = meas_circuit.get_instructions("measure")[0][1]
circ.x(qubits)
circ.compose(meas_circuit, inplace=True)
circ.metadata = {
"experiment_type": self._type,
"qubits": self.physical_qubits,
"xval": add_x,
"unit": "gate number",
"series": "spam-cal",
}
cal_circuits.append(circ)
return cal_circuits
def _pre_circuit(self, num_clbits: int) -> QuantumCircuit:
"""Return a preparation circuit.
This method can be overridden by subclasses e.g. to calibrate gates on
transitions other than the 0 <-> 1 transition.
"""
return QuantumCircuit(self.num_qubits, num_clbits)
def _measure_circuit(self) -> QuantumCircuit:
"""Create the measurement part of the quantum circuit.
Sub-classes may override this function.
Returns:
A quantum circuit which defines the qubits that will be measured.
"""
circuit = QuantumCircuit(self.num_qubits, len(self._measurement_qubits))
for idx, qubit in enumerate(self._measurement_qubits):
circuit.measure(qubit, idx)
return circuit
def circuits(self) -> List[QuantumCircuit]:
"""Create the circuits for the fine amplitude calibration experiment.
Returns:
A list of circuits with a variable number of gates.
Raises:
CalibrationError: If the analysis options do not contain the angle_per_gate.
"""
repetitions = self.experiment_options.get("repetitions")
qubits = range(self.num_qubits)
meas_circ = self._measure_circuit()
pre_circ = self._pre_circuit(meas_circ.num_clbits)
if self.experiment_options.add_cal_circuits:
circuits = self._spam_cal_circuits(meas_circ)
else:
circuits = []
for repetition in repetitions:
circuit = QuantumCircuit(self.num_qubits, meas_circ.num_clbits)
# Add pre-circuit
circuit.compose(pre_circ, qubits, range(meas_circ.num_clbits), inplace=True)
for _ in range(repetition):
circuit.append(self.experiment_options.gate, qubits)
# Add the measurement part of the circuit
circuit.compose(meas_circ, qubits, range(meas_circ.num_clbits), inplace=True)
circuit.metadata = {
"experiment_type": self._type,
"qubits": self.physical_qubits,
"xval": repetition,
"unit": "gate number",
"series": 1,
}
circuits.append(circuit)
return circuits
def _metadata(self):
metadata = super()._metadata()
# Store measurement level and meas return if they have been
# set for the experiment
for run_opt in ["meas_level", "meas_return"]:
if hasattr(self.run_options, run_opt):
metadata[run_opt] = getattr(self.run_options, run_opt)
return metadata
class FineXAmplitude(FineAmplitude):
r"""A fine amplitude experiment with all the options set for the :math:`\pi`-rotation.
# section: overview
:class:`FineXAmplitude` is a subclass of :class:`FineAmplitude` and is used to set
the appropriate values for the default options.
"""
def __init__(self, qubit: int, backend: Optional[Backend] = None):
"""Initialize the experiment."""
super().__init__([qubit], XGate(), backend=backend)
# Set default analysis options
self.analysis.set_options(
fixed_parameters={
"angle_per_gate": np.pi,
"phase_offset": np.pi / 2,
}
)
@classmethod
def _default_experiment_options(cls) -> Options:
r"""Default values for the fine amplitude experiment.
Experiment Options:
gate (Gate): Gate to characterize. Defaults to an XGate.
"""
options = super()._default_experiment_options()
options.gate = XGate()
return options
def _pre_circuit(self, num_clbits: int) -> QuantumCircuit:
"""The preparation circuit is an sx gate to move to the equator of the Bloch sphere."""
circuit = QuantumCircuit(self.num_qubits, num_clbits)
circuit.sx(0)
return circuit
class FineSXAmplitude(FineAmplitude):
r"""A fine amplitude experiment with all the options set for the :math:`\pi/2`-rotation.
# section: overview
:class:`FineSXAmplitude` is a subclass of :class:`FineAmplitude` and is used to set
the appropriate values for the default options.
"""
def __init__(self, qubit: int, backend: Optional[Backend] = None):
"""Initialize the experiment."""
super().__init__([qubit], SXGate(), backend=backend)
# Set default analysis options
self.analysis.set_options(
fixed_parameters={
"angle_per_gate": np.pi / 2,
"phase_offset": np.pi,
}
)
@classmethod
def _default_experiment_options(cls) -> Options:
r"""Default values for the fine amplitude experiment.
Experiment Options:
gate (Gate): FineSXAmplitude calibrates an SXGate.
add_cal_circuits (bool): If set to True then two circuits to calibrate 0 and 1 points
will be added. This option is set to False by default for ``FineSXAmplitude``
since the amplitude calibration can be achieved with two SX gates and this is
included in the repetitions.
repetitions (List[int]): By default the repetitions take on odd numbers for
:math:`\pi/2` target angles as this ideally prepares states on the equator of
the Bloch sphere. Note that the repetitions include two repetitions which
plays the same role as including a circuit with an X gate.
"""
options = super()._default_experiment_options()
options.gate = SXGate()
options.add_cal_circuits = False
options.repetitions = [0, 1, 2, 3, 5, 7, 9, 11, 13, 15, 17, 21, 23, 25]
return options
class FineZXAmplitude(FineAmplitude):
r"""A fine amplitude experiment for the :code:`RZXGate(np.pi / 2)`.
# section: overview
:class:`FineZXAmplitude` is a subclass of :class:`FineAmplitude` and is used to set
the appropriate values for the default options to calibrate a :code:`RZXGate(np.pi / 2)`.
# section: example
To run this experiment the user will have to provide the instruction schedule
map in the transpile options that contains the schedule for the experiment.
        .. code-block:: python
qubits = (1, 2)
inst_map = InstructionScheduleMap()
inst_map.add("szx", qubits, my_schedule)
fine_amp = FineZXAmplitude(qubits, backend)
fine_amp.set_transpile_options(inst_map=inst_map)
Here, :code:`my_schedule` is the pulse schedule that will implement the
:code:`RZXGate(np.pi / 2)` rotation.
"""
def __init__(self, qubits: Sequence[int], backend: Optional[Backend] = None):
"""Initialize the experiment."""
# We cannot use RZXGate since it has a parameter so we redefine the gate.
# Failing to do so causes issues with QuantumCircuit.calibrations.
gate = Gate("szx", 2, [])
super().__init__(qubits, gate, backend=backend, measurement_qubits=[qubits[1]])
# Set default analysis options
self.analysis.set_options(
fixed_parameters={
"angle_per_gate": np.pi / 2,
"phase_offset": np.pi,
},
outcome="1",
)
@classmethod
def _default_experiment_options(cls) -> Options:
r"""Default values for the fine amplitude experiment.
Experiment Options:
add_cal_circuits (bool): If set to True then two circuits to calibrate 0 and 1 points
will be added. This option is set to False by default for ``FineZXAmplitude``
since the amplitude calibration can be achieved with two RZX gates and this is
included in the repetitions.
repetitions (List[int]): A list of the number of times that the gate is repeated.
"""
options = super()._default_experiment_options()
options.add_cal_circuits = False
options.repetitions = [0, 1, 2, 3, 4, 5, 7, 9, 11, 13]
return options
@classmethod
def _default_transpile_options(cls) -> Options:
"""Default transpile options for the fine amplitude experiment.
Experiment Options:
basis_gates: Set to :code:`["szx"]`.
inst_map: The instruction schedule map that will contain the schedule for the
Rzx(pi/2) gate. This schedule should be stored under the instruction name
``szx``.
"""
options = super()._default_transpile_options()
options.basis_gates = ["szx"]
options.inst_map = None
return options
def enable_restless(
self, rep_delay: Optional[float] = None, override_processor_by_restless: bool = True
):
"""Enable restless measurements.
We wrap the method of the :class:`RestlessMixin` to readout both qubits. This forces
the control qubit to be in either the 0 or 1 state before the next circuit starts
since restless measurements do not reset qubits.
"""
self.analysis.set_options(outcome="11")
super().enable_restless(rep_delay, override_processor_by_restless)
self._measurement_qubits = range(self.num_qubits)
def _get_restless_processor(self, meas_level: int = 2) -> DataProcessor:
"""Marginalize the counts after the restless shot reordering."""
return DataProcessor(
"memory",
[
nodes.RestlessToCounts(self._num_qubits),
nodes.MarginalizeCounts({1}), # keep only the target.
nodes.Probability("1"),
],
)
| 39.603248
| 100
| 0.635948
|
79513eeb499b2e5f0077364077eda705ac684812
| 993
|
py
|
Python
|
moex/migrations/0035_auto_20201129_2111.py
|
ghostforpy/bonds-docker
|
fda77225b85264cb4ba06b15ff63bc807858425a
|
[
"MIT"
] | 2
|
2020-09-08T12:51:56.000Z
|
2021-08-18T15:27:52.000Z
|
moex/migrations/0035_auto_20201129_2111.py
|
ghostforpy/bonds-docker
|
fda77225b85264cb4ba06b15ff63bc807858425a
|
[
"MIT"
] | 1
|
2021-12-13T20:43:35.000Z
|
2021-12-13T20:43:35.000Z
|
moex/migrations/0035_auto_20201129_2111.py
|
ghostforpy/bonds-docker
|
fda77225b85264cb4ba06b15ff63bc807858425a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-11-29 18:11
from django.db import migrations
from decimal import Decimal
from ..models import SecurityPortfolios, Security
def refresh_total_cost_in_rub(apps, schema_editor):
s_p = SecurityPortfolios.objects.\
select_related('security').\
select_related('portfolio').\
all()
for i in s_p:
if i.security.main_board_faceunit != 'SUR':
valute = Security.objects.filter(
shortname=i.security.main_board_faceunit
).get()
i.total_cost_in_rub = Decimal(
i.total_cost) * Decimal(valute.today_price)
else:
i.total_cost_in_rub = i.total_cost
i.save(update_fields=['total_cost_in_rub'])
i.portfolio.refresh_portfolio()
class Migration(migrations.Migration):
dependencies = [
('moex', '0034_auto_20201129_1819'),
]
operations = [
migrations.RunPython(refresh_total_cost_in_rub),
]
| 30.090909
| 59
| 0.643505
|
79513f12efe5e747361181458ea2794fcc5ae115
| 8,797
|
py
|
Python
|
ucscsdk/mometa/adaptor/AdaptorMenloHostPortStatsHist.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/adaptor/AdaptorMenloHostPortStatsHist.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/adaptor/AdaptorMenloHostPortStatsHist.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for AdaptorMenloHostPortStatsHist ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class AdaptorMenloHostPortStatsHistConsts():
MOST_RECENT_FALSE = "false"
MOST_RECENT_NO = "no"
MOST_RECENT_TRUE = "true"
MOST_RECENT_YES = "yes"
SUSPECT_FALSE = "false"
SUSPECT_NO = "no"
SUSPECT_TRUE = "true"
SUSPECT_YES = "yes"
class AdaptorMenloHostPortStatsHist(ManagedObject):
"""This is AdaptorMenloHostPortStatsHist class."""
consts = AdaptorMenloHostPortStatsHistConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("AdaptorMenloHostPortStatsHist", "adaptorMenloHostPortStatsHist", "[id]", VersionMeta.Version111a, "OutputOnly", 0xf, [], ["read-only"], [u'adaptorMenloHostPortStats'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "ulong", VersionMeta.Version111a, MoPropertyMeta.NAMING, None, None, None, None, [], []),
"most_recent": MoPropertyMeta("most_recent", "mostRecent", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rx_pause_cfc": MoPropertyMeta("rx_pause_cfc", "rxPauseCFC", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_cfc_delta": MoPropertyMeta("rx_pause_cfc_delta", "rxPauseCFCDelta", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_cfc_delta_avg": MoPropertyMeta("rx_pause_cfc_delta_avg", "rxPauseCFCDeltaAvg", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_cfc_delta_max": MoPropertyMeta("rx_pause_cfc_delta_max", "rxPauseCFCDeltaMax", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_cfc_delta_min": MoPropertyMeta("rx_pause_cfc_delta_min", "rxPauseCFCDeltaMin", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_pfc": MoPropertyMeta("rx_pause_pfc", "rxPausePFC", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_pfc_delta": MoPropertyMeta("rx_pause_pfc_delta", "rxPausePFCDelta", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_pfc_delta_avg": MoPropertyMeta("rx_pause_pfc_delta_avg", "rxPausePFCDeltaAvg", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_pfc_delta_max": MoPropertyMeta("rx_pause_pfc_delta_max", "rxPausePFCDeltaMax", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rx_pause_pfc_delta_min": MoPropertyMeta("rx_pause_pfc_delta_min", "rxPausePFCDeltaMin", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"tx_pause_cfc": MoPropertyMeta("tx_pause_cfc", "txPauseCFC", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_cfc_delta": MoPropertyMeta("tx_pause_cfc_delta", "txPauseCFCDelta", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_cfc_delta_avg": MoPropertyMeta("tx_pause_cfc_delta_avg", "txPauseCFCDeltaAvg", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_cfc_delta_max": MoPropertyMeta("tx_pause_cfc_delta_max", "txPauseCFCDeltaMax", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_cfc_delta_min": MoPropertyMeta("tx_pause_cfc_delta_min", "txPauseCFCDeltaMin", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_pfc": MoPropertyMeta("tx_pause_pfc", "txPausePFC", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_pfc_delta": MoPropertyMeta("tx_pause_pfc_delta", "txPausePFCDelta", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_pfc_delta_avg": MoPropertyMeta("tx_pause_pfc_delta_avg", "txPausePFCDeltaAvg", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_pfc_delta_max": MoPropertyMeta("tx_pause_pfc_delta_max", "txPausePFCDeltaMax", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"tx_pause_pfc_delta_min": MoPropertyMeta("tx_pause_pfc_delta_min", "txPausePFCDeltaMin", "ulong", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"mostRecent": "most_recent",
"rn": "rn",
"rxPauseCFC": "rx_pause_cfc",
"rxPauseCFCDelta": "rx_pause_cfc_delta",
"rxPauseCFCDeltaAvg": "rx_pause_cfc_delta_avg",
"rxPauseCFCDeltaMax": "rx_pause_cfc_delta_max",
"rxPauseCFCDeltaMin": "rx_pause_cfc_delta_min",
"rxPausePFC": "rx_pause_pfc",
"rxPausePFCDelta": "rx_pause_pfc_delta",
"rxPausePFCDeltaAvg": "rx_pause_pfc_delta_avg",
"rxPausePFCDeltaMax": "rx_pause_pfc_delta_max",
"rxPausePFCDeltaMin": "rx_pause_pfc_delta_min",
"status": "status",
"suspect": "suspect",
"thresholded": "thresholded",
"timeCollected": "time_collected",
"txPauseCFC": "tx_pause_cfc",
"txPauseCFCDelta": "tx_pause_cfc_delta",
"txPauseCFCDeltaAvg": "tx_pause_cfc_delta_avg",
"txPauseCFCDeltaMax": "tx_pause_cfc_delta_max",
"txPauseCFCDeltaMin": "tx_pause_cfc_delta_min",
"txPausePFC": "tx_pause_pfc",
"txPausePFCDelta": "tx_pause_pfc_delta",
"txPausePFCDeltaAvg": "tx_pause_pfc_delta_avg",
"txPausePFCDeltaMax": "tx_pause_pfc_delta_max",
"txPausePFCDeltaMin": "tx_pause_pfc_delta_min",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.most_recent = None
self.rx_pause_cfc = None
self.rx_pause_cfc_delta = None
self.rx_pause_cfc_delta_avg = None
self.rx_pause_cfc_delta_max = None
self.rx_pause_cfc_delta_min = None
self.rx_pause_pfc = None
self.rx_pause_pfc_delta = None
self.rx_pause_pfc_delta_avg = None
self.rx_pause_pfc_delta_max = None
self.rx_pause_pfc_delta_min = None
self.status = None
self.suspect = None
self.thresholded = None
self.time_collected = None
self.tx_pause_cfc = None
self.tx_pause_cfc_delta = None
self.tx_pause_cfc_delta_avg = None
self.tx_pause_cfc_delta_max = None
self.tx_pause_cfc_delta_min = None
self.tx_pause_pfc = None
self.tx_pause_pfc_delta = None
self.tx_pause_pfc_delta_avg = None
self.tx_pause_pfc_delta_max = None
self.tx_pause_pfc_delta_min = None
ManagedObject.__init__(self, "AdaptorMenloHostPortStatsHist", parent_mo_or_dn, **kwargs)
| 71.520325
| 259
| 0.689781
|
79513f1eacf0b08d6f373e50d327514b98fdf7e2
| 2,297
|
py
|
Python
|
src/install_venv_app.py
|
muravjov/fablib
|
e64ba200651fa92555af6bc2a2f74606ac6495fe
|
[
"MIT"
] | null | null | null |
src/install_venv_app.py
|
muravjov/fablib
|
e64ba200651fa92555af6bc2a2f74606ac6495fe
|
[
"MIT"
] | null | null | null |
src/install_venv_app.py
|
muravjov/fablib
|
e64ba200651fa92555af6bc2a2f74606ac6495fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
#
# install_venv_app - a utility that registers an application's path via the .pth/site.py mechanism
#
# Rationale: end-user Python web applications do not need full packaging into virtualenv packages,
# since they are only consumers of other libraries' code; however, registering themselves
# in the venv's .pth eventually becomes a real need
#
# To do this, the script creates a minimal package in the venv on the fly; its only important
# action is to add the directory given as the second argument to easy_install.pth
# How the directory in the venv that holds the .pth files is determined:
#
# distutils.dist.py: Distribution.get_command_obj("install") => Command.ensure_finalized(self) =>
# => distutils/command/install.py : install.finalize_options
#
# See the templates in INSTALL_SCHEMES, for example:
# 'unix_prefix': {
# 'purelib': '$base/lib/python$py_version_short/site-packages',
# 'platlib': '$platbase/lib/python$py_version_short/site-packages',
# 'headers': '$base/include/python$py_version_short/$dist_name',
# 'scripts': '$base/bin',
# 'data' : '$base',
# },
#
# The actual .pth update is performed in setuptools/command/easy_install.py: update_pth()
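#
# Example invocation (the package name and source path below are made up, for illustration only):
#
#   venv/bin/python install_venv_app.py myapp /srv/myapp/src
#
# after which the venv's easy_install.pth is expected to contain /srv/myapp/src,
# making the code under that directory importable from inside the venv.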
import os
def install(pkg_name, src_path):
    # :TRICKY: we have to change the current working directory
old_dir = os.getcwd()
os.chdir(src_path)
try:
from setuptools import setup
setup(
name = pkg_name,
version = 1,
            # choice of the directory that gets written into .pth - see egg_info.finalize_options():
# self.egg_base = (self.distribution.package_dir or {}).get('',os.curdir)
#package_dir = {'': 'src'},
            # venv/pip install -e src_path runs setup.py with these same arguments,
            # see install_editable()
script_args = ["develop", "--no-deps"],
            script_name = '', # :KLUDGE: no setup.py file (but the warning remains)
)
finally:
os.chdir(old_dir)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("pkg_name")
parser.add_argument("src_path")
args = parser.parse_args()
install(args.pkg_name, args.src_path)
if __name__ == "__main__":
main()
| 34.283582
| 117
| 0.659991
|
79513fac9ff47c3aebaaa2425e05b686ee952104
| 1,225
|
py
|
Python
|
manualtest/kuafu_mem.py
|
haosulab/SAPIEN
|
6bc3f4e2be910199b793f185aea5791d9f193e4c
|
[
"MIT"
] | 21
|
2021-10-13T11:56:45.000Z
|
2022-03-30T16:09:21.000Z
|
manualtest/kuafu_mem.py
|
haosulab/SAPIEN
|
6bc3f4e2be910199b793f185aea5791d9f193e4c
|
[
"MIT"
] | 25
|
2021-10-20T20:14:37.000Z
|
2022-03-30T05:55:15.000Z
|
manualtest/kuafu_mem.py
|
haosulab/SAPIEN
|
6bc3f4e2be910199b793f185aea5791d9f193e4c
|
[
"MIT"
] | 5
|
2021-10-31T17:43:52.000Z
|
2022-03-01T09:45:53.000Z
|
# A minimal example of using KuafuRenderer
#
# By Jet <i@jetd.me>
#
import sapien.core as sapien
import numpy as np
def main():
sim = sapien.Engine()
sapien.KuafuRenderer.set_log_level("debug")
config = sapien.KuafuConfig()
config.use_viewer = True
config.spp = 1
renderer = sapien.KuafuRenderer(config)
sim.set_renderer(renderer)
config = sapien.SceneConfig()
scene = sim.create_scene(config)
scene.add_ground(0)
scene.set_timestep(1 / 60)
mount = scene.create_actor_builder().build_static()
cam1 = scene.add_mounted_camera(
"cam", mount, sapien.Pose(), 800, 600, 0, 1.0, 0.1, 100)
mount.set_pose(sapien.Pose([-12, 0, 3]))
for i in range(128):
builder = scene.create_actor_builder()
builder.add_capsule_visual()
builder.add_capsule_collision()
sphere = builder.build()
sphere.set_pose(sapien.Pose(p=[np.random.rand(), np.random.rand(), i * 10]))
scene.set_ambient_light([0.4, 0.4, 0.4])
dirlight = scene.add_directional_light(
[-1, -1, -1], color=[3.0, 3.0, 3.0]
)
while renderer.is_running:
scene.step()
scene.update_render()
cam1.take_picture()
main()
| 23.557692
| 84
| 0.638367
|
79513fe0e38eb905f0efffedb478ff4f3a314d11
| 6,209
|
py
|
Python
|
vit_pytorch/vit.py
|
rocke2020/vit-pytorch
|
a1f828da0c952fa56a90a71f7c88c8e0025c1d42
|
[
"MIT"
] | null | null | null |
vit_pytorch/vit.py
|
rocke2020/vit-pytorch
|
a1f828da0c952fa56a90a71f7c88c8e0025c1d42
|
[
"MIT"
] | null | null | null |
vit_pytorch/vit.py
|
rocke2020/vit-pytorch
|
a1f828da0c952fa56a90a71f7c88c8e0025c1d42
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim,
pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
        # TODO: slicing with :(n + 1) may be unnecessary; self.pos_embedding alone would work here.
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
        # x.shape: (b, n + 1, d)
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
return self.mlp_head(x)
class AttentionWithMask(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads # 64 x 8
self.heads = heads # 8
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None):
# n is the patch_num + 1, patch_num = (img_size/patch_size)**2.
        # assuming img_size 224 and patch_size 32: 224/32 = 7, so n = 7*7 + 1 = 50 here.
        # YOLO-v1 also uses a 7*7 grid of patches.
b, n, _, h = *x.shape, self.heads # n=50,h=8,
        # self.to_qkv(x) has shape [b, 50, 64x8x3], which is then chunked into 3 pieces,
        # i.e. qkv is a 3-tuple whose elements each have shape [b, 50, 64x8]
qkv = self.to_qkv(x).chunk(3, dim = -1)
        # reshape each piece from [b, 50, 64x8] to [b, 8, 50, 64]
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        # q and k both have shape [b, 8, 50, 64]; think of 50 as the number of tokens and 64 as the feature dimension
# dots.shape=[b,8,50,50]
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
        # the mask handling below can be ignored (it only applies when a mask is passed in)
mask_value = -torch.finfo(dots.dtype).max
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
dots.masked_fill_(~mask, mask_value)
del mask
        # softmax over the last dimension of the [b, 8, 50, 50] tensor
attn = dots.softmax(dim=-1)
        # attn holds the computed self-attention weights; multiplying by v gives out of shape [b, 8, 50, 64]
out = torch.einsum('bhij,bhjd->bhid', attn, v)
        # out.shape becomes [b, 50, 8x64]
out = rearrange(out, 'b h n d -> b n (h d)')
        # out.shape finally becomes [b, 50, dim] after to_out
out = self.to_out(out)
return out
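# A minimal smoke test for the ViT defined above; the hyperparameters are illustrative
# assumptions rather than values prescribed by this repository.
if __name__ == '__main__':
    v = ViT(image_size = 224, patch_size = 32, num_classes = 1000, dim = 1024,
            depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1)
    img = torch.randn(1, 3, 224, 224)
    preds = v(img)
    print(preds.shape)  # expected: torch.Size([1, 1000])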
| 35.278409
| 139
| 0.568691
|
79514130475e9f190c083c6272011b4579940bfa
| 5,284
|
py
|
Python
|
examples/pyopencap/pyscf/cap_trajectory.py
|
trex47/opencap
|
fd641133b2abaab22c13912d97fe1fe64b132dd7
|
[
"MIT"
] | null | null | null |
examples/pyopencap/pyscf/cap_trajectory.py
|
trex47/opencap
|
fd641133b2abaab22c13912d97fe1fe64b132dd7
|
[
"MIT"
] | null | null | null |
examples/pyopencap/pyscf/cap_trajectory.py
|
trex47/opencap
|
fd641133b2abaab22c13912d97fe1fe64b132dd7
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import functools
from numpy import linalg as LA
import matplotlib.pyplot as plt
import argparse
#########################################
# Alter these values to suit your purposes
ref_energy = -1.1738730219803575
guess = 13.1
eta_list = np.linspace(0,10000,1001)
###########################
au2eV= 27.2113961
eta_list = eta_list * 1E-5
# a root is a single eigenvalue of the cap hamiltonian at a particular value of eta
@functools.total_ordering
class root():
def __init__(self, energy, eta):
self.eta = eta
self.energy = energy
def __lt__(self, other):
return self.eta < other.eta
def __eq__(self, other):
        return self.eta == other.eta and self.energy == other.energy
# a trajectory is a group of eigenvalues over a range of eta values, grouped by proximity to an initial guess
class trajectory():
def __init__(self,states,guess):
min=500
cur=-1
for st in states:
if np.absolute(st.energy-guess)<min:
cur=st
min=np.absolute(st.energy-guess)
self.last=cur
self.states=[cur]
# picks out the state from the list of states whose energy is closest to the previous entry in the trajectory
def add_state(self,states):
min=500
cur=-1
for st in states:
if np.absolute(st.energy-self.last.energy)<min:
cur=st
min=np.absolute(st.energy-self.last.energy)
self.last = cur
self.states.append(cur)
    # applies the first-order correction: E_corr(eta) = E(eta) - eta * dE/deta
def get_corrections(self):
energies=[]
etas=[]
for st in self.states:
energies.append(st.energy)
etas.append(st.eta)
derivs=list(np.gradient(energies)/np.gradient(etas))
for i in range(0,len(self.states)):
self.states[i].corr_energy=self.states[i].energy-derivs[i]*self.states[i].eta
# read in data from opencap output file
parser = argparse.ArgumentParser()
parser.add_argument('pos_arg', type=str,
help='Name of OpenCAP output file')
args = parser.parse_args()
with open(args.pos_arg, 'r') as file :
filedata = file.readlines()
idx=-1
for i in range(0,len(filedata)):
line = filedata[i]
if "Printing out matrices required for Perturbative CAP calculation." in line:
idx=i
num_roots=int(filedata[idx+1].split()[-1])
start=idx+3
# zeroth order hamiltonian first
H_0=[]
for i in range(start,start+num_roots):
l1 = filedata[i].split()
l1= [float(x) for x in l1]
H_0+=l1
H_0 = np.reshape(H_0,(num_roots,num_roots))
start2=start+num_roots+1
# now the cap matrix
cap_mat=[]
for i in range(start2,start2+num_roots):
l1 = filedata[i].split()
l1= [float(x) for x in l1]
cap_mat+=l1
cap_mat= np.reshape(cap_mat,(num_roots,num_roots))
all_roots=[]
# diagonalize over range of eta values and generate trajectories
for i in range(0,len(eta_list)):
eta=eta_list[i]
roots=[]
fullH = H_0 +1.0j * eta * cap_mat
eigv,eigvc=LA.eig(fullH)
for eig in eigv:
E = (eig - ref_energy) * au2eV
roots.append(root(E,eta))
all_roots.append(root(E,eta))
if i==0:
traj=trajectory(roots,guess)
else:
traj.add_state(roots)
# first lets plot everything
re_traj = []
im_traj = []
energies=[]
for root in all_roots:
re_traj.append(np.real(root.energy))
im_traj.append(np.imag(root.energy))
energies.append(root.energy)
plt.title("Eigenvalue trajectories")
plt.xlabel("Re(E)[eV]")
plt.ylabel("Im(E)[eV]")
plt.plot(re_traj,im_traj,'ro')
plt.show()
# lets get the corrected trajectory
traj.get_corrections()
re_traj = []
im_traj = []
corr_re=[]
corr_im=[]
uc_energies=[]
corr_energies=[]
for root in traj.states:
uc_energies.append(root.energy)
re_traj.append(np.real(root.energy))
im_traj.append(np.imag(root.energy))
corr_re.append(np.real(root.corr_energy))
corr_im.append(np.imag(root.corr_energy))
corr_energies.append(root.corr_energy)
# plot uncorrected and corrected trajectory
plt.title("Resonance trajectory")
plt.plot(re_traj,im_traj,'-ro',label="Uncorrected trajectory")
plt.plot(corr_re,corr_im,'-bo',label="Corrected trajectory")
plt.xlabel("Re(E)[eV]")
plt.ylabel("Im(E)[eV]")
plt.legend()
plt.show()
# plot derivative, find stationary point on uncorrected trajectory
derivs=list(np.absolute(np.gradient(uc_energies)/np.gradient(eta_list)))
plt.plot(eta_list,derivs)
plt.title("Uncorrected derivative")
plt.show()
sorted_derivs = sorted(derivs)
points = []
etas = []
for i in range(0,5):
points.append(uc_energies[derivs.index(sorted_derivs[i])])
etas.append(eta_list[derivs.index(sorted_derivs[i])])
print("Uncorrected:")
print(points)
print(sorted_derivs[:5])
print(etas)
# plot derivative, find stationary point on corrected trajectory
derivs=list(np.absolute(np.gradient(corr_energies)/np.gradient(eta_list)))
plt.plot(eta_list,derivs)
plt.title("Corrected derivative")
plt.show()
sorted_derivs = sorted(derivs)
points = []
etas = []
for i in range(0,5):
points.append(corr_energies[derivs.index(sorted_derivs[i])])
etas.append(eta_list[derivs.index(sorted_derivs[i])])
print("Corrected:")
print(points)
print(sorted_derivs[:5])
print(etas)
| 28.562162
| 113
| 0.672218
|
79514177db332a6e0db5a798c31983cac75fa3eb
| 1,562
|
py
|
Python
|
test.py
|
mudathirlawal/python-playground
|
db405904fc83f5ad0caedccbf08c2fe7f62abf0b
|
[
"MIT"
] | null | null | null |
test.py
|
mudathirlawal/python-playground
|
db405904fc83f5ad0caedccbf08c2fe7f62abf0b
|
[
"MIT"
] | null | null | null |
test.py
|
mudathirlawal/python-playground
|
db405904fc83f5ad0caedccbf08c2fe7f62abf0b
|
[
"MIT"
] | null | null | null |
def pig_latin(text):
say = " "
# Separate the text into words
words = text.split()
new_words_list = []
for word in words:
# Create the pig latin word and add it to the list
new_words_list.append('{}{}{}'.format(word[1:], word[0], 'ay'))
# Turn the list back into a phrase
new_text = say.join(new_words_list)
return new_text
print('\n')
print(pig_latin("hello how are you")) # Should be "ellohay owhay reaay ouyay"
print(pig_latin("programming in python is fun")) # Should be "rogrammingpay niay ythonpay siay unfay"
def group_list(group, users):
members = ""
for user in users:
members += (' {},'.format(user))
  # drop the trailing comma and the leading space before returning
  return '{}: {}'.format(group, members[:len(members) - 1].lstrip())
print(group_list("Marketing", ["Mike", "Karen", "Jake", "Tasha"])) # Should be "Marketing: Mike, Karen, Jake, Tasha"
print(group_list("Engineering", ["Kim", "Jay", "Tom"])) # Should be "Engineering: Kim, Jay, Tom"
print(group_list("Users", "")) # Should be "Users:"
print('Adding test string ...')
# UNDER CONSTRUCTION:
# def pig_latin(text):
# say = ""
# # Separate the text into words
# words = text.split()
# print(words)
# for word in words:
# # Create the pig latin word and add it to the list
# words.append('{}{}'.format(text[0], 'ay'))
# # Turn the list back into a phrase
# words = say.join(words)
# return words
# print(pig_latin("hello how are you")) # Should be "ellohay owhay reaay ouyay"
# print(pig_latin("programming in python is fun")) # Should be "rogrammingpay niay ythonpay siay unfay"
| 31.877551
| 116
| 0.650448
|
7951428aab3ff47b78666dacdd5263c055c8b1c5
| 1,158
|
py
|
Python
|
setup.py
|
EliasCampos/minorm
|
ed0eea4b448eed2a7ffedf41301c7f226e938dfd
|
[
"MIT"
] | 2
|
2020-12-20T08:07:32.000Z
|
2022-02-25T18:49:08.000Z
|
setup.py
|
EliasCampos/minorm
|
ed0eea4b448eed2a7ffedf41301c7f226e938dfd
|
[
"MIT"
] | null | null | null |
setup.py
|
EliasCampos/minorm
|
ed0eea4b448eed2a7ffedf41301c7f226e938dfd
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from setuptools import setup
def get_version(root_path):
version_file = root_path / 'minorm' / '__init__.py'
with version_file.open() as f:
for line in f:
if line.startswith('__version__'):
return line.split('=')[1].strip().strip('"').strip("'")
ROOT_PATH = Path(__file__).parent
README = ROOT_PATH / 'README.rst'
setup(
name='minorm',
version=get_version(ROOT_PATH),
description='A minimalistic ORM with basic features.',
long_description=README.read_text(),
long_description_content_type='text/x-rst',
url='https://github.com/EliasCampos/minorm',
author='Campos Ilya',
author_email='camposylia@gmail.com',
license="MIT",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
packages=['minorm'],
)
| 30.473684
| 71
| 0.630397
|
7951436a3c5c9513cee4d685b40ae9c85b96b4e5
| 1,552
|
py
|
Python
|
config/wsgi.py
|
misael1999/taximil-api
|
7c69a7ee27cb0223c7ba61aa08116faf0f95da46
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
misael1999/taximil-api
|
7c69a7ee27cb0223c7ba61aa08116faf0f95da46
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
misael1999/taximil-api
|
7c69a7ee27cb0223c7ba61aa08116faf0f95da46
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Comparte Ride project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# scooter directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'scooter'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| 41.945946
| 79
| 0.79317
|
7951436e30a57826ae0379cd8681c5286b78abb6
| 4,022
|
py
|
Python
|
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
|
dantkz/tensorflow
|
5333bbeb3142af2a06f1ebd971061fc4e28da743
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
|
dantkz/tensorflow
|
5333bbeb3142af2a06f1ebd971061fc4e28da743
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/cluster_resolver/python/training/tpu_cluster_resolver.py
|
dantkz/tensorflow
|
5333bbeb3142af2a06f1ebd971061fc4e28da743
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify an API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
"""
def __init__(self,
project,
zone,
tpu_names,
job_name='tpu_worker',
credentials='default',
service=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
project: Name of the GCP project containing Cloud TPUs
zone: Zone where the TPUs are located
tpu_names: A list of names of the target Cloud TPUs.
job_name: Name of the TensorFlow job the TPUs belong to.
credentials: GCE Credentials. If None, then we use default credentials
from the oauth2client
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._tpu_names = tpu_names
self._job_name = job_name
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'TPU cluster resolver')
self._service = discovery.build(
'tpu', 'v1alpha1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
"""
worker_list = []
for tpu_name in self._tpu_names:
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, tpu_name)
request = self._service.projects().locations().nodes().get(name=full_name)
response = request.execute()
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list.append(instance_url)
return ClusterSpec({self._job_name: worker_list})
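# Usage sketch (the project, zone and TPU names below are placeholders, not real resources;
# querying them requires the googleapiclient and oauth2client packages to be installed):
#
#   resolver = TPUClusterResolver(project='my-project',
#                                 zone='us-central1-b',
#                                 tpu_names=['my-tpu'])
#   cluster_spec = resolver.cluster_spec()
#   # e.g. cluster_spec.as_dict() -> {'tpu_worker': ['10.240.1.2:8470']}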
| 36.563636
| 96
| 0.701144
|
7951449f13b4cd597030b6462fadf2c181c117c4
| 809
|
py
|
Python
|
setup.py
|
topicaxis/text-analysis-service
|
e94dfd3ea4e9e2259ba61f135e77c6d702c440e6
|
[
"MIT"
] | 1
|
2020-01-07T00:08:40.000Z
|
2020-01-07T00:08:40.000Z
|
setup.py
|
pmatigakis/text-analysis-service
|
e94dfd3ea4e9e2259ba61f135e77c6d702c440e6
|
[
"MIT"
] | 1
|
2019-11-02T15:41:28.000Z
|
2019-11-02T15:41:28.000Z
|
setup.py
|
pmatigakis/text-analysis-service
|
e94dfd3ea4e9e2259ba61f135e77c6d702c440e6
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
def get_requirements():
with open("requirements.txt") as f:
requirements = [
line.strip()
for line in f
if not line.startswith("-e")
]
return requirements
def get_test_requirements():
with open("requirements-test.txt") as f:
requirements = [
line.strip()
for line in f
]
return requirements
setup(
name="Text analysis service",
version="0.10.3",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=False,
install_requires=get_requirements(),
test_suite="nose.collector",
tests_require=get_test_requirements(),
entry_points={
'console_scripts': ['tas-cli=tas.cli:main'],
}
)
| 21.289474
| 52
| 0.613103
|
7951478782d8e34effe06bdad0b23b6fc2dd0a4f
| 4,884
|
py
|
Python
|
teuthology/contextutil.py
|
shrek-github/teuthology
|
5a89fb217aaf2cd7948f7419f431230ab25053c6
|
[
"MIT"
] | null | null | null |
teuthology/contextutil.py
|
shrek-github/teuthology
|
5a89fb217aaf2cd7948f7419f431230ab25053c6
|
[
"MIT"
] | 1
|
2020-03-05T03:00:08.000Z
|
2020-03-05T03:00:08.000Z
|
teuthology/contextutil.py
|
shrek-github/teuthology
|
5a89fb217aaf2cd7948f7419f431230ab25053c6
|
[
"MIT"
] | 1
|
2020-03-04T03:04:06.000Z
|
2020-03-04T03:04:06.000Z
|
import contextlib
import sys
import logging
import time
import itertools
from teuthology.config import config
from teuthology.exceptions import MaxWhileTries
from six import reraise
log = logging.getLogger(__name__)
@contextlib.contextmanager
def nested(*managers):
"""
Like contextlib.nested but takes callables returning context
managers, to avoid the major reason why contextlib.nested was
deprecated.
This version also logs any exceptions early, much like run_tasks,
to ease debugging. TODO combine nested and run_tasks.
"""
exits = []
vars = []
exc = (None, None, None)
try:
for mgr_fn in managers:
mgr = mgr_fn()
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except Exception:
log.exception('Saw exception from nested tasks')
exc = sys.exc_info()
# FIXME this needs to be more generic
if config.ctx and config.ctx.config.get('interactive-on-error'):
config.ctx.config['interactive-on-error'] = False
from teuthology.task import interactive
log.warning('Saw failure, going into interactive mode...')
interactive.task(ctx=config.ctx, config=None)
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except Exception:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
reraise(*exc)
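# Usage sketch (file paths are illustrative): each argument is a callable returning a
# context manager, so the managers are only constructed once nested() runs:
#
#   with nested(lambda: open('/tmp/a.log', 'w'),
#               lambda: open('/tmp/b.log', 'w')) as (a_log, b_log):
#       a_log.write('hello')
#       b_log.write('world')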
class safe_while(object):
"""
    A context manager to remove boilerplate code that deals with `while` loops
that need a given number of tries and some seconds to sleep between each
one of those tries.
    The simplest possible example will try 10 times, sleeping for 6 seconds between tries:
    >>> from teuthology.contextutil import safe_while
>>> with safe_while() as proceed:
... while proceed():
... # repetitive code here
... print("hello world")
...
Traceback (most recent call last):
...
    MaxWhileTries: reached maximum tries (10) after waiting for 60 seconds
Yes, this adds yet another level of indentation but it allows you to
implement while loops exactly the same as before with just 1 more
indentation level and one extra call. Everything else stays the same,
code-wise. So adding this helper to existing code is simpler.
:param sleep: The amount of time to sleep between tries. Default 6
:param increment: The amount to add to the sleep value on each try.
Default 0.
:param tries: The amount of tries before giving up. Default 10.
:param action: The name of the action being attempted. Default none.
:param _raise: Whether to raise an exception (or log a warning).
Default True.
:param _sleeper: The function to use to sleep. Only used for testing.
Default time.sleep
"""
def __init__(self, sleep=6, increment=0, tries=10, action=None,
_raise=True, _sleeper=None):
self.sleep = sleep
self.increment = increment
self.tries = tries
self.counter = 0
self.sleep_current = sleep
self.action = action
self._raise = _raise
self.sleeper = _sleeper or time.sleep
def _make_error_msg(self):
"""
Sum the total number of seconds we waited while providing the number
of tries we attempted
"""
total_seconds_waiting = sum(
itertools.islice(
itertools.count(self.sleep, self.increment),
self.tries
)
)
msg = 'reached maximum tries ({tries})' + \
' after waiting for {total} seconds'
if self.action:
msg = "'{action}' " + msg
msg = msg.format(
action=self.action,
tries=self.tries,
total=total_seconds_waiting,
)
return msg
def __call__(self):
self.counter += 1
if self.counter == 1:
return True
if self.counter > self.tries:
error_msg = self._make_error_msg()
if self._raise:
raise MaxWhileTries(error_msg)
else:
log.warning(error_msg)
return False
self.sleeper(self.sleep_current)
self.sleep_current += self.increment
return True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
| 33.22449
| 79
| 0.59869
|
795148dd80fc92a0199d6c5937f92fc2dab57954
| 3,666
|
py
|
Python
|
second/builder/dataset_builder.py
|
zhb0920/second.pytorch
|
f980f3d18749b7a2830983222d1695b5cb321dae
|
[
"MIT"
] | null | null | null |
second/builder/dataset_builder.py
|
zhb0920/second.pytorch
|
f980f3d18749b7a2830983222d1695b5cb321dae
|
[
"MIT"
] | null | null | null |
second/builder/dataset_builder.py
|
zhb0920/second.pytorch
|
f980f3d18749b7a2830983222d1695b5cb321dae
|
[
"MIT"
] | null | null | null |
from second.protos import input_reader_pb2
from second.data.dataset import KittiDataset
from second.data.preprocess import prep_pointcloud
import numpy as np
from second.builder import dbsampler_builder
from functools import partial
from second.utils import config_tool
def build(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner=None):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
generate_bev = model_config.use_bev
without_reflectivity = model_config.without_reflectivity
num_point_features = model_config.num_point_features
downsample_factor = config_tool.get_downsample_factor(model_config)
cfg = input_reader_config
db_sampler_cfg = input_reader_config.database_sampler
db_sampler = None
if len(db_sampler_cfg.sample_groups) > 0: # enable sample
db_sampler = dbsampler_builder.build(db_sampler_cfg)
u_db_sampler_cfg = input_reader_config.unlabeled_database_sampler
u_db_sampler = None
if len(u_db_sampler_cfg.sample_groups) > 0: # enable sample
u_db_sampler = dbsampler_builder.build(u_db_sampler_cfg)
grid_size = voxel_generator.grid_size
# [352, 400]
feature_map_size = grid_size[:2] // downsample_factor
feature_map_size = [*feature_map_size, 1][::-1]
print("feature_map_size", feature_map_size)
assert all([n != '' for n in target_assigner.classes]), "you must specify class_name in anchor_generators."
prep_func = partial(
prep_pointcloud,
root_path=cfg.kitti_root_path,
class_names=target_assigner.classes,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
training=training,
max_voxels=cfg.max_number_of_voxels,
remove_outside_points=False,
remove_unknown=cfg.remove_unknown_examples,
create_targets=training,
shuffle_points=cfg.shuffle_points,
gt_rotation_noise=list(cfg.groundtruth_rotation_uniform_noise),
gt_loc_noise_std=list(cfg.groundtruth_localization_noise_std),
global_rotation_noise=list(cfg.global_rotation_uniform_noise),
global_scaling_noise=list(cfg.global_scaling_uniform_noise),
global_random_rot_range=list(
cfg.global_random_rotation_range_per_object),
db_sampler=db_sampler,
unlabeled_db_sampler=u_db_sampler,
generate_bev=generate_bev,
without_reflectivity=without_reflectivity,
num_point_features=num_point_features,
anchor_area_threshold=cfg.anchor_area_threshold,
gt_points_drop=cfg.groundtruth_points_drop_percentage,
gt_drop_max_keep=cfg.groundtruth_drop_max_keep_points,
remove_points_after_sample=cfg.remove_points_after_sample,
remove_environment=cfg.remove_environment,
use_group_id=cfg.use_group_id,
downsample_factor=downsample_factor)
dataset = KittiDataset(
info_path=cfg.kitti_info_path,
root_path=cfg.kitti_root_path,
num_point_features=num_point_features,
target_assigner=target_assigner,
feature_map_size=feature_map_size,
prep_func=prep_func)
return dataset
| 42.137931
| 111
| 0.743317
|
7951490c0d2695e682766ac457b586dbc2e538a2
| 3,080
|
py
|
Python
|
pg_copy.py
|
ollieglass/sqlalchemy-pg-copy
|
882747b7a4764631c6c5c4217a28ceb15f13ac4f
|
[
"MIT"
] | 2
|
2019-12-18T16:41:49.000Z
|
2020-07-31T18:09:31.000Z
|
pg_copy.py
|
ollieglass/sqlalchemy-pg-copy
|
882747b7a4764631c6c5c4217a28ceb15f13ac4f
|
[
"MIT"
] | null | null | null |
pg_copy.py
|
ollieglass/sqlalchemy-pg-copy
|
882747b7a4764631c6c5c4217a28ceb15f13ac4f
|
[
"MIT"
] | null | null | null |
import json
import io
import datetime as dt
# ------------------------------------------------------------------------------
# CSV pre-processing
def string_to_pg_csv_string(v):
v = v.replace('\\', '\\\\') # replace single \ with \\
v = v.replace('\n', '\\n')
v = v.replace('\r', '\\r')
v = v.replace('"', '""')
# can't store unicode null in Postgres so replace with empty string
# https://stackoverflow.com/a/28816294/160406
v = v.replace('\0', '')
v = v.replace('\x00', '')
v = v.replace(u'\u0000', '')
return f'"{v}"'
def value_to_pg_csv_value(v):
if v is None:
return r'\N'
elif isinstance(v, str):
return string_to_pg_csv_string(v)
elif isinstance(v, dt.datetime):
return f'"{v.isoformat()}"'
return str(v)
def list_of_dicts_to_pg_csv_lines(objs):
columns = objs[0].keys()
for o in objs:
values = [ value_to_pg_csv_value(o[c]) for c in columns ]
line = ','.join(values) + '\n'
yield line
# ------------------------------------------------------------------------------
# StringIO
def lines_to_stringio(lines):
f = io.StringIO()
for l in lines:
f.write(l)
f.seek(0)
return f
# ------------------------------------------------------------------------------
# Insert
def insert_with_copy(engine, objs, target_table, target_schema=None, ignore_duplicates=True):
"""Fast insert to a Postgres SQL table using COPY
Ignores duplicates by
- inserting to a temp table
- INSERT INTO from temp to target with ON CONFLICT DO NOTHING
- dropping temp table
Args:
        engine: SQLAlchemy engine object
objs: list of dictionaries. Keys must be identical
target_table: name of table to be uploaded to
target_schema: optional
ignore_duplicates: optional
"""
if target_schema:
target_table = f'{target_schema}.{target_table}'
column_names = ','.join(objs[0].keys())
pg_csv_lines = list_of_dicts_to_pg_csv_lines(objs)
f = lines_to_stringio(pg_csv_lines)
conn = engine.raw_connection()
with conn.cursor() as cursor:
if ignore_duplicates:
staging_table = f'staging_{target_table}'
cursor.execute(f'''
CREATE TEMP TABLE {staging_table}
ON COMMIT DROP
AS
SELECT *
FROM {target_table}
WITH NO DATA
''')
cursor.copy_expert(f"""
COPY {staging_table} ({column_names})
FROM STDIN WITH CSV NULL '\\N'
""", f)
cursor.execute(f'''
INSERT INTO {target_table} ({column_names})
SELECT {column_names}
FROM {staging_table}
ON CONFLICT DO NOTHING
''')
else:
cursor.copy_expert(f"""
COPY {target_table} ({column_names})
FROM STDIN WITH CSV NULL '\\N'
""", f)
conn.commit()
conn.close()
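# ------------------------------------------------------------------------------
# Usage sketch (the connection URL and table name below are made-up examples):
#
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql://user:password@localhost/mydb')
#   rows = [
#       {'id': 1, 'name': 'alpha', 'created': dt.datetime(2020, 1, 1)},
#       {'id': 2, 'name': 'beta', 'created': dt.datetime(2020, 1, 2)},
#   ]
#   insert_with_copy(engine, rows, 'my_table', ignore_duplicates=True)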
| 26.324786
| 93
| 0.520455
|
7951497d1bde555444080015e51aeaf7dd26effe
| 547
|
py
|
Python
|
python/9th/test_2.py
|
Elendeer/homework
|
b17db0dddc8ce23ce047cb764f759d9ec47f6a8b
|
[
"MIT"
] | 1
|
2020-10-14T03:45:24.000Z
|
2020-10-14T03:45:24.000Z
|
python/9th/test_2.py
|
Elendeer/homework
|
b17db0dddc8ce23ce047cb764f759d9ec47f6a8b
|
[
"MIT"
] | null | null | null |
python/9th/test_2.py
|
Elendeer/homework
|
b17db0dddc8ce23ce047cb764f759d9ec47f6a8b
|
[
"MIT"
] | null | null | null |
'''
Author : Daniel_Elendeer
Date : 2020-11-21 18:33:18
LastEditors : Daniel_Elendeer
LastEditTime : 2020-11-21 18:55:46
Description :
'''
if __name__ == '__main__':
n, k = map(int, input().split())
lst = list(range(1, n + 1))
print(lst)
idx = 0
count = k
while len(lst) != 1:
count -= 1
if idx >= len(lst):
idx = 0
if count == 0:
lst.remove(lst[idx])
print(lst)
count = k
else :
idx += 1
print(lst.pop())
| 18.233333
| 36
| 0.47532
|
795149baf617c7d31780bf1b40d91a07ae35b243
| 2,482
|
py
|
Python
|
benchmarks/benchmarks/signal_filtering.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | 2
|
2020-06-20T14:11:14.000Z
|
2020-10-12T07:11:36.000Z
|
benchmarks/benchmarks/signal_filtering.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/benchmarks/signal_filtering.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | 1
|
2021-10-20T08:10:46.000Z
|
2021-10-20T08:10:46.000Z
|
from __future__ import division, absolute_import, print_function
import numpy as np
import timeit
from concurrent.futures import ThreadPoolExecutor, wait
try:
from scipy.signal import lfilter, firwin, decimate, butter, sosfilt
except ImportError:
pass
from .common import Benchmark
class Decimate(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
class ParallelSosfilt(Benchmark):
timeout = 100
timer = timeit.default_timer
param_names = ['n_samples', 'threads']
params = [
[1e3, 10e3],
[1, 2, 4]
]
def setup(self, n_samples, threads):
self.filt = butter(8, 8e-6, "lowpass", output="sos")
self.data = np.arange(int(n_samples) * 3000).reshape(int(n_samples), 3000)
self.chunks = np.array_split(self.data, threads)
def time_sosfilt(self, n_samples, threads):
pool = ThreadPoolExecutor(max_workers=threads)
futures = []
for i in range(threads):
futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))
wait(futures)
class Sosfilt(Benchmark):
param_names = ['n_samples', 'order']
params = [
[1000, 1000000],
[6, 20]
]
def setup(self, n_samples, order):
self.sos = butter(order, [0.1575, 0.1625], 'band', output='sos')
self.y = np.random.RandomState(0).randn(n_samples)
def time_sosfilt_basic(self, n_samples, order):
sosfilt(self.sos, self.y)
| 28.204545
| 82
| 0.616035
|
79514ac8aaf153c1a4bc7e7dba08e9a507ad15fa
| 1,031
|
py
|
Python
|
python/test/test_inference_service_list.py
|
adriangonz/seldon-deploy-sdk
|
c5504838630a87053387cec57ec2e1e7251971e2
|
[
"Apache-2.0"
] | 6
|
2021-02-18T14:37:54.000Z
|
2022-01-13T13:27:43.000Z
|
python/test/test_inference_service_list.py
|
adriangonz/seldon-deploy-sdk
|
c5504838630a87053387cec57ec2e1e7251971e2
|
[
"Apache-2.0"
] | 14
|
2021-01-04T16:32:03.000Z
|
2021-12-13T17:53:59.000Z
|
python/test/test_inference_service_list.py
|
adriangonz/seldon-deploy-sdk
|
c5504838630a87053387cec57ec2e1e7251971e2
|
[
"Apache-2.0"
] | 7
|
2021-03-17T09:05:55.000Z
|
2022-01-05T10:39:56.000Z
|
# coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: hello@seldon.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.inference_service_list import InferenceServiceList # noqa: E501
from seldon_deploy_sdk.rest import ApiException
class TestInferenceServiceList(unittest.TestCase):
"""InferenceServiceList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInferenceServiceList(self):
"""Test InferenceServiceList"""
# FIXME: construct object with mandatory attributes with example values
# model = seldon_deploy_sdk.models.inference_service_list.InferenceServiceList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.146341
| 122
| 0.734239
|
79514bea4a06e27854cdd81f2a682b80f6c6fa3d
| 1,916
|
py
|
Python
|
TUI/PlaySound.py
|
ApachePointObservatory/TUI
|
8f130368254161a2748167b7c8260cc24170c28c
|
[
"BSD-3-Clause"
] | 1
|
2020-01-28T06:28:00.000Z
|
2020-01-28T06:28:00.000Z
|
TUI/PlaySound.py
|
ApachePointObservatory/TUI
|
8f130368254161a2748167b7c8260cc24170c28c
|
[
"BSD-3-Clause"
] | 1
|
2017-06-05T22:53:58.000Z
|
2017-06-05T22:53:58.000Z
|
TUI/PlaySound.py
|
r-owen/TUI
|
8f130368254161a2748167b7c8260cc24170c28c
|
[
"BSD-3-Clause"
] | 1
|
2020-01-28T06:28:02.000Z
|
2020-01-28T06:28:02.000Z
|
#!/usr/bin/env python
"""Play each of the standard sounds for TUI.
Gets the sounds from TUI preferences.
2003-04-28 ROwen Minimal implementation.
2003-10-30 ROwen Added msgReceived.
2003-11-24 ROwen Moved to TUI.Sounds; changed to use sound prefs.
2003-12-03 ROwen Added exposureBegins, exposureEnds, guidingBegins, guidingEnds.
2003-12-09 ROwen Modified to import TUI.TUIModel when it's used; this
allows TUI.Sounds to be imported before TUI.TUIModel.
2004-05-18 ROwen Stopped importing RO.Wdg; it wasn't used.
2005-08-02 ROwen Moved from Sounds/PlaySounds.py -> PlaySound.py
2006-04-14 ROwen Added guideModeChanges.
2006-10-24 ROwen Added logHighlightedText.
2009-11-09 ROwen Added support for Play Sounds preference.
2011-01-31 ROwen Added exposureFailed.
"""
import TUI.TUIModel
_Prefs = None
_PlaySoundsPref = None
def _playSound(name):
if name is None:
return
global _Prefs, _PlaySoundsPref
if _Prefs is None:
_Prefs = TUI.TUIModel.getModel().prefs
_PlaySoundsPref = _Prefs.getPrefVar("Play Sounds")
if _PlaySoundsPref.getValue():
_Prefs.getPrefVar(name).play()
def axisHalt():
_playSound("Axis Halt")
def axisSlew():
_playSound("Axis Slew")
def axisTrack():
_playSound("Axis Track")
def cmdDone():
_playSound("Command Done")
def cmdFailed():
_playSound("Command Failed")
def exposureBegins():
_playSound("Exposure Begins")
def exposureEnds():
_playSound("Exposure Ends")
def exposureFailed():
_playSound("Exposure Failed")
def guidingBegins():
_playSound("Guiding Begins")
def guideModeChanges():
_playSound("Guide Mode Changes")
def guidingEnds():
_playSound("Guiding Ends")
def msgReceived():
_playSound("Message Received")
def noGuideStar():
_playSound("No Guide Star")
def logHighlightedText():
_playSound("Log Highlighted Text")
| 25.891892
| 84
| 0.7119
|
79514c332631574ac04f6a851b3e3e7f51959d91
| 2,088
|
py
|
Python
|
apps/contact/views.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 32
|
2017-02-22T13:38:38.000Z
|
2022-03-31T23:29:54.000Z
|
apps/contact/views.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 694
|
2017-02-15T23:09:52.000Z
|
2022-03-31T23:16:07.000Z
|
apps/contact/views.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 35
|
2017-09-02T21:13:09.000Z
|
2022-02-21T11:30:30.000Z
|
import logging
from django.contrib import messages
from django.core.mail import EmailMessage
from django.shortcuts import redirect, render
from apps.contact.forms import ContactForm
# Index page
def index(request):
context = {"form": ContactForm}
return render(request, "contact/index.html", context)
def contact_submit(request):
log = logging.getLogger(__name__)
if request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
name = form.cleaned_data["contact_name"]
content = form.cleaned_data["content"]
from_email = form.cleaned_data["contact_email"]
to_email = [form.cleaned_data["contact_receiver"]]
client_ip = get_client_ip(request)
if not name:
name = "Noen"
if request.user.is_authenticated:
username = request.user.username
log.info(
"{username} has tried to contact {to_email}".format(
username=username, to_email=to_email
)
)
else:
log.info(
"A user at {client_ip} has tried to contact {to_email}".format(
client_ip=client_ip, to_email=to_email
)
)
subject = (
"[Kontakt] {name} har kontaktet dere gjennom online.ntnu.no".format(
name=name
)
)
EmailMessage(subject, content, from_email, to_email).send()
messages.success(request, "Meldingen ble sendt")
else:
messages.error(
request,
"Meldingen ble ikke sendt. Prøv igjen eller send mail direkte til dotkom",
)
return redirect("contact_index")
def get_client_ip(request):
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR")
return ip
| 29.828571
| 90
| 0.566092
|
79514dc63cefb88fa908e188c15efb3d20f5fd3f
| 26,829
|
py
|
Python
|
CountyMapbook/CountyMapbook_v2.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | 1
|
2017-05-24T02:05:20.000Z
|
2017-05-24T02:05:20.000Z
|
CountyMapbook/CountyMapbook_v2.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | null | null | null |
CountyMapbook/CountyMapbook_v2.py
|
adambreznicky/smudge_python
|
af7ba221890253ac6fe7f38691b351861f8b3d96
|
[
"MIT"
] | null | null | null |
__file__ = 'CountyMapbook_v1'
__date__ = '6/18/2014'
__author__ = 'ABREZNIC'
import os, arcpy,datetime
from arcpy import env
from reportlab.lib import colors
from reportlab.lib.units import inch
from reportlab.platypus import BaseDocTemplate, Paragraph, frames, Table, TableStyle, Frame, flowables, Flowable, PageTemplate
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
cofolder = "C:\\TxDOT\\CountyMapbook"
workspace = cofolder + "\\" + curYear
database = workspace + "\\Working.gdb"
comanche = "Connection to Comanche.sde"
histmarkers = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Texas_Historical_Commission\\TPP_GIS.APP_TPP_GIS_ADMIN.Historical_Markers"
campgrounds = ""
restareas = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Travel\\TPP_GIS.APP_TPP_GIS_ADMIN.REST_AREA_PNT"
parks = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Park\\TPP_GIS.APP_TPP_GIS_ADMIN.Public_Lands_2014"
cemeteries = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery"
cemeteriesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery\\TPP_GIS.APP_TPP_GIS_ADMIN.Cemetery_Points"
roads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
counties = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.County\\TPP_GIS.APP_TPP_GIS_ADMIN.County"
airports = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport"
airportsPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport\\TPP_GIS.APP_TPP_GIS_ADMIN.Airport_Points"
prisons = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Prisons"
military = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Military"
schools = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Base_Map_Layers\\TPP_GIS.APP_TPP_GIS_ADMIN.Education"
cities = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
citiesPT = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City_Points"
lakes = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Water_Bodies"
railroads = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroad\\TPP_GIS.APP_TPP_GIS_ADMIN.Railroads"
rivers = "Database Connections\\" + comanche + "\\TPP_GIS.APP_TPP_GIS_ADMIN.Water\\TPP_GIS.APP_TPP_GIS_ADMIN.Streams"
grid = "Database Connections\\" + comanche + "\\TPP_GIS.MCHAMB1.Map_Index_Grids\\TPP_GIS.MCHAMB1.State_Grid_120K"
def preparation():
print "Creating database..."
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
try:
arcpy.Delete_management(database)
except:
pass
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
arcpy.CreateFileGDB_management(workspace, "Working.gdb")
print "Copying historical markers..."
arcpy.Select_analysis(histmarkers, database + "\\histmarkers", "markernum IS NOT NULL and indexname IS NOT NULL")
arcpy.AddField_management(database + "\\histmarkers", "label", "TEXT", "", "", 200)
cursor = arcpy.UpdateCursor(database + "\\histmarkers")
for row in cursor:
row.setValue("label", str(row.markernum) + " - " + row.indexname)
cursor.updateRow(row)
del cursor
del row
print "Copying campgrounds..."
print "Copying rest areas..."
arcpy.Copy_management(restareas, database + "\\restareas")
arcpy.AddField_management(database + "\\restareas", "label", "TEXT", "", "", 200)
cursor = arcpy.UpdateCursor(database + "\\restareas")
for row in cursor:
if row.RA_TYPE_NM == "Picnic":
row.setValue("label", "Picnic Area")
elif row.RA_TYPE_NM == "Rest":
row.setValue("label", "Rest Area")
elif row.RA_TYPE_NM == "Rest_EX":
row.setValue("label", "Rest Area (EX)")
elif row.RA_TYPE_NM == "TIC":
row.setValue("label", "Travel Information Center")
else:
cursor.deleteRow(row)
cursor.updateRow(row)
del cursor
del row
print "Copying parks..."
arcpy.Select_analysis(parks, database + "\\parks", "(GOVT_JURIS = '3' OR GOVT_JURIS = '4') AND LAND_NM IS NOT NULL AND LAND_NM <> ''")
print "Copying cemeteries..."
arcpy.Select_analysis(cemeteries, database + "\\cemeteries", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
arcpy.Select_analysis(cemeteriesPT, database + "\\cemeteries_point", "CEMETERY_NM IS NOT NULL AND CEMETERY_NM <> ''")
print "Copying highways..."
arcpy.Select_analysis(roads, database + "\\highways", "(( RTE_CLASS = '1' OR RTE_CLASS = '6' ) AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 ) OR (RTE_NM = 'SL0008' AND RDBD_TYPE = 'KG' AND RTE_OPEN = 1 )")
print "Copying counties..."
arcpy.Copy_management(counties, database + "\\counties")
print "Copying airports..."
arcpy.Select_analysis(airports, database + "\\airports", "ARPRT_NM <> '' AND ARPRT_NM IS NOT NULL")
arcpy.Select_analysis(airportsPT, database + "\\airports_point", "DISPLAY = 'Yes'")
print "Copying county roads..."
arcpy.Select_analysis(roads, database + "\\countyroads", "RTE_CLASS = '2' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG'")
print "Copying prisons..."
arcpy.Copy_management(prisons, database + "\\prisons")
print "Copying military..."
arcpy.Copy_management(military, database + "\\military")
print "Copying schools..."
arcpy.Copy_management(schools, database + "\\schools")
print "Copying cities..."
arcpy.Copy_management(cities, database + "\\cities")
arcpy.Select_analysis(citiesPT, database + "\\cities_point", "INC = 'N'")
print "Copying lakes..."
arcpy.Select_analysis(lakes, database + "\\lakes", "BODY_NM IS NOT NULL AND BODY_NM <> '' AND BODY_TYPE = '1'")
print "Copying railroads..."
arcpy.Select_analysis(railroads, database + "\\railroads", "RR_TYPE = 'M' AND RR_STAT = 'A'")
print "Copying rivers..."
arcpy.Select_analysis(rivers, database + "\\rivers", "STRM_TYPE = '1'")
print "Copying federal roads..."
arcpy.Select_analysis(roads, database + "\\federalroads", "RTE_CLASS = '7' AND RTE_OPEN = 1 AND RDBD_TYPE = 'KG' AND FULL_ST_NM <> '' AND FULL_ST_NM IS NOT NULL")
print "Copying grid..."
arcpy.Copy_management(grid, database + "\\grid")
#
print "Renumbering grid..."
cursor = arcpy.UpdateCursor(database + "\\grid")
for row in cursor:
row.setValue("ID", row.ID - 66)
row.setValue("STATE_ID", row.STATE_ID - 66)
if row.NORTH != 0:
row.setValue("NORTH", row.NORTH - 66)
if row.SOUTH != 0:
row.setValue("SOUTH", row.SOUTH - 66)
if row.EAST != 0:
row.setValue("EAST", row.EAST - 66)
if row.WEST != 0:
row.setValue("WEST", row.WEST - 66)
cursor.updateRow(row)
del cursor
del row
print "Creating union..."
arcpy.Union_analysis([database + "\\grid", database + "\\counties"], database + "\\union")
cursor = arcpy.UpdateCursor(database + "\\union")
for row in cursor:
if row.CNTY_NM == "" or row.CNTY_NM is None or row.ID == 0:
cursor.deleteRow(row)
del cursor
del row
def intersects():
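    # Intersects every copied feature class with the grid/county union and
    # summarizes the results (one row per grid cell / county / feature name).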
env.workspace = database
print "Creating field dictionary..."
dict = {}
dict["histmarkers"] = "label"
#dict["campgrounds"] = ""
dict["restareas"] = "label"
dict["parks"] = "LAND_NM"
dict["cemeteries"] = "CEMETERY_NM"
dict["cemeteries_point"] = "CEMETERY_NM"
dict["highways"] = "FULL_ST_NM"
dict["counties"] = "CNTY_NM"
dict["airports"] = "ARPRT_NM"
dict["airports_point"] = "ARPRT_NM"
dict["countyroads"] = "FULL_ST_NM"
dict["prisons"] = "PRISON_NM"
dict["military"] = "BASE_NM"
dict["schools"] = "SCHOOL_NM"
dict["cities"] = "CITY_NM"
dict["cities_point"] = "CITY_NM"
dict["lakes"] = "BODY_NM"
dict["railroads"] = "RR_NM"
dict["rivers"] = "STRM_NM"
dict["federalroads"] = "FULL_ST_NM"
print "Performing intersects..."
fcList = arcpy.ListFeatureClasses()
for fc in fcList:
if fc != "union" and fc != "grid":
print str(fc)
arcpy.Intersect_analysis(["union", fc], fc + "__INTERSECT")
del fcList
del fc
print "Summarizing..."
fcList = arcpy.ListFeatureClasses()
for fc in fcList:
if fc.split("__")[-1] == "INTERSECT":
dictname = fc.split("__")[0]
print dictname
field = dict[dictname]
arcpy.AddField_management(fc, "UNIQUE", "TEXT", "", "", 250)
cursor = arcpy.UpdateCursor(fc)
for row in cursor:
value = row.getValue(field)
if value is None:
value = ""
row.setValue("UNIQUE", str(row.ID) + row.CNTY_NM + value)
cursor.updateRow(row)
del cursor
del row
arcpy.Statistics_analysis(fc, dictname + "_SUMMARIZED", [["ID", "MIN"], ["CNTY_NM", "FIRST"], [dict[dictname], "FIRST"]], ["UNIQUE"])
print "Merging with point tables..."
arcpy.Merge_management(["cemeteries_SUMMARIZED", "cemeteries_point_SUMMARIZED"], "cemeteries_all_SUMMARIZED")
arcpy.Merge_management(["airports_SUMMARIZED", "airports_point_SUMMARIZED"], "airports_all_SUMMARIZED")
arcpy.Merge_management(["cities_SUMMARIZED", "cities_point_SUMMARIZED"], "cities_all_SUMMARIZED")
print "Renaming tables..."
arcpy.Rename_management("cemeteries_SUMMARIZED", "cemeteries_SUMpreMERGE")
arcpy.Rename_management("cemeteries_point_SUMMARIZED", "cemeteries_point_SUMpreMERGE")
arcpy.Rename_management("airports_SUMMARIZED", "airports_SUMpreMERGE")
arcpy.Rename_management("airports_point_SUMMARIZED", "airports_point_SUMpreMERGE")
arcpy.Rename_management("cities_SUMMARIZED", "cities_SUMpreMERGE")
arcpy.Rename_management("cities_point_SUMMARIZED", "cities_point_SUMpreMERGE")
def pdf():
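    # Builds one landscape 11x17 index PDF per summarized table using ReportLab,
    # grouping entries by county and listing each feature with its grid page number.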
print "Making directory..."
if not os.path.exists(workspace + "\\PDF"):
os.makedirs(workspace + "\\PDF")
else:
for file in os.listdir(workspace + "\\PDF"):
thefile = os.path.join(workspace + "\\PDF", file)
os.remove(thefile)
#
print "Creating field dictionary..."
dict = {}
dict["histmarkers"] = "label"
#dict["campgrounds"] = ""
dict["restareas"] = "label"
dict["parks"] = "LAND_NM"
dict["cemeteries"] = "CEMETERY_NM"
dict["cemeteries_point"] = "CEMETERY_NM"
dict["highways"] = "FULL_ST_NM"
dict["counties"] = "CNTY_NM"
dict["airports"] = "ARPRT_NM"
dict["airports_point"] = "ARPRT_NM"
dict["countyroads"] = "FULL_ST_NM"
dict["prisons"] = "PRISON_NM"
dict["military"] = "BASE_NM"
dict["schools"] = "SCHOOL_NM"
dict["cities"] = "CITY_NM"
dict["cities_point"] = "CITY_NM"
dict["lakes"] = "BODY_NM"
dict["railroads"] = "RR_NM"
dict["rivers"] = "STRM_NM"
dict["federalroads"] = "FULL_ST_NM"
#
print "Creating label dictionary..."
dict2 = {}
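    # dict2 maps each layer to [column sub-header label, page title, column width in inches]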
dict2["histmarkers"] = ["Marker", "Historical Markers", 1]
#dict2["campgrounds"] = [""]
dict2["restareas"] = ["Rest Area", "Rest Areas", 1]
dict2["parks"] = ["Park", "Parks", 1]
dict2["cemeteries"] = ["Cemetery", "Cemeteries", 1]
dict2["cemeteries_point"] = ["Cemetery", "Cemeteries", 1]
dict2["highways"] = ["Highway", "Highways", 1]
dict2["counties"] = ["County", "Counties", 1]
dict2["airports"] = ["Airport", "Airports", 1]
dict2["airports_point"] = ["Airport", "Airports", 1.925]
dict2["countyroads"] = ["Street", "County Roads", 1]
dict2["prisons"] = ["Prison", "Prisons", 1]
dict2["military"] = ["Military Base", "Military Bases", 1]
dict2["schools"] = ["School", "Schools", 1]
dict2["cities"] = ["City", "Cities", 1]
dict2["cities_point"] = ["City", "Cities", 1]
dict2["lakes"] = ["Lake", "Lakes", 1]
dict2["railroads"] = ["Railroad", "Railroads", 1]
dict2["rivers"] = ["River", "Major Rivers", 1]
dict2["federalroads"] = ["Roadway", "Federal Roads", 1]
#
print "Creating index PDF pages..."
env.workspace = database
fcList = arcpy.ListTables()
fcList.sort()
for fc in fcList:
if fc.split("_")[-1] == "SUMMARIZED":
feature = fc.split("_")[0]
print feature
title = dict2[feature][1]
subtitle = dict2[feature][0]
wide = dict2[feature][2]
from reportlab.lib.pagesizes import ELEVENSEVENTEEN, landscape
width, height = landscape(ELEVENSEVENTEEN)
f = frames.Frame(.8*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
f2 = frames.Frame((.8 + 2 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            # the x offsets need *inch (as in f and f2); without it the frames collapse onto the left margin
            f3 = frames.Frame((.8 + 3 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            f4 = frames.Frame((.8 + 4 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            f5 = frames.Frame((.8 + 5 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            f6 = frames.Frame((.8 + 6 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            f7 = frames.Frame((.8 + 7 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
            f8 = frames.Frame((.8 + 8 * wide)*inch, .8*inch, wide*inch, 9.4*inch, bottomPadding=1)
doc = BaseDocTemplate(workspace + "\\PDF\\" + fc + ".pdf", pagesize=landscape(ELEVENSEVENTEEN))
def thecanvas(c, doc):
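                # Page decoration callback: draws the centered page title, the top
                # and bottom rules, and the page number on each page.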
from reportlab.lib.pagesizes import ELEVENSEVENTEEN, landscape
width, height = landscape(ELEVENSEVENTEEN)
c.setFont("Helvetica-Bold", 18)
c.setFillColorRGB(0, 0, 1)
c.drawCentredString(width/2,height - .7*inch, title.upper())
c.setFillColorRGB(.5, .3, .1)
                #horizontal lines
c.line(.75*inch, height - .75*inch, width - .75*inch, height - .75*inch)
c.line(.75*inch, .75*inch, width - .75*inch, .75*inch)
c.setFont("Times-Bold", 8)
c.setFillColorRGB(0, 0, 0)
pageNUM = c.getPageNumber()
c.drawRightString(width-.75*inch, .6*inch, "Page " + str(pageNUM))
doc.addPageTemplates([PageTemplate(frames=[f, f2, f3, f4, f5, f6, f7, f8], onPage=thecanvas)])
#
elements = []
data = []
dictvalue = dict[fc.split("_")[0]]
cursor = arcpy.SearchCursor(fc, "", "", "", "FIRST_CNTY_NM A; FIRST_" + dictvalue + " A; MIN_ID A")
starter = 0
counto = int(arcpy.GetCount_management(fc).getOutput(0))
total = counto-1
previous = ""
for row in cursor:
current = row.FIRST_CNTY_NM
if starter == 0:
line = ["", current, ""]
data.append(line)
t = Table(data, colWidths=[.1*inch, 1.725*inch, .1*inch], rowHeights=[.3*inch]*len(data), )
t.setStyle(TableStyle([
('FONTNAME', (1, 0), (1, 0), 'Times-Bold'),
('FONTSIZE', (1, 0), (1, 0), 12),
('TEXTCOLOR', (1, 0), (1, 0), colors.red),
('ALIGN', (1, 0), (1, 0), 'CENTER'),
('VALIGN', (1, 0), (1, 0), 'MIDDLE'),
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (2, 0), (2, 0), 1, colors.saddlebrown),
('LINEBELOW', (1, 0), (1, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [""]
data.append(line)
t = Table(data, colWidths=[1.925*inch], rowHeights=[.1*inch]*len(data), )
t.setStyle(TableStyle([
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (0, 0), (0, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [subtitle, "Page"]
data.append(line)
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTNAME', (0, 0), (1, (len(data)-1)), 'Times-BoldItalic'),
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('TEXTCOLOR', (0, 0), (1, (len(data)-1)), colors.darkblue),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -1)
]))
elements.append(t)
data = []
cellvalue = row.getValue("FIRST_" + dictvalue)
line = [cellvalue, int(row.MIN_ID)]
data.append(line)
previous = current
elif current == previous and starter != total:
cellvalue = row.getValue("FIRST_" + dictvalue)
line = [cellvalue, int(row.MIN_ID)]
data.append(line)
previous = current
elif current != previous and starter != total:
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('BACKGROUND', (1, 0), (1, (len(data)-1)), colors.Color(.9, .9, .9)),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -4)
]))
elements.append(t)
previous = current
data = []
line = ["", current, ""]
data.append(line)
t = Table(data, colWidths=[.1*inch, 1.725*inch, .1*inch], rowHeights=[.3*inch]*len(data), )
t.setStyle(TableStyle([
('FONTNAME', (1, 0), (1, 0), 'Times-Bold'),
('FONTSIZE', (1, 0), (1, 0), 12),
('TEXTCOLOR', (1, 0), (1, 0), colors.red),
('ALIGN', (1, 0), (1, 0), 'CENTER'),
('VALIGN', (1, 0), (1, 0), 'MIDDLE'),
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (2, 0), (2, 0), 1, colors.saddlebrown),
('LINEBELOW', (1, 0), (1, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [""]
data.append(line)
t = Table(data, colWidths=[1.925*inch], rowHeights=[.1*inch]*len(data), )
t.setStyle(TableStyle([
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (0, 0), (0, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [subtitle, "Page"]
data.append(line)
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTNAME', (0, 0), (1, (len(data)-1)), 'Times-BoldItalic'),
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('TEXTCOLOR', (0, 0), (1, (len(data)-1)), colors.darkblue),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -1)
]))
elements.append(t)
data = []
cellvalue = row.getValue("FIRST_" + dictvalue)
line = [cellvalue, int(row.MIN_ID)]
data.append(line)
elif current == previous and starter == total:
cellvalue = row.getValue("FIRST_" + dictvalue)
line = [cellvalue, int(row.MIN_ID)]
data.append(line)
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('BACKGROUND', (1, 0), (1, (len(data)-1)), colors.Color(.9, .9, .9)),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -4)
]))
elements.append(t)
elif current != previous and starter == total:
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('BACKGROUND', (1, 0), (1, (len(data)-1)), colors.Color(.9, .9, .9)),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -4)
]))
elements.append(t)
previous = current
data = []
line = ["", current, ""]
data.append(line)
t = Table(data, colWidths=[.1*inch, 1.725*inch, .1*inch], rowHeights=[.3*inch]*len(data), )
t.setStyle(TableStyle([
('FONTNAME', (1, 0), (1, 0), 'Times-Bold'),
('FONTSIZE', (1, 0), (1, 0), 12),
('TEXTCOLOR', (1, 0), (1, 0), colors.red),
('ALIGN', (1, 0), (1, 0), 'CENTER'),
('VALIGN', (1, 0), (1, 0), 'MIDDLE'),
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (2, 0), (2, 0), 1, colors.saddlebrown),
('LINEBELOW', (1, 0), (1, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [""]
data.append(line)
t = Table(data, colWidths=[1.925*inch], rowHeights=[.1*inch]*len(data), )
t.setStyle(TableStyle([
('LINEBEFORE', (0, 0), (0, 0), 1, colors.saddlebrown),
('LINEAFTER', (0, 0), (0, 0), 1, colors.saddlebrown)
]))
elements.append(t)
data = []
line = [subtitle, "Page"]
data.append(line)
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTNAME', (0, 0), (1, (len(data)-1)), 'Times-BoldItalic'),
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('TEXTCOLOR', (0, 0), (1, (len(data)-1)), colors.darkblue),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -1)
]))
elements.append(t)
data = []
cellvalue = row.getValue("FIRST_" + dictvalue)
line = [cellvalue, int(row.MIN_ID)]
data.append(line)
t = Table(data, colWidths=[1.625*inch, .3*inch], rowHeights=[.12*inch]*len(data))
t.setStyle(TableStyle([
('FONTSIZE', (0, 0), (1, (len(data)-1)), 6),
('ALIGN', (0, 0), (0, (len(data)-1)), 'LEFT'),
('ALIGN', (1, 0), (1, (len(data)-1)), 'RIGHT'),
('BACKGROUND', (1, 0), (1, (len(data)-1)), colors.Color(.9, .9, .9)),
('LINEBEFORE', (0, 0), (0, (len(data)-1)), 1, colors.saddlebrown),
('LINEAFTER', (1, 0), (1, (len(data)-1)), 1, colors.saddlebrown),
('BOTTOMPADDING', (0, 0), (1, (len(data)-1)), -4)
]))
elements.append(t)
starter += 1
doc.build(elements)
del cursor
#preparation()
#intersects()
pdf()
print "That's all folks!"
| 53.021739
| 202
| 0.523948
|
79514e09e1d51f8e7065d45bcfab6dc44035c06a
| 11,386
|
py
|
Python
|
auth0/v3/management/users.py
|
emfloyd2/auth0-python
|
c99e7fccb050740051f13785775a116684cf0006
|
[
"MIT"
] | null | null | null |
auth0/v3/management/users.py
|
emfloyd2/auth0-python
|
c99e7fccb050740051f13785775a116684cf0006
|
[
"MIT"
] | null | null | null |
auth0/v3/management/users.py
|
emfloyd2/auth0-python
|
c99e7fccb050740051f13785775a116684cf0006
|
[
"MIT"
] | null | null | null |
from .rest import RestClient
class Users(object):
"""Auth0 users endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
"""
def __init__(self, domain, token, telemetry=True):
self.domain = domain
self.client = RestClient(jwt=token, telemetry=telemetry)
def _url(self, id=None):
url = 'https://{}/api/v2/users'.format(self.domain)
if id is not None:
return '{}/{}'.format(url, id)
return url
def list(self, page=0, per_page=25, sort=None, connection=None, q=None,
search_engine=None, include_totals=True, fields=None,
include_fields=True):
"""List or search users.
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
sort (str, optional): The field to use for sorting.
1 == ascending and -1 == descending. (e.g: email:1)
connection (str, optional): Connection filter.
q (str, optional): Query in Lucene query string syntax. Only fields
in app_metadata, user_metadata or the normalized user profile
are searchable.
search_engine (str, optional): The version of the search_engine to use
when querying for users. Will default to the latest version available.
See: https://auth0.com/docs/users/search
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
                to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Users/get_users
"""
params = {
'per_page': per_page,
'page': page,
'include_totals': str(include_totals).lower(),
'sort': sort,
'connection': connection,
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'q': q,
'search_engine': search_engine
}
return self.client.get(self._url(), params=params)
def create(self, body):
"""Creates a new user.
Args:
body (dict): Please see: https://auth0.com/docs/api/v2#!/Users/post_users
"""
return self.client.post(self._url(), data=body)
def delete_all_users(self):
"""Deletes all users (USE WITH CAUTION).
        """
return self.client.delete(self._url())
def get(self, id, fields=None, include_fields=True):
"""Get a user.
Args:
id (str): The user_id of the user to retrieve.
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Users/get_users_by_id
"""
params = {
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower()
}
return self.client.get(self._url(id), params=params)
def delete(self, id):
"""Delete a user.
Args:
id (str): The user_id of the user to delete.
See: https://auth0.com/docs/api/management/v2#!/Users/delete_users_by_id
"""
return self.client.delete(self._url(id))
def update(self, id, body):
"""Update a user with the attributes passed in 'body'
Args:
id (str): The user_id of the user to update.
body (dict): Please see: https://auth0.com/docs/api/v2#!/Users/patch_users_by_id
"""
return self.client.patch(self._url(id), data=body)
def list_roles(self, id, page=0, per_page=25, include_totals=True):
"""List the roles associated with a user.
Args:
id (str): The user's id.
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See https://auth0.com/docs/api/management/v2#!/Users/get_user_roles
"""
params = {
'per_page': per_page,
'page': page,
'include_totals': str(include_totals).lower()
}
url = self._url('{}/roles'.format(id))
return self.client.get(url, params=params)
def remove_roles(self, id, roles):
"""Removes an array of roles from a user.
Args:
id (str): The user's id.
            roles (list of str): A list of role ids to remove from the user.
See https://auth0.com/docs/api/management/v2#!/Users/delete_user_roles
"""
url = self._url('{}/roles'.format(id))
body = {'roles': roles}
return self.client.delete(url, data=body)
def add_roles(self, id, roles):
"""Associate an array of roles with a user.
Args:
id (str): The user's id.
            roles (list of str): A list of role ids to associate with the user.
See https://auth0.com/docs/api/management/v2#!/Users/post_user_roles
"""
url = self._url('{}/roles'.format(id))
body = {'roles': roles}
return self.client.post(url, data=body)
def list_permissions(self, id, page=0, per_page=25, include_totals=True):
"""List the permissions associated to the user.
Args:
id (str): The user's id.
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See https://auth0.com/docs/api/management/v2#!/Users/get_permissions
"""
params = {
'per_page': per_page,
'page': page,
'include_totals': str(include_totals).lower()
}
url = self._url('{}/permissions'.format(id))
return self.client.get(url, params=params)
def remove_permissions(self, id, permissions):
"""Removes permissions from a user.
Args:
id (str): The user's id.
            permissions (list of str): A list of permission ids to remove from the user.
See https://auth0.com/docs/api/management/v2#!/Users/delete_permissions
"""
url = self._url('{}/permissions'.format(id))
body = {'permissions': permissions}
return self.client.delete(url, data=body)
def add_permissions(self, id, permissions):
"""Assign permissions to a user.
Args:
id (str): The user's id.
            permissions (list of str): A list of permission ids to associate with the user.
See https://auth0.com/docs/api/management/v2#!/Users/post_permissions
"""
url = self._url('{}/permissions'.format(id))
body = {'permissions': permissions}
return self.client.post(url, data=body)
def delete_multifactor(self, id, provider):
"""Delete a user's multifactor provider.
Args:
id (str): The user's id.
provider (str): The multifactor provider. Supported values 'duo'
or 'google-authenticator'
See: https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider
"""
url = self._url('{}/multifactor/{}'.format(id, provider))
return self.client.delete(url)
def unlink_user_account(self, id, provider, user_id):
"""Unlink a user account
Args:
id (str): The user_id of the user identity.
provider (str): The type of identity provider (e.g: facebook).
user_id (str): The unique identifier for the user for the identity.
See: https://auth0.com/docs/api/management/v2#!/Users/delete_user_identity_by_user_id
"""
url = self._url('{}/identities/{}/{}'.format(id, provider, user_id))
return self.client.delete(url)
def link_user_account(self, user_id, body):
"""Link user accounts.
Links the account specified in the body (secondary account) to the
account specified by the id param of the URL (primary account).
Args:
            user_id (str): The user_id of the primary identity to which the
                secondary account will be linked.
body (dict): Please see: https://auth0.com/docs/api/v2#!/Users/post_identities
"""
url = self._url('{}/identities'.format(user_id))
return self.client.post(url, data=body)
def regenerate_recovery_code(self, user_id):
"""Removes the current recovery token, generates and returns a new one
Args:
user_id (str): The user_id of the user identity.
See: https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
"""
url = self._url('{}/recovery-code-regeneration'.format(user_id))
return self.client.post(url)
def get_guardian_enrollments(self, user_id):
"""Retrieves all Guardian enrollments.
Args:
user_id (str): The user_id of the user to retrieve
See: https://auth0.com/docs/api/management/v2#!/Users/get_enrollments
"""
url = self._url('{}/enrollments'.format(user_id))
return self.client.get(url)
def get_log_events(self, user_id, page=0, per_page=50, sort=None,
include_totals=False):
"""Retrieve every log event for a specific user id
Args:
user_id (str): The user_id of the logs to retrieve
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
Default: 50. Max value: 100
sort (str, optional): The field to use for sorting. Use field:order
where order is 1 for ascending and -1 for descending.
For example date:-1
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Users/get_logs_by_user
"""
params = {
'per_page': per_page,
'page': page,
'include_totals': str(include_totals).lower(),
'sort': sort
}
url = self._url('{}/logs'.format(user_id))
return self.client.get(url, params=params)
| 34.192192
| 93
| 0.589232
|
79514f822ed57b90a3ac05a7838024972a9877ea
| 76,582
|
py
|
Python
|
nova/tests/network/test_neutronv2.py
|
citrix-openstack-build/nova
|
e5fc28adda4b95325f74402fe63ea969e816e6c3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/network/test_neutronv2.py
|
citrix-openstack-build/nova
|
e5fc28adda4b95325f74402fe63ea969e816e6c3
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/network/test_neutronv2.py
|
citrix-openstack-build/nova
|
e5fc28adda4b95325f74402fe63ea969e816e6c3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import uuid
import mox
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo.config import cfg
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova.openstack.common import jsonutils
from nova import test
from nova import utils
CONF = cfg.CONF
#NOTE: Neutron client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
class MyComparator(mox.Comparator):
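    # mox comparator that deep-compares an expected dict/list/tuple structure
    # against the argument actually passed to a mocked call; list and tuple
    # comparison ignores ordering, which keeps the port body expectations terse.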
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in lhs.iteritems():
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
class TestNeutronClient(test.TestCase):
def test_withtoken(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
neutronv2.get_client(my_context)
def test_withouttoken_keystone_connection_error(self):
self.flags(neutron_auth_strategy='keystone')
self.flags(neutron_url='http://anyhost/')
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
neutronv2.get_client,
my_context)
def test_withouttoken_keystone_not_auth(self):
self.flags(neutron_auth_strategy=None)
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid', 'my_tenantid')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
endpoint_url=CONF.neutron_url,
auth_strategy=None,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
neutronv2.get_client(my_context)
class TestNeutronv2Base(test.TestCase):
def setUp(self):
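        # Shared fixtures: two fake instances, the nets1..nets4 network lists,
        # matching port/subnet/floating-ip data, and a mocked neutron client.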
super(TestNeutronv2Base, self).setUp()
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
'availability_zone': 'nova',
'host': 'some_host',
'security_groups': []}
self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance2',
'availability_zone': 'nova',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'tenant_id': 'my_tenantid'}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': 'my_netid2',
'name': 'my_netname2',
'tenant_id': 'my_tenantid'})
self.nets3 = self.nets2 + [{'id': 'my_netid3',
'name': 'my_netname3',
'tenant_id': 'my_tenantid'}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
'device_id': self.instance2['uuid'],
'device_owner': 'compute:nova',
'id': 'my_portid1',
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.float_data1 = [{'port_id': 'my_portid1',
'fixed_ip_address': self.port_address,
'floating_ip_address': '172.0.1.2'}]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}]}]
self.port_address2 = '10.0.2.2'
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': 'my_netid2',
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'id': 'my_portid2',
'fixed_ips':
[{'ip_address': self.port_address2,
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.float_data2 = []
self.float_data2.append(self.float_data1[0])
self.float_data2.append({'port_id': 'my_portid2',
'fixed_ip_address': '10.0.2.2',
'floating_ip_address': '172.0.2.2'})
self.port_data3 = [{'network_id': 'my_netid1',
'device_id': 'device_id3',
'device_owner': 'compute:nova',
'id': 'my_portid3',
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data_n = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
{'id': 'my_subid2',
'cidr': '20.0.1.0/24',
'network_id': 'my_netid2',
'gateway_ip': '20.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
'name': 'ext_net',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
'name': 'nova',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_unassociated = {'tenant_id': 'my_tenantid',
'id': 'fip_id1',
'floating_ip_address': '172.24.4.227',
'floating_network_id': self.fip_pool['id'],
'port_id': None,
'fixed_ip_address': None,
'router_id': None}
fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
self.fip_associated = {'tenant_id': 'my_tenantid',
'id': 'fip_id2',
'floating_ip_address': '172.24.4.228',
'floating_network_id': self.fip_pool['id'],
'port_id': self.port_data2[1]['id'],
'fixed_ip_address': fixed_ip_address,
'router_id': 'router_id1'}
self._returned_nw_info = []
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
self.addCleanup(CONF.reset)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.stubs.UnsetAll)
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
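        # Records the mox expectations for one allocate_for_instance() call:
        # network listing, optional pre-created ports, port create/update bodies
        # (including port binding and extra DHCP options when enabled), and the
        # final _get_instance_nw_info() call. The '_break' kwarg stops recording
        # early so failure paths can be exercised.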
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
has_portbinding = False
has_extra_dhcp_opts = False
        # Note: (dkehn) this option check should be removed as soon as support
        # in neutron is released, see https://bugs.launchpad.net/nova/+bug/1214162
        if (cfg.CONF.dhcp_options_enabled and
                kwargs.get('dhcp_options', None) is not None):
has_extra_dhcp_opts = True
dhcp_options = kwargs.get('dhcp_options')
if kwargs.get('portbinding'):
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
neutronv2.get_client(
mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn(
self.moxed_client)
api._refresh_neutron_extensions_cache()
else:
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
macs = kwargs.get('macs')
if macs:
macs = set(macs)
req_net_ids = []
if 'requested_networks' in kwargs:
for id, fixed_ip, port_id in kwargs['requested_networks']:
if port_id:
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1',
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2['uuid'] or ''}})
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
if macs is not None:
macs.discard('my_mac1')
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
expected_network_order = req_net_ids
else:
expected_network_order = [n['id'] for n in nets]
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
for net_id in expected_network_order:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
port = ports.get(net_id, None)
if not has_portbinding:
api._populate_neutron_extension_values(
self.instance, mox.IgnoreArg()).AndReturn(None)
else:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension().AndReturn(has_portbinding)
api._has_port_binding_extension().AndReturn(has_portbinding)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(
{'port': port})
else:
fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
res_port = {'port': {'id': 'fake'}}
if has_extra_dhcp_opts:
port_req_body['port']['extra_dhcp_opts'] = dhcp_options
if kwargs.get('_break') == 'mac' + net_id:
self.mox.ReplayAll()
return api
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
api._get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets).AndReturn(
self._returned_nw_info)
self.mox.ReplayAll()
return api
def _verify_nw_info(self, nw_inf, index=0):
id_suffix = index + 1
self.assertEquals('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEquals('172.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
self.assertEquals('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEquals('my_portid%s' % id_suffix, nw_inf[index]['id'])
self.assertEquals('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEquals('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix) in
nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
self.mox.StubOutWithMock(conductor_api.API,
'instance_get_by_uuid')
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']}})
info_cache = {'info_cache': {'network_info':
jsonutils.dumps(net_info_cache)}}
api.conductor_api.instance_get_by_uuid(
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn({'networks': nets})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': []})
for i in xrange(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
if x['fixed_ip_address'] == ip['ip_address']]
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[i - 1]['id']).AndReturn(
{'floatingips': float_data})
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context, self.instance)
for i in xrange(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = self._stub_allocate_for_instance(net_idx, **kwargs)
return api.allocate_for_instance(self.context, self.instance, **kwargs)
class TestNeutronv2(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets(self):
# Test get instance_nw_info with networks passed in.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data1})
port_data = self.port_data1
for ip in port_data[0]['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[0]['id']).AndReturn(
{'floatingips': self.float_data1})
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid1'])).AndReturn(
{'subnets': self.subnet_data1})
self.moxed_client.list_ports(
network_id='my_netid1',
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
self.instance,
networks=self.nets1)
self._verify_nw_info(nw_inf, 0)
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
shared=False,
tenant_id=self.instance['project_id']).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': []})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.mox.StubOutWithMock(conductor_api.API,
'instance_get_by_uuid')
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']}})
info_cache = {'info_cache': {'network_info':
jsonutils.dumps(net_info_cache)}}
api.conductor_api.instance_get_by_uuid(
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
self.instance)
id_suffix = 3
self.assertEquals(0, len(nw_inf.fixed_ips()))
self.assertEquals('my_netname1', nw_inf[0]['network']['label'])
self.assertEquals('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEquals('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEquals(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache()
self.assertEquals({'nvp-qos': {'name': 'nvp-qos'}}, api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
instance_type = flavors.get_default_flavor()
instance_type['rxtx_factor'] = 1
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, instance_type))
instance = {'system_metadata': sys_meta}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(instance, port_req_body)
self.assertEquals(port_req_body['port']['rxtx_factor'], 1)
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted as a set; the
        # _allocate_for_instance helper checks that the mac is used to create a
        # port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
result = self._allocate_for_instance(
requested_networks=[(None, None, 'my_portid1')])
self.assertEqual(self.port_data1, result)
def test_allocate_for_instance_not_enough_macs_via_ports(self):
# using a hypervisor MAC via a pre-created port will stop it being
# used to dynamically create a port on a network. We put the network
# first in requested_networks so that if the code were to not pre-check
# requested ports, it would incorrectly assign the mac and not fail.
requested_networks = [
(self.nets2[1]['id'], None, None),
(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac1']))
def test_allocate_for_instance_not_enough_macs(self):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac2']))
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_mac_conflicting_requested_port(self):
        # request only a pre-created port whose MAC is not in the allowed set
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
def test_allocate_for_instance_with_requested_networks(self):
        # request all three networks, in a non-default order
requested_networks = [
(net['id'], None, None)
for net in (self.nets3[1], self.nets3[0], self.nets3[2])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
        # request a single network with an explicit fixed ip
requested_networks = [(self.nets1[0]['id'], '10.0.1.0/24', None)]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
requested_networks = [(None, None, 'myportid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': []})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance)
self.assertEqual(len(nwinfo), 0)
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension().MultipleTimes().AndReturn(False)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'tenant_id': self.instance['project_id'],
},
}
port_req_body['port'].update(binding_port_req_body['port'])
port = {'id': 'portid_' + network['id']}
api._populate_neutron_extension_values(
self.instance, binding_port_req_body).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
NeutronOverQuota = exceptions.NeutronClientException(
message="Quota exceeded for resources: ['port']",
status_code=409)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
index += 1
self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
self.mox.ReplayAll()
self.assertRaises(exception.PortLimitExceeded,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id'],
},
}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
api._get_available_networks(self.context, self.instance['project_id'],
[]).AndRaise(BailOutEarly)
self.mox.ReplayAll()
self.assertRaises(BailOutEarly,
api.allocate_for_instance,
self.context, self.instance,
requested_networks=[(None, None, None)])
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': 'fake'}
self._returned_nw_info = self.port_data1 + [new_port]
nw_info = self._allocate_for_instance()
self.assertEqual(nw_info, [new_port])
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
def _deallocate_for_instance(self, number):
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
for port in port_data:
self.moxed_client.delete_port(port['id'])
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def _test_deallocate_port_for_instance(self, number):
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.delete_port(port_data[0]['id'])
self.mox.StubOutWithMock(conductor_api.API,
'instance_get_by_uuid')
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']}})
info_cache = {'info_cache': {'network_info':
jsonutils.dumps(net_info_cache)}}
api = neutronapi.API()
api.conductor_api.instance_get_by_uuid(
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(info_cache)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data[1:]})
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': [self.nets2[1]]})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
float_data = number == 1 and self.float_data1 or self.float_data2
for data in port_data[1:]:
for ip in data['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=data['id']).AndReturn(
{'floatingips': float_data[1:]})
for port in port_data[1:]:
self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
self.mox.ReplayAll()
nwinfo = api.deallocate_port_for_instance(self.context, self.instance,
port_data[0]['id'])
self.assertEqual(len(nwinfo), len(port_data[1:]))
if len(port_data) > 1:
self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
self.moxed_client.show_port('foo')
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
def test_validate_networks(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', 'test', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1'])).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks)
except exception.NetworkNotFound as ex:
self.assertTrue("my_netid2" in str(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None),
('my_netid3', 'test3', None)]
ids = ['my_netid1', 'my_netid2', 'my_netid3']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks)
except exception.NetworkNotFound as ex:
self.assertTrue("my_netid2, my_netid3" in str(ex))
def test_validate_networks_duplicate(self):
"""Verify that the correct exception is thrown when duplicate
network ids are passed to validate_networks.
"""
requested_networks = [('my_netid1', None, None),
('my_netid1', None, None)]
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks)
def test_validate_networks_not_specified(self):
requested_networks = []
self.moxed_client.list_networks(
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkAmbiguous,
api.validate_networks,
self.context, requested_networks)
def test_validate_networks_port_not_found(self):
# Verify that the correct exception is thrown when a non existent
# port is passed to validate_networks.
requested_networks = [('my_netid1', None, '3123-ad34-bc43-32332ca33e')]
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
self.moxed_client.show_port(requested_networks[0][2]).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.PortNotFound,
api.validate_networks,
self.context, requested_networks)
def test_validate_networks_port_in_use(self):
requested_networks = [(None, None, self.port_data3[0]['id'])]
self.moxed_client.show_port(self.port_data3[0]['id']).\
AndReturn({'port': self.port_data3[0]})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortInUse,
api.validate_networks,
self.context, requested_networks)
def test_validate_networks_ports_in_same_network(self):
port_a = self.port_data3[0]
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks)
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
search_opts = {'id': [port_a['network_id'], port_b['network_id']]}
self.moxed_client.list_networks(
**search_opts).AndReturn({'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks)
def _mock_list_ports(self, port_data=None):
if port_data is None:
port_data = self.port_data2
address = self.port_address
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = neutronapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEquals(self.instance2['uuid'], result[0]['instance_uuid'])
self.assertEquals(self.instance['uuid'], result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEquals(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets, req_ids=None):
api = neutronapi.API()
nets = prv_nets + pub_nets
if req_ids:
mox_list_params = {'id': req_ids}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': prv_nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [{'name': self.fip_pool['name']},
{'name': self.fip_pool_nova['name']}]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip_id': fip_data['port_id'],
'fixed_ip':
{'address': fip_data['fixed_ip_address']},
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
return expected
def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
api = neutronapi.API()
fip_id = fip_data['id']
net_id = fip_data['floating_network_id']
address = fip_data['floating_ip_address']
if by_address:
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
else:
self.moxed_client.show_floatingip(fip_id).\
AndReturn({'floatingip': fip_data})
self.moxed_client.show_network(net_id).\
AndReturn({'network': self.fip_pool})
if fip_data['port_id']:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[idx]})
self.mox.ReplayAll()
expected = self._get_expected_fip_model(fip_data, idx)
if by_address:
fip = api.get_floating_ip_by_address(self.context, address)
else:
fip = api.get_floating_ip(self.context, fip_id)
self.assertEqual(expected, fip)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0,
by_address=True)
def test_get_floating_ip_by_address_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1,
by_address=True)
def test_get_floating_ip_by_address_not_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': []})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ip_by_id_not_found(self):
api = neutronapi.API()
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFound,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated] * 2})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ips_by_project(self):
api = neutronapi.API()
project_id = self.context.project_id
self.moxed_client.list_floatingips(tenant_id=project_id).\
AndReturn({'floatingips': [self.fip_unassociated,
self.fip_associated]})
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.moxed_client.list_ports(tenant_id=project_id).\
AndReturn({'ports': self.port_data2})
self.mox.ReplayAll()
expected = [self._get_expected_fip_model(self.fip_unassociated),
self._get_expected_fip_model(self.fip_associated, idx=1)]
fips = api.get_floating_ips_by_project(self.context)
self.assertEqual(expected, fips)
def _test_get_instance_id_by_floating_address(self, fip_data,
associated=False):
api = neutronapi.API()
address = fip_data['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
if associated:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.ReplayAll()
if associated:
expected = self.port_data2[1]['device_id']
else:
expected = None
fip = api.get_instance_id_by_floating_address(self.context, address)
self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
self._test_get_instance_id_by_floating_address(self.fip_associated,
associated=True)
def test_allocate_floating_ip(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, 'ext_net')
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_pool_id(self):
api = neutronapi.API()
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'id': pool_id}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, pool_id)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_default_pool(self):
api = neutronapi.API()
pool_name = self.fip_pool_nova['name']
pool_id = self.fip_pool_nova['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool_nova]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpAssociated,
api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
nw_info = self.mox.CreateMock(model.NetworkInfo)
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
for instance in instances:
nw_info.json()
api._get_instance_nw_info(mox.IgnoreArg(), instance).\
AndReturn(nw_info)
api.db.instance_info_cache_update(mox.IgnoreArg(),
instance['uuid'],
mox.IgnoreArg())
def test_associate_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fixed_address = self.port_address2
fip_id = self.fip_unassociated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[1]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
'fixed_ip_address': fixed_address}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance,
address, fixed_address)
def test_reassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
old_fixed_address = self.fip_associated['fixed_ip_address']
new_fixed_address = self.port_address
fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance2['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': 'my_portid1',
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.StubOutWithMock(api.db, 'instance_get_by_uuid')
api.db.instance_get_by_uuid(mox.IgnoreArg(),
self.instance['uuid']).\
AndReturn(self.instance)
self._setup_mock_for_refresh_cache(api, [self.instance,
self.instance2])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance2,
address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.associate_floating_ip, self.context,
self.instance, address, fixed_address)
def test_disassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fip_id = self.fip_associated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': None}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
def test_add_fixed_ip_to_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
network_id = 'my_netid1'
search_opts = {'network_id': network_id}
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
search_opts = {'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'network_id': network_id}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [{'subnet_id': 'my_subid1'},
{'subnet_id': 'my_subid1'}],
},
}
port = self.port_data1[0]
port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
def test_remove_fixed_ip_from_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
address = '10.0.0.3'
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [],
},
}
port = self.port_data1[0]
port['fixed_ips'] = []
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.remove_fixed_ip_from_instance(self.context, self.instance, address)
def test_list_floating_ips_without_l3_support(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(
status_code=404)
self.moxed_client.list_floatingips(
fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
self.mox.ReplayAll()
neutronv2.get_client('fake')
floatingips = api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 1)
self.assertEqual(floatingips, [])
def test_nw_info_get_ips(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'}],
'id': 'port-id',
}
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.ReplayAll()
neutronv2.get_client('fake')
result = api._nw_info_get_ips(self.moxed_client, fake_port)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['address'], '1.1.1.1')
self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
def test_nw_info_get_subnets(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}],
'id': 'port-id',
}
fake_subnet = model.Subnet(cidr='1.0.0.0/8')
fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(self.context, fake_port).AndReturn(
[fake_subnet])
self.mox.ReplayAll()
neutronv2.get_client('fake')
subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
self.assertEqual(len(subnets), 1)
self.assertEqual(len(subnets[0]['ips']), 1)
self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
def _test_nw_info_build_network(self, vif_type):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(net['subnets'], fake_subnets)
self.assertEqual(net['id'], 'net-id')
self.assertEqual(net['label'], 'foo')
self.assertEqual(net.get_meta('tenant_id'), 'tenant')
self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
return net, iid
def test_nw_info_build_network_ovs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
self.assertEqual(net['bridge'], CONF.neutron_ovs_bridge)
self.assertFalse('should_create_bridge' in net)
self.assertEqual(iid, 'port-id')
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual(net['bridge'], 'brqnet-id')
self.assertTrue(net['should_create_bridge'])
self.assertEqual(iid, None)
def test_nw_info_build_network_other(self):
net, iid = self._test_nw_info_build_network(None)
self.assertEqual(net['bridge'], None)
self.assertFalse('should_create_bridge' in net)
self.assertEqual(iid, None)
def test_build_network_info_model(self):
api = neutronapi.API()
fake_inst = {'project_id': 'fake', 'uuid': 'uuid'}
fake_ports = [
{'id': 'port0',
'network_id': 'net-id',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# This does not match the networks we provide below,
# so it should be ignored (and is here to verify that)
{'id': 'port1',
'network_id': 'other-net-id',
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [
{'id': 'net-id',
'name': 'foo',
'tenant_id': 'fake',
}
]
neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port0').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(self.context, fake_ports[0]).AndReturn(
fake_subnets)
self.mox.ReplayAll()
neutronv2.get_client('fake')
nw_info = api._build_network_info_model(self.context, fake_inst,
fake_nets)
self.assertEqual(len(nw_info), 1)
self.assertEqual(nw_info[0]['id'], 'port0')
self.assertEqual(nw_info[0]['address'], 'de:ad:be:ef:00:01')
self.assertEqual(nw_info[0]['devname'], 'tapport0')
self.assertEqual(nw_info[0]['ovs_interfaceid'], None)
self.assertEqual(nw_info[0]['type'], model.VIF_TYPE_BRIDGE)
self.assertEqual(nw_info[0]['network']['bridge'], 'brqnet-id')
def test_get_all_empty_list_networks(self):
api = neutronapi.API()
self.moxed_client.list_networks().AndReturn({'networks': []})
self.mox.ReplayAll()
networks = api.get_all(self.context)
self.assertEqual(networks, [])
class TestNeutronv2ModuleMethods(test.TestCase):
def test_ensure_requested_network_ordering_no_preference_ids(self):
l = [1, 2, 3]
neutronapi._ensure_requested_network_ordering(
lambda x: x,
l,
None)
def test_ensure_requested_network_ordering_no_preference_hashes(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
None)
self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
def test_ensure_requested_network_ordering_with_preference(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
[1, 2, 3])
self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True)
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
self.mox.ReplayAll()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(instance, port_req_body)
self.assertEquals(port_req_body['port']['binding:host_id'], host_id)
def test_migrate_instance_finish_binding_false(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(refresh_cache=True).AndReturn(False)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, None, None)
def test_migrate_instance_finish_binding_true(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndReturn(None)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, self.instance, migration)
def test_migrate_instance_finish_binding_true_exception(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndRaise(
Exception("fail to update port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
api.migrate_instance_finish,
self.context, self.instance, migration)
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2ExtraDhcpOpts, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
# Note: (dkehn) this option check should be removed as soon as support
        # in neutron is released, see https://bugs.launchpad.net/nova/+bug/1214162
CONF.set_override('dhcp_options_enabled', True)
self._allocate_for_instance(1, extra_dhcp_opts=False)
CONF.set_override('dhcp_options_enabled', False)
def test_allocate_for_instance_extradhcpopts(self):
# Note: (dkehn) this option check should be removed as soon as support
        # in neutron is released, see https://bugs.launchpad.net/nova/+bug/1214162
CONF.set_override('dhcp_options_enabled', True)
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
CONF.set_override('dhcp_options_enabled', False)
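# --- Illustrative sketch (not part of the original module) ---
# The tests above follow mox's record/replay/verify pattern: expectations are
# recorded on `self.moxed_client`, ReplayAll() switches to replay mode, the
# code under test issues exactly the recorded calls, and mox verifies them.
# A minimal stand-alone version of that pattern, assuming the `mox` package
# provides Mox(), CreateMockAnything(), AndReturn(), ReplayAll() and
# VerifyAll() as used throughout this file:
def _record_replay_demo():
    m = mox.Mox()
    client = m.CreateMockAnything()
    # Record phase: declare the expected call and its canned response.
    client.list_networks(id=['my_netid1']).AndReturn({'networks': []})
    m.ReplayAll()
    # Replay phase: the call must match the recorded expectation.
    assert client.list_networks(id=['my_netid1']) == {'networks': []}
    m.VerifyAll()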
| 44.55032
| 79
| 0.584889
|
79515123dae0695a6504167d5adb4253989226f5
| 886
|
py
|
Python
|
pygmsh/opencascade/box.py
|
jorgensd/pygmsh
|
64fa6dbc78c6270241c1576b80370123436cd07b
|
[
"MIT"
] | 1
|
2022-03-13T04:39:03.000Z
|
2022-03-13T04:39:03.000Z
|
pygmsh/opencascade/box.py
|
kurtsansom/pygmsh
|
9c2cfe1c6c7f80943f61f3e695a453e343544619
|
[
"MIT"
] | null | null | null |
pygmsh/opencascade/box.py
|
kurtsansom/pygmsh
|
9c2cfe1c6c7f80943f61f3e695a453e343544619
|
[
"MIT"
] | null | null | null |
from .volume_base import VolumeBase
class Box(VolumeBase):
"""
Creates a box.
Parameters
----------
x0 : array-like[3]
List containing the x, y, z values of the start point.
    extents : array-like[3]
        List of the 3 extents of the box edges.
    char_length : float
        Characteristic length of the mesh elements of this box.
"""
def __init__(self, x0, extents, char_length=None):
super().__init__()
assert len(x0) == 3
assert len(extents) == 3
self.x0 = x0
self.extents = extents
self.char_length = char_length
args = list(x0) + list(extents)
args = ", ".join([f"{arg}" for arg in args])
self.code = "\n".join(
[f"{self.id} = newv;", f"Box({self.id}) = {{{args}}};"]
+ self.char_length_code(char_length)
)
return
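# --- Illustrative usage sketch (not part of the original file) ---
# Assumes this module is imported as part of the pygmsh package, so that
# VolumeBase assigns a fresh identifier (`self.id`) and provides
# `char_length_code()`; the coordinates below are arbitrary.
if __name__ == "__main__":
    demo_box = Box([0.0, 0.0, 0.0], [1.0, 2.0, 3.0], char_length=0.1)
    # Prints the generated OpenCASCADE .geo snippet, roughly:
    #   <id> = newv;
    #   Box(<id>) = {0.0, 0.0, 0.0, 1.0, 2.0, 3.0};
    print(demo_box.code)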
| 24.611111
| 67
| 0.551919
|
79515210babf07fcb797cec5475cafbc2f0e44e6
| 735
|
py
|
Python
|
ecg/examples/cinc17/entry/evaler.py
|
yanagiragi/ECG-acquisition-classification
|
1c84211a304dacd18232c86fe38811a7bb4b7b77
|
[
"Apache-2.0"
] | 35
|
2019-05-15T16:11:00.000Z
|
2022-01-12T03:29:10.000Z
|
ecg/examples/cinc17/entry/evaler.py
|
HabibMrad/ECG-acquisition-classification
|
d50985c8a774d8f6eebcc2a9750fe3485b389815
|
[
"Apache-2.0"
] | 9
|
2020-01-28T22:44:44.000Z
|
2022-02-10T00:09:38.000Z
|
ecg/examples/cinc17/entry/evaler.py
|
HabibMrad/ECG-acquisition-classification
|
d50985c8a774d8f6eebcc2a9750fe3485b389815
|
[
"Apache-2.0"
] | 26
|
2019-05-21T06:23:20.000Z
|
2022-03-10T17:35:13.000Z
|
import json
import keras
import numpy as np
import scipy.io as sio
import scipy.stats as sst
import load
import network
import util
def predict(record):
ecg = load.load_ecg(record +".mat")
preproc = util.load(".")
x = preproc.process_x([ecg])
params = json.load(open("config.json"))
params.update({
"compile" : False,
"input_shape": [None, 1],
"num_categories": len(preproc.classes)
})
model = network.build_network(**params)
model.load_weights('model.hdf5')
probs = model.predict(x)
prediction = sst.mode(np.argmax(probs, axis=2).squeeze())[0][0]
return preproc.int_to_class[prediction]
if __name__ == '__main__':
import sys
    print(predict(sys.argv[1]))
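# Example invocation (illustrative; the record name is a placeholder for a
# CINC17 .mat recording in the working directory, read as "<record>.mat"):
#   python evaler.py A00001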
| 22.272727
| 67
| 0.657143
|
79515322ebad3fb53310683ab29ba78b416618b4
| 5,960
|
py
|
Python
|
syntropy_sdk/models/user_data_response.py
|
SyntropyNet/syntropy-python-sdk
|
27b7756b136f83886fd2a6e342fa4d4073779ff7
|
[
"MIT"
] | 1
|
2020-12-17T17:30:12.000Z
|
2020-12-17T17:30:12.000Z
|
syntropy_sdk/models/user_data_response.py
|
SyntropyNet/syntropy-python-sdk
|
27b7756b136f83886fd2a6e342fa4d4073779ff7
|
[
"MIT"
] | null | null | null |
syntropy_sdk/models/user_data_response.py
|
SyntropyNet/syntropy-python-sdk
|
27b7756b136f83886fd2a6e342fa4d4073779ff7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
syntropy-auth-service
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UserDataResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"user_id": "float",
"user_email": "str",
"user_settings": "UserSettingsObject",
"user_scopes": "list[str]",
}
attribute_map = {
"user_id": "user_id",
"user_email": "user_email",
"user_settings": "user_settings",
"user_scopes": "user_scopes",
}
def __init__(
self, user_id=None, user_email=None, user_settings=None, user_scopes=None
): # noqa: E501
"""UserDataResponse - a model defined in Swagger""" # noqa: E501
self._user_id = None
self._user_email = None
self._user_settings = None
self._user_scopes = None
self.discriminator = None
self.user_id = user_id
self.user_email = user_email
self.user_settings = user_settings
self.user_scopes = user_scopes
@property
def user_id(self):
"""Gets the user_id of this UserDataResponse. # noqa: E501
:return: The user_id of this UserDataResponse. # noqa: E501
:rtype: float
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this UserDataResponse.
:param user_id: The user_id of this UserDataResponse. # noqa: E501
:type: float
"""
if user_id is None:
raise ValueError(
"Invalid value for `user_id`, must not be `None`"
) # noqa: E501
self._user_id = user_id
@property
def user_email(self):
"""Gets the user_email of this UserDataResponse. # noqa: E501
:return: The user_email of this UserDataResponse. # noqa: E501
:rtype: str
"""
return self._user_email
@user_email.setter
def user_email(self, user_email):
"""Sets the user_email of this UserDataResponse.
:param user_email: The user_email of this UserDataResponse. # noqa: E501
:type: str
"""
if user_email is None:
raise ValueError(
"Invalid value for `user_email`, must not be `None`"
) # noqa: E501
self._user_email = user_email
@property
def user_settings(self):
"""Gets the user_settings of this UserDataResponse. # noqa: E501
:return: The user_settings of this UserDataResponse. # noqa: E501
:rtype: UserSettingsObject
"""
return self._user_settings
@user_settings.setter
def user_settings(self, user_settings):
"""Sets the user_settings of this UserDataResponse.
:param user_settings: The user_settings of this UserDataResponse. # noqa: E501
:type: UserSettingsObject
"""
if user_settings is None:
raise ValueError(
"Invalid value for `user_settings`, must not be `None`"
) # noqa: E501
self._user_settings = user_settings
@property
def user_scopes(self):
"""Gets the user_scopes of this UserDataResponse. # noqa: E501
:return: The user_scopes of this UserDataResponse. # noqa: E501
:rtype: list[str]
"""
return self._user_scopes
@user_scopes.setter
def user_scopes(self, user_scopes):
"""Sets the user_scopes of this UserDataResponse.
:param user_scopes: The user_scopes of this UserDataResponse. # noqa: E501
:type: list[str]
"""
if user_scopes is None:
raise ValueError(
"Invalid value for `user_scopes`, must not be `None`"
) # noqa: E501
self._user_scopes = user_scopes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(UserDataResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserDataResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
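# --- Illustrative usage sketch (not part of the original file) ---
if __name__ == "__main__":
    # `user_settings` is normally a UserSettingsObject; a plain dict is used
    # here only to keep the sketch self-contained.
    demo = UserDataResponse(
        user_id=42,
        user_email="user@example.com",
        user_settings={"theme": "dark"},
        user_scopes=["platform:read"],
    )
    print(demo.to_dict())
    print(demo)  # pretty-printed via __repr__ / to_str()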
| 28.792271
| 119
| 0.574161
|
795153994b28df8d73264f3cd68bd874c69e86e9
| 13,739
|
py
|
Python
|
extract_color_mesh.py
|
U-sepSick/NeRF
|
c5910f84321eb5f72e3332507b0384f1b23f51f7
|
[
"MIT"
] | 993
|
2020-04-20T02:32:58.000Z
|
2022-03-31T15:51:51.000Z
|
extract_color_mesh.py
|
U-sepSick/NeRF
|
c5910f84321eb5f72e3332507b0384f1b23f51f7
|
[
"MIT"
] | 142
|
2020-04-20T15:15:30.000Z
|
2022-03-31T11:54:27.000Z
|
extract_color_mesh.py
|
U-sepSick/NeRF
|
c5910f84321eb5f72e3332507b0384f1b23f51f7
|
[
"MIT"
] | 210
|
2020-05-03T06:16:24.000Z
|
2022-03-30T06:30:57.000Z
|
import torch
import os
import numpy as np
import cv2
from PIL import Image
from collections import defaultdict
from tqdm import tqdm
import mcubes
import open3d as o3d
from plyfile import PlyData, PlyElement
from argparse import ArgumentParser
from models.rendering import *
from models.nerf import *
from utils import load_ckpt
from datasets import dataset_dict
torch.backends.cudnn.benchmark = True
def get_opts():
parser = ArgumentParser()
parser.add_argument('--root_dir', type=str,
default='/home/ubuntu/data/nerf_example_data/nerf_synthetic/lego',
help='root directory of dataset')
parser.add_argument('--dataset_name', type=str, default='blender',
choices=['blender', 'llff'],
help='which dataset to validate')
parser.add_argument('--scene_name', type=str, default='test',
help='scene name, used as output ply filename')
parser.add_argument('--img_wh', nargs="+", type=int, default=[800, 800],
help='resolution (img_w, img_h) of the image')
parser.add_argument('--N_samples', type=int, default=64,
                        help='number of samples to infer the accumulated opacity')
parser.add_argument('--chunk', type=int, default=32*1024,
help='chunk size to split the input to avoid OOM')
parser.add_argument('--ckpt_path', type=str, required=True,
help='pretrained checkpoint path to load')
parser.add_argument('--N_grid', type=int, default=256,
help='size of the grid on 1 side, larger=higher resolution')
parser.add_argument('--x_range', nargs="+", type=float, default=[-1.0, 1.0],
help='x range of the object')
parser.add_argument('--y_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='y range of the object')
parser.add_argument('--z_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='z range of the object')
parser.add_argument('--sigma_threshold', type=float, default=20.0,
help='threshold to consider a location is occupied')
parser.add_argument('--occ_threshold', type=float, default=0.2,
help='''threshold to consider a vertex is occluded.
larger=fewer occluded pixels''')
#### method using vertex normals ####
parser.add_argument('--use_vertex_normal', action="store_true",
help='use vertex normals to compute color')
parser.add_argument('--N_importance', type=int, default=64,
                        help='number of fine samples to infer the accumulated opacity')
parser.add_argument('--near_t', type=float, default=1.0,
help='the near bound factor to start the ray')
return parser.parse_args()
@torch.no_grad()
def f(models, embeddings, rays, N_samples, N_importance, chunk, white_back):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, chunk):
rendered_ray_chunks = \
render_rays(models,
embeddings,
rays[i:i+chunk],
N_samples,
False,
0,
0,
N_importance,
chunk,
white_back,
test_time=True)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
if __name__ == "__main__":
args = get_opts()
kwargs = {'root_dir': args.root_dir,
'img_wh': tuple(args.img_wh)}
if args.dataset_name == 'llff':
kwargs['spheric_poses'] = True
kwargs['split'] = 'test'
else:
kwargs['split'] = 'train'
dataset = dataset_dict[args.dataset_name](**kwargs)
embedding_xyz = Embedding(3, 10)
embedding_dir = Embedding(3, 4)
embeddings = [embedding_xyz, embedding_dir]
nerf_fine = NeRF()
load_ckpt(nerf_fine, args.ckpt_path, model_name='nerf_fine')
nerf_fine.cuda().eval()
# define the dense grid for query
N = args.N_grid
xmin, xmax = args.x_range
ymin, ymax = args.y_range
zmin, zmax = args.z_range
# assert xmax-xmin == ymax-ymin == zmax-zmin, 'the ranges must have the same length!'
x = np.linspace(xmin, xmax, N)
y = np.linspace(ymin, ymax, N)
z = np.linspace(zmin, zmax, N)
xyz_ = torch.FloatTensor(np.stack(np.meshgrid(x, y, z), -1).reshape(-1, 3)).cuda()
dir_ = torch.zeros_like(xyz_).cuda()
# sigma is independent of direction, so any value here will produce the same result
# predict sigma (occupancy) for each grid location
print('Predicting occupancy ...')
with torch.no_grad():
B = xyz_.shape[0]
out_chunks = []
for i in tqdm(range(0, B, args.chunk)):
xyz_embedded = embedding_xyz(xyz_[i:i+args.chunk]) # (N, embed_xyz_channels)
dir_embedded = embedding_dir(dir_[i:i+args.chunk]) # (N, embed_dir_channels)
xyzdir_embedded = torch.cat([xyz_embedded, dir_embedded], 1)
out_chunks += [nerf_fine(xyzdir_embedded)]
rgbsigma = torch.cat(out_chunks, 0)
sigma = rgbsigma[:, -1].cpu().numpy()
sigma = np.maximum(sigma, 0).reshape(N, N, N)
# perform marching cube algorithm to retrieve vertices and triangle mesh
print('Extracting mesh ...')
vertices, triangles = mcubes.marching_cubes(sigma, args.sigma_threshold)
##### Until mesh extraction here, it is the same as the original repo. ######
vertices_ = (vertices/N).astype(np.float32)
## invert x and y coordinates (WHY? maybe because of the marching cubes algo)
x_ = (ymax-ymin) * vertices_[:, 1] + ymin
y_ = (xmax-xmin) * vertices_[:, 0] + xmin
vertices_[:, 0] = x_
vertices_[:, 1] = y_
vertices_[:, 2] = (zmax-zmin) * vertices_[:, 2] + zmin
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertices_[:, 0], 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
# remove noise in the mesh by keeping only the biggest cluster
print('Removing noise ...')
mesh = o3d.io.read_triangle_mesh(f"{args.scene_name}.ply")
idxs, count, _ = mesh.cluster_connected_triangles()
max_cluster_idx = np.argmax(count)
triangles_to_remove = [i for i in range(len(face)) if idxs[i] != max_cluster_idx]
mesh.remove_triangles_by_index(triangles_to_remove)
mesh.remove_unreferenced_vertices()
print(f'Mesh has {len(mesh.vertices)/1e6:.2f} M vertices and {len(mesh.triangles)/1e6:.2f} M faces.')
vertices_ = np.asarray(mesh.vertices).astype(np.float32)
triangles = np.asarray(mesh.triangles)
# perform color prediction
# Step 0. define constants (image width, height and intrinsics)
W, H = args.img_wh
K = np.array([[dataset.focal, 0, W/2],
[0, dataset.focal, H/2],
[0, 0, 1]]).astype(np.float32)
# Step 1. transform vertices into world coordinate
N_vertices = len(vertices_)
vertices_homo = np.concatenate([vertices_, np.ones((N_vertices, 1))], 1) # (N, 4)
if args.use_vertex_normal: ## use normal vector method as suggested by the author.
## see https://github.com/bmild/nerf/issues/44
mesh.compute_vertex_normals()
rays_d = torch.FloatTensor(np.asarray(mesh.vertex_normals))
near = dataset.bounds.min() * torch.ones_like(rays_d[:, :1])
far = dataset.bounds.max() * torch.ones_like(rays_d[:, :1])
rays_o = torch.FloatTensor(vertices_) - rays_d * near * args.near_t
nerf_coarse = NeRF()
load_ckpt(nerf_coarse, args.ckpt_path, model_name='nerf_coarse')
nerf_coarse.cuda().eval()
results = f([nerf_coarse, nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
args.N_importance,
args.chunk,
dataset.white_back)
else: ## use my color average method. see README_mesh.md
## buffers to store the final averaged color
non_occluded_sum = np.zeros((N_vertices, 1))
v_color_sum = np.zeros((N_vertices, 3))
# Step 2. project the vertices onto each training image to infer the color
print('Fusing colors ...')
for idx in tqdm(range(len(dataset.image_paths))):
## read image of this pose
image = Image.open(dataset.image_paths[idx]).convert('RGB')
image = image.resize(tuple(args.img_wh), Image.LANCZOS)
image = np.array(image)
## read the camera to world relative pose
P_c2w = np.concatenate([dataset.poses[idx], np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
P_w2c = np.linalg.inv(P_c2w)[:3] # (3, 4)
## project vertices from world coordinate to camera coordinate
vertices_cam = (P_w2c @ vertices_homo.T) # (3, N) in "right up back"
vertices_cam[1:] *= -1 # (3, N) in "right down forward"
## project vertices from camera coordinate to pixel coordinate
vertices_image = (K @ vertices_cam).T # (N, 3)
depth = vertices_image[:, -1:]+1e-5 # the depth of the vertices, used as far plane
vertices_image = vertices_image[:, :2]/depth
vertices_image = vertices_image.astype(np.float32)
vertices_image[:, 0] = np.clip(vertices_image[:, 0], 0, W-1)
vertices_image[:, 1] = np.clip(vertices_image[:, 1], 0, H-1)
## compute the color on these projected pixel coordinates
## using bilinear interpolation.
## NOTE: opencv's implementation has a size limit of 32768 pixels per side,
## so we split the input into chunks.
colors = []
remap_chunk = int(3e4)
for i in range(0, N_vertices, remap_chunk):
colors += [cv2.remap(image,
vertices_image[i:i+remap_chunk, 0],
vertices_image[i:i+remap_chunk, 1],
interpolation=cv2.INTER_LINEAR)[:, 0]]
colors = np.vstack(colors) # (N_vertices, 3)
## predict occlusion of each vertex
## we leverage the concept of NeRF by constructing rays coming out from the camera
## and hitting each vertex; by computing the accumulated opacity along this path,
## we can know if the vertex is occluded or not.
## for vertices that appear to be occluded from every input view, we make the
## assumption that its color is the same as its neighbors that are facing our side.
## (think of a surface with one side facing us: we assume the other side has the same color)
## ray's origin is camera origin
rays_o = torch.FloatTensor(dataset.poses[idx][:, -1]).expand(N_vertices, 3)
## ray's direction is the vector pointing from camera origin to the vertices
rays_d = torch.FloatTensor(vertices_) - rays_o # (N_vertices, 3)
rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
near = dataset.bounds.min() * torch.ones_like(rays_o[:, :1])
## the far plane is the depth of the vertices, since what we want is the accumulated
## opacity along the path from camera origin to the vertices
far = torch.FloatTensor(depth) * torch.ones_like(rays_o[:, :1])
results = f([nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
0,
args.chunk,
dataset.white_back)
opacity = results['opacity_coarse'].cpu().numpy()[:, np.newaxis] # (N_vertices, 1)
opacity = np.nan_to_num(opacity, 1)
non_occluded = np.ones_like(non_occluded_sum) * 0.1/depth # weight by inverse depth
# near=more confident in color
non_occluded += opacity < args.occ_threshold
v_color_sum += colors * non_occluded
non_occluded_sum += non_occluded
# Step 3. combine the output and write to file
if args.use_vertex_normal:
v_colors = results['rgb_fine'].cpu().numpy() * 255.0
else: ## the combined color is the average color among all views
v_colors = v_color_sum/non_occluded_sum
v_colors = v_colors.astype(np.uint8)
v_colors.dtype = [('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertex_all = np.empty(N_vertices, vertices_.dtype.descr+v_colors.dtype.descr)
for prop in vertices_.dtype.names:
vertex_all[prop] = vertices_[prop][:, 0]
for prop in v_colors.dtype.names:
vertex_all[prop] = v_colors[prop][:, 0]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertex_all, 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
print('Done!')
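# Example invocation (illustrative; paths are placeholders, flag names match
# the argparse options defined above):
#   python extract_color_mesh.py --root_dir /data/nerf_synthetic/lego \
#       --dataset_name blender --scene_name lego --img_wh 800 800 \
#       --ckpt_path ckpts/lego.ckpt --N_grid 256 --sigma_threshold 20.0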
| 45.796667
| 105
| 0.594803
|
7951549885efb88bc1edb83caae759ffad5ff681
| 794
|
py
|
Python
|
firstapp/firstapp/urls.py
|
aiegoo/django
|
2f508a318edd26403509a61eb44e99fda8b7ed64
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
firstapp/firstapp/urls.py
|
aiegoo/django
|
2f508a318edd26403509a61eb44e99fda8b7ed64
|
[
"bzip2-1.0.6",
"MIT"
] | 8
|
2020-02-12T03:14:20.000Z
|
2022-03-11T23:59:53.000Z
|
firstapp/firstapp/urls.py
|
aiegoo/django
|
2f508a318edd26403509a61eb44e99fda8b7ed64
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
"""firstapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^posts/$', admin.site.urls),
]
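# Illustrative alternative (not part of the original file): routing posts/ to a
# hypothetical `posts` application's own URLconf instead of the admin site.
# from django.conf.urls import include, url
# urlpatterns = [
#     url(r'^admin/', admin.site.urls),
#     url(r'^posts/', include('posts.urls')),
# ]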
| 34.521739
| 77
| 0.701511
|
795155ed2440b3ec0207c8d5c044c269d9b27057
| 10,940
|
py
|
Python
|
linux/bin/dmenu-frecency.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | 4
|
2020-04-07T14:45:02.000Z
|
2021-12-28T22:43:16.000Z
|
linux/bin/dmenu-frecency.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | null | null | null |
linux/bin/dmenu-frecency.py
|
nevesnunes/env
|
7a5e3816334337e04a87e1a2e4dc322215901744
|
[
"MIT"
] | 2
|
2020-04-08T03:12:06.000Z
|
2021-03-04T20:33:03.000Z
|
#!/usr/bin/env python
"""Dmenu launcher with history sorted by frecency.
Usage:
dmenu-frecency [--read-apps]
Options:
--read-apps rereads all .desktop files.
"""
from docopt import docopt
import os
import sys
import xdg.BaseDirectory
from xdg.DesktopEntry import DesktopEntry
from subprocess import Popen, PIPE
from datetime import datetime
from collections import defaultdict
import pickle
import re
import gzip
import json
import tempfile
import shlex
CONFIG_DIR = xdg.BaseDirectory.save_config_path('dmenu-frecency')
# Python 2 compatibility
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class Application:
def __init__(self, name, command_line, mtime=None, path=None, is_desktop=False):
self.name = name
self.path = path
self.command_line = command_line
self.is_desktop = is_desktop
self.show_command = False
if mtime is None:
self.mtime = datetime.now()
else:
self.mtime = mtime
def run(self):
if os.fork() == 0:
if self.path:
os.chdir(self.path)
os.execvp(self.command_line[0], self.command_line)
def __lt__(self, other):
return (self.is_desktop, self.mtime) < (other.is_desktop, other.mtime)
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __str__(self):
return "<Application: {} {!r}>".format(self.name, self.command_line)
STATE_VERSION = 4
def get_command(desktop_entry):
tokens = []
for token in shlex.split(desktop_entry.getExec()):
if token == '%i':
if desktop_entry.getIcon():
tokens.append('--icon')
tokens.append(desktop_entry.getIcon())
else:
i = 0
newtok = ""
nc = len(token)
while i < nc:
c = token[i]
if c == '%' and i < nc - 1:
i += 1
code = token[i]
if code == 'c' and desktop_entry.getName():
newtok += desktop_entry.getName()
elif code == '%':
newtok += '%'
else:
newtok += c
i += 1
if newtok:
tokens.append(newtok)
return tuple(tokens)
class LauncherState:
STATE_FILENAME = os.path.join(CONFIG_DIR, 'state')
def __init__(self, config):
self.version = STATE_VERSION
self.config = config
self.find_apps()
self.apps_generated_at = datetime.now()
self.visits = defaultdict(list)
self.visit_count = defaultdict(int)
self.app_last_visit = None
self.frecency_cache = {}
def apps_by_frecency(self):
app_last_visit = self.app_last_visit if self.config['preselect-last-visit'] else None
if app_last_visit is not None:
yield app_last_visit
for app, frec in sorted(self.frecency_cache.items(), key=lambda x: (-x[1], x[0])):
if app_last_visit is None or app_last_visit != app:
yield app
for app in self.sorted_apps:
if app not in self.frecency_cache:
if app_last_visit is None or app_last_visit != app:
yield app
def add_visit(self, app):
if not app.is_desktop and app.command_line in self.command_apps:
app = self.command_apps[app.command_line]
app.show_command = True
try:
self.sorted_apps.remove(app)
except ValueError:
pass # not in list
vs = self.visits[app]
now = datetime.now()
vs.append(now)
self.visit_count[app] += 1
self.visits[app] = vs[-self.config['frecency-visits']:]
self.app_last_visit = app if self.config['preselect-last-visit'] else None
def update_frecencies(self):
for app in self.visits.keys():
self.frecency_cache[app] = self.frecency(app)
def frecency(self, app):
points = 0
for v in self.visits[app]:
days_ago = (datetime.now() - v).days
if days_ago < 4:
points += 100
elif days_ago < 14:
points += 70
elif days_ago < 31:
points += 50
elif days_ago < 90:
points += 30
else:
points += 10
return int(self.visit_count[app] * points / len(self.visits[app]))
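    # Worked example for frecency() above (illustrative): an app visited 3
    # times in total, whose two retained visits were 2 and 20 days ago,
    # scores points = 100 + 50 = 150, so frecency = int(3 * 150 / 2) = 225.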
@classmethod
def load(cls, config):
try:
with gzip.open(cls.STATE_FILENAME, 'rb') as f:
obj = pickle.load(f)
version = getattr(obj, 'version', 0)
if version < STATE_VERSION:
new_obj = cls(config)
if version <= 1:
for app, vs in obj.visits.items():
vc = obj.visit_count[app]
app.is_desktop = True
new_obj.visit_count[app] = vc
new_obj.visits[app] = vs
new_obj.find_apps()
new_obj.clean_cache()
new_obj.update_frecencies()
new_obj.config = config
return new_obj
else:
obj.config = config
return obj
except FileNotFoundError:
return cls(config)
def save(self):
with tempfile.NamedTemporaryFile(
'wb',
dir=os.path.dirname(self.STATE_FILENAME),
delete=False) as tf:
with gzip.open(tf, 'wb') as gzipf:
pickle.dump(self, gzipf)
tempname = tf.name
os.rename(tempname, self.STATE_FILENAME)
def find_apps(self):
self.apps = {}
self.command_apps = {}
if self.config['scan-desktop-files']:
for applications_directory in xdg.BaseDirectory.load_data_paths("applications"):
if os.path.exists(applications_directory):
for dirpath, dirnames, filenames in os.walk(applications_directory):
for f in filenames:
if f.endswith('.desktop'):
full_filename = os.path.join(dirpath, f)
self.add_desktop(full_filename)
if self.config['scan-path']:
for pathdir in os.environ["PATH"].split(os.pathsep):
pathdir = pathdir.strip('"')
if not os.path.isdir(pathdir):
continue
for f in os.listdir(pathdir):
filename = os.path.join(pathdir, f)
if os.path.isfile(filename) and os.access(filename, os.X_OK):
app = Application(
name=f,
command_line=(f,),
mtime=datetime.fromtimestamp(os.path.getmtime(filename)))
self.add_app(app)
self.sorted_apps = sorted(self.apps.values(), reverse=True)
def add_desktop(self, filename):
try:
d = DesktopEntry(filename)
if d.getHidden() or d.getNoDisplay() or d.getTerminal() or d.getType() != 'Application':
return
app = Application(
name=d.getName(),
command_line=get_command(d),
mtime=datetime.fromtimestamp(os.path.getmtime(filename)),
is_desktop=True)
if d.getPath():
app.path = d.getPath()
self.add_app(app)
except (xdg.Exceptions.ParsingError,
xdg.Exceptions.DuplicateGroupError,
xdg.Exceptions.DuplicateKeyError):
pass
def add_app(self, app):
if app.command_line not in self.command_apps:
self.apps[app.name] = app
self.command_apps[app.command_line] = app
def clean_cache(self):
for app in list(self.frecency_cache.keys()):
if app.is_desktop and app.name not in self.apps:
del self.frecency_cache[app]
if self.app_last_visit is not None and self.app_last_visit.name not in self.apps:
self.app_last_visit = None
class DmenuFrecency:
CONFIG_FILENAME = os.path.join(CONFIG_DIR, 'config.json')
DEFAULT_CONFIG = {
'dmenu': 'dmenu',
'dmenu-args': ['-i'],
'cache-days': 1,
'frecency-visits': 10,
'preselect-last-visit': False,
'scan-desktop-files': True,
'scan-path': False,
}
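    # Illustrative ~/.config/dmenu-frecency/config.json matching the defaults
    # above (written automatically on first run if the file is missing):
    # {
    #     "cache-days": 1,
    #     "dmenu": "dmenu",
    #     "dmenu-args": ["-i"],
    #     "frecency-visits": 10,
    #     "preselect-last-visit": false,
    #     "scan-desktop-files": true,
    #     "scan-path": false
    # }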
NAME_WITH_COMMAND = re.compile(r"(.+) \([^()]+\)")
def __init__(self, arguments):
self.read_apps = arguments['--read-apps']
self.load_config()
self.state = LauncherState.load(self.config)
assert self.state, "Failed to load state."
def load_config(self):
self.config = {}
self.config.update(self.DEFAULT_CONFIG)
try:
with open(self.CONFIG_FILENAME, 'r') as f:
self.config.update(json.load(f))
except FileNotFoundError:
with open(self.CONFIG_FILENAME, 'w') as f:
json.dump(self.config, f, sort_keys=True, indent=4)
f.write('\n')
def main(self):
if self.read_apps:
self.state.find_apps()
self.state.clean_cache()
self.state.save()
return
dmenu = Popen([self.config['dmenu']] + self.config['dmenu-args'], stdin=PIPE, stdout=PIPE)
for app in self.state.apps_by_frecency():
app_name = app.name.encode('utf-8')
dmenu.stdin.write(app_name)
if app.show_command and app.name != app.command_line[0]:
dmenu.stdin.write(" ({})".format(' '.join(app.command_line)).encode('utf-8'))
dmenu.stdin.write(b'\n')
stdout, stderr = dmenu.communicate()
result = stdout.decode('utf-8').strip()
if not result:
return
if result in self.state.apps:
app = self.state.apps[result]
else:
m = self.NAME_WITH_COMMAND.match(result)
if m and m.group(1) in self.state.apps:
app = self.state.apps[m.group(1)]
else:
app = Application(
name=result,
command_line=tuple(shlex.split(result)))
app.run()
self.state.add_visit(app)
self.state.update_frecencies()
if (datetime.now() - self.state.apps_generated_at).days >= self.config['cache-days']:
self.state.find_apps()
self.state.clean_cache()
self.state.save()
if __name__ == '__main__':
arguments = docopt(__doc__, version="0.1")
DmenuFrecency(arguments).main()
| 33.25228
| 100
| 0.543601
|
7951566bdd729b13d1042abd4eb51e249413f327
| 452
|
py
|
Python
|
clusterapp/features/TimeParameters/StartTimeParameter.py
|
ealmuina/thesis
|
d436ae0c6f775c56b2072889ceafae1507291c74
|
[
"MIT"
] | 1
|
2018-02-11T07:36:31.000Z
|
2018-02-11T07:36:31.000Z
|
clusterapp/features/TimeParameters/StartTimeParameter.py
|
ealmuina/thesis
|
d436ae0c6f775c56b2072889ceafae1507291c74
|
[
"MIT"
] | null | null | null |
clusterapp/features/TimeParameters/StartTimeParameter.py
|
ealmuina/thesis
|
d436ae0c6f775c56b2072889ceafae1507291c74
|
[
"MIT"
] | null | null | null |
import numpy as np
from .TimeParameter import TimeParameter
from .__init__ import *
class StartTimeParameter(TimeParameter):
name = 'StartTime'
"""docstring for StartTimeParameter"""
def __init__(self):
super(TimeParameter, self).__init__()
def measure(self, segment):
value = segment.IndexFrom / segment.samplingRate
segment.measures_dict[self.name] = np.round(value, DECIMAL_PLACES)
return True
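# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the module is executed inside its package (e.g. via
# `python -m clusterapp.features.TimeParameters.StartTimeParameter`) so the
# relative imports above resolve; the segment object is a minimal stand-in.
if __name__ == "__main__":
    from types import SimpleNamespace
    demo_segment = SimpleNamespace(IndexFrom=44100, samplingRate=44100,
                                   measures_dict={})
    StartTimeParameter().measure(demo_segment)
    print(demo_segment.measures_dict)  # {'StartTime': 1.0}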
| 22.6
| 74
| 0.70354
|
7951568f8bb478a934e278554c4b65789cd0c5f2
| 7,973
|
py
|
Python
|
OnlineJudge/account/views/admin.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | 2
|
2020-03-07T02:26:00.000Z
|
2020-06-01T15:03:17.000Z
|
OnlineJudge/account/views/admin.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | null | null | null |
OnlineJudge/account/views/admin.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | null | null | null |
import os
import re
import xlsxwriter
from django.db import transaction, IntegrityError
from django.db.models import Q
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password
from submission.models import Submission
from utils.api import APIView, validate_serializer
from utils.shortcuts import rand_str
from ..decorators import super_admin_required
from ..models import AdminType, ProblemPermission, User, UserProfile
from ..serializers import EditUserSerializer, UserAdminSerializer, GenerateUserSerializer
from ..serializers import ImportUserSeralizer
class UserAdminAPI(APIView):
@validate_serializer(ImportUserSeralizer)
@super_admin_required
def post(self, request):
"""
Import User
"""
data = request.data["users"]
user_list = []
for user_data in data:
if len(user_data) != 3 or len(user_data[0]) > 32:
return self.error(f"Error occurred while processing data '{user_data}'")
user_list.append(User(username=user_data[0], password=make_password(user_data[1]), email=user_data[2]))
try:
with transaction.atomic():
ret = User.objects.bulk_create(user_list)
UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])
return self.success()
except IntegrityError as e:
# Extract detail from exception message
# duplicate key value violates unique constraint "user_username_key"
# DETAIL: Key (username)=(root11) already exists.
return self.error(str(e).split("\n")[1])
@validate_serializer(EditUserSerializer)
@super_admin_required
def put(self, request):
"""
Edit user api
"""
data = request.data
try:
user = User.objects.get(id=data["id"])
except User.DoesNotExist:
return self.error("User does not exist")
if User.objects.filter(username=data["username"].lower()).exclude(id=user.id).exists():
return self.error("Username already exists")
if User.objects.filter(email=data["email"].lower()).exclude(id=user.id).exists():
return self.error("Email already exists")
pre_username = user.username
user.username = data["username"].lower()
user.email = data["email"].lower()
user.admin_type = data["admin_type"]
user.is_disabled = data["is_disabled"]
if data["admin_type"] == AdminType.ADMIN:
user.problem_permission = data["problem_permission"]
elif data["admin_type"] == AdminType.SUPER_ADMIN:
user.problem_permission = ProblemPermission.ALL
else:
user.problem_permission = ProblemPermission.NONE
if data["password"]:
user.set_password(data["password"])
if data["open_api"]:
# Avoid reset user appkey after saving changes
if not user.open_api:
user.open_api_appkey = rand_str()
else:
user.open_api_appkey = None
user.open_api = data["open_api"]
if data["two_factor_auth"]:
# Avoid reset user tfa_token after saving changes
if not user.two_factor_auth:
user.tfa_token = rand_str()
else:
user.tfa_token = None
user.two_factor_auth = data["two_factor_auth"]
user.save()
if pre_username != user.username:
Submission.objects.filter(username=pre_username).update(username=user.username)
UserProfile.objects.filter(user=user).update(real_name=data["real_name"])
return self.success(UserAdminSerializer(user).data)
@super_admin_required
def get(self, request):
"""
User list api / Get user by id
"""
user_id = request.GET.get("id")
if user_id:
try:
user = User.objects.get(id=user_id)
except User.DoesNotExist:
return self.error("User does not exist")
return self.success(UserAdminSerializer(user).data)
user = User.objects.all().order_by("-create_time")
keyword = request.GET.get("keyword", None)
if keyword:
user = user.filter(Q(username__icontains=keyword) |
Q(userprofile__real_name__icontains=keyword) |
Q(email__icontains=keyword))
return self.success(self.paginate_data(request, user, UserAdminSerializer))
@super_admin_required
def delete(self, request):
id = request.GET.get("id")
if not id:
return self.error("Invalid Parameter, id is required")
ids = id.split(",")
if str(request.user.id) in ids:
return self.error("Current user can not be deleted")
User.objects.filter(id__in=ids).delete()
return self.success()
class GenerateUserAPI(APIView):
@super_admin_required
def get(self, request):
"""
download users excel
"""
file_id = request.GET.get("file_id")
if not file_id:
return self.error("Invalid Parameter, file_id is required")
if not re.match(r"^[a-zA-Z0-9]+$", file_id):
return self.error("Illegal file_id")
file_path = f"/tmp/{file_id}.xlsx"
if not os.path.isfile(file_path):
return self.error("File does not exist")
with open(file_path, "rb") as f:
raw_data = f.read()
os.remove(file_path)
response = HttpResponse(raw_data)
response["Content-Disposition"] = f"attachment; filename=users.xlsx"
response["Content-Type"] = "application/xlsx"
return response
@validate_serializer(GenerateUserSerializer)
@super_admin_required
def post(self, request):
"""
Generate User
"""
data = request.data
number_max_length = max(len(str(data["number_from"])), len(str(data["number_to"])))
if number_max_length + len(data["prefix"]) + len(data["suffix"]) > 32:
return self.error("Username should not more than 32 characters")
if data["number_from"] > data["number_to"]:
return self.error("Start number must be lower than end number")
file_id = rand_str(8)
filename = f"/tmp/{file_id}.xlsx"
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("A:B", 20)
worksheet.write("A1", "Username")
worksheet.write("B1", "Password")
i = 1
user_list = []
for number in range(data["number_from"], data["number_to"] + 1):
raw_password = rand_str(data["password_length"])
user = User(username=f"{data['prefix']}{number}{data['suffix']}", password=make_password(raw_password))
user.raw_password = raw_password
user_list.append(user)
try:
with transaction.atomic():
ret = User.objects.bulk_create(user_list)
UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])
for item in user_list:
worksheet.write_string(i, 0, item.username)
worksheet.write_string(i, 1, item.raw_password)
i += 1
workbook.close()
return self.success({"file_id": file_id})
except IntegrityError as e:
# Extract detail from exception message
# duplicate key value violates unique constraint "user_username_key"
# DETAIL: Key (username)=(root11) already exists.
return self.error(str(e).split("\n")[1])
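# Hedged sketch (not part of the original views; the helper name is illustrative): this mirrors
# the length check in GenerateUserAPI.post above -- the widest generated username is
# prefix + number + suffix, so its length is bounded by len(prefix) plus the digit count of the
# larger endpoint plus len(suffix), and must stay within the 32-character username limit.
def _generated_username_fits(prefix, suffix, number_from, number_to, limit=32):
    number_max_length = max(len(str(number_from)), len(str(number_to)))
    return number_max_length + len(prefix) + len(suffix) <= limit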
| 39.275862
| 116
| 0.598269
|
79515834f7dfae204a53de88d3192612ce8acb78
| 2,181
|
py
|
Python
|
run.py
|
eric-sentient/MIDI-reAE-chords
|
ac16210bbeb822cb4babb95974a4a05d527763cc
|
[
"MIT"
] | null | null | null |
run.py
|
eric-sentient/MIDI-reAE-chords
|
ac16210bbeb822cb4babb95974a4a05d527763cc
|
[
"MIT"
] | null | null | null |
run.py
|
eric-sentient/MIDI-reAE-chords
|
ac16210bbeb822cb4babb95974a4a05d527763cc
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import models
import model
import load_train
from random import randrange
BATCH_SIZE = 400
EPOCHS = 100
VERBOSE = True
LOAD = False
GENERATE = True
GEN_SAMPLES = 6
X, y = None, None
if LOAD:
model = models.load_model('MIDI-reAE.h5')
model.summary()
else:
X, y = load_train.get_train()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_valid, X_test, y_valid, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=42)
checkpoint_name = 'weights/Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(
checkpoint_name,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto')
model = model.make_model()
history = model.fit(
X_train,
y_train,
batch_size=BATCH_SIZE,
verbose=VERBOSE,
callbacks=[checkpoint],
epochs=EPOCHS,
validation_data=(X_valid, y_valid))
model.save('MIDI-reAE.h5')
if GENERATE:
    if X is None:
X, y = load_train.get_train()
for _ in range(GEN_SAMPLES):
i = randrange(len(X) - 1)
randX, randy = X[i], y[i]
num_notes = round(0.5 * sum([np.count_nonzero(b) for b in randX]))
yhat = model.predict(randX.reshape(1,2,101))
ypred = [(x.argsort()[-num_notes:][::-1]) for x in yhat.reshape(2, 101)]
def decode(vector):
return [i for i in range(len(vector)) if vector[i] == 1]
print("input sequence:")
print([decode(b) for b in randX])
print("generated output:")
        print([[int(n) for n in notes] for notes in ypred])
print("actual output:")
print([decode(b) for b in randy])
        from mido import MidiFile, MidiTrack, Message
        import matplotlib
mid = MidiFile(type=0)
track = MidiTrack()
mid.tracks.append(track)
        # Build one note-number list per time step: the decoded input steps followed by the
        # predicted steps, so the MIDI loop below can iterate over plain note values.
        pred = [decode(b) for b in randX] + [[int(n) for n in notes] for notes in ypred]
t = 0
for i in range(len(pred)):
for note in pred[i]:
track.append(Message('note_on', note=note, velocity=80, time=t))
t = t + 400
for note in pred[i]:
track.append(Message('note_off', note=note, velocity=80,time=t))
    # draw_roll() is assumed to come from a custom piano-roll helper; it is not part of
    # mido's MidiFile API.
    mid.draw_roll()
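# Hedged sketch (not part of the original script; the helper name is illustrative): the decoding
# step above keeps, per time step, the num_notes highest-scoring pitch classes out of the 101
# model outputs; in isolation that top-k selection is just an argsort slice.
def top_k_notes(prob_vector, k):
    # prob_vector: 1-D numpy array with one score per pitch class; returns the k best indices,
    # highest score first, exactly as done for ypred above.
    return prob_vector.argsort()[-k:][::-1]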
| 24.784091
| 101
| 0.659789
|
7951589169affccbe27ad8c97ce896cd3d70f086
| 26,486
|
py
|
Python
|
tests/test_cli.py
|
alandtse/python-semantic-release
|
b58fbc16bf5145bc512ccce71561ed6fada68b90
|
[
"MIT"
] | 3
|
2019-09-30T12:47:32.000Z
|
2020-06-15T14:55:16.000Z
|
tests/test_cli.py
|
alandtse/python-semantic-release
|
b58fbc16bf5145bc512ccce71561ed6fada68b90
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
alandtse/python-semantic-release
|
b58fbc16bf5145bc512ccce71561ed6fada68b90
|
[
"MIT"
] | 1
|
2019-09-30T12:47:49.000Z
|
2019-09-30T12:47:49.000Z
|
from click.testing import CliRunner
import semantic_release
from semantic_release.cli import changelog, main, publish, version
from semantic_release.errors import GitError, ImproperConfigurationError
from . import mock, pytest, reset_config
from .mocks import mock_version_file
assert reset_config
@pytest.fixture
def runner():
return CliRunner()
def test_main_should_call_correct_function(mocker, runner):
mock_version = mocker.patch('semantic_release.cli.version')
result = runner.invoke(main, ['version'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level=None,
retry=False,
define=(),
)
assert result.exit_code == 0
def test_version_by_commit_should_call_correct_functions(mocker, runner):
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mock_commit_new_version = mocker.patch('semantic_release.cli.commit_new_version')
mock_set_new_version = mocker.patch('semantic_release.cli.set_new_version')
mock_new_version = mocker.patch('semantic_release.cli.get_new_version', return_value='2.0.0')
mock_evaluate_bump = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value='major')
mock_current_version = mocker.patch('semantic_release.cli.get_current_version',
return_value='1.2.3')
result = runner.invoke(main, ['version'])
mock_current_version.assert_called_once_with()
mock_evaluate_bump.assert_called_once_with('1.2.3', None)
mock_new_version.assert_called_once_with('1.2.3', 'major')
mock_set_new_version.assert_called_once_with('2.0.0')
mock_commit_new_version.assert_called_once_with('2.0.0')
mock_tag_new_version.assert_called_once_with('2.0.0')
assert result.exit_code == 0
def test_version_by_tag_should_call_correct_functions(mocker, runner):
orig = semantic_release.cli.config.get
def wrapped_config_get(*args, **kwargs):
if (len(args) >= 2 and args[0] == 'semantic_release'
and args[1] == 'version_source'):
return 'tag'
elif (len(args) >= 2 and args[0] == 'semantic_release'
and args[1] == 'commit_version_number'):
return True
return orig(*args, **kwargs)
mocker.patch('semantic_release.cli.config.get', wrapped_config_get)
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
mock_set_new_version = mocker.patch('semantic_release.cli.set_new_version')
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mock_commit_new_version = mocker.patch('semantic_release.cli.commit_new_version')
mock_new_version = mocker.patch('semantic_release.cli.get_new_version', return_value='2.0.0')
mock_evaluate_bump = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value='major')
mock_current_version = mocker.patch('semantic_release.cli.get_current_version',
return_value='1.2.3')
result = runner.invoke(main, ['version'])
mock_current_version.assert_called_once_with()
mock_evaluate_bump.assert_called_once_with('1.2.3', None)
mock_new_version.assert_called_once_with('1.2.3', 'major')
mock_set_new_version.assert_called_once_with('2.0.0')
mock_commit_new_version.assert_called_once_with('2.0.0')
mock_tag_new_version.assert_called_once_with('2.0.0')
assert result.exit_code == 0
def test_version_by_tag_with_commit_version_number_should_call_correct_functions(mocker, runner):
orig = semantic_release.cli.config.get
def wrapped_config_get(*args, **kwargs):
if len(args) >= 2 and args[0] == 'semantic_release' and args[1] == 'version_source':
return 'tag'
return orig(*args, **kwargs)
mocker.patch('semantic_release.cli.config.get', wrapped_config_get)
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
mock_set_new_version = mocker.patch('semantic_release.cli.set_new_version')
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mock_new_version = mocker.patch('semantic_release.cli.get_new_version', return_value='2.0.0')
mock_evaluate_bump = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value='major')
mock_current_version = mocker.patch('semantic_release.cli.get_current_version',
return_value='1.2.3')
result = runner.invoke(main, ['version'])
mock_current_version.assert_called_once_with()
mock_evaluate_bump.assert_called_once_with('1.2.3', None)
mock_new_version.assert_called_once_with('1.2.3', 'major')
mock_set_new_version.assert_called_once_with('2.0.0')
mock_tag_new_version.assert_called_once_with('2.0.0')
assert result.exit_code == 0
def test_force_major(mocker, runner):
mock_version = mocker.patch('semantic_release.cli.version')
result = runner.invoke(main, ['version', '--major'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level='major',
retry=False,
define=(),
)
assert mock_version.call_args_list[0][1]['force_level'] == 'major'
assert result.exit_code == 0
def test_force_minor(mocker, runner):
mock_version = mocker.patch('semantic_release.cli.version')
result = runner.invoke(main, ['version', '--minor'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level='minor',
retry=False,
define=(),
)
assert mock_version.call_args_list[0][1]['force_level'] == 'minor'
assert result.exit_code == 0
def test_force_patch(mocker, runner):
mock_version = mocker.patch('semantic_release.cli.version')
result = runner.invoke(main, ['version', '--patch'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level='patch',
retry=False,
define=(),
)
assert mock_version.call_args_list[0][1]['force_level'] == 'patch'
assert result.exit_code == 0
def test_retry(mocker, runner):
mock_version = mocker.patch('semantic_release.cli.version')
result = runner.invoke(main, ['version', '--retry'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level=None,
retry=True,
define=(),
)
assert result.exit_code == 0
def test_noop_mode(mocker, runner):
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
    mock_commit_new = mocker.patch('semantic_release.cli.commit_new_version')
    mock_set_new = mocker.patch('semantic_release.cli.set_new_version')
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'major')
result = runner.invoke(main, ['version', '--noop'])
assert not mock_set_new.called
assert not mock_commit_new.called
assert not mock_tag_new_version.called
assert result.exit_code == 0
def test_version_no_change(mocker, runner):
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mock_commit_new_version = mocker.patch('semantic_release.cli.commit_new_version')
mock_set_new_version = mocker.patch('semantic_release.cli.set_new_version')
mock_new_version = mocker.patch('semantic_release.cli.get_new_version', return_value='1.2.3')
mock_evaluate_bump = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value=None)
mock_current_version = mocker.patch('semantic_release.cli.get_current_version',
return_value='1.2.3')
result = runner.invoke(main, ['version'])
mock_current_version.assert_called_once_with()
mock_evaluate_bump.assert_called_once_with('1.2.3', None)
mock_new_version.assert_called_once_with('1.2.3', None)
assert not mock_set_new_version.called
assert not mock_commit_new_version.called
assert not mock_tag_new_version.called
assert result.exit_code == 0
def test_version_check_build_status_fails(mocker, runner):
mock_check_build_status = mocker.patch('semantic_release.cli.check_build_status',
return_value=False)
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mock_commit_new = mocker.patch('semantic_release.cli.commit_new_version')
mock_set_new = mocker.patch('semantic_release.cli.set_new_version')
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: True)
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'major')
result = runner.invoke(main, ['version'])
assert mock_check_build_status.called
assert not mock_set_new.called
assert not mock_commit_new.called
assert not mock_tag_new_version.called
assert result.exit_code == 0
def test_version_by_commit_check_build_status_succeeds(mocker, runner):
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: True)
mock_check_build_status = mocker.patch('semantic_release.cli.check_build_status',
return_value=True)
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'major')
mock_commit_new = mocker.patch('semantic_release.cli.commit_new_version')
mock_set_new = mocker.patch('semantic_release.cli.set_new_version')
result = runner.invoke(main, ['version'])
assert mock_check_build_status.called
assert mock_set_new.called
assert mock_commit_new.called
assert mock_tag_new_version.called
assert result.exit_code == 0
def test_version_by_tag_check_build_status_succeeds(mocker, runner):
orig = semantic_release.cli.config.get
def wrapped_config_get(*args, **kwargs):
if len(args) >= 2 and args[0] == 'semantic_release' and args[1] == 'version_source':
return 'tag'
return orig(*args, **kwargs)
mocker.patch('semantic_release.cli.config.get', wrapped_config_get)
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: True)
mock_check_build_status = mocker.patch('semantic_release.cli.check_build_status',
return_value=True)
mock_set_new_version = mocker.patch('semantic_release.cli.set_new_version')
mock_tag_new_version = mocker.patch('semantic_release.cli.tag_new_version')
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'major')
result = runner.invoke(main, ['version'])
assert mock_check_build_status.called
assert mock_set_new_version.called
assert mock_tag_new_version.called
assert result.exit_code == 0
def test_version_check_build_status_not_called_if_disabled(mocker, runner):
mock_check_build_status = mocker.patch('semantic_release.cli.check_build_status')
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
mocker.patch('semantic_release.cli.tag_new_version', None)
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'major')
mocker.patch('semantic_release.cli.commit_new_version', None)
mocker.patch('semantic_release.cli.set_new_version', None)
runner.invoke(main, ['version'])
assert not mock_check_build_status.called
def test_version_retry_and_giterror(mocker):
mocker.patch('semantic_release.cli.get_current_version',
mock.Mock(side_effect=GitError()))
result = version(retry=True)
assert not result
def test_version_retry(mocker):
mock_get_current = mocker.patch('semantic_release.cli.get_current_version',
return_value='current')
mock_evaluate_bump = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value='patch')
mock_get_new = mocker.patch('semantic_release.cli.get_new_version',
return_value='new')
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
result = version(noop=False, retry=True, force_level=False)
assert result
mock_get_current.assert_called_once_with()
mock_evaluate_bump.assert_called_once_with('current', False)
mock_get_new.assert_called_once_with('current', 'patch')
def test_publish_should_not_upload_to_pypi_if_option_is_false(mocker, runner):
mocker.patch('semantic_release.cli.checkout')
mocker.patch('semantic_release.cli.ci_checks.check')
mock_upload = mocker.patch('semantic_release.cli.upload_to_pypi')
mocker.patch('semantic_release.cli.post_changelog', lambda *x: True)
mocker.patch('semantic_release.cli.push_new_version', lambda *x: True)
mocker.patch('semantic_release.cli.version', lambda: True)
mocker.patch('semantic_release.cli.markdown_changelog', lambda *x, **y: 'CHANGES')
mocker.patch('semantic_release.cli.get_new_version', lambda *x: '2.0.0')
mocker.patch('semantic_release.cli.check_token', lambda: True)
mocker.patch('semantic_release.cli.config.getboolean', lambda *x: False)
runner.invoke(main, ['publish'])
assert not mock_upload.called
def test_publish_should_do_nothing_when_version_fails(mocker, runner):
mocker.patch('semantic_release.cli.checkout')
mocker.patch('semantic_release.cli.get_new_version', lambda *x: '2.0.0')
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'feature')
mocker.patch('semantic_release.cli.generate_changelog')
mock_log = mocker.patch('semantic_release.cli.post_changelog')
mock_upload = mocker.patch('semantic_release.cli.upload_to_pypi')
mock_push = mocker.patch('semantic_release.cli.push_new_version')
mock_ci_check = mocker.patch('semantic_release.ci_checks.check')
mock_version = mocker.patch('semantic_release.cli.version', return_value=False)
result = runner.invoke(main, ['publish'])
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level=None,
retry=False,
define=(),
)
assert not mock_push.called
assert not mock_upload.called
assert not mock_log.called
assert mock_ci_check.called
assert result.exit_code == 0
def test_publish_should_call_functions(mocker, runner):
mock_push = mocker.patch('semantic_release.cli.push_new_version')
mock_checkout = mocker.patch('semantic_release.cli.checkout')
mock_version = mocker.patch('semantic_release.cli.version', return_value=True)
mock_log = mocker.patch('semantic_release.cli.post_changelog')
mock_ci_check = mocker.patch('semantic_release.ci_checks.check')
mock_pypi = mocker.patch('semantic_release.cli.upload_to_pypi')
mocker.patch('semantic_release.cli.get_repository_owner_and_name',
return_value=('relekang', 'python-semantic-release'))
mocker.patch('semantic_release.cli.evaluate_version_bump', lambda *x: 'feature')
mocker.patch('semantic_release.cli.generate_changelog')
mocker.patch('semantic_release.cli.markdown_changelog', lambda *x, **y: 'CHANGES')
mocker.patch('semantic_release.cli.get_new_version', lambda *x: '2.0.0')
mocker.patch('semantic_release.cli.check_token', lambda: True)
result = runner.invoke(main, ['publish'])
assert result.exit_code == 0
assert mock_ci_check.called
assert mock_push.called
assert mock_pypi.called
mock_version.assert_called_once_with(
noop=False,
post=False,
force_level=None,
retry=False,
define=(),
)
mock_log.assert_called_once_with(u'relekang', 'python-semantic-release', '2.0.0', 'CHANGES')
mock_checkout.assert_called_once_with('master')
def test_publish_retry_version_fail(mocker):
mock_get_current = mocker.patch('semantic_release.cli.get_current_version',
return_value='current')
mock_get_previous = mocker.patch('semantic_release.cli.get_previous_version',
return_value='previous')
mock_get_owner_name = mocker.patch('semantic_release.cli.get_repository_owner_and_name',
return_value=('owner', 'name'))
mock_ci_check = mocker.patch('semantic_release.ci_checks.check')
mock_checkout = mocker.patch('semantic_release.cli.checkout')
mocker.patch('semantic_release.cli.config.get', lambda *x: 'my_branch')
mock_version = mocker.patch('semantic_release.cli.version', return_value=False)
publish(noop=False, retry=True, force_level=False)
mock_get_current.assert_called_once_with()
mock_get_previous.assert_called_once_with('current')
mock_get_owner_name.assert_called_once_with()
mock_ci_check.assert_called()
mock_checkout.assert_called_once_with('my_branch')
mock_version.assert_called_once_with(noop=False, retry=True, force_level=False)
def test_publish_bad_token(mocker):
mock_get_current = mocker.patch('semantic_release.cli.get_current_version',
return_value='current')
mock_get_previous = mocker.patch('semantic_release.cli.get_previous_version',
return_value='previous')
mock_get_owner_name = mocker.patch('semantic_release.cli.get_repository_owner_and_name',
return_value=('owner', 'name'))
mock_ci_check = mocker.patch('semantic_release.ci_checks.check')
mock_checkout = mocker.patch('semantic_release.cli.checkout')
mocker.patch('semantic_release.cli.config.get', return_value='my_branch')
mocker.patch('semantic_release.cli.config.getboolean', return_value=False)
mock_version = mocker.patch('semantic_release.cli.version')
mock_get_token = mocker.patch('semantic_release.cli.get_token',
return_value='SUPERTOKEN')
mock_get_domain = mocker.patch('semantic_release.cli.get_domain',
return_value='domain')
mock_push = mocker.patch('semantic_release.cli.push_new_version')
mock_check_token = mocker.patch('semantic_release.cli.check_token',
return_value=False)
publish(noop=False, retry=True, force_level=False)
mock_get_current.assert_called_once_with()
mock_get_previous.assert_called_once_with('current')
mock_get_owner_name.assert_called_once_with()
mock_ci_check.assert_called()
mock_checkout.assert_called_once_with('my_branch')
mock_version.assert_called_once_with(noop=False, retry=True, force_level=False)
mock_get_token.assert_called()
mock_get_domain.assert_called()
mock_push.assert_called_once_with(auth_token='SUPERTOKEN', owner='owner', name='name',
branch='my_branch', domain='domain')
mock_check_token.assert_called_once_with()
def test_publish_giterror_when_posting(mocker):
mock_get_current = mocker.patch('semantic_release.cli.get_current_version',
return_value='current')
mock_evaluate = mocker.patch('semantic_release.cli.evaluate_version_bump',
return_value='patch')
mock_get_new = mocker.patch('semantic_release.cli.get_new_version',
return_value='new')
mock_get_owner_name = mocker.patch('semantic_release.cli.get_repository_owner_and_name',
return_value=('owner', 'name'))
mock_ci_check = mocker.patch('semantic_release.ci_checks.check')
mock_checkout = mocker.patch('semantic_release.cli.checkout')
mocker.patch('semantic_release.cli.config.get', return_value='my_branch')
mocker.patch('semantic_release.cli.config.getboolean', return_value=False)
mock_version = mocker.patch('semantic_release.cli.version')
mock_get_token = mocker.patch('semantic_release.cli.get_token',
return_value='SUPERTOKEN')
mock_get_domain = mocker.patch('semantic_release.cli.get_domain',
return_value='domain')
mock_push = mocker.patch('semantic_release.cli.push_new_version')
mock_check_token = mocker.patch('semantic_release.cli.check_token',
return_value=True)
mock_generate = mocker.patch('semantic_release.cli.generate_changelog',
return_value='super changelog')
mock_markdown = mocker.patch('semantic_release.cli.markdown_changelog',
return_value='super md changelog')
mock_post = mocker.patch('semantic_release.cli.post_changelog',
mock.Mock(side_effect=GitError()))
publish(noop=False, retry=False, force_level=False)
mock_get_current.assert_called_once_with()
mock_evaluate.assert_called_once_with('current', False)
mock_get_new.assert_called_once_with('current', 'patch')
mock_get_owner_name.assert_called_once_with()
mock_ci_check.assert_called()
mock_checkout.assert_called_once_with('my_branch')
mock_version.assert_called_once_with(noop=False, retry=False, force_level=False)
mock_get_token.assert_called_once_with()
mock_get_domain.assert_called_once_with()
mock_push.assert_called_once_with(auth_token='SUPERTOKEN', owner='owner', name='name',
branch='my_branch', domain='domain')
mock_check_token.assert_called_once_with()
mock_generate.assert_called_once_with('current', 'new')
mock_markdown.assert_called_once_with('new', 'super changelog', header=False)
mock_post.assert_called_once_with('owner', 'name', 'new', 'super md changelog')
def test_changelog_should_call_functions(mocker, runner):
mock_changelog = mocker.patch('semantic_release.cli.changelog', return_value=True)
result = runner.invoke(main, ['changelog'])
assert result.exit_code == 0
mock_changelog.assert_called_once_with(
noop=False,
post=False,
force_level=None,
retry=False,
unreleased=False,
define=(),
)
def test_overload_by_cli(mocker, runner):
mock_open = mocker.patch('semantic_release.history.open', mock_version_file)
runner.invoke(main, ['version', '--noop', '--patch', '-D',
'version_variable=my_version_path:my_version_var'])
mock_open.assert_called_once_with('my_version_path', 'r')
mock_open.reset_mock()
def test_changelog_noop(mocker):
mocker.patch('semantic_release.cli.get_current_version', return_value='current')
mock_previous_version = mocker.patch('semantic_release.cli.get_previous_version',
return_value='previous')
mock_generate_changelog = mocker.patch('semantic_release.cli.generate_changelog',
return_value='super changelog')
mock_markdown_changelog = mocker.patch('semantic_release.cli.markdown_changelog',
return_value='super changelog')
changelog(noop=True, unreleased=False)
mock_previous_version.assert_called_once_with('current')
mock_generate_changelog.assert_called_once_with('previous', 'current')
mock_markdown_changelog.assert_called_once_with('current', 'super changelog', header=False)
def test_changelog_post_unreleased_no_token(mocker):
mocker.patch('semantic_release.cli.get_current_version', return_value='current')
mock_previous_version = mocker.patch('semantic_release.cli.get_previous_version',
return_value='previous')
mock_generate_changelog = mocker.patch('semantic_release.cli.generate_changelog',
return_value='super changelog')
mock_markdown_changelog = mocker.patch('semantic_release.cli.markdown_changelog',
return_value='super changelog')
mock_check_token = mocker.patch('semantic_release.cli.check_token',
return_value=False)
changelog(unreleased=True, post=True)
mock_previous_version.assert_called_once_with('current')
mock_generate_changelog.assert_called_once_with('current', None)
mock_markdown_changelog.assert_called_once_with('current', 'super changelog', header=False)
mock_check_token.assert_called_once_with()
def test_changelog_post_complete(mocker):
mocker.patch('semantic_release.cli.get_current_version', return_value='current')
mock_previous_version = mocker.patch('semantic_release.cli.get_previous_version',
return_value='previous')
mock_generate_changelog = mocker.patch('semantic_release.cli.generate_changelog',
return_value='super changelog')
mock_markdown_changelog = mocker.patch('semantic_release.cli.markdown_changelog',
return_value='super md changelog')
mock_check_token = mocker.patch('semantic_release.cli.check_token',
return_value=True)
mock_get_owner_name = mocker.patch('semantic_release.cli.get_repository_owner_and_name',
return_value=('owner', 'name'))
mock_post_changelog = mocker.patch('semantic_release.cli.post_changelog')
changelog(unreleased=True, post=True)
mock_previous_version.assert_called_once_with('current')
mock_generate_changelog.assert_called_once_with('current', None)
mock_markdown_changelog.assert_any_call('current', 'super changelog', header=False)
mock_check_token.assert_called_once_with()
mock_get_owner_name.assert_called_once_with()
mock_post_changelog.assert_called_once_with('owner', 'name', 'current',
'super md changelog')
def test_changelog_raises_exception_when_no_current_version(mocker):
mocker.patch('semantic_release.cli.get_current_version', return_value=None)
with pytest.raises(ImproperConfigurationError):
changelog()
@mock.patch('semantic_release.cli.debug')
def test_main_debug(mock_debug, runner):
runner.invoke(main, ['version', '--noop'])
assert mock_debug.called
| 47.808664
| 97
| 0.708941
|
795158bc3d98d0b60da961b8d2ff0a91e982952d
| 73,097
|
py
|
Python
|
examples/pytorch_pretrained_bert/modeling_conv.py
|
ankit-ai/BertQA-Attention-on-Steroids
|
49c3de360f88f55c8442b9f8153af56c28a689a9
|
[
"Apache-2.0"
] | 122
|
2019-03-21T05:47:45.000Z
|
2021-12-16T06:57:37.000Z
|
examples/pytorch_pretrained_bert/modeling_conv.py
|
TigerMachineLearning/BertQA-Attention-on-Steroids
|
49c3de360f88f55c8442b9f8153af56c28a689a9
|
[
"Apache-2.0"
] | 2
|
2019-06-10T03:35:06.000Z
|
2020-04-16T07:23:23.000Z
|
examples/pytorch_pretrained_bert/modeling_conv.py
|
TigerMachineLearning/BertQA-Attention-on-Steroids
|
49c3de360f88f55c8442b9f8153af56c28a689a9
|
[
"Apache-2.0"
] | 16
|
2019-03-21T12:21:10.000Z
|
2021-12-16T06:47:59.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
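# Hedged illustration (not part of the original file): the OpenAI GPT variant mentioned in the
# docstring above can be written as its own helper; it is a tanh-based approximation of the
# erf-based gelu() and typically agrees with it to within about 1e-3.
def gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))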
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
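# Hedged usage sketch (not part of the original file; the values are illustrative): BertConfig
# can be built either from an int vocabulary size plus keyword overrides, or from a JSON config
# file via BertConfig.from_json_file(path).
def _example_bert_config():
    return BertConfig(vocab_size_or_config_json_file=30522,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072)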
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
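# Hedged sketch (not part of the original file; the function name is illustrative): stripped of
# the head reshaping, the forward pass above is plain scaled dot-product attention over tensors
# of shape (batch, heads, seq_len, head_size) with an additive mask.
def _scaled_dot_product_attention(query, key, value, additive_mask):
    scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(query.size(-1))
    probs = nn.Softmax(dim=-1)(scores + additive_mask)
    return torch.matmul(probs, value)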
class BertDeSelfOutput(nn.Module):
def __init__(self, config):
super(BertDeSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qdense = nn.Linear(config.hidden_size, config.hidden_size)
self.qLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.qdropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, chidden_states,qhidden_states, cinput_tensor,qinput_tensor):
chidden_states = self.dense(chidden_states)
chidden_states = self.dropout(chidden_states)
chidden_states = self.LayerNorm(chidden_states + cinput_tensor)
        qhidden_states = self.qdense(qhidden_states)
        qhidden_states = self.qdropout(qhidden_states)
        qhidden_states = self.qLayerNorm(qhidden_states + qinput_tensor)
return chidden_states,qhidden_states
class BertDeAttention(nn.Module):
def __init__(self, config):
super(BertDeAttention, self).__init__()
self.self = BertMultiAttention(config)
self.output = BertDeSelfOutput(config) #Can use De here
def forward(self, cinput_tensor,qinput_tensor, attention_mask, qattention_mask):
cself_output,qself_output = self.self(cinput_tensor,qinput_tensor, attention_mask,qattention_mask)
cattention_output,qattention_output = self.output(cself_output,qself_output, cinput_tensor,qinput_tensor)
return cattention_output,qattention_output
class BertDeIntermediate(nn.Module):
def __init__(self, config):
super(BertDeIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.qdense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, chidden_states,qhidden_states):
#print('In DeIntermediate -dim of chidden_states is',chidden_states.size())
chidden_states = self.dense(chidden_states)
chidden_states = self.intermediate_act_fn(chidden_states)
qhidden_states = self.qdense(qhidden_states)
qhidden_states = self.intermediate_act_fn(qhidden_states)
return chidden_states,qhidden_states
class BertDeOutput(nn.Module):
def __init__(self, config):
super(BertDeOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qdense = nn.Linear(config.intermediate_size, config.hidden_size)
self.qLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.qdropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, chidden_states,qhidden_states, cinput_tensor,qinput_tensor):
#print('In BertDeOutput - size of chidden_states is',chidden_states.size())
chidden_states = self.dense(chidden_states)
chidden_states = self.dropout(chidden_states)
chidden_states = self.LayerNorm(chidden_states + cinput_tensor)
qhidden_states = self.qdense(qhidden_states)
qhidden_states = self.qdropout(qhidden_states)
qhidden_states = self.qLayerNorm(qhidden_states + qinput_tensor)
return chidden_states,qhidden_states
class BertDeLayer(nn.Module):
def __init__(self, config):
super(BertDeLayer, self).__init__()
self.attention = BertDeAttention(config)
self.intermediate = BertDeIntermediate(config)
self.output = BertDeOutput(config)
def forward(self, chidden_states,qhidden_states, attention_mask,qattention_mask):
cattention_output,qattention_output = self.attention(chidden_states,qhidden_states, attention_mask,qattention_mask)
#Call this one more time to calculaye qattention_output^
#print('In DeLayer - dim of cattention_output',cattention_output.size())
cintermediate_output,qintermediate_output = self.intermediate(cattention_output,qattention_output)
clayer_output,qlayer_output = self.output(cintermediate_output,qintermediate_output,cattention_output,qattention_output)
return clayer_output,qlayer_output
class BertDecoder(nn.Module):
def __init__(self, config):
super(BertDecoder, self).__init__()
layer = BertDeLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(6)])
def forward(self, hidden_states, cattention_mask, qattention_mask, output_all_deencoded_layers=True):
call_deencoder_layers = []
qall_deencoder_layers = []
chidden_states = hidden_states
qhidden_states = hidden_states
for layer_module in self.layer:
chidden_states,qhidden_states = layer_module(chidden_states,qhidden_states, cattention_mask,qattention_mask)
if output_all_deencoded_layers:
call_deencoder_layers.append(chidden_states)
qall_deencoder_layers.append(qhidden_states)
if not output_all_deencoded_layers:
call_deencoder_layers.append(chidden_states)
qall_deencoder_layers.append(qhidden_states)
return call_deencoder_layers,qall_deencoder_layers
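# Note on BertDecoder above: both streams start from the same encoder hidden states; each of the
# six BertDeLayer copies then updates the context stream and the question stream jointly through
# the shared cross-attention, and the per-layer outputs are collected when
# output_all_deencoded_layers is True.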
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertMultiAttention(nn.Module):
def __init__(self, config):
super(BertMultiAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
#print('config.hidden_size is',config.hidden_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.query.weight)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.key.weight)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.value.weight)
self.qquery = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qquery.weight)
self.qkey = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qkey.weight)
self.qvalue = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qvalue.weight)
self.cdropout = nn.Dropout(config.attention_probs_dropout_prob)
self.qdropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
    # NOTE:
    # 1. enc_hidden_states holds the context embeddings
    # 2. dec_hidden_states holds the question embeddings
    # The experiment starts with the query taken from dec_hidden_states and the
    # key and value taken from the context.
def forward(self, enc_hidden_states,dec_hidden_states, attention_mask,qattention_mask):
#print('forward of decoder')
#print('shape of dec_hidden_states is',dec_hidden_states.shape)
#print('size of self.all_head_size is',self.all_head_size)
mixed_query_layer = self.query(dec_hidden_states)
mixed_key_layer = self.key(enc_hidden_states)
mixed_value_layer = self.value(enc_hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.cdropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
#--- Q2C
qmixed_query_layer = self.qquery(enc_hidden_states)
qmixed_key_layer = self.qkey(dec_hidden_states)
qmixed_value_layer = self.qvalue(dec_hidden_states)
qquery_layer = self.transpose_for_scores(qmixed_query_layer)
qkey_layer = self.transpose_for_scores(qmixed_key_layer)
qvalue_layer = self.transpose_for_scores(qmixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
qattention_scores = torch.matmul(qquery_layer, qkey_layer.transpose(-1, -2))
qattention_scores = qattention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
qattention_scores = qattention_scores + qattention_mask
# Normalize the attention scores to probabilities.
qattention_probs = nn.Softmax(dim=-1)(qattention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
qattention_probs = self.qdropout(qattention_probs)
q_layer = torch.matmul(qattention_probs, qvalue_layer)
q_layer = q_layer.permute(0, 2, 1, 3).contiguous()
new_q_layer_shape = q_layer.size()[:-2] + (self.all_head_size,)
q_layer = q_layer.view(*new_q_layer_shape)
return context_layer,q_layer
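# Note on BertMultiAttention above: the same scaled dot-product attention runs twice with shared
# masking logic -- first with queries projected from dec_hidden_states (the question stream)
# against keys/values from enc_hidden_states (the context stream), then, in the "Q2C" block,
# with the roles reversed -- and the two attended representations are returned as a pair.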
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
#print('attention_output is',attention_output.shape)
intermediate_output = self.intermediate(attention_output)
#print('intermediate_output is',intermediate_output.shape)
layer_output = self.output(intermediate_output, attention_output)
#print('layer_output is',layer_output.shape)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
#print('size of hidden_states in BertEncoder is',hidden_states.shape)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pre-trained on top of the hidden state associated with the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
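        # Illustrative example: a mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0], so the padded
        # position contributes essentially nothing after the softmax.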
#print('extended_attention_mask',extended_attention_mask)
#print('attention_mask',attention_mask)
#print('token_type_ids',token_type_ids)
qattention_mask = attention_mask - token_type_ids
cattention_mask = attention_mask - qattention_mask
#print('*************************')
#print('cattention_mask',cattention_mask)
#print('qattention_mask',qattention_mask)
cextended_attention_mask = cattention_mask.unsqueeze(1).unsqueeze(2)
cextended_attention_mask = cextended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
cextended_attention_mask = (1.0 - cextended_attention_mask) * -10000.0
qextended_attention_mask = qattention_mask.unsqueeze(1).unsqueeze(2)
qextended_attention_mask = qextended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
qextended_attention_mask = (1.0 - qextended_attention_mask) * -10000.0
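        # Illustrative example (assuming segment 0 holds the question and segment 1 the context,
        # as in the usual SQuAD packing): attention_mask [1, 1, 1, 1, 0] with token_type_ids
        # [0, 0, 1, 1, 0] gives qattention_mask [1, 1, 0, 0, 0] and cattention_mask [0, 0, 1, 1, 0].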
#raise SystemExit
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
#print('*** encoded_layers is',encoded_layers.shape)
sequence_output = encoded_layers[-1]
#print('*** sequence_output is',sequence_output.shape)
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
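        # Note: this modified BertModel returns (context mask, question mask, sequence output)
        # rather than the original (encoded_layers, pooled_output); heads other than the custom
        # BertForQuestionAnswering below would need to be adapted to this signature.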
        return cextended_attention_mask, qextended_attention_mask, sequence_output  # originally: encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels set in [0, ..., vocab_size - 1]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(PreTrainedBertModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels set in [0, ..., vocab_size - 1]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(PreTrainedBertModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(PreTrainedBertModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(PreTrainedBertModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(PreTrainedBertModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(PreTrainedBertModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
self.enc_trans = nn.Linear(2*config.max_position_embeddings,config.max_position_embeddings)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
#print('hidden size in QA is',config.hidden_size)
self.apply(self.init_bert_weights)
self.conv1d = nn.Conv1d(in_channels=2,out_channels=1,kernel_size=3,padding=1)
self.decoder = BertDecoder(config)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
ks=3
#convs
self.conv_512_1= nn.Conv1d(in_channels=768, out_channels=512, kernel_size=ks, padding=1)
self.conv_512_2= nn.Conv1d(in_channels=512, out_channels=512, kernel_size=ks, padding=1)
self.conv_256_1= nn.Conv1d(in_channels=512, out_channels=256, kernel_size=ks, padding=1)
self.conv_256_2= nn.Conv1d(in_channels=256, out_channels=256, kernel_size=ks, padding=1)
self.conv_128_1= nn.Conv1d(in_channels=256, out_channels=128, kernel_size=ks, padding=1)
self.conv_128_2= nn.Conv1d(in_channels=128, out_channels=128, kernel_size=ks, padding=1)
self.conv_64_1= nn.Conv1d(in_channels=128, out_channels=64, kernel_size=ks, padding=1)
self.conv_64_2= nn.Conv1d(in_channels=64, out_channels=64, kernel_size=ks, padding=1)
self.conv_32_1= nn.Conv1d(in_channels=64, out_channels=32, kernel_size=ks, padding=1)
self.conv_32_2= nn.Conv1d(in_channels=32, out_channels=32, kernel_size=ks, padding=1)
self.conv_16_1= nn.Conv1d(in_channels=32, out_channels=16, kernel_size=ks, padding=1)
self.conv_16_2= nn.Conv1d(in_channels=16, out_channels=16, kernel_size=ks, padding=1)
self.conv_8_1= nn.Conv1d(in_channels=16, out_channels=8, kernel_size=ks, padding=1)
self.conv_8_2= nn.Conv1d(in_channels=8, out_channels=8, kernel_size=ks, padding=1)
self.conv_4_1= nn.Conv1d(in_channels=8, out_channels=4, kernel_size=ks, padding=1)
self.conv_4_2= nn.Conv1d(in_channels=4, out_channels=4, kernel_size=ks, padding=1)
self.conv_out=nn.Conv1d(in_channels=4, out_channels=2, kernel_size=ks, padding=1)
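        # The Conv1d stack above progressively reduces the channel (hidden) dimension,
        # 768 -> 512 -> 256 -> ... -> 4 -> 2, while kernel_size=3 with padding=1 keeps the
        # sequence length unchanged; the final two channels are later split into start/end logits.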
#Freeze embedding layers of Bert
#for param in self.bert.parameters():
# param.requires_grad = False
# print(param)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
c_attention_mask,q_attention_mask,sequence_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
#print('shape of sequence_output',sequence_output.shape)
#Ankit addition - Decoder
cdeencoded_layers,qdeencoded_layers = self.decoder(sequence_output, #2d --> 1d translated
c_attention_mask,q_attention_mask,
output_all_deencoded_layers=False)#output_all_encoded_layers)
cdeencoded_layers = cdeencoded_layers[-1]
qdeencoded_layers = qdeencoded_layers[-1]
cdeencoded_layers = cdeencoded_layers.unsqueeze(-1)
qdeencoded_layers = qdeencoded_layers.unsqueeze(-1)
enc_cat = torch.cat((cdeencoded_layers,qdeencoded_layers), dim=-1)
#print('enc_cat size is',enc_cat.size())
#enc_cat = enc_cat.permute(0,2,1)
encshape = enc_cat.shape
#print('AFTERPERMUTE - enc_cat size is',enc_cat.size())
#sequence_output1d = self.enc_trans(enc_cat)
#print('Dim of sequence_output is',sequence_output1d.size())
enc_cat = enc_cat.reshape(-1,enc_cat.shape[2],enc_cat.shape[3]).contiguous()
#print('AFTER : enc_cat size is',enc_cat.size())
enc_cat = enc_cat.permute(0,2,1).contiguous()
sequence_output1d = self.conv1d(enc_cat)
#print('shape of sequence_output1d',sequence_output1d.shape)
sequence_output1d = sequence_output1d.squeeze(1).contiguous()
sequence_output1d = sequence_output1d.reshape(encshape[0],encshape[1],encshape[2])
#Skip connection with bert embeddings
sequence_output1d = self.LayerNorm(sequence_output + sequence_output1d)
sequence_output1d = sequence_output1d.permute(0,2,1).contiguous()
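        # shape is now [batch_size, hidden_size, seq_len], so the Conv1d stack treats the hidden
        # dimension as channels and convolves along the sequence axis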
#print('seq after perm: ', sequence_output.shape)
out_512_1=self.conv_512_1(sequence_output1d)
#print('out 512 1 shape', out_512_1.shape)
out_512_2=self.conv_512_2(out_512_1)
#print('out 512 2 shape', out_512_2.shape)
#elem_1=self.LayerNorm(sequence_output+out_512_2)
#print('512 size: ', elem_1.shape)
out_256_1=self.conv_256_1(out_512_2)
out_256_2=self.conv_256_2(out_256_1)
#elem_2=self.LayerNorm(sequence_output+out_256_2)
#print('256 size: ', elem_2.shape)
out_128_1=self.conv_128_1(out_256_2)
out_128_2=self.conv_128_2(out_128_1)
#elem_3=self.LayerNorm(sequence_output+out_128_2)
#print('128 size: ', elem_3.shape)
out_64_1=self.conv_64_1(out_128_2)
out_64_2=self.conv_64_2(out_64_1)
#elem_4=self.LayerNorm(sequence_output+out_64_2)
#print('64 size: ', elem_4.shape)
out_32_1=self.conv_32_1(out_64_2)
out_32_2=self.conv_32_2(out_32_1)
#elem_5=self.LayerNorm(sequence_output+out_32_2)
#print('32 size: ', elem_5.shape)
out_16_1=self.conv_16_1(out_32_2)
out_16_2=self.conv_16_2(out_16_1)
#elem_6=self.LayerNorm(sequence_output+out_16_2)
#print('16 size: ', elem_6.shape)
out_8_1=self.conv_8_1(out_16_2)
out_8_2=self.conv_8_2(out_8_1)
#elem_7=self.LayerNorm(sequence_output+out_8_2)
#print('8 size: ', elem_7.shape)
out_4_1=self.conv_4_1(out_8_2)
out_4_2=self.conv_4_2(out_4_1)
#elem_8=self.LayerNorm(sequence_output+out_4_2)
#print('4 size: ', elem_8.shape)
out=self.conv_out(out_4_2)
#print('out before perm: ', out.shape)
out = out.permute(0,2,1).contiguous()
#out = self.LayerNorm2(out)
#print('out after perm: ', out.shape)
logits=out
#logits = self.qa_outputs(sequence_output1d)
#print('Dim of logits is',logits.size())
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split may add an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
| 49.557288
| 145
| 0.686198
|
795159781025b9c3bf600d1d006e88ef620ca937
| 5,909
|
py
|
Python
|
src/3-postprocess/peaks.py
|
DirkEilander/compound_hotspots
|
f9d7960633be80e8e24d2f2563df367cc3f060c6
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T07:02:13.000Z
|
2022-01-17T07:02:13.000Z
|
src/3-postprocess/peaks.py
|
DirkEilander/compound_hotspots
|
f9d7960633be80e8e24d2f2563df367cc3f060c6
|
[
"BSD-3-Clause"
] | null | null | null |
src/3-postprocess/peaks.py
|
DirkEilander/compound_hotspots
|
f9d7960633be80e8e24d2f2563df367cc3f060c6
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T02:48:28.000Z
|
2022-01-17T02:48:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dirk Eilander (contact: dirk.eilander@vu.nl)
# Created: Nov 2nd 2018
import xarray as xr
import numpy as np
import scipy
__all__ = ['get_peaks', 'peaks_over_threshold', 'annual_max']
def get_peaks(ts, min_dist=1, dim='time', chunks={}):
"""Returns a DataArray with peak values, all other values are set to NaN.
Peaks are defined as a high data point surrounded by lower data points.
If multiple high data points are surrounded by lower data points (a flat peak)
only the first high data point is selected. The minimum distance between peaks
can be set using the min_dist argument.
Parameters
----------
ts : xarray DataArray
time series
min_dist : int, optional
minimum distance between peaks [ts step] (the default is 1)
dim : str, optional
name of time dimension in ts (the default is 'time')
Returns
-------
xarray DataArray
timeseries of peaks
"""
# get sign of trend
trend = xr.ufuncs.sign(ts.diff(dim=dim))
# set flats to negative trend to deal with flat peaks
trend = xr.where(trend==0, -1, trend)
# peaks where previous and next points are lower
peaks = ts.where(trend.diff(dim=dim, label='lower')==-2).chunk(chunks)
if min_dist > 1:
# import pdb; pdb.set_trace()
# max_in_wdw = peaks.rolling(center=True, min_periods=1, **{dim: min_dist*2}).max()
# see git issue https://github.com/pydata/xarray/issues/3165
max_in_wdw = peaks.rolling(center=True, min_periods=1, **{dim: min_dist*2}).construct('window').max('window')
peaks = peaks.where(max_in_wdw==peaks)
return peaks
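# A minimal usage sketch for get_peaks (illustrative, not part of the original module; the
# synthetic series is made up, and dask must be installed because get_peaks chunks its output).
def _example_get_peaks():
    import pandas as pd
    time = pd.date_range('2000-01-01', periods=10, freq='D')
    ts = xr.DataArray([0, 2, 1, 3, 3, 1, 0, 5, 2, 1], coords={'time': time}, dims='time')
    # non-peak positions become NaN; of the flat peak (3, 3) only the first sample is kept
    return get_peaks(ts, min_dist=2)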
def peaks_over_threshold(ts, threshold, min_dist=1, dim='time', chunks={}):
"""Returns a DataArray with Peaks Over Threshold (POT), all other values are
set to NaN.
Peaks are defined as a high data point surrounded by lower data points.
If multiple high data points are surrounded by lower data points (a flat peak)
only the first high data point is selected. The minimum distance between peaks
can be set using the min_dist argument.
Parameters
----------
ts : xarray DataArray
time series
threshold : float
threshold value
min_dist : int, optional
        minimum distance between peaks [ts step] (the default is 1)
dim : str, optional
name of time dimension in ts (the default is 'time')
Returns
-------
xarray DataArray
timeseries of peaks over threshold
"""
peaks = get_peaks(ts, dim=dim, min_dist=min_dist, chunks=chunks)
# peaks over threshold (POT)
peaks = peaks.where(peaks > threshold)
return peaks
def annual_max(da, min_dist=1, dim='time', reduce=False):
"""Returns a DataArray with Annual Maxima (AM) peaks
Peaks are defined as a high data point surrounded by lower data points.
If multiple high data points are surrounded by lower data points (a flat peak)
only the first high data point is selected. The minimum distance between peaks
can be set using the min_dist argument.
Parameters
----------
    da : xarray DataArray
        time series
    min_dist : int, optional
        minimum distance between peaks [ts step] (the default is 1)
dim : str, optional
name of time dimension in ts (the default is 'time')
reduce : bool, optional
if True, reduce the AM series to a year timestep; if False, keep full timeseries
with all values but the AM set to NaN (the default is False)
Returns
-------
xarray DataArray
timeseries of annual maxima peaks
"""
peaks = get_peaks(da, min_dist=min_dist, dim=dim)
grp = '{}.year'.format(dim)
peaks_grp = peaks.groupby(grp)
    if not reduce:
peaks_am = peaks.where(peaks_grp.max(dim=dim) == peaks_grp)
else:
peaks_am = peaks_grp.max(dim=dim)
return peaks_am
def nanpercentile(da, q, dim='time', interpolation='linear'):
"""Returns the qth percentile of the data along the specified core dimension,
while ignoring nan values.
Parameters
----------
da: xarray DataArray
Input data array
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
dim : str, optional
name of the core dimension (the default is 'time')
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
Returns
-------
percentile : xarray DataArray
        The core dimension is reduced to quantiles and returned as the last dimension.
"""
def _nanpercentile(*args, **kwargs):
"""nanpercentile, but with q moved to the last axis"""
return np.moveaxis(np.nanpercentile(*args, **kwargs), 0, -1)
# nanpercentile parameters
q = np.atleast_1d(q)
q_kwargs = dict(q=q, axis=-1, interpolation=interpolation)
# apply_ufunc parameters
kwargs = dict(
input_core_dims=[[dim]],
output_core_dims=[['percentile']],
dask='parallelized',
output_dtypes=[float],
output_sizes={'percentile': q.size} # on output, <dim> is reduced to length q.size
)
if 'percentile' in da.coords:
da = da.drop('percentile')
percentile = xr.apply_ufunc(_nanpercentile, da.chunk({dim: -1}), kwargs=q_kwargs, **kwargs)
percentile['percentile'] = xr.Variable('percentile', q)
return percentile.squeeze() # if q.size=1 remove dim
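# A minimal usage sketch for nanpercentile (illustrative, not part of the original module;
# requires dask because apply_ufunc is called with dask='parallelized').
def _example_nanpercentile():
    import pandas as pd
    time = pd.date_range('2000-01-01', periods=365, freq='D')
    da = xr.DataArray(np.random.rand(365, 4), coords={'time': time}, dims=('time', 'cell'))
    # 50th and 90th percentile along 'time'; the result gains a trailing 'percentile' dimension
    return nanpercentile(da.chunk({'cell': 2}), q=[50, 90], dim='time')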
| 37.636943
| 118
| 0.636994
|
79515a5b7fa0aea185bc44cdb12a87246824ca6a
| 12,386
|
py
|
Python
|
nipype/utils/config.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | null | null | null |
nipype/utils/config.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 2
|
2018-04-26T12:09:32.000Z
|
2018-04-27T06:36:49.000Z
|
nipype/utils/config.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 1
|
2019-11-14T14:16:57.000Z
|
2019-11-14T14:16:57.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Created on 20 Apr 2010
logging options : INFO, DEBUG
hash_method : content, timestamp
@author: Chris Filo Gorgolewski
'''
import os
import sys
import errno
import atexit
from warnings import warn
from distutils.version import LooseVersion
import configparser
import numpy as np
from simplejson import load, dump
from .misc import str2bool
from filelock import SoftFileLock
CONFIG_DEPRECATIONS = {
'profile_runtime': ('monitoring.enabled', '1.0'),
'filemanip_level': ('logging.utils_level', '1.0'),
}
NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0')
DEFAULT_CONFIG_TPL = """\
[logging]
workflow_level = INFO
utils_level = INFO
interface_level = INFO
log_to_file = false
log_directory = {log_dir}
log_size = 16384000
log_rotate = 4
[execution]
create_report = true
crashdump_dir = {crashdump_dir}
hash_method = timestamp
job_finished_timeout = 5
keep_inputs = false
local_hash_check = true
matplotlib_backend = Agg
plugin = Linear
remove_node_directories = false
remove_unnecessary_outputs = true
try_hard_link_datasink = true
single_thread_matlab = true
crashfile_format = pklz
stop_on_first_crash = false
stop_on_first_rerun = false
use_relative_paths = false
stop_on_unknown_version = false
write_provenance = false
parameterize_dirs = true
poll_sleep_duration = 2
xvfb_max_wait = 10
check_version = true
[monitoring]
enabled = false
sample_frequency = 1
summary_append = true
[check]
interval = 1209600
""".format
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
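# Note: on Python 3, os.makedirs(path, exist_ok=True) is a rough one-line equivalent of
# mkdir_p; the explicit helper is kept for the existing call sites below.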
class NipypeConfig(object):
"""Base nipype config class"""
def __init__(self, *args, **kwargs):
self._config = configparser.ConfigParser()
self._cwd = None
config_dir = os.path.expanduser('~/.nipype')
self.data_file = os.path.join(config_dir, 'nipype.json')
self.set_default_config()
self._display = None
self._resource_monitor = None
self._config.read(
[os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg'])
for option in CONFIG_DEPRECATIONS:
for section in ['execution', 'logging', 'monitoring']:
if self.has_option(section, option):
new_section, new_option = CONFIG_DEPRECATIONS[option][
0].split('.')
if not self.has_option(new_section, new_option):
# Warn implicit in get
self.set(new_section, new_option,
self.get(section, option))
@property
def cwd(self):
"""Cache current working directory ASAP"""
        # Run getcwd only once, preventing multiprocessing from failing with an error
        # after the working directory has changed to a wrong (or removed) path
if self._cwd is None:
try:
self._cwd = os.getcwd()
except OSError:
warn('Trying to run Nipype from a nonexistent directory "{}".'.
format(os.getenv('PWD', 'unknown')), RuntimeWarning)
raise
return self._cwd
def set_default_config(self):
"""Read default settings template and set into config object"""
default_cfg = DEFAULT_CONFIG_TPL(
log_dir=os.path.expanduser(
'~'), # Get $HOME in a platform-agnostic way
crashdump_dir=self.cwd # Read cached cwd
)
try:
self._config.read_string(default_cfg) # Python >= 3.2
except AttributeError:
from io import StringIO
self._config.readfp(StringIO(default_cfg))
def enable_debug_mode(self):
"""Enables debug configuration"""
from .. import logging
self._config.set('execution', 'stop_on_first_crash', 'true')
self._config.set('execution', 'remove_unnecessary_outputs', 'false')
self._config.set('execution', 'keep_inputs', 'true')
self._config.set('logging', 'workflow_level', 'DEBUG')
self._config.set('logging', 'interface_level', 'DEBUG')
self._config.set('logging', 'utils_level', 'DEBUG')
logging.update_logging(self._config)
def set_log_dir(self, log_dir):
"""Sets logging directory
This should be the first thing that is done before any nipype class
with logging is imported.
"""
self._config.set('logging', 'log_directory', log_dir)
def get(self, section, option, default=None):
"""Get an option"""
if option in CONFIG_DEPRECATIONS:
msg = ('Config option "%s" has been deprecated as of nipype %s. '
'Please use "%s" instead.') % (
option, CONFIG_DEPRECATIONS[option][1],
CONFIG_DEPRECATIONS[option][0])
warn(msg)
section, option = CONFIG_DEPRECATIONS[option][0].split('.')
if self._config.has_option(section, option):
return self._config.get(section, option)
return default
def set(self, section, option, value):
"""Set new value on option"""
if isinstance(value, bool):
value = str(value)
if option in CONFIG_DEPRECATIONS:
msg = ('Config option "%s" has been deprecated as of nipype %s. '
'Please use "%s" instead.') % (
option, CONFIG_DEPRECATIONS[option][1],
CONFIG_DEPRECATIONS[option][0])
warn(msg)
section, option = CONFIG_DEPRECATIONS[option][0].split('.')
return self._config.set(section, option, value)
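    # Illustrative example of the deprecation remapping above:
    #   config.set('execution', 'profile_runtime', True)
    # emits a deprecation warning and writes the value to 'monitoring.enabled' instead.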
def getboolean(self, section, option):
"""Get a boolean option from section"""
return self._config.getboolean(section, option)
def has_option(self, section, option):
"""Check if option exists in section"""
return self._config.has_option(section, option)
@property
def _sections(self):
return self._config._sections
def get_data(self, key):
"""Read options file"""
if not os.path.exists(self.data_file):
return None
with SoftFileLock('%s.lock' % self.data_file):
with open(self.data_file, 'rt') as file:
datadict = load(file)
if key in datadict:
return datadict[key]
return None
def save_data(self, key, value):
"""Store config flie"""
datadict = {}
if os.path.exists(self.data_file):
with SoftFileLock('%s.lock' % self.data_file):
with open(self.data_file, 'rt') as file:
datadict = load(file)
else:
dirname = os.path.dirname(self.data_file)
if not os.path.exists(dirname):
mkdir_p(dirname)
with SoftFileLock('%s.lock' % self.data_file):
with open(self.data_file, 'wt') as file:
datadict[key] = value
dump(datadict, file)
def update_config(self, config_dict):
"""Extend internal dictionary with config_dict"""
for section in ['execution', 'logging', 'check']:
if section in config_dict:
for key, val in list(config_dict[section].items()):
if not key.startswith('__'):
self._config.set(section, key, str(val))
def update_matplotlib(self):
"""Set backend on matplotlib from options"""
import matplotlib
matplotlib.use(self.get('execution', 'matplotlib_backend'))
def enable_provenance(self):
"""Sets provenance storing on"""
self._config.set('execution', 'write_provenance', 'true')
self._config.set('execution', 'hash_method', 'content')
@property
def resource_monitor(self):
"""Check if resource_monitor is available"""
if self._resource_monitor is not None:
return self._resource_monitor
# Cache config from nipype config
self.resource_monitor = str2bool(
self._config.get('monitoring', 'enabled')) or False
return self._resource_monitor
@resource_monitor.setter
def resource_monitor(self, value):
# Accept string true/false values
if isinstance(value, (str, bytes)):
value = str2bool(value.lower())
if value is False:
self._resource_monitor = False
elif value is True:
if not self._resource_monitor:
# Before setting self._resource_monitor check psutil
# availability
self._resource_monitor = False
try:
import psutil
self._resource_monitor = LooseVersion(
psutil.__version__) >= LooseVersion('5.0')
except ImportError:
pass
finally:
if not self._resource_monitor:
warn('Could not enable the resource monitor: '
'psutil>=5.0 could not be imported.')
self._config.set('monitoring', 'enabled',
('%s' % self._resource_monitor).lower())
def enable_resource_monitor(self):
"""Sets the resource monitor on"""
self.resource_monitor = True
def disable_resource_monitor(self):
"""Sets the resource monitor off"""
self.resource_monitor = False
def get_display(self):
"""Returns the first display available"""
# Check if an Xorg server is listening
# import subprocess as sp
# if not hasattr(sp, 'DEVNULL'):
# setattr(sp, 'DEVNULL', os.devnull)
# x_listening = bool(sp.call('ps au | grep -v grep | grep -i xorg',
# shell=True, stdout=sp.DEVNULL))
if self._display is not None:
return ':%d' % self._display.new_display
sysdisplay = None
if self._config.has_option('execution', 'display_variable'):
sysdisplay = self._config.get('execution', 'display_variable')
sysdisplay = sysdisplay or os.getenv('DISPLAY')
if sysdisplay:
from collections import namedtuple
def _mock():
pass
# Store a fake Xvfb object. Format - <host>:<display>[.<screen>]
ndisp = sysdisplay.split(':')[-1].split('.')[0]
Xvfb = namedtuple('Xvfb', ['new_display', 'stop'])
self._display = Xvfb(int(ndisp), _mock)
return self.get_display()
else:
if 'darwin' in sys.platform:
raise RuntimeError(
'Xvfb requires root permissions to run in OSX. Please '
'make sure that an X server is listening and set the '
'appropriate config on either $DISPLAY or nipype\'s '
'"display_variable" config. Valid X servers include '
'VNC, XQuartz, or manually started Xvfb.')
# If $DISPLAY is empty, it confuses Xvfb so unset
if sysdisplay == '':
del os.environ['DISPLAY']
try:
from xvfbwrapper import Xvfb
except ImportError:
raise RuntimeError(
'A display server was required, but $DISPLAY is not '
'defined and Xvfb could not be imported.')
self._display = Xvfb(nolisten='tcp')
self._display.start()
# Older versions of xvfbwrapper used vdisplay_num
if not hasattr(self._display, 'new_display'):
setattr(self._display, 'new_display',
self._display.vdisplay_num)
return self.get_display()
def stop_display(self):
"""Closes the display if started"""
if self._display is not None:
from .. import logging
self._display.stop()
logging.getLogger('nipype.interface').debug(
'Closing display (if virtual)')
@atexit.register
def free_display():
"""Stop virtual display (if it is up)"""
from .. import config
config.stop_display()
| 34.121212
| 79
| 0.591313
|
79515bdb18c08bbced49fbaf4464b1d47a1db895
| 6,434
|
py
|
Python
|
furnace/engine/evaluator.py
|
lxxue/cil-road-segmentation-2019
|
c6477556dc3d6d9c8ed2f2a3f185b4d986a03bb4
|
[
"MIT"
] | null | null | null |
furnace/engine/evaluator.py
|
lxxue/cil-road-segmentation-2019
|
c6477556dc3d6d9c8ed2f2a3f185b4d986a03bb4
|
[
"MIT"
] | null | null | null |
furnace/engine/evaluator.py
|
lxxue/cil-road-segmentation-2019
|
c6477556dc3d6d9c8ed2f2a3f185b4d986a03bb4
|
[
"MIT"
] | 1
|
2020-06-08T02:09:18.000Z
|
2020-06-08T02:09:18.000Z
|
import os
import os.path as osp
import cv2
import numpy as np
import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from engine.logger import get_logger
from utils.pyt_utils import load_model, link_file, ensure_dir
from utils.img_utils import pad_image_to_shape, normalize
logger = get_logger()
# The Evaluator class manages the whole evaluation process, with both distributed
# and non-distributed processing.
class Evaluator(object):
def __init__(self, dataset, class_num, image_mean, image_std, network,
multi_scales, is_flip, devices,
verbose=False, save_path=None):
self.dataset = dataset
self.ndata = self.dataset.get_length()
self.class_num = class_num
self.image_mean = image_mean
self.image_std = image_std
self.multi_scales = multi_scales
self.is_flip = is_flip
self.network = network
self.devices = devices
self.context = mp.get_context('spawn')
self.val_func = None
self.results_queue = self.context.Queue(self.ndata)
self.verbose = verbose
self.save_path = save_path
if save_path is not None:
ensure_dir(save_path)
    # Function for running the evaluation process
def run(self, model_path, model_indice, log_file, log_file_link):
"""Evaluate models."""
if '.pth' in model_indice:
models = [model_indice, ]
else:
models = [os.path.join(model_path,
'epoch-%s.pth' % model_indice), ]
results = open(log_file, 'a')
link_file(log_file, log_file_link)
for model in models:
logger.info("Load Model: %s" % model)
self.val_func = load_model(self.network, model)
result_line = self.multi_process_evaluation()
results.write('Model: ' + model + '\n')
results.write(result_line)
results.write('\n')
results.flush()
results.close()
# multi-device distributed processing if the dataset is too large
def multi_process_evaluation(self):
start_eval_time = time.perf_counter()
nr_devices = len(self.devices)
stride = int(np.ceil(self.ndata / nr_devices))
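        # e.g. ndata=10 on 3 devices -> stride=4 and shred ranges [0:4), [4:8), [8:10)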
# start multi-process on multi-gpu
procs = []
for d in range(nr_devices):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[d]
logger.info(
'GPU %s handle %d data.' % (device, len(shred_list)))
p = self.context.Process(target=self.worker,
args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
all_results = []
for _ in tqdm(range(self.ndata)):
t = self.results_queue.get()
all_results.append(t)
if self.verbose:
self.compute_metric(all_results)
for p in procs:
p.join()
result_line = self.compute_metric(all_results)
logger.info(
'Evaluation Elapsed Time: %.2fs' % (
time.perf_counter() - start_eval_time))
return result_line
def worker(self, shred_list, device):
start_load_time = time.time()
logger.info('Load Model on Device %d: %.2fs' % (
device, time.time() - start_load_time))
for idx in shred_list:
dd = self.dataset[idx]
results_dict = self.func_per_iteration(dd, device)
self.results_queue.put(results_dict)
def func_per_iteration(self, data, device):
raise NotImplementedError
    # implemented by subclasses (see eval.py)
def compute_metric(self, results):
raise NotImplementedError
    # Evaluate the whole image at once and select the most probable prediction.
def whole_eval(self, img, output_size, input_size=None, device=None):
if input_size is not None:
img, margin = self.process_image(img, input_size)
else:
img = self.process_image(img, input_size)
pred = self.val_func_process(img, device)
if input_size is not None:
pred = pred[:, margin[0]:(pred.shape[1] - margin[1]),
margin[2]:(pred.shape[2] - margin[3])]
pred = pred.permute(1, 2, 0)
pred = pred.cpu().numpy()
if output_size is not None:
pred = cv2.resize(pred,
(output_size[1], output_size[0]),
interpolation=cv2.INTER_LINEAR)
# pred = pred.argmax(2)
pred = pred[:, :, 1]
return pred
    # Feed one input through the model and return the (optionally flip-averaged) class scores
def val_func_process(self, input_data, device=None):
input_data = np.ascontiguousarray(input_data[None, :, :, :],
dtype=np.float32)
input_data = torch.FloatTensor(input_data).cuda(device)
with torch.cuda.device(input_data.get_device()):
self.val_func.eval()
self.val_func.to(input_data.get_device())
with torch.no_grad():
score = self.val_func(input_data)
score = score[0]
if self.is_flip:
input_data = input_data.flip(-1)
score_flip = self.val_func(input_data)
score_flip = score_flip[0]
score += score_flip.flip(-1)
score = torch.exp(score)
# score = score.data
return score
    # Normalize the input image and pad it to the expected dimensions.
def process_image(self, img, crop_size=None):
p_img = img
if img.shape[2] < 3:
im_b = p_img
im_g = p_img
im_r = p_img
p_img = np.concatenate((im_b, im_g, im_r), axis=2)
p_img = normalize(p_img, self.image_mean, self.image_std)
if crop_size is not None:
p_img, margin = pad_image_to_shape(p_img, crop_size,
cv2.BORDER_CONSTANT, value=0)
p_img = p_img.transpose(2, 0, 1)
return p_img, margin
p_img = p_img.transpose(2, 0, 1)
return p_img
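The two NotImplementedError hooks above are what a concrete evaluator supplies. A minimal sketch of such a subclass (the dataset keys, the toy metric and the class name are illustrative assumptions, not taken from this repository):
from engine.evaluator import Evaluator
class RoadEvaluator(Evaluator):
    def func_per_iteration(self, data, device):
        # Assumed dataset layout: a dict with the image, the label map and a file name.
        img, label, name = data['data'], data['label'], data['fn']
        pred = self.whole_eval(img, output_size=label.shape, device=device)
        return {'pred': pred, 'label': label, 'name': name}
    def compute_metric(self, results):
        # Toy metric: mean pixel accuracy over all evaluated images.
        acc = sum((r['pred'].round() == r['label']).mean() for r in results) / len(results)
        return 'mean pixel accuracy: %.4f' % acc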
| 34.042328
| 77
| 0.580199
|
79515c155a33611319d9e17abf536a7e050a3441
| 15,510
|
py
|
Python
|
test/vanilla/legacy/Expected/AcceptanceTests/BodyComplex/bodycomplex/operations/_basic_operations.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/BodyComplex/bodycomplex/operations/_basic_operations.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/BodyComplex/bodycomplex/operations/_basic_operations.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# fmt: off
def build_get_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/valid')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_put_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2016-02-29"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/valid')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_invalid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/invalid')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/empty')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/null')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_not_provided_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/complex/basic/notprovided')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class BasicOperations(object):
"""BasicOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~bodycomplex.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_valid(
self, **kwargs # type: Any
):
# type: (...) -> "_models.Basic"
"""Get complex type {id: 2, name: 'abc', color: 'YELLOW'}.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Basic, or the result of cls(response)
:rtype: ~bodycomplex.models.Basic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Basic"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_valid_request(
template_url=self.get_valid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Basic", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_valid.metadata = {"url": "/complex/basic/valid"} # type: ignore
@distributed_trace
def put_valid(
self,
complex_body, # type: "_models.Basic"
**kwargs # type: Any
):
# type: (...) -> None
"""Please put {id: 2, name: 'abc', color: 'Magenta'}.
:param complex_body: Please put {id: 2, name: 'abc', color: 'Magenta'}.
:type complex_body: ~bodycomplex.models.Basic
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
json = self._serialize.body(complex_body, "Basic")
request = build_put_valid_request(
content_type=content_type,
json=json,
template_url=self.put_valid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
put_valid.metadata = {"url": "/complex/basic/valid"} # type: ignore
@distributed_trace
def get_invalid(
self, **kwargs # type: Any
):
# type: (...) -> "_models.Basic"
"""Get a basic complex type that is invalid for the local strong type.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Basic, or the result of cls(response)
:rtype: ~bodycomplex.models.Basic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Basic"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_invalid_request(
template_url=self.get_invalid.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Basic", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_invalid.metadata = {"url": "/complex/basic/invalid"} # type: ignore
@distributed_trace
def get_empty(
self, **kwargs # type: Any
):
# type: (...) -> "_models.Basic"
"""Get a basic complex type that is empty.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Basic, or the result of cls(response)
:rtype: ~bodycomplex.models.Basic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Basic"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_empty_request(
template_url=self.get_empty.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Basic", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_empty.metadata = {"url": "/complex/basic/empty"} # type: ignore
@distributed_trace
def get_null(
self, **kwargs # type: Any
):
# type: (...) -> "_models.Basic"
"""Get a basic complex type whose properties are null.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Basic, or the result of cls(response)
:rtype: ~bodycomplex.models.Basic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Basic"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_null_request(
template_url=self.get_null.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Basic", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_null.metadata = {"url": "/complex/basic/null"} # type: ignore
@distributed_trace
def get_not_provided(
self, **kwargs # type: Any
):
# type: (...) -> "_models.Basic"
"""Get a basic complex type while the server doesn't provide a response payload.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Basic, or the result of cls(response)
:rtype: ~bodycomplex.models.Basic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Basic"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_not_provided_request(
template_url=self.get_not_provided.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Basic", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_not_provided.metadata = {"url": "/complex/basic/notprovided"} # type: ignore
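The docstring above says this operation group is reached through a generated client rather than instantiated directly. A hypothetical sketch of that usage (the client class name, constructor arguments and endpoint are assumptions about the generated package, not shown in this file):
from bodycomplex import AutoRestComplexTestService
from bodycomplex.models import Basic
client = AutoRestComplexTestService(base_url="http://localhost:3000")
result = client.basic.get_valid()            # expected: Basic(id=2, name='abc', color='YELLOW')
client.basic.put_valid(Basic(id=2, name="abc", color="Magenta"))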
| 36.153846
| 106
| 0.655255
|
79515c9cac1a0457daf2ae4d45c2a78f723e64f1
| 6,991
|
py
|
Python
|
multiagent/core.py
|
Latronists/Two-Layer-Reinforcement-Learning
|
91dbadf1a385e42c0eeea8a6329ed164edddb41b
|
[
"MIT"
] | null | null | null |
multiagent/core.py
|
Latronists/Two-Layer-Reinforcement-Learning
|
91dbadf1a385e42c0eeea8a6329ed164edddb41b
|
[
"MIT"
] | null | null | null |
multiagent/core.py
|
Latronists/Two-Layer-Reinforcement-Learning
|
91dbadf1a385e42c0eeea8a6329ed164edddb41b
|
[
"MIT"
] | null | null | null |
import numpy as np
# physical/external base state of all entites
class EntityState(object):
def __init__(self):
# physical position
self.p_pos = None
# physical velocity
self.p_vel = None
# state of agents (including communication and internal/mental state)
class AgentState(EntityState):
def __init__(self):
super(AgentState, self).__init__()
# communication utterance
self.c = None
# action of the agent
class Action(object):
def __init__(self):
# physical action
self.u = None
# communication action
self.c = None
# properties and state of physical world entity
class Entity(object):
def __init__(self):
# name
self.name = ''
# properties:
self.size = 0.050
# entity can move / be pushed
self.movable = False
# entity collides with others
self.collide = True
# material density (affects mass)
self.density = 25.0
# color
self.color = None
# max speed and accel
self.max_speed = None
self.accel = None
# state
self.state = EntityState()
# mass
self.initial_mass = 1.0
@property
def mass(self):
return self.initial_mass
# properties of landmark entities
class Landmark(Entity):
def __init__(self):
super(Landmark, self).__init__()
# properties of agent entities
class Agent(Entity):
def __init__(self):
super(Agent, self).__init__()
# agents are movable by default
self.movable = True
        # if silent, the agent cannot send communication signals
self.silent = False
        # if blind, the agent cannot observe the world
self.blind = False
# physical motor noise amount
self.u_noise = None
# communication noise amount
self.c_noise = None
# control range
self.u_range = 1.0
# state
self.state = AgentState()
# action
self.action = Action()
# script behavior to execute
self.action_callback = None
# multi-agent world
class World(object):
def __init__(self):
# list of agents and entities (can change at execution-time!)
self.agents = []
self.landmarks = []
# communication channel dimensionality
self.dim_c = 0
# position dimensionality
self.dim_p = 2
# color dimensionality
self.dim_color = 3
# simulation timestep
self.dt = 0.1
# physical damping
self.damping = 0.25
# contact response parameters
self.contact_force = 1e+2
self.contact_margin = 1e-3
# return all entities in the world
@property
def entities(self):
return self.agents + self.landmarks
# return all agents controllable by external policies
@property
def policy_agents(self):
return [agent for agent in self.agents if agent.action_callback is None]
# return all agents controlled by world scripts
@property
def scripted_agents(self):
return [agent for agent in self.agents if agent.action_callback is not None]
# update state of the world
def step(self):
# set actions for scripted agents
for agent in self.scripted_agents:
agent.action = agent.action_callback(agent, self)
# gather forces applied to entities
p_force = [None] * len(self.entities)
# apply agent physical controls
p_force = self.apply_action_force(p_force)
# apply environment forces
p_force = self.apply_environment_force(p_force)
# integrate physical state
self.integrate_state(p_force)
# update agent state
for agent in self.agents:
self.update_agent_state(agent)
# gather agent action forces
def apply_action_force(self, p_force):
# set applied forces
for i,agent in enumerate(self.agents):
if agent.movable:
noise = np.random.randn(*agent.action.u.shape) * agent.u_noise if agent.u_noise else 0.0
p_force[i] = agent.action.u + noise
return p_force
# gather physical forces acting on entities
def apply_environment_force(self, p_force):
# simple (but inefficient) collision response
for a,entity_a in enumerate(self.entities):
for b,entity_b in enumerate(self.entities):
if(b <= a): continue
[f_a, f_b] = self.get_collision_force(entity_a, entity_b)
if(f_a is not None):
if(p_force[a] is None): p_force[a] = 0.0
p_force[a] = f_a + p_force[a]
if(f_b is not None):
if(p_force[b] is None): p_force[b] = 0.0
p_force[b] = f_b + p_force[b]
return p_force
# integrate physical state
def integrate_state(self, p_force):
for i,entity in enumerate(self.entities):
if not entity.movable: continue
entity.state.p_vel = entity.state.p_vel * (1 - self.damping)
if (p_force[i] is not None):
entity.state.p_vel += (p_force[i] / entity.mass) * self.dt
if entity.max_speed is not None:
speed = np.sqrt(np.square(entity.state.p_vel[0]) + np.square(entity.state.p_vel[1]))
if speed > entity.max_speed:
entity.state.p_vel = entity.state.p_vel / np.sqrt(np.square(entity.state.p_vel[0]) +
np.square(entity.state.p_vel[1])) * entity.max_speed
entity.state.p_pos += entity.state.p_vel * self.dt
def update_agent_state(self, agent):
# set communication state (directly for now)
if agent.silent:
agent.state.c = np.zeros(self.dim_c)
else:
noise = np.random.randn(*agent.action.c.shape) * agent.c_noise if agent.c_noise else 0.0
agent.state.c = agent.action.c + noise
# get collision forces for any contact between two entities
def get_collision_force(self, entity_a, entity_b):
if (not entity_a.collide) or (not entity_b.collide):
return [None, None] # not a collider
if (entity_a is entity_b):
return [None, None] # don't collide against itself
# compute actual distance between entities
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
# minimum allowable distance
dist_min = entity_a.size + entity_b.size
# softmax penetration
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force = self.contact_force * delta_pos / dist * penetration
force_a = +force if entity_a.movable else None
force_b = -force if entity_b.movable else None
return [force_a, force_b]
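The physics loop above is self-contained, so a tiny driver is enough to exercise it. A minimal sketch (the number of agents, the random actions and the step count are arbitrary illustrations):
import numpy as np
world = World()
world.agents = [Agent() for _ in range(2)]
for i, agent in enumerate(world.agents):
    agent.name = 'agent %d' % i
    agent.silent = True                                  # skip communication handling
    agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
    agent.state.p_vel = np.zeros(world.dim_p)
    agent.action.u = np.zeros(world.dim_p)
for _ in range(10):
    for agent in world.agents:
        agent.action.u = np.random.uniform(-1, +1, world.dim_p)
    world.step()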
| 35.48731
| 118
| 0.602918
|
79515d2f037a3eec6a827a2696f87ca8b9a82a69
| 98
|
py
|
Python
|
fibonacci-serisi.py
|
melihcemipek/python
|
a0e6e8e5fbffb481c158f19181c10de6e58a8a13
|
[
"MIT"
] | null | null | null |
fibonacci-serisi.py
|
melihcemipek/python
|
a0e6e8e5fbffb481c158f19181c10de6e58a8a13
|
[
"MIT"
] | null | null | null |
fibonacci-serisi.py
|
melihcemipek/python
|
a0e6e8e5fbffb481c158f19181c10de6e58a8a13
|
[
"MIT"
] | null | null | null |
maks = 100
a, b = 1, 1
print(a)
for i in range(maks):
    print(b)
    a, b = b, a + b
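For reference, the same series can be produced by a generator; this is only an illustrative variant, not part of the original script:
def fib(n):
    a, b = 1, 1
    for _ in range(n):
        yield a
        a, b = b, a + b
print(list(fib(10)))   # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]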
| 10.888889
| 21
| 0.44898
|
79515ddfdb84ae73131a49d896ea33ace93c571b
| 2,834
|
py
|
Python
|
0608.Labour & Income - European Union - Eurostat - Labour Market - Unemployment Rate.py
|
alphacastio/connectors-eurostats
|
297c92f667649469def9b1d140eab0aecb86fa89
|
[
"MIT"
] | null | null | null |
0608.Labour & Income - European Union - Eurostat - Labour Market - Unemployment Rate.py
|
alphacastio/connectors-eurostats
|
297c92f667649469def9b1d140eab0aecb86fa89
|
[
"MIT"
] | null | null | null |
0608.Labour & Income - European Union - Eurostat - Labour Market - Unemployment Rate.py
|
alphacastio/connectors-eurostats
|
297c92f667649469def9b1d140eab0aecb86fa89
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import numpy as np
import pandas as pd
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[2]:
df = pd.read_csv('https://ec.europa.eu/eurostat/databrowser-backend/api/extraction/1.0/LIVE/false/sdmx/csv/UNE_RT_M?compressed=true',
compression='gzip')
# In[3]:
dict_countries = {'AT':'Austria','BE':'Belgium','BG':'Bulgaria','CH':'Switzerland','CY':'Cyprus',
'CZ':'Czechia','DE':'Germany (until 1990 former territory of the FRG)','DK':'Denmark',
'EA':'Euro area (EA11-1999, EA12-2001, EA13-2007, EA...','EA18':'Euro area - 18 countries (2014)',
'EA19':'Euro area - 19 countries (from 2015)','EE':'Estonia','EL':'Greece','ES':'Spain',
'EU25':'European Union - 25 countries (2004-2006)','EU27_2007':'European Union - 27 countries (2007-2013)',
'EU27_2020':'European Union - 27 countries (from 2020)','EU28':'European Union - 28 countries (2013-2020)',
'FI':'Finland','FR':'France','HR':'Croatia','HU':'Hungary','IE':'Ireland','IS':'Iceland',
'IT':'Italy','JP':'Japan','LT':'Lithuania','LU':'Luxembourg','LV':'Latvia','MT':'Malta',
'NL':'Netherlands','NO':'Norway','PL':'Poland','PT':'Portugal','RO':'Romania',
'SE':'Sweden','SI':'Slovenia','SK':'Slovakia','TR':'Turkey','UK':'United Kingdom','US':'United States'}
dict_sex = {'F':'Female', 'M':'Male', 'T':'Total'}
# Replace the country codes and the sex codes with their labels
df['geo'].replace(dict_countries, inplace=True)
df['sex'].replace(dict_sex, inplace=True)
# In[4]:
# Keep only the SA and NSA adjustments and the percentage unit (PC_ACT), not thousands
df = df[(df['s_adj'] != 'TC') & (df['unit'] == 'PC_ACT')]
# In[5]:
# Build the variable name from sex and seasonal adjustment
df['variable'] = df['sex'] + ' - ' + df['s_adj']
# Keep only the relevant columns
df = df[['variable','geo', 'TIME_PERIOD', 'OBS_VALUE']]
# In[6]:
# Pivot the data
df = df.pivot_table(index=['TIME_PERIOD', 'geo'], columns = 'variable', values='OBS_VALUE', aggfunc=np.sum).reset_index()
# In[7]:
# Rename the columns
df.rename(columns = {'TIME_PERIOD':'Date', 'geo':'country'}, inplace=True)
# Convert the date column to datetime
df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m')
df.set_index('Date', inplace=True)
df.rename_axis(None, axis=1, inplace=True)
# In[8]:
# Add the prefix and fix the country column name
df = df.add_prefix('Unemployment Rate - ')
df.rename(columns={'Unemployment Rate - country': 'country'}, inplace=True)
alphacast.datasets.dataset(608).upload_data_from_df(df,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
| 32.574713
| 133
| 0.632322
|
79515e47eb5124f52dec27a29f8af3c5dd927343
| 27,136
|
py
|
Python
|
python/orca/src/bigdl/orca/learn/bigdl/estimator.py
|
sgwhat/BigDL
|
25b402666fbb26b0bc18fc8100e9a00469844778
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/learn/bigdl/estimator.py
|
sgwhat/BigDL
|
25b402666fbb26b0bc18fc8100e9a00469844778
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/learn/bigdl/estimator.py
|
sgwhat/BigDL
|
25b402666fbb26b0bc18fc8100e9a00469844778
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.orca.learn.metrics import Metric
from bigdl.orca.learn.utils import bigdl_metric_results_to_dict
from bigdl.dllib.nnframes import NNEstimator, NNModel
from bigdl.dllib.estimator import Estimator as SparkEstimator
from bigdl.orca.learn.spark_estimator import Estimator as OrcaSparkEstimator
from bigdl.orca.data import SparkXShards
from bigdl.dllib.optim.optimizer import MaxEpoch
from bigdl.dllib.feature.common import FeatureSet
from bigdl.orca.learn.metrics import Accuracy
from pyspark.sql.dataframe import DataFrame
from bigdl.dllib.utils.log4Error import *
class Estimator(object):
@staticmethod
def from_bigdl(*, model, loss=None, optimizer=None, metrics=None,
feature_preprocessing=None, label_preprocessing=None,
model_dir=None):
"""
Construct an Estimator with BigDL model, loss function and Preprocessing for feature and
label data.
:param model: BigDL Model to be trained.
:param loss: BigDL criterion.
:param optimizer: BigDL optimizer.
        :param metrics: An evaluation metric or a list of evaluation metrics.
:param feature_preprocessing: Used when data in `fit` and `predict` is a Spark DataFrame.
The param converts the data in feature column to a Tensor or to a Sample directly.
It expects a List of Int as the size of the converted Tensor, or a Preprocessing[F,
Tensor[T]]
If a List of Int is set as feature_preprocessing, it can only handle the case that
feature column contains the following data types:
Float, Double, Int, Array[Float], Array[Double], Array[Int] and MLlib Vector. The
feature data are converted to Tensors with the specified sizes before
sending to the model. Internally, a SeqToTensor is generated according to the
size, and used as the feature_preprocessing.
Alternatively, user can set feature_preprocessing as Preprocessing[F, Tensor[T]]
that transforms the feature data to a Tensor[T]. Some pre-defined Preprocessing are
provided in package bigdl.dllib.feature. Multiple Preprocessing can be combined as a
ChainedPreprocessing.
The feature_preprocessing will also be copied to the generated NNModel and applied
to feature column during transform.
:param label_preprocessing: Used when data in `fit` and `predict` is a Spark DataFrame.
similar to feature_preprocessing, but applies to Label data.
:param model_dir: The path to save model. During the training, if checkpoint_trigger is
defined and triggered, the model will be saved to model_dir.
:return:
"""
return BigDLEstimator(model=model, loss=loss, optimizer=optimizer,
metrics=metrics,
feature_preprocessing=feature_preprocessing,
label_preprocessing=label_preprocessing, model_dir=model_dir)
class BigDLEstimator(OrcaSparkEstimator):
def __init__(self, *, model, loss, optimizer=None, metrics=None,
feature_preprocessing=None, label_preprocessing=None, model_dir=None):
self.loss = loss
self.optimizer = optimizer
self.metrics = Metric.convert_metrics_list(metrics)
self.feature_preprocessing = feature_preprocessing
self.label_preprocessing = label_preprocessing
self.model_dir = model_dir
self.model = model
self.nn_model = NNModel(self.model, feature_preprocessing=self.feature_preprocessing)
self.nn_estimator = NNEstimator(self.model, self.loss, self.feature_preprocessing,
self.label_preprocessing)
if self.optimizer is None:
from bigdl.dllib.optim.optimizer import SGD
self.optimizer = SGD()
self.nn_estimator.setOptimMethod(self.optimizer)
self.estimator = SparkEstimator(self.model, self.optimizer, self.model_dir)
self.log_dir = None
self.app_name = None
self.is_nnframe_fit = False
def fit(self, data, epochs, batch_size=32, feature_cols="features", label_cols="label",
caching_sample=True, validation_data=None, validation_trigger=None,
checkpoint_trigger=None):
"""
Train this BigDL model with train data.
:param data: train data. It can be XShards or Spark DataFrame.
If data is XShards, each partition is a dictionary of {'x': feature,
'y': label}, where feature(label) is a numpy array or a list of numpy arrays.
:param epochs: Number of epochs to train the model.
:param batch_size: Batch size used for training. Default: 32.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame. Default: "features".
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame.
Default: "label".
:param caching_sample: whether to cache the Samples after preprocessing. Default: True
:param validation_data: Validation data. XShards and Spark DataFrame are supported.
If data is XShards, each partition is a dictionary of {'x': feature,
'y': label}, where feature(label) is a numpy array or a list of numpy arrays.
:param validation_trigger: Orca Trigger to trigger validation computation.
:param checkpoint_trigger: Orca Trigger to set a checkpoint.
:return:
"""
from bigdl.orca.learn.trigger import Trigger
invalidInputError(batch_size > 0, "batch_size should be greater than 0")
if validation_data is not None:
invalidInputError(self.metrics is not None,
"You should provide metrics when creating this estimator"
" if you provide validation_data.")
if isinstance(data, DataFrame):
if isinstance(feature_cols, list):
data, validation_data, feature_cols = \
BigDLEstimator._combine_cols(data, feature_cols, col_name="features",
val_data=validation_data)
if isinstance(label_cols, list):
data, validation_data, label_cols = \
BigDLEstimator._combine_cols(data, label_cols, col_name="label",
val_data=validation_data)
self.nn_estimator.setBatchSize(batch_size).setMaxEpoch(epochs) \
.setCachingSample(caching_sample).setFeaturesCol(feature_cols) \
.setLabelCol(label_cols)
if validation_data is not None:
invalidInputError(isinstance(validation_data, DataFrame),
"validation_data should be a spark DataFrame.")
invalidInputError(validation_trigger is not None,
"You should provide validation_trigger if you provide"
" validation_data.")
validation_trigger = Trigger.convert_trigger(validation_trigger)
self.nn_estimator.setValidation(validation_trigger, validation_data,
self.metrics, batch_size)
if self.log_dir is not None and self.app_name is not None:
from bigdl.dllib.optim.optimizer import TrainSummary
from bigdl.dllib.optim.optimizer import ValidationSummary
train_summary = TrainSummary(log_dir=self.log_dir, app_name=self.app_name)
self.nn_estimator.setTrainSummary(train_summary)
val_summary = ValidationSummary(log_dir=self.log_dir, app_name=self.app_name)
self.nn_estimator.setValidationSummary(val_summary)
if self.model_dir is not None and checkpoint_trigger is not None:
checkpoint_trigger = Trigger.convert_trigger(checkpoint_trigger)
self.nn_estimator.setCheckpoint(self.model_dir, checkpoint_trigger)
self.nn_model = self.nn_estimator.fit(data)
self.is_nnframe_fit = True
elif isinstance(data, SparkXShards):
from bigdl.orca.data.utils import xshard_to_sample
end_trigger = MaxEpoch(epochs)
checkpoint_trigger = Trigger.convert_trigger(checkpoint_trigger)
if isinstance(data, SparkXShards):
train_rdd = data.rdd.flatMap(xshard_to_sample)
train_feature_set = FeatureSet.sample_rdd(train_rdd)
if validation_data is None:
val_feature_set = None
else:
invalidInputError(isinstance(validation_data, SparkXShards),
"validation_data should be a XShards")
val_feature_set = FeatureSet.sample_rdd(
validation_data.rdd.flatMap(xshard_to_sample))
if self.log_dir is not None and self.app_name is not None:
self.estimator.set_tensorboard(self.log_dir, self.app_name)
self.estimator.train(train_feature_set, self.loss, end_trigger, checkpoint_trigger,
val_feature_set, self.metrics, batch_size)
self.is_nnframe_fit = False
else:
invalidInputError(False,
"Data and validation data should be XShards, but get " +
data.__class__.__name__)
else:
invalidInputError(False,
"Data should be XShards or Spark DataFrame, but get " +
data.__class__.__name__)
return self
def predict(self, data, batch_size=4, feature_cols="features", sample_preprocessing=None):
"""
Predict input data
:param data: predict input data. It can be XShards or Spark DataFrame.
If data is XShards, each partition is a dictionary of {'x': feature}, where feature
is a numpy array or a list of numpy arrays.
:param batch_size: Batch size used for inference. Default: 4.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame. Default: "features".
        :param sample_preprocessing: Used when data is a Spark DataFrame. If the user wants to
            change the default feature_preprocessing specified in Estimator.from_bigdl, they can
            pass new sample_preprocessing methods.
:return: predicted result.
If input data is Spark DataFrame, the predict result is a DataFrame which includes
original columns plus 'prediction' column. The 'prediction' column can be
FloatType, VectorUDT or Array of VectorUDT depending on model outputs shape.
If input data is an XShards, the predict result is a XShards, each partition
of the XShards is a dictionary of {'prediction': result}, where result is a numpy
array or a list of numpy arrays.
"""
if isinstance(data, DataFrame):
if isinstance(feature_cols, list):
data, _, feature_cols = \
BigDLEstimator._combine_cols(data, feature_cols, col_name="features")
self.nn_model.setBatchSize(batch_size).setFeaturesCol(feature_cols)
if sample_preprocessing is not None:
self.nn_model.setSamplePreprocessing(sample_preprocessing)
return self.nn_model.transform(data)
elif isinstance(data, SparkXShards):
from bigdl.orca.data.utils import xshard_to_sample
from bigdl.orca.learn.utils import convert_predict_rdd_to_xshard
sample_rdd = data.rdd.flatMap(xshard_to_sample)
result_rdd = self.model.predict(sample_rdd)
return convert_predict_rdd_to_xshard(data, result_rdd)
else:
invalidInputError(False,
"Data should be XShards or Spark DataFrame, but get " +
data.__class__.__name__)
def evaluate(self, data, batch_size=32, feature_cols="features", label_cols="label"):
"""
Evaluate model.
        :param data: validation data. It can be XShards or Spark DataFrame. If data is XShards,
            each partition is a dictionary of {'x': feature, 'y': label}, where feature(label)
            is a numpy array or a list of numpy arrays.
:param batch_size: Batch size used for validation. Default: 32.
:param feature_cols: (Not supported yet) Feature column name(s) of data. Only used when
            data is a Spark DataFrame. Default: "features".
:param label_cols: (Not supported yet) Label column name(s) of data. Only used when data
            is a Spark DataFrame. Default: "label".
:return:
"""
invalidInputError(data is not None, "validation data shouldn't be None")
invalidInputError(self.metrics is not None,
"metrics shouldn't be None, please specify the metrics"
" argument when creating this estimator.")
if isinstance(data, DataFrame):
if isinstance(feature_cols, list):
data, _, feature_cols = \
BigDLEstimator._combine_cols(data, [feature_cols], col_name="features")
if isinstance(label_cols, list):
data, _, label_cols = \
BigDLEstimator._combine_cols(data, label_cols, col_name="label")
self.nn_estimator._setNNBatchSize(batch_size)._setNNFeaturesCol(feature_cols) \
._setNNLabelCol(label_cols)
self.nn_estimator.setValidation(None, None,
self.metrics, batch_size)
if self.log_dir is not None and self.app_name is not None:
from bigdl.dllib.optim.optimizer import TrainSummary
from bigdl.dllib.optim.optimizer import ValidationSummary
val_summary = ValidationSummary(log_dir=self.log_dir, app_name=self.app_name)
self.nn_estimator.setValidationSummary(val_summary)
result = self.nn_estimator._eval(data)
elif isinstance(data, SparkXShards):
from bigdl.orca.data.utils import xshard_to_sample
val_feature_set = FeatureSet.sample_rdd(data.rdd.flatMap(xshard_to_sample))
result = self.estimator.evaluate(val_feature_set, self.metrics, batch_size)
else:
invalidInputError(False,
"Data should be XShards or Spark DataFrame, but get " +
data.__class__.__name__)
return bigdl_metric_results_to_dict(result)
def get_model(self):
"""
Get the trained BigDL model
:return: The trained BigDL model
"""
return self.model
def save(self, model_path):
"""
Save the BigDL model to model_path
:param model_path: path to save the trained model.
:return:
"""
try:
model = self.get_model()
model.saveModel(model_path + ".bigdl", model_path + ".bin", True)
except ValueError:
invalidInputError(False,
"You should fit before calling save")
def load(self, checkpoint, optimizer=None, loss=None, feature_preprocessing=None,
label_preprocessing=None, model_dir=None, is_checkpoint=False):
"""
Load existing BigDL model or checkpoint
:param checkpoint: Path to the existing model or checkpoint.
:param optimizer: BigDL optimizer.
:param loss: BigDL criterion.
:param feature_preprocessing: Used when data in `fit` and `predict` is a Spark DataFrame.
The param converts the data in feature column to a Tensor or to a Sample directly.
It expects a List of Int as the size of the converted Tensor, or a Preprocessing[F,
Tensor[T]]
If a List of Int is set as feature_preprocessing, it can only handle the case that
feature column contains the following data types:
Float, Double, Int, Array[Float], Array[Double], Array[Int] and MLlib Vector. The
feature data are converted to Tensors with the specified sizes before
sending to the model. Internally, a SeqToTensor is generated according to the
size, and used as the feature_preprocessing.
Alternatively, user can set feature_preprocessing as Preprocessing[F, Tensor[T]]
that transforms the feature data to a Tensor[T]. Some pre-defined Preprocessing are
provided in package bigdl.dllib.feature. Multiple Preprocessing can be combined as a
ChainedPreprocessing.
The feature_preprocessing will also be copied to the generated NNModel and applied
to feature column during transform.
:param label_preprocessing: Used when data in `fit` and `predict` is a Spark DataFrame.
similar to feature_preprocessing, but applies to Label data.
:param model_dir: The path to save model. During the training, if checkpoint_trigger is
defined and triggered, the model will be saved to model_dir.
:param is_checkpoint: Whether the path is a checkpoint or a saved BigDL model.
Default: False.
:return: The loaded estimator object.
"""
if loss is not None:
self.loss = loss
if optimizer is not None:
self.optimizer = optimizer
if feature_preprocessing is not None:
self.feature_preprocessing = feature_preprocessing
if label_preprocessing is not None:
self.label_preprocessing = label_preprocessing
if model_dir is not None:
self.model_dir = model_dir
if is_checkpoint:
self.load_orca_checkpoint(checkpoint)
else:
from bigdl.dllib.net import Net
self.model = Net.load_bigdl(checkpoint + ".bigdl", checkpoint + ".bin")
self.nn_estimator = NNEstimator(self.model, self.loss, self.feature_preprocessing,
self.label_preprocessing)
if self.optimizer is None:
from bigdl.dllib.optim.optimizer import SGD
self.optimizer = SGD()
self.nn_estimator.setOptimMethod(self.optimizer)
self.estimator = SparkEstimator(self.model, self.optimizer, self.model_dir)
self.nn_model = NNModel(self.model, feature_preprocessing=self.feature_preprocessing)
return self
def __load_bigdl_model(self, path, bigdl_type="float"):
from bigdl.dllib.utils.common import callBigDlFunc
from bigdl.dllib.nn.layer import Layer
jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
return Layer.of(jmodel)
def load_orca_checkpoint(self, path, version=None, prefix=None):
"""
Load existing checkpoint. To load a specific checkpoint, please provide both `version`
        and `prefix`. If `version` is None, then the latest checkpoint under the specified
directory will be loaded.
:param path: Path to the existing checkpoint (or directory containing Orca checkpoint
files).
:param version: checkpoint version, which is the suffix of model.* file, i.e., for
            model.4 file, the version is 4. If it is None, then load the latest checkpoint.
:param prefix: optimMethod prefix, for example 'optimMethod-Sequentialf53bddcc'
:return:
"""
from bigdl.dllib.nn.layer import Model, Container
from bigdl.dllib.optim.optimizer import OptimMethod
from bigdl.orca.learn.utils import find_latest_checkpoint
import os
if version is None:
path, prefix, version = find_latest_checkpoint(path, model_type="bigdl")
if path is None:
invalidInputError(False,
"Cannot find BigDL checkpoint, please check your checkpoint"
" path.")
else:
invalidInputError(prefix is not None,
"You should provide optimMethod prefix, "
"for example 'optimMethod-TorchModelf53bddcc'")
try:
self.model = self.__load_bigdl_model(os.path.join(path, "model.{}".format(version)))
invalidInputError(isinstance(self.model, Container),
"The loaded model should be a Container, please check your"
" checkpoint type.")
self.optimizer = OptimMethod.load(os.path.join(path,
"{}.{}".format(prefix, version)))
except Exception:
invalidInputError(False,
"Cannot load BigDL checkpoint, please check your checkpoint path "
"and checkpoint type.")
self.estimator = SparkEstimator(self.model, self.optimizer, self.model_dir)
self.nn_estimator = NNEstimator(self.model, self.loss, self.feature_preprocessing,
self.label_preprocessing)
if self.optimizer is not None:
self.nn_estimator.setOptimMethod(self.optimizer)
self.nn_model = NNModel(self.model, feature_preprocessing=self.feature_preprocessing)
def clear_gradient_clipping(self):
"""
Clear gradient clipping parameters. In this case, gradient clipping will not be applied.
In order to take effect, it needs to be called before fit.
:return:
"""
self.nn_estimator.clearGradientClipping()
self.estimator.clear_gradient_clipping()
def set_constant_gradient_clipping(self, min, max):
"""
Set constant gradient clipping during the training process.
In order to take effect, it needs to be called before fit.
:param min: The minimum value to clip by.
:param max: The maximum value to clip by.
:return:
"""
self.nn_estimator.setConstantGradientClipping(min, max)
self.estimator.set_constant_gradient_clipping(min, max)
def set_l2_norm_gradient_clipping(self, clip_norm):
"""
Clip gradient to a maximum L2-Norm during the training process.
In order to take effect, it needs to be called before fit.
:param clip_norm: Gradient L2-Norm threshold.
:return:
"""
self.nn_estimator.setGradientClippingByL2Norm(clip_norm)
self.estimator.set_l2_norm_gradient_clipping(clip_norm)
def get_train_summary(self, tag=None):
"""
Get the scalar from model train summary.
This method will return a list of summary data of
[iteration_number, scalar_value, timestamp].
:param tag: The string variable represents the scalar wanted
"""
# Exception handle
if tag != "Loss" and tag != "LearningRate" and tag != "Throughput":
invalidInputError(False,
'Only "Loss", "LearningRate", "Throughput"'
                              + ' are supported in train summary')
if self.is_nnframe_fit:
train_summary = self.nn_estimator.getTrainSummary()
return train_summary.read_scalar(tag=tag)
else:
return self.estimator.get_train_summary(tag=tag)
def get_validation_summary(self, tag=None):
"""
Get the scalar from model validation summary.
This method will return a list of summary data of
[iteration_number, scalar_value, timestamp].
Note that the metric and tag may not be consistent.
        Please look up the following table for the tag parameter to pass.
Left side is your metric during compile.
Right side is the tag you should pass.
>>> 'Accuracy' | 'Top1Accuracy'
>>> 'BinaryAccuracy' | 'Top1Accuracy'
>>> 'CategoricalAccuracy' | 'Top1Accuracy'
>>> 'SparseCategoricalAccuracy' | 'Top1Accuracy'
>>> 'AUC' | 'AucScore'
>>> 'HitRatio' | 'HitRate@k' (k is Top-k)
>>> 'Loss' | 'Loss'
>>> 'MAE' | 'MAE'
>>> 'NDCG' | 'NDCG'
>>> 'TFValidationMethod' | '${name + " " + valMethod.toString()}'
>>> 'Top5Accuracy' | 'Top5Accuracy'
>>> 'TreeNNAccuracy' | 'TreeNNAccuracy()'
>>> 'MeanAveragePrecision' | 'MAP@k' (k is Top-k) (BigDL)
>>> 'MeanAveragePrecision' | 'PascalMeanAveragePrecision' (Zoo)
>>> 'StatelessMetric' | '${name}'
:param tag: The string variable represents the scalar wanted
"""
if self.is_nnframe_fit:
invalidInputError(tag is not None,
"You should provide tag which should match the name of "
"the ValidationMethod set into the optimizer. "
"e.g.'MAE', 'Top1AccuracyLoss', 'Top1Accuracy' or "
"'Top5Accuracy'.")
val_summary = self.nn_estimator.getValidationSummary()
return val_summary.read_scalar(tag=tag)
else:
return self.estimator.get_validation_summary(tag=tag)
@staticmethod
def _combine_cols(data, cols, col_name="features", val_data=None):
if isinstance(cols, list):
if len(cols) == 1:
col_name = cols[0]
else:
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(
inputCols=cols,
outputCol=col_name)
data = assembler.transform(data)
if val_data is not None:
val_data = assembler.transform(val_data)
return data, val_data, col_name
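A minimal sketch of the intended workflow; the toy model, the Spark DataFrame train_df and its column names are placeholders, not part of this module:
from bigdl.dllib.nn.layer import Sequential, Linear
from bigdl.dllib.nn.criterion import MSECriterion
from bigdl.orca.learn.bigdl.estimator import Estimator
model = Sequential().add(Linear(2, 1))
est = Estimator.from_bigdl(model=model, loss=MSECriterion(),
                           feature_preprocessing=[2], label_preprocessing=[1])
# train_df is assumed to be a Spark DataFrame with columns "f1", "f2" and "label".
est.fit(train_df, epochs=2, batch_size=32,
        feature_cols=["f1", "f2"], label_cols="label")
predictions = est.predict(train_df, feature_cols=["f1", "f2"])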
| 51.103578
| 99
| 0.621315
|
79515e4e562fb7390476bdcffeb32decbf7ee83b
| 1,853
|
py
|
Python
|
behavior_analysis/apps/utils/table_subjects.py
|
Tauffer-Consulting/behavior-analysis
|
2286354d733e1ff947ba9bbc410aa51aaa85340b
|
[
"MIT"
] | null | null | null |
behavior_analysis/apps/utils/table_subjects.py
|
Tauffer-Consulting/behavior-analysis
|
2286354d733e1ff947ba9bbc410aa51aaa85340b
|
[
"MIT"
] | null | null | null |
behavior_analysis/apps/utils/table_subjects.py
|
Tauffer-Consulting/behavior-analysis
|
2286354d733e1ff947ba9bbc410aa51aaa85340b
|
[
"MIT"
] | null | null | null |
from ...models import Experiment, Group, Subject
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, ALL
from dash.dash import no_update
import random
import string
class SubjectsTable(dbc.Table):
def __init__(self, parent_app, subjects, id=None):
super().__init__()
self.parent_app = parent_app
# User chosen or randomly generated unique id
if id is None:
self.id = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
else:
self.id = id
self.bordered = False
self.hover = True
self.responsive = True
self.striped = False
table_header = html.Thead(html.Tr([
html.Th(dbc.Checkbox(id='check-subject-all')),
html.Th("ID"),
html.Th("Experiment"),
html.Th("Group"),
html.Th("Date")
]))
table_body = html.Tbody([])
for sub in subjects:
group = sub.group
# Group.query.filter_by(name=s['group']).first()
row = html.Tr([
html.Td(dbc.Checkbox(id={'type': 'check-subject', 'index': sub.id})),
html.Td(sub.id),
html.Td(group.experiment.name),
html.Td(group.name),
html.Td(str(sub.date.date()))
])
table_body.children.append(row)
# @self.parent_app.callback(
# Output({'type': 'check-subject', 'index': 2}, 'checked'),
# [Input('check-subject-all', 'checked')],
# )
# def toggle_check_all(chk):
# return no_update
self.children = [
table_header,
table_body
]
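A hypothetical way to place this table in a layout; the Dash app object and the Subject query are assumptions about the surrounding application:
subjects = Subject.query.order_by(Subject.date.desc()).all()
table = SubjectsTable(parent_app=app, subjects=subjects, id='subjects-table')
layout = html.Div([table])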
| 31.40678
| 117
| 0.562331
|
79515e70ffeb688736deadf3743e6e19f3b7b77d
| 10,701
|
py
|
Python
|
filter/filter.py
|
CRAWLZSTAGE/infra
|
d0f4d60a63cabf424f76dfbeba33dd64b48c7c98
|
[
"MIT"
] | null | null | null |
filter/filter.py
|
CRAWLZSTAGE/infra
|
d0f4d60a63cabf424f76dfbeba33dd64b48c7c98
|
[
"MIT"
] | null | null | null |
filter/filter.py
|
CRAWLZSTAGE/infra
|
d0f4d60a63cabf424f76dfbeba33dd64b48c7c98
|
[
"MIT"
] | null | null | null |
import os, sys
import pika
import json
import time
import traceback
from peewee import *
from datetime import datetime
MQTT_HOST = os.environ.get('MQTT_HOST')
MQTT_USER = os.environ.get('MQTT_USER')
MQTT_PASSWORD = os.environ.get('MQTT_PASSWORD')
DB_HOST = os.environ.get('DB_HOST')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_USER = os.environ.get('DB_USER')
DB_NAME = os.environ.get('DB_NAME')
RECORD_TIMEOUT = int(os.environ.get('RECORD_TIMEOUT'))
"""In seconds; the comparisons below expect an integer number of seconds."""
import sys
import signal
def handler(signum, frame):
sys.exit(1)
signal.signal(signal.SIGTERM, handler)
"""
PSQL ORM courtesy of PeeWee
No need for schema.sql since PeeWee can take care of this for us!
"""
psql_db = PostgresqlDatabase(DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
class BaseModel(Model):
class Meta:
database = psql_db
class Record_LinkedIn(BaseModel):
url = CharField(primary_key=True)
    last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Fb(BaseModel):
fb_id = CharField(primary_key=True)
    last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Fsquare(BaseModel):
fsquare_id = CharField(primary_key=True)
    last_accessed = DateTimeField(default=datetime.utcnow)
class Record_Google(BaseModel):
google_id = CharField(primary_key=True)
    last_accessed = DateTimeField(default=datetime.utcnow)
while True:
try:
psql_db.connect()
break
except Exception:
time.sleep(5)
if not Record_LinkedIn.table_exists():
Record_LinkedIn.create_table()
if not Record_Fb.table_exists():
Record_Fb.create_table()
if not Record_Fsquare.table_exists():
Record_Fsquare.create_table()
if not Record_Google.table_exists():
Record_Google.create_table()
"""
RabbitMQ support courtesy of Pika
"""
while True:
try:
_credentials = pika.PlainCredentials(MQTT_USER, MQTT_PASSWORD)
mqtt_connection = pika.BlockingConnection(pika.ConnectionParameters(host=MQTT_HOST, credentials=_credentials))
break
except Exception:
time.sleep(5)
pqdata = dict()
pqdata['x-max-priority'] = 5
ingress_channel = mqtt_connection.channel()
ingress_channel.exchange_declare(exchange='admin', type='fanout')
ingress_channel.queue_declare(queue='filter', durable=True, arguments=pqdata)
admin_queue = ingress_channel.queue_declare(arguments=pqdata)
ingress_channel.queue_bind(exchange="admin", queue=admin_queue.method.queue)
egress_channel = mqtt_connection.channel()
egress_channel.queue_declare(queue='fetch', durable=True, arguments=pqdata)
"""
Selectors
"""
def retrieve_Fb(facebook_id):
return Record_Fb.select().where(Record_Fb.fb_id == facebook_id).get()
def seen_fb(facebook_id):
try:
retrieve_Fb(facebook_id)
return True
except Exception:
return False
def retrieve_LinkedIn(website):
return Record_LinkedIn.select().where(Record_LinkedIn.url == website).get()
def seen_website(website):
"""
TODO: test this!
"""
try:
retrieve_LinkedIn(website)
return True
except Exception:
return False
def retrieve_Fsquare(foursquare_id):
return Record_Fsquare.select().where(Record_Fsquare.fsquare_id == foursquare_id).get()
def seen_fsquare(foursquare_id):
try:
retrieve_Fsquare(foursquare_id)
return True
except Exception:
return False
def retrieve_Google(google_id):
return Record_Google.select().where(Record_Google.google_id == google_id).get()
def seen_google(google_id):
try:
retrieve_Google(google_id)
return True
except Exception:
return False
"""
Message Handling
"""
def seen_fb_time_ago(lead):
if (datetime.utcnow() - retrieve_Fb(lead).last_accessed).seconds > RECORD_TIMEOUT:
return True
return False
def seen_linkedin_time_ago(lead):
if (datetime.utcnow() - retrieve_LinkedIn(lead).last_accessed).seconds > RECORD_TIMEOUT:
return True
return False
def seen_fsquare_time_ago(lead):
if (datetime.utcnow() - retrieve_Fsquare(lead).last_accessed).seconds > RECORD_TIMEOUT:
return True
return False
def seen_google_time_ago(lead):
if (datetime.utcnow() - retrieve_Google(lead).last_accessed).seconds > RECORD_TIMEOUT:
return True
return False
def callback(ch, method, properties, body):
try:
raw_data = json.loads(body)
        if "potential_leads" not in raw_data or "protocol" not in raw_data or "depth" not in raw_data:
if raw_data.has_key("delete") and raw_data.has_key("resource_locator") and raw_data.has_key("protocol"):
if raw_data["protocol"] == "fb":
"""
sys.stderr.write("Deleted: " + str(raw_data["resource_locator"]) + "\n")
sys.stderr.flush()
"""
if seen_fb(raw_data["resource_locator"]):
retrieve_Fb(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "linkedin":
if seen_website(raw_data["resource_locator"]):
retrieve_LinkedIn(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "fsquare":
if seen_fsquare(raw_data["resource_locator"]):
retrieve_Fsquare(raw_data["resource_locator"]).delete_instance()
return
if raw_data["protocol"] == "google":
if seen_google(raw_data["resource_locator"]):
retrieve_Google(raw_data["resource_locator"]).delete_instance()
return
raise Exception("Unknown protocol requested during deletion")
else:
raise Exception("Body malformed")
potential_leads = raw_data["potential_leads"]
protocol = raw_data["protocol"]
for lead in potential_leads:
try:
if protocol == "fb":
if not seen_fb(lead):
newRecord = Record_Fb(fb_id=lead, last_accessed = datetime.utcnow())
newRecord.save(force_insert=True)
"""
TODO: Handle elif difference
"""
elif seen_fb_time_ago(lead):
                        Record_Fb.update(last_accessed=datetime.utcnow()).where(Record_Fb.fb_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
#return
continue
if protocol == "linkedin":
if not seen_website(lead):
newRecord = Record_LinkedIn(url=lead, last_accessed = datetime.utcnow())
newRecord.save(force_insert=True)
"""
TODO: Handle elif difference
"""
elif seen_linkedin_time_ago(lead):
                        Record_LinkedIn.update(last_accessed=datetime.utcnow()).where(Record_LinkedIn.url == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
                        continue  # skip this lead, consistent with the other protocols
if protocol == "fsquare":
if not seen_fsquare(lead):
newRecord = Record_Fsquare(fsquare_id=lead, last_accessed= datetime.utcnow())
newRecord.save(force_insert=True)
elif seen_fsquare_time_ago(lead):
                        Record_Fsquare.update(last_accessed=datetime.utcnow()).where(Record_Fsquare.fsquare_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
continue
#return
if protocol == "google":
if not seen_google(lead):
newRecord = Record_Google(google_id=lead, last_accessed= datetime.utcnow())
newRecord.save(force_insert=True)
elif seen_google_time_ago(lead):
                        Record_Google.update(last_accessed=datetime.utcnow()).where(Record_Google.google_id == lead).execute()
sys.stderr.write("Updating: \n" + lead + "\n")
sys.stderr.flush()
else:
# we go on to the next lead if we see a familiar lead
# and if that familiar lead is not due for an update
continue
except Exception as e:
try:
sys.stderr.write("Attempting to rollback db: \n" + str(e) + "\n")
psql_db.rollback()
except Exception as e:
sys.stderr.write("DB connection is messed up: \n" + str(e) + "\n")
psql_db.close()
psql_db.connect()
fetch_data = {"protocol": raw_data["protocol"], "resource_locator": lead, "depth": raw_data["depth"]}
egress_channel.basic_publish(
exchange='',
routing_key='fetch',
body=json.dumps(fetch_data),
properties=pika.BasicProperties(
delivery_mode = 1,
priority = int(raw_data["priority"]) if raw_data.has_key("priority") else 0 # default priority
)
)
except Exception as e:
sys.stderr.write(str(e) + "Unable to filter: \n" + body + "\n")
traceback.print_exc()
try:
psql_db.rollback()
except:
psql_db.close()
psql_db.connect()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
def admin_callback(ch, method, properties, body):
try:
data = json.loads(body)
return
except Exception as e:
sys.stderr.write(str(e) + "Unable to fetch: \n" + body + "\n")
traceback.print_exc()
sys.stderr.flush()
finally:
ingress_channel.basic_ack(delivery_tag = method.delivery_tag)
ingress_channel.basic_qos(prefetch_count=1)
ingress_channel.basic_consume(callback, queue='filter')
ingress_channel.basic_consume(admin_callback, queue=admin_queue.method.queue)
ingress_channel.start_consuming()
| 34.298077
| 122
| 0.59387
|
79515f4ed96d0b75eedf24bf6631955c7498b999
| 1,608
|
py
|
Python
|
byceps/services/ticketing/dbmodels/archived_attendance.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 23
|
2015-08-03T23:28:54.000Z
|
2018-12-12T20:11:45.000Z
|
byceps/services/ticketing/dbmodels/archived_attendance.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 1
|
2018-09-30T18:18:24.000Z
|
2018-09-30T18:18:24.000Z
|
byceps/services/ticketing/dbmodels/archived_attendance.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T16:41:36.000Z
|
2018-09-25T11:17:31.000Z
|
"""
byceps.services.ticketing.dbmodels.archived_attendance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from ....database import db
from ....typing import PartyID, UserID
from ....util.instances import ReprBuilder
class ArchivedAttendance(db.Model):
"""A user's attendance of a party.
This is a link between a party and a user that attended it.
While such a link is usually established through a ticket for a
party that is assigned to a user, this entity was introduced for
legacy data for which no information on tickets, orders, seating
areas and so on exists anymore (or should not be migrated).
The data for this entity is expected to be inserted from the
outside. BYCEPS itself currently does not write any archived
attendances (but incorporates them to be displayed on user
profiles).
"""
__tablename__ = 'user_archived_party_attendances'
user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True)
party_id = db.Column(db.UnicodeText, db.ForeignKey('parties.id'), primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
def __init__(self, user_id: UserID, party_id: PartyID) -> None:
self.user_id = user_id
self.party_id = party_id
def __repr__(self) -> str:
return ReprBuilder(self) \
.add('user_id', str(self.user_id)) \
.add('party_id', self.party_id) \
.build()
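# A minimal insertion sketch (illustrative only, not part of this module).
# It assumes the SQLAlchemy-style session exposed by this project's ``db``
# object and pre-existing user/party rows:
#
#     attendance = ArchivedAttendance(user_id, party_id)
#     db.session.add(attendance)
#     db.session.commit()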
| 34.212766
| 87
| 0.682836
|
795160b511a4ad501525fe77bb37ed063e487a72
| 3,116
|
py
|
Python
|
src/graphql/utilities/value_from_ast_untyped.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | 590
|
2015-10-06T18:22:49.000Z
|
2022-03-22T16:32:17.000Z
|
src/graphql/utilities/value_from_ast_untyped.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | 300
|
2015-10-06T18:58:11.000Z
|
2022-03-22T14:01:44.000Z
|
src/graphql/utilities/value_from_ast_untyped.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | 270
|
2015-10-08T19:47:38.000Z
|
2022-03-10T04:17:51.000Z
|
from math import nan
from typing import Any, Callable, Dict, Optional, Union
from ..language import (
ValueNode,
BooleanValueNode,
EnumValueNode,
FloatValueNode,
IntValueNode,
ListValueNode,
NullValueNode,
ObjectValueNode,
StringValueNode,
VariableNode,
)
from ..pyutils import inspect, Undefined
__all__ = ["value_from_ast_untyped"]
def value_from_ast_untyped(
value_node: ValueNode, variables: Optional[Dict[str, Any]] = None
) -> Any:
"""Produce a Python value given a GraphQL Value AST.
Unlike :func:`~graphql.utilities.value_from_ast`, no type is provided.
The resulting Python value will reflect the provided GraphQL value AST.
=================== ============== ================
GraphQL Value JSON Value Python Value
=================== ============== ================
Input Object Object dict
List Array list
Boolean Boolean bool
String / Enum String str
Int / Float Number int / float
Null null None
=================== ============== ================
"""
func = _value_from_kind_functions.get(value_node.kind)
if func:
return func(value_node, variables)
# Not reachable. All possible value nodes have been considered.
raise TypeError( # pragma: no cover
f"Unexpected value node: {inspect(value_node)}."
)
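# A minimal usage sketch (not part of this module; assumes graphql-core's
# public ``parse_value`` helper to build a value AST from a string):
#
#     from graphql.language import parse_value
#
#     ast = parse_value('{list: [1, 2.5, $flag], name: "x"}')
#     value_from_ast_untyped(ast, {"flag": True})
#     # -> {'list': [1, 2.5, True], 'name': 'x'}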
def value_from_null(_value_node: NullValueNode, _variables: Any) -> Any:
return None
def value_from_int(value_node: IntValueNode, _variables: Any) -> Any:
try:
return int(value_node.value)
except ValueError:
return nan
def value_from_float(value_node: FloatValueNode, _variables: Any) -> Any:
try:
return float(value_node.value)
except ValueError:
return nan
def value_from_string(
value_node: Union[BooleanValueNode, EnumValueNode, StringValueNode], _variables: Any
) -> Any:
return value_node.value
def value_from_list(
value_node: ListValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
return [value_from_ast_untyped(node, variables) for node in value_node.values]
def value_from_object(
value_node: ObjectValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
return {
field.name.value: value_from_ast_untyped(field.value, variables)
for field in value_node.fields
}
def value_from_variable(
value_node: VariableNode, variables: Optional[Dict[str, Any]]
) -> Any:
variable_name = value_node.name.value
if not variables:
return Undefined
return variables.get(variable_name, Undefined)
_value_from_kind_functions: Dict[str, Callable] = {
"null_value": value_from_null,
"int_value": value_from_int,
"float_value": value_from_float,
"string_value": value_from_string,
"enum_value": value_from_string,
"boolean_value": value_from_string,
"list_value": value_from_list,
"object_value": value_from_object,
"variable": value_from_variable,
}
| 28.072072
| 88
| 0.64249
|
795161216b55a848c6544b630313a9a7c9ec5eed
| 3,780
|
py
|
Python
|
clients/python/openapi_client/api/search_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
clients/python/openapi_client/api/search_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
clients/python/openapi_client/api/search_api.py
|
Soluto/tweek-openapi-clients
|
feee32006743ea4bb815f2608bd95950439388c3
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Tweek
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
class SearchApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def search(self, **kwargs): # noqa: E501
"""search # noqa: E501
Search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_with_http_info(**kwargs) # noqa: E501
return data
def search_with_http_info(self, **kwargs): # noqa: E501
"""search # noqa: E501
Search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/search', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 30.983607
| 124
| 0.607937
|
795163f7738e16cd3c27c8c3f0469c4324e77114
| 81
|
py
|
Python
|
chobo/week3/10818.py
|
Kwak-JunYoung/154Algoritm-5weeks
|
fa18ae5f68a1ee722a30a05309214247f7fbfda4
|
[
"MIT"
] | 3
|
2022-01-24T03:06:32.000Z
|
2022-01-30T08:43:58.000Z
|
chobo/week3/10818.py
|
Kwak-JunYoung/154Algoritm-5weeks
|
fa18ae5f68a1ee722a30a05309214247f7fbfda4
|
[
"MIT"
] | null | null | null |
chobo/week3/10818.py
|
Kwak-JunYoung/154Algoritm-5weeks
|
fa18ae5f68a1ee722a30a05309214247f7fbfda4
|
[
"MIT"
] | 2
|
2022-01-24T02:27:40.000Z
|
2022-01-30T08:57:03.000Z
|
input()
nums = list(map(int, input().split()))
print(f'{min(nums)} {max(nums)}')
| 20.25
| 38
| 0.604938
|
7951641b6f30250d91b693fda49f1bfd1072509d
| 1,459
|
py
|
Python
|
allennlp/common/testing/test_case.py
|
craigbig/allennlp
|
3f15a8bdcae366f3ef732eec1e9df26d91521582
|
[
"Apache-2.0"
] | 1
|
2020-02-24T10:21:37.000Z
|
2020-02-24T10:21:37.000Z
|
allennlp/common/testing/test_case.py
|
craigbig/allennlp
|
3f15a8bdcae366f3ef732eec1e9df26d91521582
|
[
"Apache-2.0"
] | null | null | null |
allennlp/common/testing/test_case.py
|
craigbig/allennlp
|
3f15a8bdcae366f3ef732eec1e9df26d91521582
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import pathlib
import shutil
import tempfile
from unittest import TestCase
from allennlp.common.checks import log_pytorch_version_info
TEST_DIR = tempfile.mkdtemp(prefix="allennlp_tests")
class AllenNlpTestCase(TestCase):
"""
A custom subclass of `unittest.TestCase` that disables some of the more verbose AllenNLP
logging and that creates and destroys a temp directory as a test fixture.
"""
PROJECT_ROOT = (pathlib.Path(__file__).parent / ".." / ".." / "..").resolve()
MODULE_ROOT = PROJECT_ROOT / "allennlp"
TOOLS_ROOT = MODULE_ROOT / "tools"
TESTS_ROOT = MODULE_ROOT / "tests"
FIXTURES_ROOT = TESTS_ROOT / "fixtures"
def setUp(self):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
# Disabling some of the more verbose logging statements that typically aren't very helpful
# in tests.
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
logging.getLogger("urllib3.connectionpool").disabled = True
log_pytorch_version_info()
self.TEST_DIR = pathlib.Path(TEST_DIR)
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
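# A minimal usage sketch (hypothetical test module, not shipped with AllenNLP):
# subclassing gives the test quiet logging plus a scratch directory in
# ``self.TEST_DIR`` that is deleted again in ``tearDown``.
#
#     class MyComponentTest(AllenNlpTestCase):
#         def test_writes_scratch_file(self):
#             path = self.TEST_DIR / "output.txt"
#             path.write_text("ok")
#             assert path.read_text() == "ok"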
| 33.930233
| 98
| 0.697738
|
795164b26388a35bad25c9f4a102944a758b997a
| 18,797
|
py
|
Python
|
consolidation.py
|
frc1678/server-2019
|
5ccf17945cbed9dd9587d55349a59a73ed06063c
|
[
"MIT"
] | 2
|
2019-07-31T14:18:11.000Z
|
2019-11-19T14:16:36.000Z
|
consolidation.py
|
frc1678/server-2019
|
5ccf17945cbed9dd9587d55349a59a73ed06063c
|
[
"MIT"
] | null | null | null |
consolidation.py
|
frc1678/server-2019
|
5ccf17945cbed9dd9587d55349a59a73ed06063c
|
[
"MIT"
] | 3
|
2019-07-30T21:57:21.000Z
|
2020-01-01T16:03:06.000Z
|
"""Functions that are used for the consolidation of tempTIMDs into one TIMD.
The main function in this file is consolidate_temp_timds(), which is
called in calculate_timd.py."""
# External imports
import collections
import numpy as np
# No internal imports
def consolidate_times(times):
"""Takes in multiple time options and consolidates them into one time.
times is a dictionary of each scout to their respective time value."""
times = {scout: time for scout, time in times.items() if time is not None}
# Creates a list of the times in the form of floats instead of their
# tempTIMD format of strings. Does this in order to use them for
# calculations.
float_list = [float(time) for time in times.values()]
# Finds the mean and standard deviation for the array of floats,
# these metrics are used for the reciprocal z-scores used later on.
mean = np.mean(float_list)
std = np.std(float_list)
# If the standard deviation is zero, all the times are the same, so
# it just returns the mean.
if std == 0:
return round(mean, 1)
    # If one of the float_list items is equal to the mean, its
    # weight would be undefined, so we can just return the mean.
if mean in float_list:
return mean
# Creates a list of tuples with the first item as the time and the
# second item as the weight (squared reciprocal of the z-score for
# each time). These values are how much each time is weighted when
    # calculating the final weighted average. The lower a time's weight on
    # this list, the farther that time is from the mean, and the less it
    # counts towards the final average.
reciprocal_zscores = [(number, (1 / ((mean - number) / std)) ** 2)
for number in float_list]
# Multiplies each time by its corresponding reciprocal z-score
# value, creating a weighted time.
weighted_times = [number * zscore_weight for number, zscore_weight
in reciprocal_zscores]
# Adds up all the weighted times and divides it by the sum of the
    # reciprocal z-score weights. Does this in order to get a reasonable
    # time; without this normalization step the weighted time makes no sense.
weighted_average = sum(weighted_times) / sum([zscore[1] for zscore \
in reciprocal_zscores])
# Formats each average to a float with one decimal place.
return round(weighted_average, 1)
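# A worked sketch of the weighting above, with invented times (not real match
# data): for {'a': '10.0', 'b': '11.0', 'c': '14.0'} the mean is ~11.67 and the
# standard deviation is ~1.70. The squared reciprocal z-scores come out to
# roughly 1.04 for 10.0, 6.50 for 11.0 and 0.53 for 14.0, so 11.0 dominates and
# consolidate_times returns 11.1.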
def convert_float_time(time):
"""Converts a time from a string to a float.
time is the time that needs to be converted."""
# If an asterisk is in the time, the time is in the wrong time
# period. If the asterisk time is in teleop, the time period is
# supposed to be in sandstorm, so it sets the time to the lowest
# time in sandstorm, and vice versa for when the time is in
# sandstorm.
if '*' in time:
if float(time[:-1]) >= 135.1:
return 135.0
else:
return 135.1
else:
return float(time)
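# Illustrative inputs and outputs for the asterisk handling above:
#   convert_float_time('72.3')   -> 72.3   (no asterisk, parsed as-is)
#   convert_float_time('140.2*') -> 135.0  (flagged time >= 135.1, clamped down)
#   convert_float_time('120.5*') -> 135.1  (flagged time < 135.1, clamped up)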
def max_occurrences(comparison_list, sprking):
"""Takes in a dictionary of scouts to their value and returns the majority.
If there is no clear winner, returns the value for the best spr
scout.
comparison_list is a dictionary of each of the scouts to their input
on a specific decision (value for a data field, amount of actions,
etc).
sprking is the scout with the best spr out of the scouts, used if
there is no clear majority."""
# If the sprking is not part of the comparison_list, another scout
# is randomly selected.
if sprking not in list(comparison_list.keys()):
correct_scout = list(comparison_list.keys())[-1]
else:
correct_scout = sprking
    # Maps each item in the list to how many times it appeared in the list.
# Uses the collections module to count how many appearances each
# item has in the list.
occurence_list = dict(collections.Counter(comparison_list.values()))
# Handling for an empty occurrence list.
if len(occurence_list.values()) == 0:
return None
# If the highest occurrence on the occurrence list is the same as
# the lowest occurrence, the correct value for the data point is
# the value output by the scout with the best spr. This triggers
# both when all the scout values are the same (The max and min
# would both be three) and when all the scout values are
# different (The max and min are both 1). In the case of any
# other scenario, the max is trusted because it would suggest
# the max is the 2 in a 2 scout versus 1 split decision.
elif max(occurence_list.values()) == min(occurence_list.values()):
return comparison_list[correct_scout]
else:
return max(occurence_list, key=occurence_list.get)
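# Illustrative behavior of max_occurrences, with made-up scout names:
#   {'a': 3, 'b': 3, 'c': 2}, sprking='c' -> 3  (two-vs-one majority wins)
#   {'a': 3, 'b': 2, 'c': 1}, sprking='c' -> 1  (no majority, best-SPR scout wins)
#   {'a': 3, 'b': 3, 'c': 3}, sprking='c' -> 3  (all agree, max == min, sprking value used)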
def consolidate_timeline_action(temp_timd_timelines, action_type, sprking):
"""Takes an action type out of timelines and consolidates it separately.
Returns a consolidated timeline only made up of the action type that
was passed as action_type.
input_timelines is the dictionary of the scouts to their specific
timelines.
action_type is the action type that the function is consolidating.
sprking is the scout with the best spr out of the scouts, used when
max_occurrences is called."""
# The dictionary of three timelines with only the types specified
# in the function.
simplified_timelines = {scout: [] for scout in temp_timd_timelines.keys()}
# Takes the three different timelines and cuts out any types of
# data points which are not the specified types.
for scout, timeline in temp_timd_timelines.items():
for action in timeline:
if action.get('type') == action_type:
simplified_timelines[scout].append(action)
# For each action in each scouts list of actions, the time is
# converted from a string to a float.
for scout, simplified_timeline in simplified_timelines.items():
for action in simplified_timeline:
action['time'] = convert_float_time(action['time'])
    # Maps each scout to the number of actions of the specified type in
    # their timeline.
count_timelines = {scout: len(timeline) for
scout, timeline in simplified_timelines.items()}
# Finds the majority amount of actions in the timeline to see
# which amount of actions is the correct amount.
majority_length = max_occurrences(count_timelines, sprking)
# Creates a dictionary of scouts to their timelines which follow the
# majority length of timeline.
correct_length_timelines = {}
for scout, timeline_length in count_timelines.items():
if timeline_length == majority_length:
correct_length_timelines[scout] = simplified_timelines[scout]
# If there are scouts that don't agree with the majority timeline,
# creates a time_reference to line up against.
time_reference = {}
if sprking in correct_length_timelines.keys():
correct_scout = sprking
else:
correct_scout = list(correct_length_timelines.keys())[-1]
reference_timeline = correct_length_timelines[correct_scout]
time_reference[correct_scout] = [action['time'] for action in
reference_timeline]
# If there are scouts that do not agree with the correct timeline
# length, find out which of their action times agree with the time
# reference the best, and line it up against the reference in the
# correct_length_timelines dictionary.
for scout in simplified_timelines.keys():
if scout not in correct_length_timelines.keys():
correct_length_timelines[scout] = [{} for action in
range(majority_length)]
# In order to find the best option for timings, it sets
# up a matrix of time differences between each action in
# each tempTIMD.
timings = np.zeros((len(simplified_timelines[scout]),
majority_length))
for false_index, false_action in \
enumerate(simplified_timelines[scout]):
for comparison_index, comparison_action in \
enumerate(list(time_reference.values())[0]):
timings[false_index][comparison_index] = \
abs(float(comparison_action) -
float(false_action['time']))
# Once the matrix of timing differences has been
# created, the lowest difference is used to line up the
# incorrect length timeline with the correct length
# timeline. To avoid one action being compared with multiple
# other actions, all other instances of the action (The row
# and column) are set to 200 to signify that it has been
# used. 200 is used because it is higher than any possible
# time difference.
if timings.size > 0:
# The loop runs until there are no more time differences
# in the matrix less than 200.
while timings.min() < 200:
# lowest_index is in the format of ([y coordinate],
# [x coordinate]), which requires lowest_index[1][0]
# to get the x coordinate, and lowest_index[0][0]
# for the y coordinate.
lowest_index = np.where(timings == timings.min())
correct_length_timelines[scout][lowest_index[1][0]] = \
simplified_timelines[scout][lowest_index[0][0]]
timings[int(lowest_index[0][0])] = \
np.full([1, len(timings[0])], 200)
for row in range(len(timings)):
timings[row][int(lowest_index[1][0])] = 200
final_simplified_timd = [{} for action in range(majority_length)]
# Iterates through the sprking's timeline to compare all the actions.
# If the majority 'type' for the action is None, the majority of
# scouts did not record this action, and this action should not
# appear in the consolidated TIMD.
for action_index, action in enumerate(correct_length_timelines[sprking]):
comparison_dict = {scout: timeline[action_index] for scout,
timeline in correct_length_timelines.items()}
types = {scout: action.get('type') for scout, action in
comparison_dict.items()}
if max_occurrences(types, sprking) is None:
# Skips current iteration
continue
# Deletes scouts that did not record this action.
for scout in list(comparison_dict):
if comparison_dict[scout] == {}:
comparison_dict.pop(scout)
# All of the possible keys for a tempTIMD for this action.
keys = set()
for action in comparison_dict.values():
for key in action.keys():
keys.add(key)
for key in keys:
# For every key that isn't time, which can't realistically
# have a majority, the majority opinion is set to the final
# timd.
scout_to_keys = {scout: action.get(key) for scout,
action in comparison_dict.items()}
if key == 'time':
# If the key is time, finds the correct time using the
# consolidate_times algorithm.
final_simplified_timd[action_index]['time'] = \
consolidate_times(scout_to_keys)
else:
# For every key in the dictionary other than time, it just
# takes the majority value for the key.
final_simplified_timd[action_index][key] = \
max_occurrences(scout_to_keys, sprking)
# Returns the final created timeline
return final_simplified_timd
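# A small worked example of the alignment matrix above, with invented times:
# if the reference timeline has actions at [101.0, 75.0, 49.0] and a
# disagreeing scout only recorded actions at [100.0, 50.0], the matrix of
# absolute time differences is
#     [[ 1.0, 25.0, 51.0],
#      [51.0, 25.0,  1.0]]
# The two minima (1.0 at (0, 0) and 1.0 at (1, 2)) pair the scout's first
# action with reference slot 0 and the second with slot 2; slot 1 stays {}
# for that scout and is simply ignored when that slot is consolidated.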
def climb_consolidation(input_timelines, sprking):
"""Takes climb out of the timelines of the tempTIMDs and consolidates it.
Returns a timeline only with climb inside it to add to the final
timeline for the timd.
input_timelines is the dictionary of the scouts to their specific
timelines.
sprking is the scout with the best spr out of the scouts, used when
max_occurrences is called. More info in the docstring for
max_occurrences.
"""
# Scout name to climb dictionary.
simplified_timelines = {}
# Fills in 'simplified_timelines' with the scout and the climb
# dictionary from the three tempTIMDs.
for scout, timeline in input_timelines.items():
for action in timeline:
if action.get('type') == 'climb':
simplified_timelines[scout] = action
# Returns None if no climb was recorded.
if simplified_timelines == {}:
return None
final_simplified_timd = {'type': 'climb', 'attempted': {}, 'actual': {}}
# Consolidates time first
final_simplified_timd['time'] = consolidate_times({
scout: convert_float_time(climb['time']) for scout,
climb in simplified_timelines.items()})
for key in ['attempted', 'actual']:
for robot in ['self', 'robot1', 'robot2']:
final_simplified_timd[key][robot] = max_occurrences({
scout: climb[key][robot] for scout, climb in
simplified_timelines.items()}, sprking)
# Returns the final created timeline
return final_simplified_timd
def consolidate_temp_timds(temp_timds):
"""Consolidates between 1-3 temp_timds into one final timd.
This is the main function of consolidation.py, and is called by
calculate_timd.py.
temp_timds is a dictionary with scout names as keys and their
respective tempTIMD as a value.
"""
# 'sprking' is the scout with the best (lowest) SPR
#TODO: Implement spr system
sprking = list(temp_timds.keys())[0]
final_timd = {}
# Iterates through the keys of the best scout's tempTIMD and
# consolidates each data_field one at a time.
for data_field in list(temp_timds[sprking]):
if data_field == 'timeline':
# In order to compute the timeline properly, it is split
# into a list of the timelines.
timelines = {}
for scout, temp_timd in temp_timds.items():
temp_timeline = temp_timd.get('timeline', [])
timelines[scout] = temp_timeline
# If the list of timelines only includes one timeline, that
# timeline is taken as the correct one and put into the
# final TIMD.
if len(timelines.values()) == 1:
# Converts all times to floats and removes asterisk to
# put it into the format of a timd.
final_timeline = []
for action in timelines[sprking]:
action_time = action.get('time')
# Takes the time before the asterisk, if there is no
# asterisk, .split() still returns a list, a list of
# only the time, meaning [0] works in both
# instances.
action['time'] = float(action_time.split('*')[0])
final_timd['timeline'] = timelines[sprking]
# If the list has more than one tempTIMD, the process for
# computation has to be split up by each of the types of
# actions in the timeline.
else:
# Creates the final timeline which is passed as the
# timeline for the final timd at the end of
# consolidation.
final_timeline = []
# Separates all the basic actions out and consolidates
# them one at a time. All the actions are consolidated
# separately so that the timings on each action are
# split apart, making it more easy to line up, identify,
# and consolidate the timeline.
for action_type in ['pinningFoul', 'incap', 'unincap', \
'drop', 'startDefense', 'endDefense', \
'placement', 'intake']:
final_timeline += consolidate_timeline_action(
timelines, action_type, sprking)
# Also consolidates climb separately in order to
# separate it from intakes and placements. Climb needs a
# separate function because of its relatively strange
# structure.
climb = climb_consolidation(timelines, sprking)
if climb is not None:
final_timeline.append(climb)
# Deletes any blank actions.
final_timeline = [action for action in final_timeline if
action != {}]
# Once the timeline is finally completed, it is sorted
# by time, and added to the final timd.
final_timd['timeline'] = sorted(final_timeline, \
key=lambda action: action['time'], reverse=True)
# When consolidating non-timed keys, it is easy to consolidate
# them, as you can simply find which value is the most common in
# the set of three possibilities. The other data_fields that
# are not included in this set, such as timerStarted, are scout
# diagnostics, and not included in the final TIMD.
elif data_field not in ['timeline', 'timerStarted',
'currentCycle', 'scoutID', 'scoutName',
'appVersion', 'assignmentMode',
'assignmentFileTimestamp',
'matchesNotScouted']:
# Creates a dictionary of each scout to the key from their
# tempTIMD to compare against each other. (Code note - This
# code is using .get and not simply referencing the key out
# of the dictionary because .get doesn't error out when the
# key doesn't exist. Instead, .get returns NoneType).
data_field_comparison_list = {}
for scout, temp_timd in temp_timds.items():
temp_data_field = temp_timd.get(data_field)
if temp_data_field is not None:
data_field_comparison_list[scout] = temp_data_field
# Uses the max_occurrences function to find the correct value
# for the data field.
data_occurence_max = max_occurrences(
data_field_comparison_list, sprking)
final_timd[data_field] = data_occurence_max
return final_timd
| 45.293976
| 79
| 0.63026
|
795164e9b019d5e0233e60502428b4c2cb401ddf
| 4,647
|
py
|
Python
|
scripts/scrape_cgc.py
|
eklipse2009/ZX-Pokemaster
|
113bf2e242347b475cca9eadbae4f1b67f498466
|
[
"MIT"
] | 8
|
2018-11-18T00:37:25.000Z
|
2020-12-06T13:17:53.000Z
|
scripts/scrape_cgc.py
|
eklipse2009/ZX-Pokemaster
|
113bf2e242347b475cca9eadbae4f1b67f498466
|
[
"MIT"
] | 8
|
2017-08-21T10:07:58.000Z
|
2020-03-29T18:23:37.000Z
|
scripts/scrape_cgc.py
|
eklipse2009/ZX-Pokemaster
|
113bf2e242347b475cca9eadbae4f1b67f498466
|
[
"MIT"
] | 1
|
2021-03-04T17:43:36.000Z
|
2021-03-04T17:43:36.000Z
|
import os
import glob
import shutil
import zipfile
from functions.game_name_functions import *
if (os.getcwd().endswith('scripts')):
os.chdir('..')
from classes.scraper import *
def scrape_csscgc():
# if os.path.exists('tosec\\CSSCGC Games'):
# shutil.rmtree('tosec\\CSSCGC Games')
s = Scraper()
template = 'https://www.yoursinclair.co.uk/csscgc/csscgc.cgi?year='
for year in range(1996, 2017):
files_extracted = []
page = template + str(year)
selector = s.loadUrl(page)
games_tables = selector.xpath('//table[@border="1"]').extract_all()
for game_table in games_tables:
cells = Selector(game_table).xpath('//td//text()').extract_all()
game_name = cells[0]
author = cells[2]
if not author.startswith('Mr'):
author = putInitialsToEnd(author)
filenames = list(set(cells[4].split(' ')+[cells[4]]))
format = cells[10]
game_represented = False
for filename in filenames:
if not filename:
continue
filename = os.path.basename(filename)
ext = os.path.splitext(filename)[-1].lower()
tosec_name = '{} ({})({})({})[CSSCGC]{}'.format(game_name, str(year), author, format, ext)
tosec_name = tosec_name.replace('(Spectrum)', '').replace('ZX Spectrum ', '').replace('(48K)', '')
tosec_name = tosec_name.replace('(128K Spectrum)', '(128K)')
tosec_name = tosec_name.replace('(128K-+2)', '(+2)')
                tosec_name = tosec_name.replace('(unknown)', '(-)')
tosec_name = getFileSystemFriendlyName(tosec_name)
src = os.path.join('tosec', 'csscgc scrape', 'CSSCGC' + str(year), filename)
dest = os.path.join('tosec', 'CSSCGC Games', str(year), tosec_name)
# print(src, dest)
if not os.path.exists(src):
# print('File does not exist:', filename, 'Year:', year)
continue
if os.path.exists(dest):
print('Conflict:', tosec_name, filename, 'Year:', year)
continue
os.makedirs(os.path.dirname(dest), exist_ok=True)
if ext == '.zip':
with zipfile.ZipFile(src, 'r') as zf:
files_to_extract = []
conflict = False
for zfname in zf.namelist():
zfname_ext = zfname.split('.')[-1].lower()
if zfname_ext in GAME_EXTENSIONS:
files_to_extract.append(zfname)
for each in GAME_EXTENSIONS:
if len([x for x in files_to_extract if x.endswith(each)])>1:
print('Conflict:', tosec_name, src, files_to_extract, 'Year:', year)
conflict = True
break
if not conflict and files_to_extract:
for file in files_to_extract:
                                # read each matching file and write it out under its own extension
                                data = zf.read(file)
                                ext = os.path.splitext(file)[-1].lower()
                                file_dest = dest.replace('.zip', ext)
                                with open(file_dest, 'wb+') as output:
output.write(data)
game_represented = True
files_extracted.append(src)
else:
shutil.copy(src, dest)
files_extracted.append(src)
game_represented = True
if not game_represented:
print('Game not represented:', tosec_name, cells[4], 'Year:', year)
for src in glob.glob(os.path.join('tosec', 'csscgc scrape', 'CSSCGC'+str(year), '*')):
filename, ext = os.path.splitext(os.path.basename(src))
if ext[1:] not in GAME_EXTENSIONS+['zip']:
continue
if src in files_extracted:
continue
else:
tosec_name = '{} ({})(-)[CSSCGC]{}'.format(filename.title() , str(year), ext)
dest = os.path.join('tosec', 'CSSCGC Games', str(year), 'unsorted', tosec_name)
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copy(src, dest)
print('Copied: ', src, 'to:', dest, 'Year:', year)
if __name__=='__main__':
scrape_csscgc()
| 49.967742
| 114
| 0.497095
|
795165d401ebb174438275a9ed6fbb6232480959
| 53,255
|
py
|
Python
|
python/ccxt/async_support/bittrex.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | 1
|
2021-03-01T17:45:33.000Z
|
2021-03-01T17:45:33.000Z
|
python/ccxt/async_support/bittrex.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | 4
|
2020-09-07T00:20:58.000Z
|
2021-05-10T09:04:56.000Z
|
python/ccxt/async_support/bittrex.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | 1
|
2019-11-08T12:36:13.000Z
|
2019-11-08T12:36:13.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import AddressPending
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class bittrex (Exchange):
def describe(self):
return self.deep_extend(super(bittrex, self).describe(), {
'id': 'bittrex',
'name': 'Bittrex',
'countries': ['US'],
'version': 'v1.1',
'rateLimit': 1500,
'certified': True,
# new metainfo interface
'has': {
'CORS': True,
'createMarketOrder': False,
'fetchDepositAddress': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMyTrades': 'emulated',
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchTickers': True,
'withdraw': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
},
'timeframes': {
'1m': 'oneMin',
'5m': 'fiveMin',
'30m': 'thirtyMin',
'1h': 'hour',
'1d': 'day',
},
'hostname': 'bittrex.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766352-cf0b3c26-5ed5-11e7-82b7-f3826b7a97d8.jpg',
'api': {
'public': 'https://{hostname}/api',
'account': 'https://{hostname}/api',
'market': 'https://{hostname}/api',
'v2': 'https://{hostname}/api/v2.0/pub',
'v3': 'https://api.bittrex.com/v3',
'v3public': 'https://api.bittrex.com/v3',
},
'www': 'https://bittrex.com',
'doc': [
'https://bittrex.github.io/api/',
'https://bittrex.github.io/api/v3',
'https://www.npmjs.com/package/bittrex-node',
],
'fees': [
'https://bittrex.zendesk.com/hc/en-us/articles/115003684371-BITTREX-SERVICE-FEES-AND-WITHDRAWAL-LIMITATIONS',
'https://bittrex.zendesk.com/hc/en-us/articles/115000199651-What-fees-does-Bittrex-charge-',
],
},
'api': {
'v3': {
'get': [
'account',
'addresses',
'addresses/{currencySymbol}',
'balances',
'balances/{currencySymbol}',
'currencies',
'currencies/{symbol}',
'deposits/open',
'deposits/closed',
'deposits/ByTxId/{txId}',
'deposits/{depositId}',
'orders/closed',
'orders/open',
'orders/{orderId}',
'ping',
'subaccounts/{subaccountId}',
'subaccounts',
'withdrawals/open',
'withdrawals/closed',
'withdrawals/ByTxId/{txId}',
'withdrawals/{withdrawalId}',
],
'post': [
'addresses',
'orders',
'subaccounts',
'withdrawals',
],
'delete': [
'orders/{orderId}',
'withdrawals/{withdrawalId}',
],
},
'v3public': {
'get': [
'markets',
'markets/summaries',
'markets/{marketSymbol}',
'markets/{marketSymbol}/summary',
'markets/{marketSymbol}/orderbook',
'markets/{marketSymbol}/trades',
'markets/{marketSymbol}/ticker',
'markets/{marketSymbol}/candles',
],
},
'v2': {
'get': [
'currencies/GetBTCPrice',
'market/GetTicks',
'market/GetLatestTick',
'Markets/GetMarketSummaries',
'market/GetLatestTick',
],
},
'public': {
'get': [
'currencies',
'markethistory',
'markets',
'marketsummaries',
'marketsummary',
'orderbook',
'ticker',
],
},
'account': {
'get': [
'balance',
'balances',
'depositaddress',
'deposithistory',
'order',
'orders',
'orderhistory',
'withdrawalhistory',
'withdraw',
],
},
'market': {
'get': [
'buylimit',
'buymarket',
'cancel',
'openorders',
'selllimit',
'sellmarket',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0025,
'taker': 0.0025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.0005,
'LTC': 0.01,
'DOGE': 2,
'VTC': 0.02,
'PPC': 0.02,
'FTC': 0.2,
'RDD': 2,
'NXT': 2,
'DASH': 0.05,
'POT': 0.002,
'BLK': 0.02,
'EMC2': 0.2,
'XMY': 0.2,
'GLD': 0.0002,
'SLR': 0.2,
'GRS': 0.2,
},
'deposit': {
'BTC': 0,
'LTC': 0,
'DOGE': 0,
'VTC': 0,
'PPC': 0,
'FTC': 0,
'RDD': 0,
'NXT': 0,
'DASH': 0,
'POT': 0,
'BLK': 0,
'EMC2': 0,
'XMY': 0,
'GLD': 0,
'SLR': 0,
'GRS': 0,
},
},
},
'exceptions': {
# 'Call to Cancel was throttled. Try again in 60 seconds.': DDoSProtection,
# 'Call to GetBalances was throttled. Try again in 60 seconds.': DDoSProtection,
'APISIGN_NOT_PROVIDED': AuthenticationError,
'INVALID_SIGNATURE': AuthenticationError,
'INVALID_CURRENCY': ExchangeError,
'INVALID_PERMISSION': AuthenticationError,
'INSUFFICIENT_FUNDS': InsufficientFunds,
'QUANTITY_NOT_PROVIDED': InvalidOrder,
'MIN_TRADE_REQUIREMENT_NOT_MET': InvalidOrder,
'ORDER_NOT_OPEN': OrderNotFound,
'INVALID_ORDER': InvalidOrder,
'UUID_INVALID': OrderNotFound,
'RATE_NOT_PROVIDED': InvalidOrder, # createLimitBuyOrder('ETH/BTC', 1, 0)
'WHITELIST_VIOLATION_IP': PermissionDenied,
'DUST_TRADE_DISALLOWED_MIN_VALUE': InvalidOrder,
},
'options': {
'parseOrderStatus': False,
'hasAlreadyAuthenticatedSuccessfully': False, # a workaround for APIKEY_INVALID
'symbolSeparator': '-',
# With certain currencies, like
# AEON, BTS, GXS, NXT, SBD, STEEM, STR, XEM, XLM, XMR, XRP
# an additional tag / memo / payment id is usually required by exchanges.
# With Bittrex some currencies imply the "base address + tag" logic.
# The base address for depositing is stored on self.currencies[code]
# The base address identifies the exchange as the recipient
# while the tag identifies the user account within the exchange
# and the tag is retrieved with fetchDepositAddress.
'tag': {
'NXT': True, # NXT, BURST
'CRYPTO_NOTE_PAYMENTID': True, # AEON, XMR
'BITSHAREX': True, # BTS
'RIPPLE': True, # XRP
'NEM': True, # XEM
'STELLAR': True, # XLM
'STEEM': True, # SBD, GOLOS
# https://github.com/ccxt/ccxt/issues/4794
# 'LISK': True, # LSK
},
'subaccountId': None,
# see the implementation of fetchClosedOrdersV3 below
'fetchClosedOrdersMethod': 'fetch_closed_orders_v3',
'fetchClosedOrdersFilterBySince': True,
},
'commonCurrencies': {
'BITS': 'SWIFT',
'CPC': 'CapriCoin',
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
async def fetch_markets(self, params={}):
response = await self.v3publicGetMarkets(params)
#
# [
# {
# "symbol":"LTC-BTC",
# "baseCurrencySymbol":"LTC",
# "quoteCurrencySymbol":"BTC",
# "minTradeSize":"0.01686767",
# "precision":8,
# "status":"ONLINE", # "OFFLINE"
# "createdAt":"2014-02-13T00:00:00Z"
# },
# {
# "symbol":"VDX-USDT",
# "baseCurrencySymbol":"VDX",
# "quoteCurrencySymbol":"USDT",
# "minTradeSize":"300.00000000",
# "precision":8,
# "status":"ONLINE", # "OFFLINE"
# "createdAt":"2019-05-23T00:41:21.843Z",
# "notice":"USDT has swapped to an ERC20-based token as of August 5, 2019."
# }
# ]
#
result = []
# markets = self.safe_value(response, 'result')
for i in range(0, len(response)):
market = response[i]
baseId = self.safe_string(market, 'baseCurrencySymbol')
quoteId = self.safe_string(market, 'quoteCurrencySymbol')
# bittrex v2 uses inverted pairs, v3 uses regular pairs
# we use v3 for fetchMarkets and v2 throughout the rest of self implementation
# therefore we swap the base ←→ quote here to be v2-compatible
# https://github.com/ccxt/ccxt/issues/5634
# id = self.safe_string(market, 'symbol')
id = quoteId + self.options['symbolSeparator'] + baseId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
pricePrecision = self.safe_integer(market, 'precision', 8)
precision = {
'amount': 8,
'price': pricePrecision,
}
status = self.safe_string(market, 'status')
active = (status == 'ONLINE')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'info': market,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'minTradeSize'),
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
},
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.accountGetBalances(params)
balances = self.safe_value(response, 'result')
result = {'info': balances}
indexed = self.index_by(balances, 'Currency')
currencyIds = list(indexed.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
balance = indexed[currencyId]
account['free'] = self.safe_float(balance, 'Available')
account['total'] = self.safe_float(balance, 'Balance')
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'market': self.market_id(symbol),
'type': 'both',
}
response = await self.publicGetOrderbook(self.extend(request, params))
orderbook = response['result']
if 'type' in params:
if params['type'] == 'buy':
orderbook = {
'buy': response['result'],
'sell': [],
}
elif params['type'] == 'sell':
orderbook = {
'buy': [],
'sell': response['result'],
}
return self.parse_order_book(orderbook, None, 'buy', 'sell', 'Rate', 'Quantity')
def parse_ticker(self, ticker, market=None):
timestamp = self.parse8601(self.safe_string(ticker, 'TimeStamp'))
symbol = None
if market is not None:
symbol = market['symbol']
previous = self.safe_float(ticker, 'PrevDay')
last = self.safe_float(ticker, 'Last')
change = None
percentage = None
if last is not None:
if previous is not None:
change = last - previous
if previous > 0:
percentage = (change / previous) * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'High'),
'low': self.safe_float(ticker, 'Low'),
'bid': self.safe_float(ticker, 'Bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'Ask'),
'askVolume': None,
'vwap': None,
'open': previous,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'Volume'),
'quoteVolume': self.safe_float(ticker, 'BaseVolume'),
'info': ticker,
}
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
#
# {
# "success": True,
# "message": "",
# "result": [
# {
# "Currency": "BTC",
# "CurrencyLong":"Bitcoin",
# "MinConfirmation":2,
# "TxFee":0.00050000,
# "IsActive":true,
# "IsRestricted":false,
# "CoinType":"BITCOIN",
# "BaseAddress":"1N52wHoVR79PMDishab2XmRHsbekCdGquK",
# "Notice":null
# },
# ...,
# ]
# }
#
currencies = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'Currency')
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
address = self.safe_value(currency, 'BaseAddress')
fee = self.safe_float(currency, 'TxFee') # todo: redesign
result[code] = {
'id': id,
'code': code,
'address': address,
'info': currency,
'type': currency['CoinType'],
'name': currency['CurrencyLong'],
'active': currency['IsActive'],
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': fee,
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetMarketsummaries(params)
tickers = self.safe_value(response, 'result')
result = {}
for t in range(0, len(tickers)):
ticker = tickers[t]
marketId = self.safe_string(ticker, 'MarketName')
market = None
symbol = marketId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetMarketsummary(self.extend(request, params))
ticker = response['result'][0]
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(trade['TimeStamp'] + '+00:00')
side = None
if trade['OrderType'] == 'BUY':
side = 'buy'
elif trade['OrderType'] == 'SELL':
side = 'sell'
id = self.safe_string_2(trade, 'Id', 'ID')
symbol = None
if market is not None:
symbol = market['symbol']
cost = None
price = self.safe_float(trade, 'Price')
amount = self.safe_float(trade, 'Quantity')
if amount is not None:
if price is not None:
cost = price * amount
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'takerOrMaker': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetMarkethistory(self.extend(request, params))
if 'result' in response:
if response['result'] is not None:
return self.parse_trades(response['result'], market, since, limit)
raise ExchangeError(self.id + ' fetchTrades() returned None response')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
timestamp = self.parse8601(ohlcv['T'] + '+00:00')
return [
timestamp,
ohlcv['O'],
ohlcv['H'],
ohlcv['L'],
ohlcv['C'],
ohlcv['V'],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'tickInterval': self.timeframes[timeframe],
'marketName': market['id'],
}
response = await self.v2GetMarketGetTicks(self.extend(request, params))
if 'result' in response:
if response['result']:
return self.parse_ohlcvs(response['result'], market, timeframe, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.marketGetOpenorders(self.extend(request, params))
result = self.safe_value(response, 'result', [])
orders = self.parse_orders(result, market, since, limit)
return self.filter_by_symbol(orders, symbol)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
market = self.market(symbol)
method = 'marketGet' + self.capitalize(side) + type
request = {
'market': market['id'],
'quantity': self.amount_to_precision(symbol, amount),
'rate': self.price_to_precision(symbol, price),
}
# if type == 'limit':
# order['rate'] = self.price_to_precision(symbol, price)
response = await getattr(self, method)(self.extend(request, params))
orderIdField = self.get_order_id_field()
orderId = self.safe_string(response['result'], orderIdField)
return {
'info': response,
'id': orderId,
'symbol': symbol,
'type': type,
'side': side,
'status': 'open',
}
def get_order_id_field(self):
return 'uuid'
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
orderIdField = self.get_order_id_field()
request = {}
request[orderIdField] = id
response = await self.marketGetCancel(self.extend(request, params))
#
# {
# "success": True,
# "message": "''",
# "result": {
# "uuid": "614c34e4-8d71-11e3-94b5-425861b86ab6"
# }
# }
#
return self.extend(self.parse_order(response), {
'status': 'canceled',
})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
# https://support.bittrex.com/hc/en-us/articles/115003723911
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
response = await self.accountGetDeposithistory(self.extend(request, params))
#
# {success: True,
# message: "",
# result: [{ Id: 22578097,
# Amount: 0.3,
# Currency: "ETH",
# Confirmations: 15,
# LastUpdated: "2018-06-10T07:12:10.57",
# TxId: "0xf50b5ba2ca5438b58f93516eaa523eaf35b4420ca0f24061003df1be7…",
# CryptoAddress: "0xb25f281fa51f1635abd4a60b0870a62d2a7fa404" }]}
#
# we cannot filter by `since` timestamp, as it isn't set by Bittrex
# see https://github.com/ccxt/ccxt/issues/4067
# return self.parseTransactions(response['result'], currency, since, limit)
return self.parseTransactions(response['result'], currency, None, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
# https://support.bittrex.com/hc/en-us/articles/115003723911
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
response = await self.accountGetWithdrawalhistory(self.extend(request, params))
#
# {
# "success" : True,
# "message" : "",
# "result" : [{
# "PaymentUuid" : "b32c7a5c-90c6-4c6e-835c-e16df12708b1",
# "Currency" : "BTC",
# "Amount" : 17.00000000,
# "Address" : "1DfaaFBdbB5nrHj87x3NHS4onvw1GPNyAu",
# "Opened" : "2014-07-09T04:24:47.217",
# "Authorized" : True,
# "PendingPayment" : False,
# "TxCost" : 0.00020000,
# "TxId" : null,
# "Canceled" : True,
# "InvalidAddress" : False
# }, {
# "PaymentUuid" : "d193da98-788c-4188-a8f9-8ec2c33fdfcf",
# "Currency" : "XC",
# "Amount" : 7513.75121715,
# "Address" : "TcnSMgAd7EonF2Dgc4c9K14L12RBaW5S5J",
# "Opened" : "2014-07-08T23:13:31.83",
# "Authorized" : True,
# "PendingPayment" : False,
# "TxCost" : 0.00002000,
# "TxId" : "d8a575c2a71c7e56d02ab8e26bb1ef0a2f6cf2094f6ca2116476a569c1e84f6e",
# "Canceled" : False,
# "InvalidAddress" : False
# }
# ]
# }
#
return self.parseTransactions(response['result'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# Id: 72578097,
# Amount: 0.3,
# Currency: "ETH",
# Confirmations: 15,
# LastUpdated: "2018-06-17T07:12:14.57",
# TxId: "0xb31b5ba2ca5438b58f93516eaa523eaf35b4420ca0f24061003df1be7…",
# CryptoAddress: "0x2d5f281fa51f1635abd4a60b0870a62d2a7fa404"
# }
#
# fetchWithdrawals
#
# {
# "PaymentUuid" : "e293da98-788c-4188-a8f9-8ec2c33fdfcf",
# "Currency" : "XC",
# "Amount" : 7513.75121715,
# "Address" : "EVnSMgAd7EonF2Dgc4c9K14L12RBaW5S5J",
# "Opened" : "2014-07-08T23:13:31.83",
# "Authorized" : True,
# "PendingPayment" : False,
# "TxCost" : 0.00002000,
# "TxId" : "b4a575c2a71c7e56d02ab8e26bb1ef0a2f6cf2094f6ca2116476a569c1e84f6e",
# "Canceled" : False,
# "InvalidAddress" : False
# }
#
id = self.safe_string_2(transaction, 'Id', 'PaymentUuid')
amount = self.safe_float(transaction, 'Amount')
address = self.safe_string_2(transaction, 'CryptoAddress', 'Address')
txid = self.safe_string(transaction, 'TxId')
updated = self.parse8601(self.safe_string(transaction, 'LastUpdated'))
opened = self.parse8601(self.safe_string(transaction, 'Opened'))
timestamp = opened if opened else updated
type = 'deposit' if (opened is None) else 'withdrawal'
currencyId = self.safe_string(transaction, 'Currency')
code = self.safe_currency_code(currencyId, currency)
status = 'pending'
if type == 'deposit':
#
# deposits numConfirmations never reach the minConfirmations number
# we set all of them to 'ok', otherwise they'd all be 'pending'
#
# numConfirmations = self.safe_integer(transaction, 'Confirmations', 0)
# minConfirmations = self.safe_integer(currency['info'], 'MinConfirmation')
# if numConfirmations >= minConfirmations:
# status = 'ok'
# }
#
status = 'ok'
else:
authorized = self.safe_value(transaction, 'Authorized', False)
pendingPayment = self.safe_value(transaction, 'PendingPayment', False)
canceled = self.safe_value(transaction, 'Canceled', False)
invalidAddress = self.safe_value(transaction, 'InvalidAddress', False)
if invalidAddress:
status = 'failed'
elif canceled:
status = 'canceled'
elif pendingPayment:
status = 'pending'
elif authorized and(txid is not None):
status = 'ok'
feeCost = self.safe_float(transaction, 'TxCost')
if feeCost is None:
if type == 'deposit':
# according to https://support.bittrex.com/hc/en-us/articles/115000199651-What-fees-does-Bittrex-charge-
feeCost = 0 # FIXME: remove hardcoded value that may change any time
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_symbol(self, id):
quoteId, baseId = id.split(self.options['symbolSeparator'])
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
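    # Illustrative only: with the default '-' separator, parse_symbol('BTC-LTC')
    # returns 'LTC/BTC', matching the base/quote swap described in fetch_markets.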
def parse_order(self, order, market=None):
if 'marketSymbol' in order:
return self.parse_order_v3(order, market)
else:
return self.parse_order_v2(order, market)
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
if self.options['fetchClosedOrdersFilterBySince']:
return super(bittrex, self).parse_orders(orders, market, since, limit, params)
else:
return super(bittrex, self).parse_orders(orders, market, None, limit, params)
def parse_order_status(self, status):
statuses = {
'CLOSED': 'closed',
'OPEN': 'open',
'CANCELLED': 'canceled',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_v3(self, order, market=None):
#
# {
# id: '1be35109-b763-44ce-b6ea-05b6b0735c0c',
# marketSymbol: 'LTC-ETH',
# direction: 'BUY',
# type: 'LIMIT',
# quantity: '0.50000000',
# limit: '0.17846699',
# timeInForce: 'GOOD_TIL_CANCELLED',
# fillQuantity: '0.50000000',
# commission: '0.00022286',
# proceeds: '0.08914915',
# status: 'CLOSED',
# createdAt: '2018-06-23T13:14:28.613Z',
# updatedAt: '2018-06-23T13:14:30.19Z',
# closedAt: '2018-06-23T13:14:30.19Z'
# }
#
marketSymbol = self.safe_string(order, 'marketSymbol')
symbol = None
feeCurrency = None
if marketSymbol is not None:
baseId, quoteId = marketSymbol.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
feeCurrency = quote
direction = self.safe_string_lower(order, 'direction')
createdAt = self.safe_string(order, 'createdAt')
updatedAt = self.safe_string(order, 'updatedAt')
closedAt = self.safe_string(order, 'closedAt')
lastTradeTimestamp = None
if closedAt is not None:
lastTradeTimestamp = self.parse8601(closedAt)
elif updatedAt:
lastTradeTimestamp = self.parse8601(updatedAt)
timestamp = self.parse8601(createdAt)
type = self.safe_string_lower(order, 'type')
quantity = self.safe_float(order, 'quantity')
limit = self.safe_float(order, 'limit')
fillQuantity = self.safe_float(order, 'fillQuantity')
commission = self.safe_float(order, 'commission')
proceeds = self.safe_float(order, 'proceeds')
status = self.safe_string_lower(order, 'status')
average = None
remaining = None
if fillQuantity is not None:
if proceeds is not None:
if fillQuantity > 0:
average = proceeds / fillQuantity
elif proceeds == 0:
average = 0
if quantity is not None:
remaining = quantity - fillQuantity
return {
'id': self.safe_string(order, 'id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': direction,
'price': limit,
'cost': proceeds,
'average': average,
'amount': quantity,
'filled': fillQuantity,
'remaining': remaining,
'status': status,
'fee': {
'cost': commission,
'currency': feeCurrency,
},
'info': order,
}
def parse_order_v2(self, order, market=None):
#
# {
# "Uuid": "string(uuid)",
# "OrderUuid": "8925d746-bc9f-4684-b1aa-e507467aaa99",
# "Exchange": "BTC-LTC",
# "OrderType": "string",
# "Quantity": 100000,
# "QuantityRemaining": 100000,
# "Limit": 1e-8,
# "CommissionPaid": 0,
# "Price": 0,
# "PricePerUnit": null,
# "Opened": "2014-07-09T03:55:48.583",
# "Closed": null,
# "CancelInitiated": "boolean",
# "ImmediateOrCancel": "boolean",
# "IsConditional": "boolean"
# }
#
side = self.safe_string_2(order, 'OrderType', 'Type')
isBuyOrder = (side == 'LIMIT_BUY') or (side == 'BUY')
isSellOrder = (side == 'LIMIT_SELL') or (side == 'SELL')
if isBuyOrder:
side = 'buy'
if isSellOrder:
side = 'sell'
# We parse different fields in a very specific order.
# Order might well be closed and then canceled.
status = None
if ('Opened' in list(order.keys())) and order['Opened']:
status = 'open'
if ('Closed' in list(order.keys())) and order['Closed']:
status = 'closed'
if ('CancelInitiated' in list(order.keys())) and order['CancelInitiated']:
status = 'canceled'
if ('Status' in list(order.keys())) and self.options['parseOrderStatus']:
status = self.parse_order_status(self.safe_string(order, 'Status'))
symbol = None
if 'Exchange' in order:
marketId = self.safe_string(order, 'Exchange')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
else:
if market is not None:
symbol = market['symbol']
timestamp = None
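        # the v2 API returns timestamps without a timezone suffix, so '+00:00' is appended to parse them as UTC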
opened = self.safe_string(order, 'Opened')
if opened is not None:
timestamp = self.parse8601(opened + '+00:00')
created = self.safe_string(order, 'Created')
if created is not None:
timestamp = self.parse8601(created + '+00:00')
lastTradeTimestamp = None
lastTimestamp = self.safe_string(order, 'TimeStamp')
if lastTimestamp is not None:
lastTradeTimestamp = self.parse8601(lastTimestamp + '+00:00')
closed = self.safe_string(order, 'Closed')
if closed is not None:
lastTradeTimestamp = self.parse8601(closed + '+00:00')
if timestamp is None:
timestamp = lastTradeTimestamp
fee = None
feeCost = self.safe_float_2(order, 'Commission', 'CommissionPaid')
if feeCost is not None:
fee = {
'cost': feeCost,
}
if market is not None:
fee['currency'] = market['quote']
elif symbol is not None:
currencyIds = symbol.split('/')
quoteCurrencyId = currencyIds[1]
fee['currency'] = self.safe_currency_code(quoteCurrencyId)
price = self.safe_float(order, 'Limit')
cost = self.safe_float(order, 'Price')
amount = self.safe_float(order, 'Quantity')
remaining = self.safe_float(order, 'QuantityRemaining')
filled = None
if amount is not None and remaining is not None:
filled = amount - remaining
        if (status == 'closed') and (remaining > 0):
status = 'canceled'
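        # back-fill a missing cost from price * filled, and a missing price from cost / filled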
if not cost:
if price and filled:
cost = price * filled
if not price:
if cost and filled:
price = cost / filled
average = self.safe_float(order, 'PricePerUnit')
id = self.safe_string_2(order, 'OrderUuid', 'OrderId')
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
try:
orderIdField = self.get_order_id_field()
request = {}
request[orderIdField] = id
response = await self.accountGetOrder(self.extend(request, params))
except Exception as e:
if self.last_json_response:
message = self.safe_string(self.last_json_response, 'message')
if message == 'UUID_INVALID':
raise OrderNotFound(self.id + ' fetchOrder() error: ' + self.last_http_response)
raise e
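        # an empty result on an otherwise successful response still means the order does not exist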
if not response['result']:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return self.parse_order(response['result'])
def order_to_trade(self, order):
        # this entire method should be moved to the base class
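        # it converts a parsed (filled) order into a trade-like dict so that fetch_my_trades can be emulated from closed orders (see below)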
timestamp = self.safe_integer_2(order, 'lastTradeTimestamp', 'timestamp')
return {
'id': self.safe_string(order, 'id'),
'side': self.safe_string(order, 'side'),
'order': self.safe_string(order, 'id'),
'price': self.safe_float(order, 'average'),
'amount': self.safe_float(order, 'filled'),
'cost': self.safe_float(order, 'cost'),
'symbol': self.safe_string(order, 'symbol'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': self.safe_value(order, 'fee'),
'info': order,
}
def orders_to_trades(self, orders):
        # this entire method should be moved to the base class
result = []
for i in range(0, len(orders)):
result.append(self.order_to_trade(orders[i]))
return result
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_closed_orders(symbol, since, limit, params)
return self.orders_to_trades(orders)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchClosedOrdersMethod', 'fetch_closed_orders_v3')
return await getattr(self, method)(symbol, since, limit, params)
async def fetch_closed_orders_v2(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.accountGetOrderhistory(self.extend(request, params))
result = self.safe_value(response, 'result', [])
orders = self.parse_orders(result, market, since, limit)
if symbol is not None:
return self.filter_by_symbol(orders, symbol)
return orders
async def fetch_closed_orders_v3(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit is not None:
request['pageSize'] = limit
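        # the v3 endpoint takes startDate as an ISO 8601 timestamp with a trailing 'Z'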
if since is not None:
request['startDate'] = self.ymdhms(since, 'T') + 'Z'
market = None
if symbol is not None:
market = self.market(symbol)
            # because of this line we will have to rethink the entire v3
# in other words, markets define all the rest of the API
# and v3 market ids are reversed in comparison to v2
# v3 has to be a completely separate implementation
# otherwise we will have to shuffle symbols and currencies everywhere
# which is prone to errors, as was shown here
# https://github.com/ccxt/ccxt/pull/5219#issuecomment-499646209
request['marketSymbol'] = market['base'] + '-' + market['quote']
response = await self.v3GetOrdersClosed(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
if symbol is not None:
return self.filter_by_symbol(orders, symbol)
return orders
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.accountGetDepositaddress(self.extend(request, params))
#
# {"success": False, "message": "ADDRESS_GENERATING", "result": null}
#
# {success: True,
# message: "",
# result: {Currency: "INCNT",
# Address: "3PHvQt9bK21f7eVQVdJzrNPcsMzXabEA5Ha"} }}
#
address = self.safe_string(response['result'], 'Address')
message = self.safe_string(response, 'message')
if not address or message == 'ADDRESS_GENERATING':
            raise AddressPending(self.id + ' the address for ' + code + ' is being generated (pending, not ready yet, retry again later)')
tag = None
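        # for tag-based currency types the endpoint returns the tag in the Address field, and the actual deposit address is taken from the static currency address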
if currency['type'] in self.options['tag']:
tag = address
address = currency['address']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
'address': address,
}
if tag is not None:
request['paymentid'] = tag
response = await self.accountGetWithdraw(self.extend(request, params))
result = self.safe_value(response, 'result', {})
id = self.safe_string(result, 'uuid')
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + '/'
if api != 'v2' and api != 'v3' and api != 'v3public':
url += self.version + '/'
if api == 'public':
url += api + '/' + method.lower() + path
if params:
url += '?' + self.urlencode(params)
elif api == 'v3public':
url += path
if params:
url += '?' + self.urlencode(params)
elif api == 'v2':
url += path
if params:
url += '?' + self.urlencode(params)
elif api == 'v3':
url += path
if params:
url += '?' + self.rawencode(params)
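            # v3 authentication: sign timestamp + url + method + SHA-512 hash of the (empty) request body with HMAC-SHA512; the subaccount id, when set, is appended to the signed string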
contentHash = self.hash(self.encode(''), 'sha512', 'hex')
timestamp = str(self.milliseconds())
auth = timestamp + url + method + contentHash
subaccountId = self.safe_value(self.options, 'subaccountId')
if subaccountId is not None:
auth += subaccountId
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
headers = {
'Api-Key': self.apiKey,
'Api-Timestamp': timestamp,
'Api-Content-Hash': contentHash,
'Api-Signature': signature,
}
if subaccountId is not None:
headers['Api-Subaccount-Id'] = subaccountId
else:
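            # v1.1 authentication: apikey and nonce go into the query string and the full url is signed into the 'apisign' header with HMAC-SHA512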
self.check_required_credentials()
url += api + '/'
            if ((api == 'account') and (path != 'withdraw')) or (path == 'openorders'):
url += method.lower()
request = {
'apikey': self.apiKey,
}
disableNonce = self.safe_value(self.options, 'disableNonce')
if (disableNonce is None) or not disableNonce:
request['nonce'] = self.nonce()
url += path + '?' + self.urlencode(self.extend(request, params))
signature = self.hmac(self.encode(url), self.encode(self.secret), hashlib.sha512)
headers = {'apisign': signature}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if response is None:
return # fallback to default error handler
#
# {success: False, message: "message"}
#
if body[0] == '{':
success = self.safe_value(response, 'success')
if success is None:
raise ExchangeError(self.id + ': malformed response: ' + self.json(response))
if isinstance(success, basestring):
# bleutrade uses string instead of boolean
success = True if (success == 'true') else False
if not success:
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if message == 'APIKEY_INVALID':
if self.options['hasAlreadyAuthenticatedSuccessfully']:
raise DDoSProtection(feedback)
else:
raise AuthenticationError(feedback)
# https://github.com/ccxt/ccxt/issues/4932
# the following two lines are now redundant, see line 171 in describe()
#
# if message == 'DUST_TRADE_DISALLOWED_MIN_VALUE_50K_SAT':
# raise InvalidOrder(self.id + ' order cost should be over 50k satoshi ' + self.json(response))
#
if message == 'INVALID_ORDER':
# Bittrex will return an ambiguous INVALID_ORDER message
# upon canceling already-canceled and closed orders
                    # therefore this special case for cancelOrder
# url = 'https://bittrex.com/api/v1.1/market/cancel?apikey=API_KEY&uuid=ORDER_UUID'
cancel = 'cancel'
indexOfCancel = url.find(cancel)
if indexOfCancel >= 0:
urlParts = url.split('?')
numParts = len(urlParts)
if numParts > 1:
query = urlParts[1]
params = query.split('&')
numParams = len(params)
orderId = None
for i in range(0, numParams):
param = params[i]
keyValue = param.split('=')
if keyValue[0] == 'uuid':
orderId = keyValue[1]
break
if orderId is not None:
raise OrderNotFound(self.id + ' cancelOrder ' + orderId + ' ' + self.json(response))
else:
raise OrderNotFound(self.id + ' cancelOrder ' + self.json(response))
if message in exceptions:
raise exceptions[message](feedback)
if message is not None:
if message.find('throttled. Try again') >= 0:
raise DDoSProtection(feedback)
if message.find('problem') >= 0:
                        raise ExchangeNotAvailable(feedback)  # 'There was a problem processing your request. If this problem persists, please contact...'
raise ExchangeError(feedback)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
# a workaround for APIKEY_INVALID
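        # once a private call has succeeded, handle_errors treats a later APIKEY_INVALID as rate limiting (DDoSProtection) rather than bad credentials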
if (api == 'account') or (api == 'market'):
self.options['hasAlreadyAuthenticatedSuccessfully'] = True
return response
| 41.540562 | 156 | 0.489738 |
795165d849e9147efabacf6e1a21c6ee5e001e71 | 2,609 | py | Python | cogs/utils/activity.py | keyb0ards/reaction-light | 7297c6b9aad9c1fd06480f93f0edd1e59922f5a5 | ["MIT"] | 123 | 2019-12-07T02:32:50.000Z | 2022-03-15T14:38:13.000Z | cogs/utils/activity.py | keyb0ards/reaction-light | 7297c6b9aad9c1fd06480f93f0edd1e59922f5a5 | ["MIT"] | 72 | 2020-01-21T06:45:13.000Z | 2022-03-27T01:41:54.000Z | cogs/utils/activity.py | keyb0ards/reaction-light | 7297c6b9aad9c1fd06480f93f0edd1e59922f5a5 | ["MIT"] | 80 | 2019-11-11T13:55:10.000Z | 2022-03-18T21:44:20.000Z |
"""
MIT License
Copyright (c) 2019-present eibex
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import csv
from os import path
from shutil import copy
from itertools import cycle
class Activities:
def __init__(self, file):
self.file = file
self.load()
def load(self):
self.activity_list = []
if not path.isfile(self.file):
# Create activities.csv from the sample if it does not already exist
copy(f"{self.file}.sample", self.file)
with open(self.file, "r") as f:
# Get activities.csv contents
reader = csv.reader(f, delimiter=",")
for row in reader:
try:
activity = row[0]
self.activity_list.append(activity)
except IndexError:
pass
if not self.activity_list:
self.activity_list = ["with reactions"]
self.loop = cycle(self.activity_list)
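        # cycle() wraps around the list, so successive get() calls rotate through the activities indefinitely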
def get(self):
return next(self.loop)
def add(self, activity):
with open(self.file, "a", encoding="utf-8") as f:
w = csv.writer(f, delimiter=",", lineterminator="\n")
w.writerow([activity])
self.load()
def remove(self, activity):
if activity not in self.activity_list:
return False
self.activity_list.remove(activity)
with open(self.file, "w", encoding="utf-8") as f:
w = csv.writer(f, delimiter=",", lineterminator="\n")
for row in self.activity_list:
w.writerow([row])
self.load()
return True
| 32.6125 | 80 | 0.653124 |
795166d065cee9e4322604c4ab94b5f40733aad8 | 5,538 | py | Python | tanit/master/client/user_service.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | 1 | 2020-09-01T15:16:11.000Z | 2020-09-01T15:16:11.000Z | tanit/master/client/user_service.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | null | null | null | tanit/master/client/user_service.py | yassineazzouz/kraken | 30d536eae2583e6fff51becbff836301058b8e69 | ["MIT"] | null | null | null |
import abc
import logging as lg
import json
from thrift.protocol import TBinaryProtocol
from thrift.protocol.TMultiplexedProtocol import TMultiplexedProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport
import six
from ..core.worker.worker import WorkerStats
from ...common.thrift.utils import connect
from ...filesystem.model import FileSystem
from ...filesystem.model import FileSystemMounts
from ...common.model.worker import Worker
from ...thrift.master.service import MasterUserService
from ...thrift.common.model.ttypes import FileSystem as TFileSystem
from ...common.config.configuration_keys import Keys
from ...common.config.configuration import TanitConfigurationException, TanitConfiguration
_logger = lg.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class UserServiceClientIFace(object):
@abc.abstractmethod
def start(self):
raise NotImplementedError
@abc.abstractmethod
def list_workers(self):
raise NotImplementedError
@abc.abstractmethod
def deactivate_worker(self, wid):
raise NotImplementedError
@abc.abstractmethod
def activate_worker(self, wid):
raise NotImplementedError
@abc.abstractmethod
def register_filesystem(self, filesystem):
raise NotImplementedError
@abc.abstractmethod
def mount_filesystem(self, name, path):
raise NotImplementedError
@abc.abstractmethod
def umount_filesystem(self, mount_point):
raise NotImplementedError
@abc.abstractmethod
def stop(self):
raise NotImplementedError
class ThriftUserServiceClient(UserServiceClientIFace):
def __init__(self):
self.configuration = TanitConfiguration.getInstance()
master_host = self.configuration.get(Keys.MASTER_HOSTNAME)
if master_host is None:
            raise TanitConfigurationException("Missing required configuration '%s'" % Keys.MASTER_HOSTNAME)
rpc_port = self.configuration.get_int(Keys.MASTER_RPC_PORT)
if rpc_port is None:
            raise TanitConfigurationException("Missing required configuration '%s'" % Keys.MASTER_RPC_PORT)
self.transport = TTransport.TFramedTransport(
TSocket.TSocket(master_host, rpc_port)
)
self.client = MasterUserService.Client(
TMultiplexedProtocol(
TBinaryProtocol.TBinaryProtocol(self.transport),
"UserService"
)
)
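        # the master exposes several thrift services on one socket; TMultiplexedProtocol routes calls to the "UserService" handler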
def start(self):
connect(
self.transport,
self.configuration.get(Keys.RPC_CLIENT_MAX_RETRIES),
self.configuration.get(Keys.RPC_CLIENT_RETRY_INTERVAL)/1000
)
def list_workers(self):
wkr_list = []
for wkr in self.client.list_workers():
wkr_list.append(Worker(wkr.wid, wkr.address, wkr.port))
return wkr_list
def deactivate_worker(self, wid):
self.client.deactivate_worker(wid)
def activate_worker(self, wid):
self.client.activate_worker(wid)
def worker_stats(self, wid):
stats = self.client.worker_stats(wid)
return WorkerStats(
wid=stats.wid,
state=stats.state,
last_heartbeat=stats.last_heartbeat,
running_tasks=stats.running_tasks,
pending_tasks=stats.pending_tasks,
available_cores=stats.available_cores,
)
def register_filesystem(self, filesystem):
self.client.register_filesystem(
TFileSystem(
filesystem.name,
filesystem.type,
json.dumps(filesystem.parameters)
)
)
def mount_filesystem(self, name, mount_point, mount_path=""):
if mount_path is None:
mount_path = ""
self.client.mount_filesystem(name, mount_point, mount_path)
def umount_filesystem(self, mount_point):
self.client.umount_filesystem(mount_point)
def list_filesystems(self):
filesystems = []
for filesystem_mount in self.client.list_filesystems():
filesystems.append(
FileSystemMounts(
FileSystem(
filesystem_mount.filesystem.name, filesystem_mount.filesystem.type, {}
),
filesystem_mount.mounts
)
)
return filesystems
def stop(self):
self.transport.close()
class LocalUserServiceClient(UserServiceClientIFace):
def __init__(self, master, configuration=None):
self.master = master
def configure(self, configuration):
pass
def start(self):
# do nothing
return
def list_workers(self):
return self.master.list_workers()
def deactivate_worker(self, wid):
self.master.deactivate_worker(wid)
def activate_worker(self, wid):
self.master.activate_worker(wid)
def worker_stats(self, wid):
return self.master.get_worker_stats(wid)
def register_filesystem(self, filesystem):
self.master.register_filesystem(filesystem)
def mount_filesystem(self, name, mount_point, mount_path=""):
if mount_path is None:
mount_path = ""
self.master.mount_filesystem(name, mount_point, mount_path)
def umount_filesystem(self, mount_point):
self.master.umount_filesystem(mount_point)
def list_filesystems(self):
return self.master.list_filesystems()
def stop(self):
# do nothing
return
| 29.935135 | 106 | 0.670639 |
795167846abefd942dbedb043c952ea8e794be24 | 4,655 | py | Python | CWS/binance.py | aktasr/CWS | 9829e5fcca958e5c875d8ffdeeffe5d9b1ac68a6 | ["MIT"] | 2 | 2021-04-21T21:31:52.000Z | 2021-09-10T00:43:04.000Z | CWS/binance.py | aktasr/CWS | 9829e5fcca958e5c875d8ffdeeffe5d9b1ac68a6 | ["MIT"] | null | null | null | CWS/binance.py | aktasr/CWS | 9829e5fcca958e5c875d8ffdeeffe5d9b1ac68a6 | ["MIT"] | null | null | null |
#Author : Ramazan AKTAS
#Date : 20.02.2021 22:30
from base.exchange import exchange
import time
class binance(exchange):
"""description of class"""
    # The user can subscribe to one or more markets (symbols), e.g. subscribe_trade = ['BTCTRY', 'BTCUSDT'].
    # The user can also subscribe to all markets (symbols) at once, e.g. subscribe_trade = ['all'].
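    # Illustrative usage (not from the original source): binance(subscribe_ticker=['BTCUSDT'], subscribe_orderbook=['ETHUSDT'])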
def __init__(self, subscribe_ticker:list = [], subscribe_trade:list = [], subscribe_orderbook:list = [], subscribe_obDiff:list = []):
super().__init__({ 'onOpenEvent' : {
'subscribe_ticker' : subscribe_ticker,
'subscribe_trade' : subscribe_trade,
'subscribe_orderbook' : subscribe_orderbook,
'subscribe_orderbookDiff' : subscribe_obDiff
}})
self.connect()
def describe(self):
return self.deep_extend(super().describe(), {
'id':'binance',
'name':'BINANCE',
'url':'wss://stream.binance.com:9443/ws',
'wsLogEnable' : 'False',
'Subscribe' : 'SUBSCRIBE',
'Result' : 'result',
'OrderBook' : 'depth20',
'OrderBookDifference' : 'depth',
'TickerAll' : 401,
'TickerPair' : 402,
'Channel':['ticker','trades','depth20@100ms','depth'],
'Symbols': {
'BTCUSDT' : 'btcusdt',
'BTCTRY' : 'btctry',
'XRPUSDT' : 'xrpusdt',
'ETHUSDT' : 'ethusdt',
'USDTTRY' : 'usdttry',
'ADAUSDT' : 'adausdt',
'TRXUSDT' : 'trxusdt',
'EOSUSDT' : 'eosusdt',
'DOTUSDT' : 'dotusdt',
'XTZUSDT' : 'xtzusdt',
'XRPTRY' : 'xrptry',
'MATICUSDT' : 'maticusdt',
'ETHTRY' : 'ethtry',
'DOGETRY' : 'dogetry',
'DOGEUSDT' : 'dogeusdt',
'AVAXUSDT': 'avaxusdt',
'AVAXTRY': 'avaxtry',
'SOLTRY': 'soltry',
'SOLUSDT': 'solusdt',
'CHZTRY': 'chztry',
'DOTTRY': 'dottry',
'MATICTRY': 'matictry',
'NEOTRY': 'neotry',
'ALL' : 'all',
},
'has': {
'tickerSubscribe': True,
'tradeSubscribe': True,
'orderbookSubscribe': True,
'orderbookDiffSubscribe': True
}})
def subscribe_ticker(self, symbol, params={}):
try:
self.subscribe(self.Channel[0], self.Symbols[symbol])
        except KeyError:
            print("Unknown symbol: " + symbol)
def subscribe_trade(self, symbol, params={}):
self.subscribe(self.Channel[1],self.Symbols[symbol])
def subscribe_orderbook(self, symbolArr:list = [], params = {}):
symbols = []
for x in symbolArr:
symbols.append(self.Symbols[x])
self.subscribe(self.Channel[2],symbols)
def subscribe_orderbookDiff(self, symbol, params={}):
self.subscribe(self.Channel[3],self.Symbols[symbol])
def subscribe(self, *params):
if self.isConnected is False:
raise PermissionError('Not connected to websocket server.')
subscribeParams = []
symbols = params[1]
channel = params[0]
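        # binance stream names are formed as '<symbol>@<channel>', e.g. 'btcusdt@ticker'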
for symbol in symbols:
param = symbol + "@" + channel
subscribeParams.append(param)
jsonData = {"method": self.Subscribe, "params" : subscribeParams, "id" : 1}
self.sendMessage(self.json(jsonData))
def orderbook_received(self, message):
symbol = self.getSymbolFromStream(message['stream'])
orderbook = message['data']
self.parse_orderbook(symbol, orderbook)
def onMessage(self, message):
channel = self.getChannelFromStream(message['stream'])
self.parseAndRouteMessage(channel, message)
def onOpen(self):
        ''' This project uses combined streams, so every response carries the name of the stream it belongs to. '''
data = {"method": "SET_PROPERTY", "params": ["combined",True],"id": 5}
self.sendMessage(self.json(data))
super().onOpen()
def getSymbolFromStream(self, stream):
''' class specific function '''
return (self.parseStream(stream)[0])
def getChannelFromStream(self, stream):
''' class specific function '''
return (self.parseStream(stream)[1])
def parseStream(self, stream):
''' class specific function '''
return stream.split("@")
| 35.807692 | 137 | 0.533405 |