from __future__ import annotations
from textwrap import dedent
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from pandas.util._decorators import doc
from pandas.util._validators import validate_bool_kwarg
from dtoolkit._typing import SeriesOrFrame
from dtoolkit.accessor._util import get_mask
from dtoolkit.accessor.register import register_dataframe_method
from dtoolkit.accessor.series import cols as s_cols
from dtoolkit.accessor.series import expand as s_expand
from dtoolkit.accessor.series import top_n as s_top_n
if TYPE_CHECKING:
from typing import Iterable
@register_dataframe_method
@doc(
s_cols,
returns=dedent(
"""
Returns
-------
list of str or int
The column names.
""",
),
)
def cols(df: pd.DataFrame) -> list[str | int]:
return df.columns.tolist()
@register_dataframe_method
def drop_inf(
df: pd.DataFrame,
axis: int | str = 0,
how: str = "any",
inf: str = "all",
subset: list[str] | None = None,
inplace: bool = False,
) -> pd.DataFrame | None:
"""
Remove ``inf`` values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain ``inf`` values are
removed.
* 0, or 'index' : Drop rows which contain ``inf`` values.
* 1, or 'columns' : Drop columns which contain ``inf`` values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from :obj:`~pandas.DataFrame`,
when we have at least one ``inf`` or all ``inf``.
* 'any' : If any ``inf`` values are present, drop that row or column.
* 'all' : If all values are ``inf``, drop that row or column.
inf : {'all', 'pos', 'neg'}, default 'all'
* 'all' : Remove ``inf`` and ``-inf``.
* 'pos' : Only remove ``inf``.
* 'neg' : Only remove ``-inf``.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with ``inf`` entries dropped from it or None if
``inplace=True``.
See Also
--------
dtoolkit.accessor.series.drop_inf
:obj:`~pandas.Series` drops ``inf`` values.
Examples
--------
>>> import dtoolkit.accessor
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.inf, 'Batmobile', 'Bullwhip'],
... "born": [np.inf, pd.Timestamp("1940-04-25"),
... -np.inf]})
>>> df
name toy born
0 Alfred inf inf
1 Batman Batmobile 1940-04-25 00:00:00
2 Catwoman Bullwhip -inf
Drop the rows where at least one element is inf or -inf.
>>> df.drop_inf()
name toy born
1 Batman Batmobile 1940-04-25 00:00:00
Drop the columns where at least one element is inf or -inf.
>>> df.drop_inf(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are inf or -inf.
>>> df.drop_inf(how='all')
name toy born
0 Alfred inf inf
1 Batman Batmobile 1940-04-25 00:00:00
2 Catwoman Bullwhip -inf
Drop the rows where at least one element is -inf.
>>> df.drop_inf(inf='neg')
name toy born
0 Alfred inf inf
1 Batman Batmobile 1940-04-25 00:00:00
Define in which columns to look for inf and -inf values.
>>> df.drop_inf(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25 00:00:00
2 Catwoman Bullwhip -inf
Keep the DataFrame with valid entries in the same variable.
>>> df.drop_inf(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25 00:00:00
"""
from dtoolkit.accessor._util import get_inf_range
inplace = validate_bool_kwarg(inplace, "inplace")
axis = df._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = df
if subset is not None:
ax = df._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = df.take(indices, axis=agg_axis)
inf_range = get_inf_range(inf)
mask = agg_obj.isin(inf_range)
mask = get_mask(how, mask, agg_axis)
result = df.loc(axis=axis)[~mask]
if not inplace:
return result
df._update_inplace(result)
@register_dataframe_method
def filter_in(
df: pd.DataFrame,
condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],
axis: int | str = 0,
how: str = "all",
inplace: bool = False,
) -> pd.DataFrame | None:
"""
Filter :obj:`~pandas.DataFrame` contents.
Similar to :meth:`~pandas.DataFrame.isin`, but returns the matching values
rather than booleans.
Parameters
----------
condition : iterable, Series, DataFrame or dict
The result will only be true at a location if all the labels match.
* If ``condition`` is a :obj:`dict`, the keys must be the row/column
names, which must match. And ``how`` only works on these given keys.
- ``axis`` is 0 or 'index', keys are recognized as column
names.
- ``axis`` is 1 or 'columns', keys are recognized as index
names.
* If ``condition`` is a :obj:`~pandas.Series`, that's the index.
* If ``condition`` is a :obj:`~pandas.DataFrame`, then both the index
and column labels must match.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain the values are filtered.
* 0, or 'index' : Filter rows which contain the values.
* 1, or 'columns' : Filter columns which contain the values.
how : {'any', 'all'}, default 'all'
Determine if row or column is filtered from :obj:`~pandas.DataFrame`,
when we have at least one value or all values.
* 'any' : If any values are present, filter that row or column.
* 'all' : If all values are present, filter that row or column.
inplace : bool, default is False
If True, do operation inplace and return None.
Returns
-------
DataFrame
See Also
--------
pandas.DataFrame.isin
Whether each element in the DataFrame is contained in values.
pandas.DataFrame.filter
Subset the dataframe rows or columns according to the specified index
labels.
Examples
--------
>>> import dtoolkit.accessor
>>> import pandas as pd
>>> df = pd.DataFrame({'num_legs': [2, 4, 2], 'num_wings': [2, 0, 0]},
... index=['falcon', 'dog', 'cat'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 2 0
When ``condition`` is a list, check whether every value in the DataFrame is
present in the list (which animals have 0 or 2 legs or wings).
Filter rows.
>>> df.filter_in([0, 2])
num_legs num_wings
falcon 2 2
cat 2 0
Filter columns.
>>> df.filter_in([0, 2], axis=1)
num_wings
falcon 2
dog 0
cat 0
When ``condition`` is a :obj:`dict`, we can pass values to check for each
row/column (depending on ``axis``) separately.
Filter rows, checking under each column (key) whether it contains the value.
>>> df.filter_in({'num_legs': [2], 'num_wings': [2]})
num_legs num_wings
falcon 2 2
Filter columns, checking under each index (key) whether it contains the value.
>>> df.filter_in({'cat': [2]}, axis=1)
num_legs
falcon 2
dog 4
cat 2
When ``condition`` is a Series or DataFrame, the index and column must match.
Note that 'spider' doesn't match based on the number of legs in ``other``.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> other
num_legs num_wings
spider 8 0
falcon 2 2
>>> df.filter_in(other)
num_legs num_wings
falcon 2 2
"""
from dtoolkit.accessor._util import isin
inplace = validate_bool_kwarg(inplace, "inplace")
axis = df._get_axis_number(axis)
another_axis = 1 - axis
mask = isin(df, condition, axis)
if isinstance(condition, dict):
# 'how' only works on condition's keys
names = condition.keys()
mask = mask[names] if axis == 0 else mask.loc[names]
mask = get_mask(how, mask, another_axis)
result = df.loc(axis=axis)[mask]
if not inplace:
return result
df._update_inplace(result)
@register_dataframe_method
def repeat(
df: pd.DataFrame,
repeats: int | list[int],
axis: int | str = 0,
) -> pd.DataFrame | None:
"""
Repeat row or column of a :obj:`~pandas.DataFrame`.
Returns a new DataFrame where each row/column is repeated
consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
:obj:`~pandas.DataFrame`.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to repeat.
* 0, or 'index' : Along the row to repeat.
* 1, or 'columns' : Along the column to repeat.
Returns
-------
DataFrame
Newly created DataFrame with repeated elements.
See Also
--------
numpy.repeat : This transformer's prototype method.
Examples
--------
>>> import pandas as pd
>>> import dtoolkit.accessor
>>> df = pd.DataFrame({'a': [1, 2], 'b':[3, 4]})
>>> df
a b
0 1 3
1 2 4
Repeat each row two times.
>>> df.repeat(2)
a b
0 1 3
0 1 3
1 2 4
1 2 4
Repeat each column two times.
>>> df.repeat(2, 1)
a a b b
0 1 1 3 3
1 2 2 4 4
Repeat the ``a`` column 1 time and the ``b`` column 2 times.
>>> df.repeat([1, 2], 1)
a b b
0 1 3 3
1 2 4 4
"""
axis = df._get_axis_number(axis)
return pd.DataFrame(
np.repeat(
df._values,
repeats,
axis=axis,
),
index=df.index.repeat(repeats) if axis == 0 else df.index,
columns=df.columns.repeat(repeats) if axis == 1 else df.columns,
)
@register_dataframe_method
def top_n(
df: pd.DataFrame,
n: int,
largest: bool = True,
keep: str = "first",
prefix: str = "top",
delimiter: str = "_",
element: str = "index",
) -> pd.DataFrame:
"""
Returns each row's top `n`.
Parameters
----------
n : int
Number of top items to return.
largest : bool, default True
- True, the top is the largest.
- False, the top is the smallest.
keep : {"first", "last", "all"}, default "first"
Where there are duplicate values:
- first : prioritize the first occurrence(s).
- last : prioritize the last occurrence(s).
- all : do not drop any duplicates, even if it means selecting more than
n items.
prefix : str, default "top"
The prefix name of the new DataFrame column.
delimiter : str, default "_"
The delimiter between `prefix` and number.
element : {"both", "index", "value"}, default "index"
Controls the structure of the returned DataFrame's values.
- both: the structure of value is ``({column index}, {value})``.
- index: the structure of value is only ``{column index}``.
- value: the structure of value is only ``{value}``.
Returns
-------
DataFrame
- The structure of column name is ``{prefix}{delimiter}{number}``.
- The default structure of value is ``{column index}`` and could be
controlled via ``element``.
See Also
--------
dtoolkit.accessor.dataframe.expand
Transform each element of a list-like to a column.
Notes
-----
Q: How is this different from :meth:`~pandas.DataFrame.nlargest` and
:meth:`~pandas.DataFrame.nsmallest`?
A: :meth:`~pandas.DataFrame.nlargest` and
:meth:`~pandas.DataFrame.nsmallest` select the top `n` rows ranked by the
given column(s) and return whole rows, whereas this method finds the top `n`
within each row.
Examples
--------
>>> import dtoolkit.accessor
>>> import pandas as pd
>>> df = pd.DataFrame(
... {
... "a": [1, 3, 2, 1],
... "b": [3, 2, 1, 1],
... "c": [2, 1, 3, 1],
... },
... )
>>> df
a b c
0 1 3 2
1 3 2 1
2 2 1 3
3 1 1 1
Get each row's largest top **2**.
>>> df.top_n(2)
top_1 top_2
0 b c
1 a b
2 c a
3 a b
Get both the **index** and **value** of each row's largest top 2.
>>> df.top_n(2, element="both")
top_1 top_2
0 (b, 3) (c, 2)
1 (a, 3) (b, 2)
2 (c, 3) (a, 2)
3 (a, 1) (b, 1)
Get each row's smallest top **1** and **keep** the duplicated values.
>>> df.top_n(1, largest=False, keep="all")
top_1 top_2 top_3
0 a NaN NaN
1 c NaN NaN
2 b NaN NaN
3 a b c
"""
if element not in ("both", "index", "value"):
raise ValueError('element must be either "both", "index" or "value"')
def wrap_s_top_n(*args, **kwargs) -> pd.Series:
top = s_top_n(*args, **kwargs)
if element == "both":
data = zip(top.index, top.values)
elif element == "index":
data = top.index
elif element == "value":
data = top.values
return pd.Series(data, index=pd.RangeIndex(1, len(top) + 1))
return df.apply(
wrap_s_top_n,
axis=1,
n=n,
largest=largest,
keep=keep,
).add_prefix(prefix + delimiter)
@register_dataframe_method
@doc(
s_expand,
see_also=dedent(
"""
See Also
--------
dtoolkit.accessor.series.expand
Transform each element of a list-like to a column.
pandas.DataFrame.explode
Transform each element of a list-like to a row.
""",
),
examples=dedent(
"""
Examples
--------
>>> import dtoolkit.accessor
>>> import pandas as pd
>>> import numpy as np
Expand the *list-like* element.
>>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]],
... 'B': 1,
... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]})
>>> df.expand()
A_0 A_1 A_2 B C_0 C_1 C_2
0 0 1.0 2.0 1 a b c
1 foo NaN NaN 1 NaN None None
2 None NaN NaN 1 None None None
3 3 4.0 NaN 1 d e None
Expand elements whose *sub-elements* are also list-like.
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [("a", "b"), (3, (5, 6))]})
>>> df.expand(flatten=True)
col1 col2_0 col2_1 col2_2
0 1 a b NaN
1 2 3 5 6.0
Set the column names.
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [("a", 3), ("b", 4)]})
>>> df.expand(suffix=["index", "value"], delimiter="-")
col1 col2-index col2-value
0 1 a 3
1 2 b 4
It can also handle **different lengths** of elements and suffix lists.
>>> df = pd.DataFrame({"col1": [1, 2], "col2": [(3, 4), (5, 6, 7)]})
>>> df.expand()
col1 col2_0 col2_1 col2_2
0 1 3 4 NaN
1 2 5 6 7.0
>>> df.expand(suffix=["a", "b", "c", "d"])
col1 col2_a col2_b col2_c
0 1 3 4 NaN
1 2 5 6 7.0
""",
),
)
def expand(
df: pd.DataFrame,
suffix: list[str | int] | None = None,
delimiter: str = "_",
flatten: bool = False,
) -> pd.DataFrame:
return pd.concat(
(
df.get(key=column).expand(
suffix=suffix,
delimiter=delimiter,
flatten=flatten,
)
for column in df.cols()
),
axis=1,
)
@register_dataframe_method
def to_series(df: pd.DataFrame, name: str | int = None) -> SeriesOrFrame:
"""
Transform a one-column :class:`~pandas.DataFrame` into a :class:`~pandas.Series`.
Parameters
----------
name : str or int, optional
The name of the returned Series.
Returns
-------
Series or DataFrame
Series if ``df`` has only one column, otherwise the original DataFrame.
Examples
--------
>>> import dtoolkit.accessor
>>> import pandas as pd
>>> df = pd.DataFrame(range(3))
>>> df
0
0 0
1 1
2 2
>>> df.to_series()
0 0
1 1
2 2
Name: 0, dtype: int64
>>> df = pd.DataFrame({'a': [1, 2]})
>>> df
a
0 1
1 2
>>> df.to_series()
0 1
1 2
Name: a, dtype: int64
"""
if df.shape[1] == 1: # one column DataFrame
column = df.columns[0]
return df.get(column).rename(name or column)
return df
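# A minimal usage sketch (not part of the module): once ``dtoolkit.accessor`` is
# imported, the methods above are registered directly on DataFrame. The frame
# below is hypothetical and only illustrates chaining the accessors together.
#
#   import dtoolkit.accessor
#   import numpy as np
#   import pandas as pd
#
#   df = pd.DataFrame({"a": [1.0, np.inf], "b": [2.0, 3.0]})
#   df.cols()                       # ['a', 'b']
#   df.drop_inf()                   # keeps only the first row
#   df.filter_in([1.0, 2.0])        # keeps rows whose values are all in the list
#   df[["a"]].to_series(name="a")   # one-column frame -> Series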
|
# Meta / Dialog / Closable
# Display a #dialog having a close button, and detect when it's closed. #meta
# ---
from h2o_wave import main, app, Q, ui
@app('/demo')
async def serve(q: Q):
if not q.client.initialized:
# Create an empty meta_card to hold the dialog
q.page['meta'] = ui.meta_card(box='')
# Display a button to launch the dialog
q.page['example'] = ui.form_card(box='1 1 2 1', items=[
ui.button(name='launch_dialog', label='Launch dialog', primary=True)
])
q.client.initialized = True
# Was the launch_dialog button clicked?
if q.args.launch_dialog:
# Create a dialog with a close button
q.page['meta'].dialog = ui.dialog(
title='Hello!',
name='my_dialog',
items=[
ui.text('Click the X button to close this dialog.'),
],
# Enable a close button (displayed at the top-right of the dialog)
closable=True,
# Get notified when the dialog is dismissed.
events=['dismissed'],
)
# Did we get events from the dialog?
if q.events.my_dialog:
# Was the dialog dismissed?
if q.events.my_dialog.dismissed:
# Delete the dialog
q.page['meta'].dialog = None
await q.page.save()
|
from dataset_specifications.housing import HousingSet
class HouseAgeSet(HousingSet):
def __init__(self):
super().__init__()
self.name = "house_age"
self.x_dim = 2
# Modified version of the California house price dataset (housing)
# Predict house age only from coordinates of house
def preprocess(self, file_path):
# Load all data at first
loaded_data = super().preprocess(file_path)
# Indexes: 1 - house age, 6 - latitude, 7 - longitude
return loaded_data[:,(6,7,1)]
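# Small illustration of the fancy indexing above (not part of the class): the
# column tuple (6, 7, 1) keeps latitude, longitude and house age, in that order,
# so the age ends up as the last column of the returned array.
#
#   import numpy as np
#   rows = np.arange(16).reshape(2, 8)   # stand-in for the loaded housing rows
#   rows[:, (6, 7, 1)]                   # -> [[ 6,  7,  1], [14, 15,  9]]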
|
import pickle
import os
import time
from enum import Enum
from .. import config
class Status(Enum):
""" Enum containing all statuses Ricecooker can have
Steps:
INIT: Ricecooker process has been started
CONSTRUCT_CHANNEL: Ricecooker is ready to call sushi chef's construct_channel method
CREATE_TREE: Ricecooker is ready to create relationships between nodes
DOWNLOAD_FILES: Ricecooker is ready to start downloading files
GET_FILE_DIFF: Ricecooker is ready to get file diff from Kolibri Studio
START_UPLOAD: Ricecooker is ready to start uploading files to Kolibri Studio
UPLOADING_FILES: Ricecooker is in the middle of uploading files
UPLOAD_CHANNEL: Ricecooker is ready to upload the channel to Kolibri Studio
PUBLISH_CHANNEL: Ricecooker is ready to publish the channel to Kolibri
DONE: Ricecooker is done
LAST: Place where Ricecooker left off
"""
INIT = 0
CONSTRUCT_CHANNEL = 1
CREATE_TREE = 2
DOWNLOAD_FILES = 3
GET_FILE_DIFF = 4
START_UPLOAD = 5
UPLOADING_FILES = 6
UPLOAD_CHANNEL = 7
PUBLISH_CHANNEL = 8
DONE = 9
LAST = 10
class RestoreManager:
""" Manager for handling resuming rice cooking process
Attributes:
restore_path (str): path to .pickle file to store progress
channel (Channel): channel Ricecooker is creating
tree (ChannelManager): manager Ricecooker is using
files_downloaded ([str]): list of files that have been downloaded
file_mapping ({filename:...}): filenames mapped to metadata
files_failed ([str]): list of files that failed to download
file_diff ([str]): list of files that don't exist on Kolibri Studio
files_uploaded ([str]): list of files that have been successfully uploaded
channel_link (str): link to uploaded channel
channel_id (str): id of channel that has been uploaded
status (str): status of Ricecooker
"""
def __init__(self):
self.channel = None
self.tree = None
self.files_downloaded = []
self.file_mapping = {}
self.files_failed = []
self.file_diff = []
self.files_uploaded = []
self.channel_link = None
self.channel_id = None
self.status = Status.INIT
self.timestamp = time.time()
def check_for_session(self, status=None):
""" check_for_session: see if session is in progress
Args:
status (str): step to check if last session reached (optional)
Returns: boolean indicating if session exists
"""
status = Status.LAST if status is None else status
return os.path.isfile(self.get_restore_path(status)) and os.path.getsize(self.get_restore_path(status)) > 0
def get_restore_path(self, status=None):
""" get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file
"""
status = self.get_status() if status is None else status
return config.get_restore_path(status.name.lower())
def __record_progress(self, next_step=None):
""" __record_progress: save progress to respective restoration file
Args: next_step (Status): status to advance to before saving (optional)
Returns: None
"""
if next_step is not None:
self.status = next_step
with open(self.get_restore_path(Status.LAST), 'wb') as handle, open(self.get_restore_path(), 'wb') as step_handle:
pickle.dump(self, handle)
pickle.dump(self, step_handle)
def load_progress(self, resume_step):
""" load_progress: loads progress from restoration file
Args: resume_step (str): step at which to resume session
Returns: manager with progress from step
"""
resume_step = Status[resume_step]
progress_path = self.get_restore_path(resume_step)
# If progress is corrupted, revert to step before
while not self.check_for_session(resume_step):
config.LOGGER.error("Ricecooker has not reached {0} status. Reverting to earlier step...".format(resume_step.name))
# All files are corrupted or absent, restart process
if resume_step.value - 1 < 0:
self.init_session()
return self
resume_step = Status(resume_step.value - 1)
progress_path = self.get_restore_path(resume_step)
config.LOGGER.error("Starting from status {0}".format(resume_step.name))
# Load manager
with open(progress_path, 'rb') as handle:
manager = pickle.load(handle)
if isinstance(manager, RestoreManager):
return manager
else:
return self
def get_status(self):
""" get_status: retrieves current status of Ricecooker
Args: None
Returns: string status of Ricecooker
"""
return self.status
def get_status_val(self):
""" get_status_val: retrieves value of status of Ricecooker
Args: None
Returns: number value of status of Ricecooker
"""
return self.status.value
def init_session(self):
""" init_session: sets session to beginning status
Args: None
Returns: None
"""
# Clear out previous session's restoration files
for status in Status:
path = self.get_restore_path(status)
if os.path.isfile(path):
os.remove(path)
self.__record_progress()
self.__record_progress(Status.CONSTRUCT_CHANNEL)
def set_channel(self, channel):
""" set_channel: records progress from constructed channel
Args: channel (Channel): channel Ricecooker is creating
Returns: None
"""
self.channel = channel
self.__record_progress(Status.CREATE_TREE)
def set_tree(self, tree):
""" set_channel: records progress from creating the tree
Args: tree (ChannelManager): manager Ricecooker is using
Returns: None
"""
self.tree = tree
self.__record_progress(Status.DOWNLOAD_FILES)
def set_files(self, files_downloaded, files_failed):
""" set_files: records progress from downloading files
Args:
files_downloaded ([str]): list of files that have been downloaded
files_failed ([str]): list of files that failed to download
Returns: None
"""
self.files_downloaded = files_downloaded
self.files_failed = files_failed
self.__record_progress(Status.GET_FILE_DIFF)
def set_diff(self, file_diff):
""" set_diff: records progress from getting file diff
Args: file_diff ([str]): list of files that don't exist on Kolibri Studio
Returns: None
"""
self.file_diff = file_diff
self.__record_progress(Status.START_UPLOAD)
def set_uploading(self, files_uploaded):
""" set_uploading: records progress during uploading files
Args: files_uploaded ([str]): list of files that have been successfully uploaded
Returns: None
"""
self.files_uploaded = files_uploaded
self.__record_progress(Status.UPLOADING_FILES)
def set_uploaded(self, files_uploaded):
""" set_uploaded: records progress after uploading files
Args: files_uploaded ([str]): list of files that have been successfully uploaded
Returns: None
"""
self.files_uploaded = files_uploaded
self.__record_progress(Status.UPLOAD_CHANNEL)
def set_channel_created(self, channel_link, channel_id):
""" set_channel_created: records progress after creating channel on Kolibri Studio
Args:
channel_link (str): link to uploaded channel
channel_id (str): id of channel that has been uploaded
Returns: None
"""
self.channel_link = channel_link
self.channel_id = channel_id
self.__record_progress(Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE)
def set_published(self):
""" set_published: records progress after channel has been published
Args: None
Returns: None
"""
self.__record_progress(Status.DONE)
def set_done(self):
""" set_done: records progress after Ricecooker process has been completed
Args: None
Returns: None
"""
self.__record_progress(Status.DONE)
# Delete restoration point for last step to indicate process has been completed
os.remove(self.get_restore_path(Status.LAST))
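# Rough sketch of the intended lifecycle (a sketch only: assumes
# ``config.get_restore_path`` points at a writable directory, and ``channel`` /
# ``tree`` are whatever objects the chef produced):
#
#   manager = RestoreManager()
#   if manager.check_for_session():
#       manager = manager.load_progress("LAST")   # resume from the last checkpoint
#   else:
#       manager.init_session()                    # wipe old files, start fresh
#   manager.set_channel(channel)                  # after construct_channel()
#   manager.set_tree(tree)                        # after building node relationships
#   manager.set_done()                            # removes the LAST checkpoint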
|
from PyQt6.QtWidgets import (
QMainWindow,
QTreeWidgetItem,
QWidget
)
from PyQt6.QtGui import (
QIcon
)
from .mainwindow_ui import Ui_MainWindow
from .defmodules import WIDGETS_DEFAULT
from .symmetric import WIDGETS_SYMMETRIC
from .asymmetric import WIDGETS_ASYMMETRIC
from .cryptotools import WIDGETS_CRYPTOTOOLS
WIDGETS_CIPHERS = {
"Symmetric ciphers": WIDGETS_SYMMETRIC,
"Asymmetric ciphers": WIDGETS_ASYMMETRIC,
"Crypto tools": WIDGETS_CRYPTOTOOLS
}
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Crypto methods")
self.ui.splitter.setStretchFactor(1, 1)
self.ui.tree_widget.clicked.connect(self.tree_widget_item_clicked)
self.load_modules()
def load_modules(self):
# Load default modules
for widget in WIDGETS_DEFAULT:
self.ui.stacked_widget.addWidget(widget())
# Load other widgets
items = []
for title, widgets in WIDGETS_CIPHERS.items():
parent = QTreeWidgetItem((title,))
parent.setIcon(0, QIcon("icons:folder.png"))
children = []
for widget in widgets:
widget = widget()
self.ui.stacked_widget.addWidget(widget)
child = QTreeWidgetItem((widget.title,))
child.setIcon(0, QIcon("icons:file.png"))
child.setData(1, 1, widget)
children.append(child)
parent.addChildren(children)
items.append(parent)
self.ui.tree_widget.invisibleRootItem().addChildren(items)
def tree_widget_item_clicked(self):
item = self.ui.tree_widget.currentItem()
widget = item.data(1, 1)
match widget:
case QWidget():
self.ui.group_box_right.setTitle(item.text(0))
self.ui.stacked_widget.setCurrentWidget(widget)
case _:
pass
|
def fileInput(mainFilePath, inputFilePath):
with open(inputFilePath, 'a') as file:
file.write('\n')
with open(mainFilePath) as file:
script = "fileOpenAsInput = open('"+inputFilePath+"')\n"
line = file.readline()
while line != '':
if "fileInput" in line:
line = file.readline()
continue
if "input()" in line:
line = line.replace(
"input()", "fileOpenAsInput.readline()[:-1]")
elif "sys.stdin.readline()" in line:
line = line.replace("sys.stdin.readline()",
"fileOpenAsInput.readline()[:-1]")
script += line
line = file.readline()
exec(script)
quit()
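# Hedged usage sketch: given a script that reads from input() or
# sys.stdin.readline() and a text file with one answer per line, fileInput()
# rewrites those reads to pull from the file and then exec()s the script.
# The file names below are hypothetical.
#
#   if __name__ == "__main__":
#       fileInput("solution.py", "test_input.txt")
#
# Note the newline appended to the input file above: the rewritten reads use
# readline()[:-1], so every line (including the last) must end in '\n'.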
|
"""Plotting referendum results in pandas.
In short, we want to make beautiful map to report results of a referendum. In
some way, we would like to depict results with something similar to the maps
that you can find here:
https://github.com/x-datascience-datacamp/datacamp-assignment-pandas/blob/main/example_map.png
To do that, you will load the data as pandas.DataFrame, merge the info and
aggregate them by regions and finally plot them on a map using `geopandas`.
"""
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
def load_data():
"""Load data from the CSV files referundum/regions/departments."""
referendum = pd.read_csv("./data/referendum.csv", sep=";")
regions = pd.read_csv("./data/regions.csv")
departments = pd.read_csv("./data/departments.csv")
return referendum, regions, departments
def merge_regions_and_departments(regions, departments):
"""Merge regions and departments in one DataFrame.
The columns in the final DataFrame should be:
['code_reg', 'name_reg', 'code_dep', 'name_dep']
"""
columns = ['code_reg',
'name_reg',
'code_dep',
'name_dep']
merge_regions_and_departments = departments.merge(regions,
how="left",
left_on=["region_code"],
right_on=["code"],
suffixes=["_dep", "_reg"])
return merge_regions_and_departments[columns]
def merge_referendum_and_areas(referendum, regions_and_departments):
"""Merge referendum and regions_and_departments in one DataFrame.
You can drop the lines relative to DOM-TOM-COM departments, and the
French living abroad.
"""
abroad = ["DOM", "TOM", "COM"]
r_d_cdep = regions_and_departments["code_dep"]
referendum["Department code"] = pd.Series(referendum["Department code"],
dtype="string").str.zfill(2)
filter_abroad_regions = ~regions_and_departments["code_reg"].isin(abroad)
regions_and_departments = regions_and_departments[filter_abroad_regions]
r_d_cdep = pd.Series(r_d_cdep, dtype="string")
merge_referendum_and_areas = referendum.merge(regions_and_departments,
how="left",
left_on=["Department code"],
right_on=['code_dep'])
merge_referendum_and_areas = merge_referendum_and_areas.dropna()
return merge_referendum_and_areas
def compute_referendum_result_by_regions(referendum_and_areas):
"""Return a table with the absolute count for each region.
The return DataFrame should be indexed by `code_reg` and have columns:
['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
"""
columns = ['name_reg',
'Registered',
'Abstentions',
'Null',
'Choice A',
'Choice B']
cols = columns + ["code_reg"]
gby = ["code_reg", "name_reg"]
referendum_result_by_regions = referendum_and_areas[cols].groupby(gby).sum()
referendum_result_by_regions = referendum_result_by_regions.reset_index()
return referendum_result_by_regions[columns]
def plot_referendum_map(referendum_result_by_regions):
"""Plot a map with the results from the referendum.
* Load the geographic data with geopandas from `regions.geojson`.
* Merge these info into `referendum_result_by_regions`.
* Use the method `GeoDataFrame.plot` to display the result map. The results
should display the rate of 'Choice A' over all expressed ballots.
* Return a gpd.GeoDataFrame with a column 'ratio' containing the results.
"""
geographic_regions = gpd.read_file('./data/regions.geojson')
referendum_map = referendum_result_by_regions.merge(geographic_regions,
how="left",
left_on=["name_reg"],
right_on=['nom']
)
A = referendum_map['Choice A'].astype(int)
B = referendum_map["Choice B"].astype(int)
tot_ref = A + B
referendum_map["ratio"] = referendum_map["Choice A"].astype(int) / tot_ref
referendum_map_geodataframe = gpd.GeoDataFrame(referendum_map)
referendum_map_geodataframe.plot("ratio")
return referendum_map_geodataframe
if __name__ == "__main__":
referendum, df_reg, df_dep = load_data()
regions_and_departments = merge_regions_and_departments(
df_reg, df_dep
)
referendum_and_areas = merge_referendum_and_areas(
referendum, regions_and_departments
)
referendum_results = compute_referendum_result_by_regions(
referendum_and_areas
)
print(referendum_results)
plot_referendum_map(referendum_results)
plt.show()
|
import bson.json_util
import json
import math
import numpy
import pymongo
import scipy.linalg
import sys
import time
from findNodes import run as find_nodes
from neighborhood import run as get_neighborhood
class Cache:
def __init__(self, radius, namefield, host, db, coll):
self.radius = radius
self.namefield = namefield
self.host = host
self.db = db
self.coll = coll
self.cache = {}
def get(self, handle):
if handle not in self.cache:
node = bson.json_util.loads(find_nodes(host=self.host, db=self.db, coll=self.coll, spec=json.dumps({self.namefield: handle}), singleton=json.dumps(True)))
self.cache[handle] = signature(get_neighborhood(host=self.host, db=self.db, coll=self.coll, center=node["_id"], radius=self.radius))
return self.cache[handle]
def adjacency_matrix(g):
index_map = {h: i for i, h in enumerate([x["_id"]["$oid"] for x in g["nodes"]])}
links = ({"source": index_map[x["source"]], "target": index_map[x["target"]]} for x in g["links"])
m = numpy.eye(len(g["nodes"]))
for link in links:
s = link["source"]
t = link["target"]
m[s][t] = m[t][s] = 1.0
return m
def signature(g):
m = adjacency_matrix(g)
return {"det": scipy.linalg.det(m),
"trace": numpy.trace(m)}
def norm2(v):
def sq(x):
return x*x
return math.sqrt(sq(v[0]) + sq(v[1]))
def similarity(s1, s2):
return math.pow(1.01, -norm2([s1["det"] - s2["det"], s1["trace"] - s2["trace"]]))
def process(db, source, source_namefield, target, target_namefield):
source_cache = Cache(2, source_namefield, "localhost", db, source)
target_cache = Cache(2, target_namefield, "localhost", db, target)
print "Computing %s -> %s" % (source, target)
topk = pymongo.MongoClient("localhost")[db]["topk_%s_%s" % (source, target)]
records = topk.find(timeout=False)
count = records.count()
print "%d records" % (count)
start = time.time()
for i, rec in enumerate(records):
if "2spectral" not in rec:
rec["2spectral"] = similarity(source_cache.get(rec["ga"]), target_cache.get(rec["entity"]))
topk.save(rec)
if i % 10 == 0 and i > 0:
elapsed = time.time() - start
estimated = (elapsed / i) * count - elapsed
m, s = divmod(estimated, 60)
h, m = divmod(m, 60)
print "%d of %d records complete (time remaining: %02d:%02d:%02d)" % (i, count, h, m, s)
def main():
db = "july"
twitter = "twitter_nodelink"
twitter_namefield = "user_name"
instagram = "instagram_nodelink"
instagram_namefield = "user_name"
#process(db, twitter, twitter_namefield, instagram, instagram_namefield)
process(db, instagram, instagram_namefield, twitter, twitter_namefield)
if __name__ == "__main__":
sys.exit(main())
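# Worked example of the similarity measure above (no database needed): two
# signatures differing by (det, trace) = (1, 2) give
# 1.01 ** -sqrt(1**2 + 2**2) ~= 0.978, so identical neighborhoods score 1.0 and
# the score decays toward 0 as the signatures drift apart.
#
#   s1 = {"det": 4.0, "trace": 5.0}
#   s2 = {"det": 3.0, "trace": 3.0}
#   similarity(s1, s2)   # ~0.978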
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 14:51:15 2021
@author: crtjur
"""
# from tkinter import *
import tkinter as tk
# class GPIO(tk.Frame):
# """Each GPIO class draws a Tkinter frame containing:
# - A Label to show the GPIO Port Name
# - A data direction spin box to select pin as input or output
# - A checkbox to set an output pin on or off
# - An LED widget to show the pin's current state
# - A Label to indicate the GPIOs current function"""
# gpio_modes = ("Passive","Input","Output")
# def __init__(self,parent,pin=0,name=None,**kw):
# self.pin = pin
# if name == None:
# self.name = "GPIO %02d" % (self.pin)
# tk.Frame.__init__(self,parent,width=150,height=20,relief=tk.SUNKEN,bd=1,padx=5,pady=5)
# ##Future capability
# ##self.bind('<Double-Button-1>', lambda e, s=self: self._configurePin(e.y))
# self.parent = parent
# # self.configure(**kw)
# self.state = False
# self.cmdState = tk.IntVar()
# self.Label = tk.Label(self,text=self.name)
# # # self.mode_sel = tk.Spinbox(self,values=gpio_modes,wrap=True,command=self.setMode)
# # self.mode_sel = tk.Spinbox(self,values=gpio_modes,wrap=True)
# # self.set_state = tk.Checkbutton(self,text="High/Low",variable=self.cmdState,command=self.toggleCmdState)
# # self.led = LED(self,20)
# self.Label.grid(column=0,row=0)
# # self.mode_sel.grid(column=1,row=0)
# # self.set_state.grid(column=2,row=0)
# # self.current_mode = StringVar()
# # self.led.grid(column=3,row=0)
# # self.set_state.config(state=DISABLED)
# # function = self.getPinFunctionName()
# # if function not in ['Input','Output']:
# # self.mode_sel.delete(0,'end')
# # self.mode_sel.insert(0,function)
# # self.mode_sel['state'] = DISABLED
class digitalPin(tk.Frame):
def __init__(self, frame, number):
tk.Frame.__init__(self, frame)
self.number = number
self.Label = tk.Label(frame, text=number)
self.Label.grid(column=0, row=0)
def set_state(self):
pass
def get_state(self):
pass
# create tkinter window
root = tk.Tk()
root.title("Welcome to LikeGeeks app")
root.geometry('460x350')
# create all of the main containers
header_frame = tk.Frame(root, bg='grey', width=450, height=50, pady=3)
top_frame = tk.Frame(root, bg='grey', width=450, height=50, pady=3)
center = tk.Frame(root, bg='gray2', width=50, height=40, padx=3, pady=3)
btm_frame = tk.Frame(root, bg='white', width=450, height=45, pady=3)
btm_frame2 = tk.Frame(root, bg='lavender', width=450, height=60, pady=3)
# layout all of the main containers
root.grid_rowconfigure(1, weight=1)
root.grid_columnconfigure(0, weight=1)
header_frame.grid(row=0, sticky="ew")
top_frame.grid(row=1, sticky="ew")
center.grid(row=2, sticky="nsew")
btm_frame.grid(row=3, sticky="ew")
btm_frame2.grid(row=4, sticky="ew")
gpio_mode = ('input', 'output')
gpio_type = ('digital', 'PWM', 'analog')
# create digital pins 2-14
for i in range(2,14):
gpio_frame = tk.Frame(top_frame, bg='white', pady=3, highlightbackground="black", highlightthickness=1)
gpio_frame.grid(row=i, sticky="ew")
# headers
if not i:
tk.Label(gpio_frame, text='PIN N', width = 5).grid(row=i, column = 0) #, bg = 'grey'
tk.Label(gpio_frame, text='MODE', width = 10).grid(row=i, column = 1)
tk.Label(gpio_frame, text='TYPE', width = 10).grid(row=i, column = 2)
tk.Label(gpio_frame, text='STATUS', width = 10).grid(row=i, column = 3)
tk.Label(gpio_frame, text='VALUE', width = 10).grid(row=i, column = 4)
print('asd')
else:
# create the widgets for the top frame
pin_text = str(i)
pin_label = tk.Label(gpio_frame, text=pin_text, width = 5)
mode_sel = tk.Spinbox(gpio_frame, width = 10, values = gpio_mode, wrap=True)
type_sel = tk.Spinbox(gpio_frame,width = 10, values = gpio_type, wrap=True)
button_label = tk.Button(gpio_frame,width = 10) #height = 100,
value = tk.Entry(gpio_frame,background="pink", width = 10)
# layout the widgets in the top frame
pin_label.grid(row=i, column = 0) # columnspan=1
mode_sel.grid(row=i, column = 1)
type_sel.grid(row=i, column = 2)
button_label.grid(row=i, column = 3)
value.grid(row=i, column=4)
# entry_L.grid(row=1, column=3)
gpio_frame = tk.Frame(top_frame, bg='white', pady=3, highlightbackground="black", highlightthickness=1)
gpio_frame.grid(row=i, sticky="ew")
pin = digitalPin(gpio_frame, number =i)
# main loop
root.mainloop()
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .checks import check_user_id
class User(object):
""" The User class can be used to call user specific functions.
"""
def __init__(self, api, user_id, displayname=None):
check_user_id(user_id)
self.user_id = user_id
self.displayname = displayname
self.api = api
def get_display_name(self):
""" Get this users display name.
See also get_friendly_name()
Returns:
str: Display Name
"""
if not self.displayname:
self.displayname = self.api.get_display_name(self.user_id)
return self.displayname
def get_friendly_name(self):
display_name = self.api.get_display_name(self.user_id)
return display_name if display_name is not None else self.user_id
def set_display_name(self, display_name):
""" Set this users display name.
Args:
display_name (str): Display Name
"""
self.displayname = display_name
return self.api.set_display_name(self.user_id, display_name)
def get_avatar_url(self):
mxcurl = self.api.get_avatar_url(self.user_id)
url = None
if mxcurl is not None:
url = self.api.get_download_url(mxcurl)
return url
def set_avatar_url(self, avatar_url):
""" Set this users avatar.
Args:
avatar_url (str): mxc url from previously uploaded
"""
return self.api.set_avatar_url(self.user_id, avatar_url)
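# Hedged usage sketch (assumes the matrix-python-sdk style MatrixHttpApi client;
# the homeserver URL, token and user id below are placeholders):
#
#   from matrix_client.api import MatrixHttpApi
#   api = MatrixHttpApi("https://matrix.example.org", token="MY_ACCESS_TOKEN")
#   user = User(api, "@alice:example.org")
#   user.get_friendly_name()   # display name if set, otherwise the user id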
|
# test_utils.py
"""Tests for rom_operator_inference.utils.py."""
import pytest
import numpy as np
from scipy import linalg as la
import rom_operator_inference as roi
# utils.lstsq_reg() -----------------------------------------------------------
def _test_lstq_reg_single(k,d,r):
"""Do one test of utils.lstsq_reg()."""
A = np.random.random((k, d))
b = np.random.random(k)
B = np.random.random((k,r))
I = np.eye(d)
# VECTOR TEST
x = la.lstsq(A, b)[0]
# Ensure that the least squares solution is the usual one when P=0.
x_ = roi.utils.lstsq_reg(A, b, P=0)[0]
assert np.allclose(x, x_)
# Ensure that the least squares solution is the usual one when P=0 (matrix)
x_ = roi.utils.lstsq_reg(A, b, np.zeros((d,d)))[0]
assert np.allclose(x, x_)
# Check that the regularized least squares solution has decreased norm.
x_ = roi.utils.lstsq_reg(A, b, P=2)[0]
assert la.norm(x_) <= la.norm(x)
x_ = roi.utils.lstsq_reg(A, b, P=2*I)[0]
assert la.norm(x_) <= la.norm(x)
# MATRIX TEST
X = la.lstsq(A, B)[0]
# Ensure that the least squares solution is the usual one when P=0.
X_ = roi.utils.lstsq_reg(A, B, P=0)[0]
assert np.allclose(X, X_)
# Ensure that the least squares solution is the usual one when P=0 (matrix)
X_ = roi.utils.lstsq_reg(A, B, P=np.zeros((d,d)))[0]
assert np.allclose(X, X_)
# Ensure that the least squares solution is the usual one when P=0 (list)
X_ = roi.utils.lstsq_reg(A, B, P=[np.zeros((d,d))]*r)[0]
assert np.allclose(X, X_)
# Check that the regularized least squares solution has decreased norm.
X_ = roi.utils.lstsq_reg(A, B, P=2)[0]
assert la.norm(X_) <= la.norm(X)
X_ = roi.utils.lstsq_reg(A, B, P=2*I)[0]
assert la.norm(X_) <= la.norm(X)
X_ = roi.utils.lstsq_reg(A, B, P=[2*I]*r)[0]
assert la.norm(X_) <= la.norm(X)
def test_lstsq_reg(n_tests=5):
"""Test utils.lstsq_reg()."""
A = np.random.random((20,10))
B = np.random.random((20, 5))
# Negative regularization parameter not allowed.
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, B, -1)
assert exc.value.args[0] == "regularization parameter must be nonnegative"
# b must be one- or two-dimensional
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, np.random.random((2,2,2)))
assert exc.value.args[0] == "`b` must be one- or two-dimensional"
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, np.array(5))
assert exc.value.args[0] == "`b` must be one- or two-dimensional"
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, B[:,0],
[np.random.random((10,10)) for i in range(5)])
assert exc.value.args[0] == "`b` must be two-dimensional with multiple P"
# Badly shaped regularization matrix.
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, B, np.random.random((5,5)))
assert exc.value.args[0] == \
"P must be (d,d) with d = number of columns of A"
with pytest.raises(ValueError) as exc:
roi.utils.lstsq_reg(A, B,
[np.random.random((10,10)) for i in range(3)])
assert exc.value.args[0] == \
"list P must have r entries with r = number of columns of b"
# Do individual tests.
k, m = 200, 20
for r in np.random.randint(4, 50, n_tests):
s = r*(r+1)//2
_test_lstq_reg_single(k, r+s+m+1, r)
# utils.kron_compact() --------------------------------------------------------
def _test_kron_compact_single_vector(n):
"""Do one vector test of utils.kron_compact()."""
x = np.random.random(n)
x2 = roi.utils.kron_compact(x)
assert x2.ndim == 1
assert x2.shape[0] == n*(n+1)//2
for i in range(n):
assert np.allclose(x2[i*(i+1)//2:(i+1)*(i+2)//2], x[i]*x[:i+1])
def _test_kron_compact_single_matrix(n):
"""Do one matrix test of utils.kron_compact()."""
X = np.random.random((n,n))
X2 = roi.utils.kron_compact(X)
assert X2.ndim == 2
assert X2.shape[0] == n*(n+1)//2
assert X2.shape[1] == n
for i in range(n):
assert np.allclose(X2[i*(i+1)//2:(i+1)*(i+2)//2], X[i]*X[:i+1])
def test_kron_compact(n_tests=100):
"""Test utils.kron_compact()."""
# Try with bad input.
with pytest.raises(ValueError) as exc:
roi.utils.kron_compact(np.random.random((3,3,3)))
assert exc.value.args[0] == "x must be one- or two-dimensional"
# Correct inputs.
for n in np.random.randint(2, 100, n_tests):
_test_kron_compact_single_vector(n)
_test_kron_compact_single_matrix(n)
# utils.kron_col() ------------------------------------------------------------
def _test_kron_col_single_vector(n, m):
"""Do one vector test of utils.kron_col()."""
x = np.random.random(n)
y = np.random.random(m)
xy = roi.utils.kron_col(x, y)
assert xy.ndim == 1
assert xy.shape[0] == x.shape[0] * y.shape[0]
for i in range(n):
assert np.allclose(xy[i*m:(i+1)*m], x[i]*y)
def _test_kron_col_single_matrix(n, k, m):
"""Do one matrix test of utils.kron_col()."""
X = np.random.random((n,k))
Y = np.random.random((m,k))
XY = roi.utils.kron_col(X, Y)
assert XY.ndim == 2
assert XY.shape[0] == X.shape[0] * Y.shape[0]
assert XY.shape[1] == X.shape[1]
for i in range(n):
assert np.allclose(XY[i*m:(i+1)*m], X[i]*Y)
def test_kron_col(n_tests=100):
"""Test utils.kron_compact()."""
# Try with bad inputs.
with pytest.raises(ValueError) as exc:
roi.utils.kron_col(np.random.random(5), np.random.random((5,5)))
assert exc.value.args[0] == \
"x and y must have the same number of dimensions"
x = np.random.random((3,3))
y = np.random.random((3,4))
with pytest.raises(ValueError) as exc:
roi.utils.kron_col(x, y)
assert exc.value.args[0] == "x and y must have the same number of columns"
x, y = np.random.random((2,3,3,3))
with pytest.raises(ValueError) as exc:
roi.utils.kron_col(x, y)
assert exc.value.args[0] == "x and y must be one- or two-dimensional"
for n in np.random.randint(4, 100, n_tests):
m = np.random.randint(2, n)
k = np.random.randint(2, 100)
_test_kron_col_single_vector(n, m)
_test_kron_col_single_matrix(n, k, m)
# utils.expand_Hc() -----------------------------------------------------------
def _test_expand_Hc_single(r):
"""Do one test of utils.expand_Hc()."""
x = np.random.random(r)
# Do a valid expand_Hc() calculation and check dimensions.
s = r*(r+1)//2
Hc = np.random.random((r,s))
H = roi.utils.expand_Hc(Hc)
assert H.shape == (r,r**2)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hc @ roi.utils.kron_compact(x), Hxx)
# Check properties of the tensor for H.
Htensor = H.reshape((r,r,r))
assert np.allclose(Htensor @ x @ x, Hxx)
for subH in H:
assert np.allclose(subH, subH.T)
def test_expand_Hc(n_tests=100):
"""Test utils.expand_Hc()."""
# Try to do expand_Hc() with a bad second dimension.
r = 5
sbad = r*(r+3)//2
Hc = np.random.random((r, sbad))
with pytest.raises(ValueError) as exc:
roi.utils.expand_Hc(Hc)
assert exc.value.args[0] == \
f"invalid shape (r,s) = {(r,sbad)} with s != r(r+1)/2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_expand_Hc_single(r)
# utils.compress_H() ----------------------------------------------------------
def _test_compress_H_single(r):
"""Do one test of utils.expand_Hc()."""
x = np.random.random(r)
# Do a valid compress_H() calculation and check dimensions.
H = np.random.random((r,r**2))
s = r*(r+1)//2
Hc = roi.utils.compress_H(H)
assert Hc.shape == (r,s)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hxx, Hc @ roi.utils.kron_compact(x))
# Check that expand_Hc() and compress_H() are inverses up to symmetry.
H2 = roi.utils.expand_Hc(Hc)
Ht = H.reshape((r,r,r))
Htnew = np.empty_like(Ht)
for l in range(r):
Htnew[l] = (Ht[l] + Ht[l].T) / 2
assert np.allclose(H2, Htnew.reshape(H.shape))
def test_compress_H(n_tests=100):
"""Test utils.expand_Hc()."""
# Try to do compress_H() with a bad second dimension.
r = 5
r2bad = r**2 + 1
H = np.random.random((r, r2bad))
with pytest.raises(ValueError) as exc:
roi.utils.compress_H(H)
assert exc.value.args[0] == \
f"invalid shape (r,a) = {(r,r2bad)} with a != r**2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_compress_H_single(r)
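# Sketch of the compact Kronecker structure the tests above rely on: for a
# length-n vector x, kron_compact(x) keeps only the n*(n+1)//2 unique products
# from the full Kronecker product x⊗x (the lower triangle of the outer product),
# e.g. for n = 2:
#
#   x = np.array([2.0, 3.0])
#   np.kron(x, x)              # [4., 6., 6., 9.]  (x0*x0, x0*x1, x1*x0, x1*x1)
#   # roi.utils.kron_compact(x) stacks x[i]*x[:i+1] for each i:
#   #                            [4., 6., 9.]      (x0*x0, x1*x0, x1*x1)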
|
"""
Setup for different kinds of Goldair climate devices
"""
from . import DOMAIN
from .const import (
CONF_CLIMATE,
CONF_DEVICE_ID,
CONF_TYPE,
CONF_TYPE_AUTO,
CONF_TYPE_DEHUMIDIFIER,
CONF_TYPE_FAN,
CONF_TYPE_GECO_HEATER,
CONF_TYPE_GPCV_HEATER,
CONF_TYPE_GPPH_HEATER,
)
from .dehumidifier.climate import GoldairDehumidifier
from .fan.climate import GoldairFan
from .geco_heater.climate import GoldairGECOHeater
from .gpcv_heater.climate import GoldairGPCVHeater
from .heater.climate import GoldairHeater
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Goldair climate device according to its type."""
data = hass.data[DOMAIN][discovery_info[CONF_DEVICE_ID]]
device = data["device"]
if discovery_info[CONF_TYPE] == CONF_TYPE_AUTO:
discovery_info[CONF_TYPE] = await device.async_inferred_type()
if discovery_info[CONF_TYPE] is None:
raise ValueError(f"Unable to detect type for device {device.name}")
if discovery_info[CONF_TYPE] == CONF_TYPE_GPPH_HEATER:
data[CONF_CLIMATE] = GoldairHeater(device)
elif discovery_info[CONF_TYPE] == CONF_TYPE_DEHUMIDIFIER:
data[CONF_CLIMATE] = GoldairDehumidifier(device)
elif discovery_info[CONF_TYPE] == CONF_TYPE_FAN:
data[CONF_CLIMATE] = GoldairFan(device)
elif discovery_info[CONF_TYPE] == CONF_TYPE_GECO_HEATER:
data[CONF_CLIMATE] = GoldairGECOHeater(device)
elif discovery_info[CONF_TYPE] == CONF_TYPE_GPCV_HEATER:
data[CONF_CLIMATE] = GoldairGPCVHeater(device)
if CONF_CLIMATE in data:
async_add_entities([data[CONF_CLIMATE]])
async def async_setup_entry(hass, config_entry, async_add_entities):
config = {**config_entry.data, **config_entry.options}
discovery_info = {
CONF_DEVICE_ID: config[CONF_DEVICE_ID],
CONF_TYPE: config[CONF_TYPE],
}
await async_setup_platform(hass, {}, async_add_entities, discovery_info)
|
#!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
import random
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
cloud = ros_to_pcl(pcl_msg)
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.003
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
### Statistical Outlier Removal Filter
# Much like the previous filters, we start by creating a filter object:
outlier_filter = cloud_filtered.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 1.0
# Any point with a mean distance larger than global (mean distance+x*std_dev) will be considered outlier
outlier_filter.set_std_dev_mul_thresh(x)
cloud_filtered = outlier_filter.filter()
### PassThrough Filter
# Create a PassThrough filter object first across the Z axis
passThroughZ = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passThroughZ.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passThroughZ.set_filter_limits(axis_min, axis_max)
cloud_filtered = passThroughZ.filter()
## Now, Create a PassThrough filter object across the Y axis
passThroughY = cloud_filtered.make_passthrough_filter()
filter_axis = 'y'
passThroughY.set_filter_field_name(filter_axis)
axis_min = -0.5
axis_max = 0.5
passThroughY.set_filter_limits(axis_min, axis_max)
cloud_filtered = passThroughY.filter()
### RANSAC Plane Segmentation
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.01
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
### Extract inliers and outliers
pcl_table_cloud = cloud_filtered.extract(inliers, negative=False)
pcl_object_cloud = cloud_filtered.extract(inliers, negative=True)
### Euclidean Clustering
# Go from XYZRGB to XYZ since the k-d tree only needs spatial data
white_cloud = XYZRGB_to_XYZ(pcl_object_cloud)
# Apply function to convert XYZRGB to XYZ
tree = white_cloud.make_kdtree()
ec = white_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(0.005)
ec.set_MinClusterSize(150)
ec.set_MaxClusterSize(50000)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
#Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
#Convert PCL data to ROS messages
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
ros_object_cloud = pcl_to_ros(pcl_object_cloud)
ros_table_cloud = pcl_to_ros(pcl_table_cloud)
#Publish ROS messages
pcl_objects_pub.publish(ros_object_cloud)
pcl_table_pub.publish(ros_table_cloud)
pcl_cluster_pub.publish(ros_cluster_cloud)
det_obj_label = []
det_obj = []
# Grab the points for the cluster
for index, pts_list in enumerate(cluster_indices):
pcl_cluster = pcl_object_cloud.extract(pts_list)
ros_cluster = pcl_to_ros(pcl_cluster)
# Compute associated feature vector
color_hist = compute_color_histograms(ros_cluster, using_hsv=True)
norms = get_normals(ros_cluster)
norm_hist = compute_normal_histograms(norms)
# Make prediction
features = np.concatenate((color_hist, norm_hist))
predict = clf.predict(scaler.transform(features.reshape(1,-1)))
label = encoder.inverse_transform(predict)[0]
det_obj_label.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .2
object_markers_pub.publish(make_label(label,label_pos,index))
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
det_obj.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(det_obj_label), det_obj_label))
# Publish the list of detected objects
detected_objects_pub.publish(det_obj)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
true_object_list = rospy.get_param('/object_list')
dropbox_param = rospy.get_param('/dropbox')
true_list = []
for i in range(len(true_object_list)):
true_list.append(true_object_list[i]['name'])
print "\n"
print "Need to find: "
print true_list
print "\n"
trueset = set(true_list)
detset = set(det_obj_label)
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
if detset == trueset:
try:
pr2_mover(det_obj)
except rospy.ROSInterruptException:
pass
else:
rospy.loginfo("Wrong object(s) detected")
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
#Initialize variables
test_scene_num = Int32()
object_name = String()
pick_pose = Pose()
place_pose = Pose()
arm_name = String()
yaml_dict_list = []
test_scene_num.data = 2
dropbox_pose = []
dropbox_name = []
dropbox_group = []
true_object_list = rospy.get_param('/object_list')
dropbox_param = rospy.get_param('/dropbox')
for i in range(0, len(dropbox_param)):
dropbox_pose.append(dropbox_param[i]['position'])
dropbox_group.append(dropbox_param[i]['group'])
centroids = [] # to be list of tuples (x, y, z)
labels = []
for i in range(0, len(true_object_list)):
object_name.data = true_object_list[i]['name' ]
object_group = true_object_list[i]['group']
for j in object_list:
labels.append(j.label)
point_arr = ros_to_pcl(j.cloud).to_array()
avg = np.mean(point_arr, axis=0)[:3]
centroids.append(avg)
if object_group == 'red':
arm_name.data = 'left'
place_pose.position.x = dropbox_pose[0][0] - random.uniform(.05, .1)
place_pose.position.y = dropbox_pose[0][1]
place_pose.position.z = dropbox_pose[0][2]
else:
arm_name.data = 'right'
place_pose.position.x = dropbox_pose[1][0] - random.uniform(.05, .1)
place_pose.position.y = dropbox_pose[1][1]
place_pose.position.z = dropbox_pose[1][2]
try:
index = labels.index(object_name.data)
pick_pose.position.x = np.asscalar(centroids[index][0])
pick_pose.position.y = np.asscalar(centroids[index][1])
pick_pose.position.z = np.asscalar(centroids[index][2])
except ValueError:
continue
# Create a list of dictionaries for later output to yaml format
yaml_dict = make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose)
yaml_dict_list.append(yaml_dict)
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
#Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, arm_name, pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
#Output your request parameters into output yaml file
yaml_filename = 'output_'+str(test_scene_num.data)+'.yaml'
send_to_yaml(yaml_filename, yaml_dict_list)
return
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('clustering', anonymous=True)
# Create Subscribers
pcl_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback,queue_size=1)
# Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2,queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
pr2_base_mover_pub = rospy.Publisher("/pr2/world_joint_controller/command", Float64, queue_size=10)
#Load Model From disk
model = pickle.load(open('model.sav','rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
|
from collections import deque
class ZigzagIterator2:
"""
@param: vecs: a list of 1d vectors
"""
def __init__(self, vecs):
        # do initialization if necessary
self.queue = [deque(vector) for vector in vecs]
"""
@return: An integer
"""
def next(self):
# write your code here
for i in range(len(self.queue)):
if self.queue[i]:
ele = self.queue[i].popleft()
self.queue.append(self.queue.pop(i))
break
return ele
"""
@return: True if has next
"""
def hasNext(self):
# write your code here
for q in self.queue:
if q:
return True
return False
# Your ZigzagIterator2 object will be instantiated and called as such:
# solution, result = ZigzagIterator2(vecs), []
# while solution.hasNext(): result.append(solution.next())
# Output result
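# --- Illustrative sketch (assumption: not part of the original solution) ---
# A quick demonstration of the zigzag order produced by the rotating-queue
# approach above, using three small vectors.
if __name__ == "__main__":
    it = ZigzagIterator2([[1, 2, 3], [4, 5, 6, 7], [8, 9]])
    out = []
    while it.hasNext():
        out.append(it.next())
    # Expected zigzag order: [1, 4, 8, 2, 5, 9, 3, 6, 7]
    print(out)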
|
import re
ranks = {
'S': -1,
'G': -2,
'F': -3,
'O': -4,
'P': -5
}
class KrakenTaxon:
__slots__ = ('percent', 'cum_reads', 'unique_reads', 'rank', 'taxid', 'name', 'indent', 'parent')
def __init__(self, percent, cum_reads, unique_reads, rank, taxid, name, indent):
self.percent = percent
self.cum_reads = cum_reads
self.unique_reads = unique_reads
self.rank = rank
self.taxid = taxid
self.name = name
self.indent = indent
self.parent = None
def __repr__(self):
return 'KrakenTaxon(percent={}, cum_reads={}, unique_reads={}, rank={}, taxid={}, name={})'.format(
self.percent, self.cum_reads, self.unique_reads, self.rank, self.taxid, self.name)
@classmethod
def create_tree(cls, taxons):
indent_stack = {}
for t in taxons:
if t.name == 'unclassified':
yield t
continue
elif t.name == 'root':
indent_stack[t.indent] = t
yield t
else:
t.parent = indent_stack[t.indent - 1]
indent_stack[t.indent] = t
yield t
def read_kraken_report(f):
for line in f:
parts = line.strip().split('\t')
percent = float(parts[0])
cum_reads = int(parts[1])
unique_reads = int(parts[2])
rank = parts[3]
taxid = int(parts[4])
mo = re.search('^( *)(.*)', parts[5])
        indent = len(mo.group(1)) // 2
yield KrakenTaxon(percent, cum_reads, unique_reads, rank, taxid, mo.group(2), indent)
def generate_tree(kraken_taxons):
tree = KrakenTaxon.create_tree(kraken_taxons)
return tree
def process_kraken_report(kraken_report_fn, taxid=None):
with open(kraken_report_fn) as f:
kraken_taxons = read_kraken_report(f)
tree = list(generate_tree(kraken_taxons))
root_taxon = tree[1]
if taxid:
genus_taxon = None
species_taxon = None
for t in tree:
if taxid == t.taxid:
assert t.rank == 'S'
species_taxon = t
tp = t
while True:
tp = tp.parent
if tp.rank == 'G':
genus_taxon = tp
if tp.taxid == 1:
break
percents = []
if species_taxon is not None:
species_percent = species_taxon.percent
else:
species_percent = 0
if genus_taxon is not None:
genus_percent = genus_taxon.percent
else:
genus_percent = 0
try:
percents = ['{:.4f}'.format(x/100.) for x in [species_percent, genus_percent, root_taxon.percent]]
except:
print(kraken_report_fn, taxid)
raise
return [str(taxid)] + percents
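# --- Illustrative usage (assumption: not part of the original script) ---
# 'report.txt' and the taxid below are hypothetical placeholders; the result is
# [taxid, species fraction, genus fraction, root fraction] as strings.
if __name__ == "__main__":
    row = process_kraken_report("report.txt", taxid=562)
    print("\t".join(row))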
|
"""Shows some basic dispatch examples."""
# Local package imports
from lhrhost.messaging.dispatch import Dispatcher
from lhrhost.messaging.presentation import Message, MessagePrinter
def print_dispatcher(dispatcher):
"""Print some deserializations."""
messages = [
Message('e', 123),
Message('e0', 123),
Message('v'),
Message('v0'),
Message('v1'),
Message('v2'),
Message('v3'),
Message('l', 0),
Message('ll', 0),
Message('lb', 1),
Message('lbn', 1),
Message('lbp', 1)
]
for message in messages:
print(message)
dispatcher.on_message(message)
def main():
"""Run some tests of BasicTranslator."""
printer_e = MessagePrinter(prefix=' Printer e: ')
printer_v = MessagePrinter(prefix=' Printer v: ')
printer_l = MessagePrinter(prefix=' Printer l: ')
printer = MessagePrinter(prefix=' Printer: ')
literal_dispatcher = Dispatcher(receivers={
'e': [printer_e],
'v0': [printer_v],
'v1': [printer_v],
'v2': [printer_v],
'l': [printer_l],
'lb': [printer_l],
'lbn': [printer_l],
None: [printer]
})
prefix_dispatcher = Dispatcher(prefix_receivers={
'e': [printer_e],
'v': [printer_v],
'l': [printer_l],
'': [printer]
})
print('Literal dispatcher:')
print_dispatcher(literal_dispatcher)
print('\nPrefix dispatcher:')
print_dispatcher(prefix_dispatcher)
if __name__ == '__main__':
main()
|
# Using batch size 4 instead of 1024 decreases runtime from 35 secs to 4 secs.
# Standard Library
import os
import shutil
import time
from datetime import datetime
# Third Party
import mxnet as mx
import pytest
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
# First Party
from smdebug import SaveConfig, modes
from smdebug.core.access_layer.utils import has_training_ended
from smdebug.core.config_constants import CHECKPOINT_CONFIG_FILE_PATH_ENV_VAR
from smdebug.mxnet.hook import Hook as t_hook
from smdebug.trials import create_trial
def acc(output, label):
return (output.argmax(axis=1) == label.astype("float32")).mean().asscalar()
def run_mnist(
hook=None,
set_modes=False,
num_steps_train=None,
num_steps_eval=None,
epochs=2,
save_interval=None,
save_path="./saveParams",
):
batch_size = 4
normalize_mean = 0.13
mnist_train = datasets.FashionMNIST(train=True)
X, y = mnist_train[0]
("X shape: ", X.shape, "X dtype", X.dtype, "y:", y)
text_labels = [
"t-shirt",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"bag",
"ankle boot",
]
transformer = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(normalize_mean, 0.31)]
)
mnist_train = mnist_train.transform_first(transformer)
mnist_valid = gluon.data.vision.FashionMNIST(train=False)
train_data = gluon.data.DataLoader(
mnist_train, batch_size=batch_size, shuffle=True, num_workers=4
)
valid_data = gluon.data.DataLoader(
mnist_valid.transform_first(transformer), batch_size=batch_size, num_workers=4
)
# Create Model in Gluon
net = nn.HybridSequential()
net.add(
nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
nn.Dense(120, activation="relu"),
nn.Dense(84, activation="relu"),
nn.Dense(10),
)
net.initialize(init=init.Xavier(), ctx=mx.cpu())
if hook is not None:
# Register the forward Hook
hook.register_hook(net)
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})
    if hook is not None:
        hook.register_hook(softmax_cross_entropy)
# Start the training.
for epoch in range(epochs):
train_loss, train_acc, valid_acc = 0.0, 0.0, 0.0
tic = time.time()
if set_modes:
hook.set_mode(modes.TRAIN)
i = 0
for data, label in train_data:
data = data.as_in_context(mx.cpu(0))
# forward + backward
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
# update parameters
trainer.step(batch_size)
# calculate training metrics
train_loss += loss.mean().asscalar()
train_acc += acc(output, label)
i += 1
if num_steps_train is not None and i >= num_steps_train:
break
# calculate validation accuracy
if set_modes:
hook.set_mode(modes.EVAL)
i = 0
for data, label in valid_data:
data = data.as_in_context(mx.cpu(0))
val_output = net(data)
valid_acc += acc(val_output, label)
loss = softmax_cross_entropy(val_output, label)
i += 1
if num_steps_eval is not None and i >= num_steps_eval:
break
print(
"Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec"
% (
epoch,
train_loss / len(train_data),
train_acc / len(train_data),
valid_acc / len(valid_data),
time.time() - tic,
)
)
if save_interval is not None and (epoch % save_interval) == 0:
net.save_parameters("{0}/params_{1}.params".format(save_path, epoch))
@pytest.mark.slow # 0:01 to run
def test_spot_hook():
os.environ[
CHECKPOINT_CONFIG_FILE_PATH_ENV_VAR
] = "./tests/mxnet/test_json_configs/checkpointconfig.json"
checkpoint_path = "/tmp/savedParams"
if not os.path.exists(checkpoint_path):
os.mkdir(checkpoint_path)
save_config = SaveConfig(save_steps=[10, 11, 12, 13, 14, 40, 50, 60, 70, 80])
"""
Run the training for 2 epochs and save the parameter after every epoch.
We expect that steps 0 to 14 will be written.
"""
run_id_1 = "trial_" + datetime.now().strftime("%Y%m%d-%H%M%S%f")
out_dir_1 = "/tmp/newlogsRunTest/" + run_id_1
hook = t_hook(
out_dir=out_dir_1, save_config=save_config, include_collections=["weights", "gradients"]
)
assert has_training_ended(out_dir_1) == False
run_mnist(
hook=hook,
num_steps_train=10,
num_steps_eval=10,
epochs=2,
save_interval=1,
save_path=checkpoint_path,
)
"""
Run the training again for 4 epochs and save the parameter after every epoch.
    We DO NOT expect that steps 0 to 14 are written.
    We expect to read steps 40, 50, 60, 70 and 80.
"""
run_id_2 = "trial_" + datetime.now().strftime("%Y%m%d-%H%M%S%f")
out_dir_2 = "/tmp/newlogsRunTest/" + run_id_2
hook = t_hook(
out_dir=out_dir_2, save_config=save_config, include_collections=["weights", "gradients"]
)
assert has_training_ended(out_dir_2) == False
run_mnist(
hook=hook,
num_steps_train=10,
num_steps_eval=10,
epochs=4,
save_interval=1,
save_path=checkpoint_path,
)
    # Unset the environment variable before validation so that it won't affect the other scripts in the pytest environment.
del os.environ[CHECKPOINT_CONFIG_FILE_PATH_ENV_VAR]
# Validation
print("Created the trial with out_dir {0} for the first training".format(out_dir_1))
tr = create_trial(out_dir_1)
assert tr
available_steps_1 = tr.steps()
assert 40 not in available_steps_1
assert 80 not in available_steps_1
print(available_steps_1)
print("Created the trial with out_dir {0} for the second training".format(out_dir_2))
tr = create_trial(out_dir_2)
assert tr
available_steps_2 = tr.steps()
assert 40 in available_steps_2
assert 50 in available_steps_2
assert 60 in available_steps_2
assert 70 in available_steps_2
assert 80 in available_steps_2
assert 0 not in available_steps_2
assert 10 not in available_steps_2
assert 11 not in available_steps_2
assert 12 not in available_steps_2
print(available_steps_2)
print("Cleaning up.")
shutil.rmtree(os.path.dirname(out_dir_1))
shutil.rmtree(checkpoint_path, ignore_errors=True)
|
import math
import money
class adpackmoney:
def __init__(self, bal, advert):
self.advert = advert
self.balance = bal
class adpack:
def __init__(self, worth, course, args):
self.worth = worth
self.course = course
self.args = args
self.earning = adpackmoney(0.0, 0.0)
    def getDistributedEarnings(self, amount):
        dollars = money.money(self.args)
        self.earning = adpackmoney(dollars.CalcBalance(amount), dollars.CalcWinningsAdAcc(amount))
        return self.earning
    def GetDailyEarning(self):
        if self.worth >= self.course:
            self.worth -= self.course
            return self.getDistributedEarnings(self.course)
        holder = self.worth
        self.worth = 0
        return self.getDistributedEarnings(holder)
class adpacks:
def __init__(self, adpacks, bal, advert, worth, course, args, date):
self.balance = float(bal)
self.total = float(bal)
self.advert = float(advert)
self.date = date
self.worth = float(worth)
self.course = float(course)
self.args = args
self.adpacks = self.genAdpacks(int(adpacks))
self.totalsub = 0
self.totalBought = int(adpacks)
def genAdpacks(self, amount):
i = 0
arr = []
while i < amount:
arr.append(adpack(self.worth, self.course, self.args))
i += 1
return arr
def getAdpacksInXDays(self, days):
self.SimulateXDaysReinvest(days)
return len(self.adpacks)
def SimulateXDaysReinvest(self, days):
i = 0
self.total = self.balance
_money = money.money(self.args)
while i < days:
# gain the money.money from your adpacks
j = 0
while j < len(self.adpacks):
earnings = self.adpacks[j].GetDailyEarning()
self.balance += earnings.balance
self.advert += earnings.advert
self.total += earnings.balance
if self.args.payoutprofit:
substract = _money.CalcWinningsBalance(earnings.balance)
self.balance -= substract
self.totalsub += substract
                if self.adpacks[j].worth <= 0:
                    del self.adpacks[j]
                else:
                    j += 1
# check to see if you can buy a new adpack
if self.balance > 50:
amount = math.floor(self.balance / 50)
if amount > (1000 - len(self.adpacks)):
amount = 1000 - len(self.adpacks)
k = 0
while k < amount:
self.adpacks.append(adpack(self.worth, self.course, self.args))
self.totalBought+=1
k += 1
self.balance -= (50.0 * amount)
i += 1
self.printInfo()
def SimulateXDaysNoReinvest(self, days):
i = 0
while i < days:
# gain the money.money from your adpacks
j = 0
while j < len(self.adpacks):
earnings = self.adpacks[j].GetDailyEarning()
self.balance += earnings.balance
self.advert += earnings.advert
self.total += earnings.balance
                if self.adpacks[j].worth <= 0:
                    del self.adpacks[j]
                else:
                    j += 1
i += 1
return self.balance
def predictAdpacks(self):
if self.args.days and self.args.adpacks and self.args.balance:
f_date = self.args.date
Newdate = self.date.getDateFromString(f_date)
days = self.date.getDaysFromNow(Newdate)
if self.args.reinvest:
amount = self.getAdpacksInXDays(days)
print("You will have " + str(amount) + " adpacks in " + str(days) + " days")
self.TotalDailyProfit(int(self.args.adpacks),amount)
elif self.args.printbalance:
amount = self.SimulateXDaysNoReinvest(days)
print("After " + str(days) + " days you will have earned " + str(amount) + " dollar")
if self.args.total:
print("And your total accumulated balance is the same")
money.money(self.args).profit(True, amount, days)
def TotalDailyProfit(self,original,now):
if self.args.totalprofit:
MoneyVar = money.money(self.args)
delta = now - original #adpacks that purely generate profit
originalBal = MoneyVar.CalcWinningsBalance(self.calcAmountFromAdpack(original)) #calculate the profit the original adpacks generate
deltaBal = self.calcAmountFromAdpack(delta) - MoneyVar.CalcWinningsAdAcc(self.calcAmountFromAdpack(delta))
total = originalBal + deltaBal
print("The total profit you gain each day is " + str(total) + " dollars know that this will slowly remove your adpacks until you have " + str(original) + " left.")
def AdpackProfit(self):
moneyVar = money.money(self.args)
if self.args.adpacks:
adpacks = int(self.args.adpacks)
winnings = self.calcAmountFromAdpack(adpacks)
bal = winnings - moneyVar.CalcWinningsAdAcc(winnings)
add = moneyVar.CalcWinningsAdAcc(winnings)
print("Your daily profit will be " + str(winnings) + " dollars")
print("Your daily advertising account profit will be " + str(add) + " dollars")
money.money(self.args).profit(self.args.profit, bal, 1)
def CalcBalancefromWinning(self, winning):
moneyVar = money.money(self.args)
# profit of one adpack
winst_one = moneyVar.profitOnePack()
return int(winning / winst_one) + 1
def calcAmountFromAdpack(self, packs):
if self.args.holiday:
return packs * float(self.args.course) * 0.9
return packs * float(self.args.course)
def calcAmountFromAdpackNOHd(self, packs):
return packs * float(self.args.course)
def targetProfit(self):
moneyVar = money.money(self.args)
if self.args.targetprofit:
#adpacks = float(self.args.targetprofit) / (((((float(self.args.course)-h) / 60.0) * (57.0)) / 120) * (20.0-h2))
adpacks = self.CalcBalancefromWinning(float(self.args.targetprofit))
winnings = self.calcAmountFromAdpackNOHd(adpacks)
bal = winnings - moneyVar.CalcWinningsAdAcc(winnings)
profit = moneyVar.CalcWinningsBalance(winnings)
print("You will need a minimum of " + str((adpacks)) + " adpacks this will generate " + str(
self.calcAmountFromAdpack(adpacks)) + " dollars and " + str(profit) + " dollars of profit daily")
def amountOver(self, value, earning):
rest = value / earning
Irest = int(rest)
over = rest - float(Irest)
reverse = 1.0 - over
return earning * reverse
def predictTime(self):
h = 0.0
if self.args.holiday:
h = float(self.args.course) * 0.1
if self.args.extra and self.args.adpacks:
adpacks = int(self.args.adpacks)
required = 50.0
DailyProfit = adpacks * ((float(self.args.course) - h) / 60.0) * 57.0
amount = math.floor(required / DailyProfit) + 1
if DailyProfit < 50.0:
rest = self.amountOver(50.0, DailyProfit)
else:
rest = DailyProfit - 50.0
print("In " + str(amount) + " days you will be able to buy an extra adpack and will have " + str(
rest) + " spare dollars")
def printInfo(self):
if self.args.printbalance:
print("your balance is " + str(self.balance))
if self.args.total:
print("your total accumulated balance is: " + str(self.total))
if self.args.printadvert:
print("Your advertisment balance is: " + str(self.advert))
if self.args.payoutprofit:
print("Your payout is " + str(self.totalsub))
if self.args.totalbought:
print("The total amount of adpacks you have bought is: " + str(self.totalBought))
|
import os
from pytest_shutil import env, run
TEMP_NAME = 'JUNK123_456_789'
def test_set_env_ok_if_exists():
ev = os.environ[TEMP_NAME] = 'junk_name'
try:
with env.set_env(TEMP_NAME, 'anything'):
out = run.run('env', capture_stdout=True)
for o in out.split('\n'):
if o.startswith(TEMP_NAME):
assert o == '%s=anything' % TEMP_NAME
break
else:
assert False, '%s not found in os.environ' % TEMP_NAME
assert os.environ[TEMP_NAME] == ev
finally:
if TEMP_NAME in os.environ:
del os.environ[TEMP_NAME]
def test_set_env_ok_if_not_exists():
if TEMP_NAME in os.environ:
del os.environ[TEMP_NAME]
with env.set_env(TEMP_NAME, 'anything'):
out = run.run('env', capture_stdout=True)
for o in out.split('\n'):
if o.startswith(TEMP_NAME):
assert o == '%s=anything' % TEMP_NAME
break
else:
assert False, '%s not found in os.environ' % TEMP_NAME
def test_no_env_ok_if_exists():
ev = os.environ[TEMP_NAME] = 'junk_name'
try:
with env.no_env(TEMP_NAME):
out = run.run('env', capture_stdout=True)
for o in out.split('\n'):
if o.startswith(TEMP_NAME):
assert False, '%s found in os.environ' % TEMP_NAME
assert os.environ[TEMP_NAME] == ev
finally:
if TEMP_NAME in os.environ:
del os.environ[TEMP_NAME]
def test_no_env_ok_if_not_exists():
if TEMP_NAME in os.environ:
del os.environ[TEMP_NAME]
with env.no_env(TEMP_NAME):
out = run.run('env', capture_stdout=True)
for o in out.split('\n'):
if o.startswith(TEMP_NAME):
assert False, '%s found in os.environ' % TEMP_NAME
assert TEMP_NAME not in os.environ
|
def summation(num):
    return sum(range(1, num + 1))
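# --- Illustrative check (assumption: not part of the original snippet) ---
# summation(n) is the n-th triangular number, equal to n * (n + 1) // 2.
if __name__ == "__main__":
    assert summation(5) == 15
    assert summation(100) == 100 * 101 // 2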
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-20 23:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import portfolio.models
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('publish', models.BooleanField(default=False)),
('name', models.CharField(max_length=140)),
('caption', models.CharField(blank=True, max_length=140)),
('image', models.ImageField(upload_to=portfolio.models.imageLocation)),
('imgType', models.CharField(choices=[('gal', 'Gallery')], default='gal', max_length=3)),
('order', models.PositiveIntegerField(default=0)),
('dateCreated', models.DateField(auto_now_add=True)),
],
options={
'ordering': ['order', 'dateCreated'],
},
),
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('link', models.URLField()),
('order', models.PositiveIntegerField(default=0)),
],
options={
'ordering': ['order', 'name'],
},
),
migrations.CreateModel(
name='LinkCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('image', models.ImageField(null=True, upload_to=portfolio.models.imageLocation)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('publish', models.BooleanField(default=False)),
('name', models.CharField(max_length=140)),
('smallDescription', models.CharField(blank=True, max_length=140)),
('description', models.TextField(blank=True)),
('image', models.ImageField(upload_to=portfolio.models.imageLocation)),
('order', models.PositiveIntegerField(default=0)),
('date', models.DateField(null=True)),
('dateCreated', models.DateField(auto_now_add=True)),
('dateUpdated', models.DateField(auto_now=True)),
],
options={
'ordering': ['order', 'name'],
},
),
migrations.CreateModel(
name='ProjectCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('order', models.PositiveIntegerField(default=0)),
('smallDescription', models.CharField(blank=True, max_length=140)),
('description', models.TextField(blank=True)),
('image', models.ImageField(null=True, upload_to='')),
],
options={
'ordering': ['order', 'name'],
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('publish', models.BooleanField(default=False)),
('name', models.CharField(max_length=140)),
('caption', models.CharField(blank=True, max_length=140)),
('video', models.FileField(upload_to=portfolio.models.videoLocation)),
('order', models.PositiveIntegerField(default=0)),
('dateCreated', models.DateField(auto_now_add=True)),
],
options={
'ordering': ['order', 'dateCreated'],
},
),
migrations.AddField(
model_name='project',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.ProjectCategory'),
),
migrations.AddField(
model_name='project',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='link',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.LinkCategory'),
),
]
|
from Rationale_Analysis.models.rationale_extractors.base_rationale_extractor import RationaleExtractor
from allennlp.models.model import Model
import math
import numpy as np
@Model.register("max_length")
class MaxLengthRationaleExtractor(RationaleExtractor) :
def __init__(self, max_length_ratio: float) :
self._max_length_ratio = max_length_ratio
super().__init__()
def forward(self, attentions, metadata) :
rationales = self.extract_rationale(attentions=attentions, metadata=metadata)
output_dict = {'metadata' : metadata, 'rationale' : rationales}
return output_dict
def extract_rationale(self, attentions, metadata):
# attentions : (B, L), metadata: List[Dict] of size B
cumsumed_attention = attentions.cumsum(-1)
sentences = [x["tokens"] for x in metadata]
rationales = []
for b in range(cumsumed_attention.shape[0]):
attn = cumsumed_attention[b]
sentence = [x.text for x in sentences[b]]
best_v = np.zeros((len(sentence),))
max_length = math.ceil(len(sentence) * self._max_length_ratio)
for i in range(0, len(sentence) - max_length + 1):
j = i + max_length
best_v[i] = attn[j - 1] - (attn[i - 1] if i - 1 >= 0 else 0)
index = np.argmax(best_v)
i, j, v = index, index + max_length, best_v[index]
top_ind = list(range(i, j))
rationales.append({
'document' : " ".join([x for idx, x in enumerate(sentence) if idx in top_ind]),
'spans' : [{'span' : (i, j), 'value' : float(v)}],
'metadata' : None
})
return rationales
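# --- Illustrative sketch (assumption: not part of the original module) ---
# extract_rationale above uses a cumulative sum so that the attention mass of a
# span [i, j) is cumsum[j - 1] - cumsum[i - 1]. A minimal numpy-only
# demonstration of the same window search on a made-up attention vector:
if __name__ == "__main__":
    attn = np.array([0.05, 0.10, 0.40, 0.35, 0.05, 0.05])
    max_length = 2
    cumsum = attn.cumsum()
    best_v = np.zeros(len(attn))
    for i in range(0, len(attn) - max_length + 1):
        j = i + max_length
        best_v[i] = cumsum[j - 1] - (cumsum[i - 1] if i - 1 >= 0 else 0)
    start = int(np.argmax(best_v))
    # Expected: the span covering indices 2 and 3, with mass 0.75
    print("best span:", (start, start + max_length), "mass:", best_v[start])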
|
from wand.contrib.linux import get_hostname
from wand.apps.relations.kafka_relation_base import KafkaRelationBase
__all__ = [
"KafkaSRURLNotSetError",
"KafkaSchemaRegistryRelation",
"KafkaSchemaRegistryProvidesRelation",
"KafkaSchemaRegistryRequiresRelation",
]
class KafkaSRURLNotSetError(Exception):
def __init__(self,
message="Missing URL to connect to Schema Registry."):
super().__init__(message)
class KafkaSchemaRegistryRelation(KafkaRelationBase):
def __init__(self, charm, relation_name,
user="", group="", mode=0):
super().__init__(charm, relation_name, user, group, mode)
@property
def get_schema_url(self):
if "url" not in self.relation.data[self.model.app]:
raise KafkaSRURLNotSetError()
return self.relation.data[self.model.app]["url"]
def on_schema_registry_relation_joined(self, event):
pass
def on_schema_registry_relation_changed(self, event):
pass
class KafkaSchemaRegistryProvidesRelation(KafkaSchemaRegistryRelation):
def __init__(self, charm, relation_name,
user="", group="", mode=0):
super().__init__(charm, relation_name, user, group, mode)
self._clientauth = False
@property
def schema_url(self):
if not self.relations:
return
for r in self.relations:
if "url" in r.data[self.model.app]:
return r.data[self.model.app]["url"]
@schema_url.setter
def schema_url(self, u):
if not self.relations:
return
if not self.unit.is_leader():
return
for r in self.relations:
if u != r.data[self.model.app].get("url", ""):
r.data[self.model.app]["url"] = u
def set_converter(self, converter):
if not self.relations:
return
if not self.unit.is_leader():
return
for r in self.relations:
if converter != r.data[self.model.app].get("converter", ""):
r.data[self.model.app]["converter"] = converter
def set_enhanced_avro_support(self, enhanced_avro):
if not self.relations:
return
if not self.unit.is_leader():
return
a = str(enhanced_avro)
for r in self.relations:
if a != r.data[self.model.app].get("enhanced_avro", ""):
r.data[self.model.app]["enhanced_avro"] = a
def set_schema_url(self, url, port, prot):
"""DEPRECATED
Use schema_url instead"""
if not self.relations:
return
if not self.unit.is_leader():
return
# URL is set to config on schema registry, then the same value will
# be passed by each of the schema registry instances. On the requester
# side, the value collected is put on a set, which will end as one
# single URL.
for r in self.relations:
r.data[self.model.app]["url"] = \
"{}://{}:{}".format(
prot if len(prot) > 0 else "https",
url if len(url) > 0 else get_hostname(
self.advertise_addr),
port)
    def set_client_auth(self, clientauth):
        if not self.relation:
            return
        self._clientauth = clientauth
        c = str(self._clientauth)
        if c != self.relation.data[self.unit].get("client_auth", ""):
            self.relation.data[self.unit]["client_auth"] = c
class KafkaSchemaRegistryRequiresRelation(KafkaSchemaRegistryRelation):
def __init__(self, charm, relation_name,
user="", group="", mode=0):
super().__init__(charm, relation_name, user, group, mode)
@property
def converter(self):
return self.get_param("converter")
@property
def enhanced_avro(self):
return self.get_param("enhanced_avro")
@property
def url(self):
# Set is used to avoid repetitive URLs if schema_url config
# is set instead of get_hostname of each advertise_addr
return self.get_param("url")
def get_param(self, param):
if not self.relations:
return None
for r in self.relations:
if param in r.data[r.app]:
return r.data[r.app][param]
return None
def generate_configs(self,
ts_path,
ts_pwd,
enable_ssl,
ks_path,
ks_pwd):
if not self.relations:
return None
sr_props = {}
if len(ts_path) > 0:
sr_props["schema.registry.ssl.truststore.location"] = ts_path
sr_props["schema.registry.ssl.truststore.password"] = ts_pwd
if enable_ssl:
sr_props["schema.registry.ssl.key.password"] = ks_pwd
sr_props["schema.registry.ssl.keystore.location"] = ks_path
sr_props["schema.registry.ssl.keystore.password"] = ks_pwd
sr_props["schema.registry.url"] = self.url
return sr_props if len(sr_props) > 0 else None
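# --- Illustrative usage (assumption: not part of the original module) ---
# Sketch of how a requiring charm might build client properties; the relation
# name, paths and passwords below are hypothetical placeholders, and `self`
# stands for the charm instance.
#
#   sr = KafkaSchemaRegistryRequiresRelation(self, "schema-registry")
#   props = sr.generate_configs(
#       ts_path="/var/ssl/truststore.jks",
#       ts_pwd="changeme",
#       enable_ssl=True,
#       ks_path="/var/ssl/keystore.jks",
#       ks_pwd="changeme",
#   )
#   # props maps Java property names such as "schema.registry.url" to values.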
|
#-*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import sys, os.path
pkg_dir = os.path.dirname(os.path.realpath(__file__)) + '/../../'
sys.path.append(pkg_dir)
from scipy.stats import t
from collections import Counter
from MPBNP import *
np.set_printoptions(suppress=True)
class CollapsedGibbs(BaseSampler):
def __init__(self, cl_mode = True, alpha = 1.0, cl_device = None, record_best = True):
"""Initialize the class.
"""
BaseSampler.__init__(self, record_best, cl_mode, cl_device)
if cl_mode:
program_str = open(pkg_dir + 'MPBNP/crp/kernels/crp_cl.c', 'r').read()
self.prg = cl.Program(self.ctx, program_str).build()
# set some prior hyperparameters
self.alpha = np.float32(alpha)
def read_csv(self, filepath, header=True):
"""Read the data from a csv file.
"""
BaseSampler.read_csv(self, filepath, header)
# convert the data to floats
self.new_obs = []
for row in self.obs:
self.new_obs.append([float(_) for _ in row])
self.obs = np.array(self.new_obs).astype(np.float32)
try: self.dim = np.int32(self.obs.shape[1])
except IndexError: self.dim = np.int32(1)
if self.dim == 1:
self.gamma_alpha0, self.gamma_beta0 = np.float32(1.0), np.float32(1.0)
self.gaussian_mu0, self.gaussian_k0 = np.float32(0.0), np.float32(0.001)
else:
self.wishart_v0 = np.float32(self.dim)
self.wishart_T0 = np.identity(self.dim, dtype=np.float32)
self.gaussian_mu0 = np.zeros(self.dim, dtype=np.float32)
self.gaussian_k0 = np.float32(0.001)
if self.cl_mode:
self.d_obs = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = self.obs)
return
def do_inference(self, init_labels = None, output_file = None):
"""Perform inference on the given observations assuming
data are generated by a Gaussian CRP Mixture Model.
"""
BaseSampler.do_inference(self, output_file)
if init_labels is None:
init_labels = np.random.randint(low = 0, high = min(self.N, 10), size = self.N).astype(np.int32)
else:
init_labels = init_labels.astype(np.int32)
if output_file is not None:
print(*(['d%d' % _ for _ in xrange(self.N)]), file = output_file, sep=',')
if self.cl_mode:
if self.dim == 1:
timing_stats = self.cl_infer_1dgaussian(init_labels = init_labels, output_file = output_file)
else:
timing_stats = self.cl_infer_kdgaussian(init_labels = init_labels, output_file = output_file)
else:
if self.dim == 1:
timing_stats = self.infer_1dgaussian(init_labels = init_labels, output_file = output_file)
else:
timing_stats = self.infer_kdgaussian(init_labels = init_labels, output_file = output_file)
if self.record_best and output_file:
print(*self.best_sample[0], file=output_file, sep=',')
return timing_stats
def infer_1dgaussian(self, init_labels, output_file = None):
"""Perform inference on class labels assuming data are 1-d gaussian,
without OpenCL acceleration.
"""
total_time = 0
a_time = time()
cluster_labels = init_labels
if self.record_best: self.auto_save_sample(cluster_labels)
for i in xrange(self.niter):
# identify existing clusters and generate a new one
uniq_labels = np.unique(cluster_labels)
_, _, new_cluster_label = smallest_unused_label(uniq_labels)
uniq_labels = np.hstack((new_cluster_label, uniq_labels))
# compute the sufficient statistics of each cluster
logpost = np.empty((self.N, uniq_labels.shape[0]))
for label_index in xrange(uniq_labels.shape[0]):
label = uniq_labels[label_index]
if label == new_cluster_label:
n, mu, var = 0, 0, 0
else:
cluster_obs = self.obs[np.where(cluster_labels == label)]
n = cluster_obs.shape[0]
mu = np.mean(cluster_obs)
var = np.var(cluster_obs)
k_n = self.gaussian_k0 + n
mu_n = (self.gaussian_k0 * self.gaussian_mu0 + n * mu) / k_n
alpha_n = self.gamma_alpha0 + n / 2
beta_n = self.gamma_beta0 + 0.5 * var * n + \
self.gaussian_k0 * n * (mu - self.gaussian_mu0) ** 2 / (2 * k_n)
Lambda = alpha_n * k_n / (beta_n * (k_n + 1))
t_frozen = t(df = 2 * alpha_n, loc = mu_n, scale = (1 / Lambda) ** 0.5)
logpost[:,label_index] = t_frozen.logpdf(self.obs[:,0])
logpost[:,label_index] += np.log(n/(self.N + self.alpha)) if n > 0 else np.log(self.alpha/(self.N+self.alpha))
# sample and implement the changes
temp_cluster_labels = np.empty(cluster_labels.shape, dtype=np.int32)
for j in xrange(self.N):
target_cluster = sample(a = uniq_labels, p = lognormalize(logpost[j]))
temp_cluster_labels[j] = target_cluster
if self.record_best:
if self.auto_save_sample(temp_cluster_labels):
cluster_labels = temp_cluster_labels
if self.no_improvement(500):
break
else:
if i >= self.burnin and i % self.thining == 0:
print(*temp_cluster_labels, file = output_file, sep=',')
self.total_time += time() - a_time
return self.gpu_time, self.total_time, Counter(cluster_labels).most_common()
def cl_infer_1dgaussian(self, init_labels, output_file = None):
"""Implementing concurrent sampling of class labels with OpenCL.
"""
total_a_time = time()
cluster_labels = init_labels
if self.record_best: self.auto_save_sample(cluster_labels)
gpu_a_time = time()
d_hyper_param = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.array([self.gaussian_mu0, self.gaussian_k0,
self.gamma_alpha0, self.gamma_beta0, self.alpha]).astype(np.float32))
d_labels = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = cluster_labels)
self.gpu_time += time() - gpu_a_time
for i in xrange(self.niter):
uniq_labels = np.unique(cluster_labels)
_, _, new_cluster_label = smallest_unused_label(uniq_labels)
uniq_labels = np.hstack((new_cluster_label, uniq_labels)).astype(np.int32)
suf_stats = np.empty((uniq_labels.shape[0], 4))
for label_index in xrange(uniq_labels.shape[0]):
label = uniq_labels[label_index]
if label == new_cluster_label:
suf_stats[label_index] = (label, 0, 0, 0)
else:
cluster_obs = self.obs[np.where(cluster_labels == label)]
cluster_mu = np.mean(cluster_obs)
cluster_ss = np.var(cluster_obs) * cluster_obs.shape[0]
suf_stats[label_index] = (label, cluster_mu, cluster_ss, cluster_obs.shape[0])
gpu_a_time = time()
d_uniq_label = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = uniq_labels)
d_mu = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = suf_stats[:,1].astype(np.float32))
d_ss = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = suf_stats[:,2].astype(np.float32))
d_n = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = suf_stats[:,3].astype(np.int32))
d_logpost = cl.array.empty(self.queue,(self.obs.shape[0], uniq_labels.shape[0]), np.float32, allocator=self.mem_pool)
d_rand = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.random.random(self.obs.shape).astype(np.float32))
if self.device_type == cl.device_type.CPU:
self.prg.normal_1d_logpost_loopy(self.queue, self.obs.shape, None,
d_labels, self.d_obs, d_uniq_label, d_mu, d_ss, d_n,
np.int32(uniq_labels.shape[0]), d_hyper_param, d_rand,
d_logpost.data)
else:
self.prg.normal_1d_logpost(self.queue, (self.obs.shape[0], uniq_labels.shape[0]), None,
d_labels, self.d_obs, d_uniq_label, d_mu, d_ss, d_n,
np.int32(uniq_labels.shape[0]), d_hyper_param, d_rand,
d_logpost.data)
self.prg.resample_labels(self.queue, (self.obs.shape[0],), None,
d_labels, d_uniq_label, np.int32(uniq_labels.shape[0]),
d_rand, d_logpost.data)
temp_cluster_labels = np.empty(cluster_labels.shape, dtype=np.int32)
cl.enqueue_copy(self.queue, temp_cluster_labels, d_labels)
self.gpu_time += time() - gpu_a_time
if self.record_best:
if self.auto_save_sample(temp_cluster_labels):
cluster_labels = temp_cluster_labels
if self.no_improvement():
break
else:
if i >= self.burnin and i % self.thining == 0:
print(*temp_cluster_labels, file = output_file, sep=',')
self.total_time += time() - total_a_time
return self.gpu_time, self.total_time, Counter(cluster_labels).most_common()
def infer_kdgaussian(self, init_labels, output_file = None):
"""Implementing concurrent sampling of partition labels without OpenCL.
"""
a_time = time()
cluster_labels = init_labels
for i in xrange(self.niter):
# at the beginning of each iteration, identify the unique cluster labels
uniq_labels = np.unique(cluster_labels)
_, _, new_cluster_label = smallest_unused_label(uniq_labels)
uniq_labels = np.hstack((new_cluster_label, uniq_labels))
# compute the sufficient statistics of each cluster
n = np.empty(uniq_labels.shape)
mu = np.empty((uniq_labels.shape[0], self.dim))
cov_mu0 = np.empty((uniq_labels.shape[0], self.dim, self.dim))
cov_obs = np.empty((uniq_labels.shape[0], self.dim, self.dim))
logpost = np.empty((self.N, uniq_labels.shape[0]))
for label_index in xrange(uniq_labels.shape[0]):
label = uniq_labels[label_index]
if label == new_cluster_label:
cov_obs[label_index], cov_mu0[label_index] = (0,0)
mu[label_index], n[label_index] = (0,0)
else:
cluster_obs = self.obs[np.where(cluster_labels == label)]
mu[label_index] = np.mean(cluster_obs, axis = 0)
obs_deviance = cluster_obs - mu[label_index]
                    mu0_deviance = np.reshape(self.gaussian_mu0 - mu[label_index], (self.dim, 1))
cov_obs[label_index] = np.dot(obs_deviance.T, obs_deviance)
cov_mu0[label_index] = np.dot(mu0_deviance, mu0_deviance.T)
n[label_index] = cluster_obs.shape[0]
kn = self.gaussian_k0 + n[label_index]
vn = self.wishart_v0 + n[label_index]
sigma = (self.wishart_T0 + cov_obs[label_index] + (self.gaussian_k0 * n[label_index]) / kn * cov_mu0[label_index]) * (kn + 1) / kn / (vn - self.dim + 1)
det = np.linalg.det(sigma)
inv = np.linalg.inv(sigma)
df = vn - self.dim + 1
logpost[:,label_index] = math.lgamma(df / 2.0 + self.dim / 2.0) - math.lgamma(df / 2.0) - 0.5 * np.log(det) - 0.5 * self.dim * np.log(df * math.pi) - \
0.5 * (df + self.dim) * np.log(1.0 + (1.0 / df) * np.dot(np.dot(self.obs - mu[label_index], inv), (self.obs - mu[label_index]).T).diagonal())
logpost[:,label_index] += np.log(n[label_index]) if n[label_index] > 0 else np.log(self.alpha)
# resample the labels and implement the changes
temp_cluster_labels = np.empty(cluster_labels.shape, dtype=np.int32)
for j in xrange(self.N):
target_cluster = sample(a = uniq_labels, p = lognormalize(logpost[j]))
temp_cluster_labels[j] = target_cluster
if self.record_best:
if self.auto_save_sample(temp_cluster_labels):
cluster_labels = temp_cluster_labels
if self.no_improvement():
break
else:
if i >= self.burnin and i % self.thining == 0:
print(*temp_cluster_labels, file = output_file, sep=',')
self.total_time += time() - a_time
return self.gpu_time, self.total_time, Counter(cluster_labels).most_common()
def cl_infer_kdgaussian(self, init_labels, output_file = None):
"""Implementing concurrent sampling of class labels with OpenCL.
"""
total_time = time()
# set some prior hyperparameters
d_T0 = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = self.wishart_T0.astype(np.float32))
cluster_labels = init_labels.astype(np.int32)
# push initial labels onto the openCL device
d_labels = cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf = cluster_labels)
for i in xrange(self.niter):
if output_file is not None and i >= self.burnin:
print(*cluster_labels, file = output_file, sep = ',')
# at the beginning of each iteration, identity the unique cluster labels
uniq_labels = np.unique(cluster_labels)
_, _, new_cluster_label = smallest_unused_label(uniq_labels)
uniq_labels = np.hstack((new_cluster_label, uniq_labels)).astype(np.int32)
num_of_clusters = np.int32(uniq_labels.shape[0])
# compute the sufficient statistics of each cluster
h_n = np.empty(uniq_labels.shape).astype(np.int32)
h_mu = np.empty((uniq_labels.shape[0], self.dim)).astype(np.float32)
h_cov_mu0 = np.empty((uniq_labels.shape[0], self.dim, self.dim)).astype(np.float32)
h_cov_obs = np.empty((uniq_labels.shape[0], self.dim, self.dim)).astype(np.float32)
h_sigma = np.empty((uniq_labels.shape[0], self.dim, self.dim)).astype(np.float32)
for label_index in xrange(uniq_labels.shape[0]):
label = uniq_labels[label_index]
if label == new_cluster_label:
h_cov_obs[label_index], h_cov_mu0[label_index] = (0,0)
h_mu[label_index], h_n[label_index] = (0,0)
else:
cluster_obs = self.obs[np.where(cluster_labels == label)]
h_mu[label_index] = np.mean(cluster_obs, axis = 0)
obs_deviance = cluster_obs - h_mu[label_index]
mu0_deviance = np.reshape(self.gaussian_mu0 - h_mu[label_index], (self.dim, 1))
h_cov_obs[label_index] = np.dot(obs_deviance.T, obs_deviance)
h_cov_mu0[label_index] = np.dot(mu0_deviance, mu0_deviance.T)
h_n[label_index] = cluster_obs.shape[0]
# using OpenCL to compute the log posterior of each item and perform resampling
gpu_time = time()
d_n = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_n)
d_mu = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_mu)
d_cov_mu0 = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_cov_mu0)
d_cov_obs = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_cov_obs)
d_sigma = cl.array.empty(self.queue, h_cov_obs.shape, np.float32, allocator=self.mem_pool)
self.prg.normal_kd_sigma_matrix(self.queue, h_cov_obs.shape, None,
d_n, d_cov_obs, d_cov_mu0,
d_T0, self.gaussian_k0, self.wishart_v0, d_sigma.data)
# copy the sigma matrix to host memory and calculate determinants and inversions
h_sigma = d_sigma.get()
h_determinants = np.linalg.det(h_sigma).astype(np.float32)
h_inverses = np.array([np.linalg.inv(_) for _ in h_sigma]).astype(np.float32)
d_uniq_label = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = uniq_labels)
d_determinants = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_determinants)
d_inverses = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = h_inverses)
d_rand = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = np.random.random(self.N).astype(np.float32))
d_logpost = cl.array.empty(self.queue, (self.N, uniq_labels.shape[0]), np.float32, allocator = self.mem_pool)
# if the OpenCL device is CPU, use the kernel with loops over clusters
if self.device_type == cl.device_type.CPU:
self.prg.normal_kd_logpost_loopy(self.queue, (self.obs.shape[0],), None,
d_labels, self.d_obs, d_uniq_label,
d_mu, d_n, d_determinants, d_inverses,
num_of_clusters, self.alpha,
self.dim, self.wishart_v0, d_logpost.data, d_rand)
# otherwise, use the kernel that fully unrolls data points and clusters
else:
self.prg.normal_kd_logpost(self.queue, (self.obs.shape[0], uniq_labels.shape[0]), None,
d_labels, self.d_obs, d_uniq_label,
d_mu, d_n, d_determinants, d_inverses,
num_of_clusters, self.alpha,
self.dim, self.wishart_v0, d_logpost.data, d_rand)
self.prg.resample_labels(self.queue, (self.obs.shape[0],), None,
d_labels, d_uniq_label, num_of_clusters,
d_rand, d_logpost.data)
temp_cluster_labels = np.empty(cluster_labels.shape, dtype=np.int32)
cl.enqueue_copy(self.queue, temp_cluster_labels, d_labels)
self.gpu_time += time() - gpu_time
if self.record_best:
if self.auto_save_sample(temp_cluster_labels):
cluster_labels = temp_cluster_labels
if self.no_improvement(1000):
break
else:
if i >= self.burnin and i % self.thining == 0:
print(*temp_cluster_labels, file = output_file, sep=',')
self.total_time = time() - total_time
return self.gpu_time, self.total_time, Counter(cluster_labels).most_common()
def _logprob(self, sample):
"""Calculate the joint log probability of data and model given a sample.
"""
assert(len(sample) == len(self.obs))
total_logprob = 0
if self.dim == 1 and self.cl_mode == False:
cluster_dict = {}
N = 0
for label, obs in zip(sample, self.obs):
obs = obs[0]
if label in cluster_dict:
n = len(cluster_dict[label])
y_bar = np.mean(cluster_dict[label])
var = np.var(cluster_dict[label])
else:
n, y_bar, var = 0, 0, 0
k_n = self.gaussian_k0 + n
mu_n = (self.gaussian_k0 * self.gaussian_mu0 + n * y_bar) / k_n
alpha_n = self.gamma_alpha0 + n / 2
beta_n = self.gamma_beta0 + 0.5 * var * n + \
self.gaussian_k0 * n * (y_bar - self.gaussian_mu0) ** 2 / (2 * k_n)
Lambda = alpha_n * k_n / (beta_n * (k_n + 1))
t_frozen = t(df = 2 * alpha_n, loc = mu_n, scale = (1 / Lambda) ** 0.5)
loglik = t_frozen.logpdf(obs)
loglik += np.log(n / (N + self.alpha)) if n > 0 else np.log(self.alpha / (N + self.alpha))
# modify the counts and dict
try: cluster_dict[label].append(obs)
except KeyError: cluster_dict[label] = [obs]
N += 1
total_logprob += loglik
if self.dim == 1 and self.cl_mode:
gpu_a_time = time()
d_labels = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR, hostbuf = sample)
d_hyper_param = cl.Buffer(self.ctx, self.mf.READ_ONLY | self.mf.COPY_HOST_PTR,
hostbuf = np.array([self.gaussian_mu0, self.gaussian_k0,
self.gamma_alpha0, self.gamma_beta0, self.alpha]).astype(np.float32))
d_logprob = cl.array.empty(self.queue, (self.N,), np.float32, allocator=self.mem_pool)
self.prg.joint_logprob_1d(self.queue, self.obs.shape, None,
d_labels, self.d_obs, d_hyper_param, d_logprob.data)
total_logprob = d_logprob.get().sum()
self.gpu_time += time() - gpu_a_time
if self.dim > 1:# and self.cl_mode == False:
cluster_dict = {}
N = 0
for label, obs in zip(sample, self.obs):
if label in cluster_dict:
cluster_obs = np.array(cluster_dict[label])
n = cluster_obs.shape[0]
mu = np.mean(cluster_obs, axis = 0)
obs_deviance = cluster_obs - mu
mu0_deviance = np.reshape(self.gaussian_mu0 - mu, (self.dim, 1))
cov_obs = np.dot(obs_deviance.T, obs_deviance)
cov_mu0 = np.dot(mu0_deviance, mu0_deviance.T)
else:
n, mu, cov_obs, cov_mu0 = 0, 0, 0, 0
kn = self.gaussian_k0 + n
vn = self.wishart_v0 + n
sigma = (self.wishart_T0 + cov_obs + (self.gaussian_k0 * n) / kn * cov_mu0) * (kn + 1) / kn / (vn - self.dim + 1)
det = np.linalg.det(sigma)
inv = np.linalg.inv(sigma)
df = vn - self.dim + 1
loglik = math.lgamma(df / 2.0 + self.dim / 2.0) - math.lgamma(df / 2.0) - 0.5 * np.log(det) - 0.5 * self.dim * np.log(df * math.pi) - \
0.5 * (df + self.dim) * np.log(1.0 + (1.0 / df) * np.dot(np.dot(obs - mu, inv), (obs - mu).T))
loglik += np.log(n / (N + self.alpha)) if n > 0 else np.log(self.alpha / (N + self.alpha))
# modify the counts and dict
try: cluster_dict[label].append(obs)
except KeyError: cluster_dict[label] = [obs]
N += 1
total_logprob += loglik
return total_logprob
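# --- Illustrative usage (assumption: not part of the original module) ---
# Sketch of a typical driver for this sampler; 'data.csv' is a hypothetical
# input file and CPU-only mode is chosen so no OpenCL device is required.
#
#   sampler = CollapsedGibbs(cl_mode=False, alpha=1.0)
#   sampler.read_csv('data.csv', header=True)
#   with open('samples.csv', 'w') as out_fp:
#       gpu_time, total_time, clusters = sampler.do_inference(output_file=out_fp)
#   print(clusters)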
|
"proto_ts_library.bzl provides the proto_ts_library rule"
load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo", "JSModuleInfo")
def _proto_ts_library_impl(ctx):
"""
Implementation for proto_ts_library rule
Args:
ctx: the rule context object
Returns:
list of providers
"""
# list<depset<File>>
transitive_dts = [depset(ctx.files.deps)]
# list<depset<File>>>
transitive_js_modules = []
# gather transitive .d.ts files
for dep in ctx.attr.deps:
if DeclarationInfo in dep:
transitive_dts.append(dep[DeclarationInfo].transitive_declarations)
if JSModuleInfo in dep:
transitive_js_modules.append(dep[JSModuleInfo].sources)
# list<File>: .d.ts files that will be created by the tsc action
dts_outputs = []
# .js files that will be created by the tsc action
js_outputs = []
# all srcs files are expected to be .ts files.
for f in ctx.files.srcs:
(base, _, _) = f.basename.rpartition(".")
dts_outputs.append(
ctx.actions.declare_file(base + ".d.ts", sibling = f),
)
# js_outputs.append(
# ctx.actions.declare_file(base + ".js", sibling = f),
# )
# all outputs (.d.ts + .js)
outputs = js_outputs + dts_outputs
# for the JSModuleInfo provider
js_module = depset(js_outputs)
transitive_js_modules.append(js_module)
# build the tsc command
    command = [ctx.executable.tsc.path, "--declaration"]
command += ctx.attr.args
command += [f.path for f in ctx.files.srcs]
ctx.actions.run_shell(
command = " ".join(command),
inputs = depset(
direct = ctx.files.srcs,
transitive = transitive_dts,
),
mnemonic = "TscProtoLibrary",
outputs = outputs,
progress_message = "tsc %s" % ctx.label,
tools = [ctx.executable.tsc],
)
return [
# primary provider needed for downstream rules such as ts_project.
DeclarationInfo(
declarations = dts_outputs,
# ts_project wants direct /and/ transitive .d.ts in 'transitive'
transitive_declarations = depset(
transitive = [depset(dts_outputs)] + transitive_dts,
),
type_blocklisted_declarations = depset(),
),
# default info contains all files, in case someone wanted it for a
# filegroup etc.
DefaultInfo(files = depset(direct = outputs)),
# js files, in the event someone would like this for js_library etc.
JSModuleInfo(
direct_sources = js_module,
sources = depset(transitive = transitive_js_modules),
),
]
proto_ts_library = rule(
implementation = _proto_ts_library_impl,
attrs = {
"deps": attr.label_list(
doc = "dependencies that provide .d.ts files (typically other proto_ts_library rules)",
providers = [DeclarationInfo],
),
"srcs": attr.label_list(
allow_files = True,
doc = "source .ts files",
),
"tsc": attr.label(
allow_files = True,
cfg = "host",
mandatory = True,
doc = "typescript compiler executable",
executable = True,
),
"args": attr.string_list(
doc = "additional arguments for the tsc compile action",
),
},
)
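# --- Illustrative usage (assumption: not part of the original file) ---
# A BUILD file might load and invoke the rule roughly like this; the load path,
# target names and the //:tsc label are hypothetical placeholders.
#
#   load("//tools:proto_ts_library.bzl", "proto_ts_library")
#
#   proto_ts_library(
#       name = "example_pb_ts",
#       srcs = ["example_pb.ts"],
#       deps = [":base_pb_ts"],
#       tsc = "//:tsc",
#       args = ["--strict"],
#   )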
|
import unittest
from mock import MagicMock
import bilby
class TestPymultinest(unittest.TestCase):
def setUp(self):
self.likelihood = MagicMock()
self.priors = bilby.core.prior.PriorDict(
dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
)
self.priors["a"] = bilby.core.prior.Prior(boundary="periodic")
self.priors["b"] = bilby.core.prior.Prior(boundary="reflective")
self.sampler = bilby.core.sampler.Pymultinest(
self.likelihood,
self.priors,
outdir="outdir",
label="label",
use_ratio=False,
plot=False,
skip_import_verification=True,
)
def tearDown(self):
del self.likelihood
del self.priors
del self.sampler
def test_default_kwargs(self):
expected = dict(importance_nested_sampling=False, resume=True,
verbose=True, sampling_efficiency='parameter',
n_live_points=500, n_params=2,
n_clustering_params=None, wrapped_params=None,
multimodal=True, const_efficiency_mode=False,
evidence_tolerance=0.5,
n_iter_before_update=100, null_log_evidence=-1e90,
max_modes=100, mode_tolerance=-1e90, seed=-1,
context=0, write_output=True, log_zero=-1e100,
max_iter=0, init_MPI=False, dump_callback='dumper')
self.sampler.kwargs['dump_callback'] = 'dumper' # Check like the dynesty print_func
self.assertListEqual([1, 0], self.sampler.kwargs['wrapped_params']) # Check this separately
self.sampler.kwargs['wrapped_params'] = None # The dict comparison can't handle lists
self.assertDictEqual(expected, self.sampler.kwargs)
def test_translate_kwargs(self):
expected = dict(importance_nested_sampling=False, resume=True,
verbose=True, sampling_efficiency='parameter',
n_live_points=123, n_params=2,
n_clustering_params=None, wrapped_params=None,
multimodal=True, const_efficiency_mode=False,
evidence_tolerance=0.5,
n_iter_before_update=100, null_log_evidence=-1e90,
max_modes=100, mode_tolerance=-1e90, seed=-1,
context=0, write_output=True, log_zero=-1e100,
max_iter=0, init_MPI=False, dump_callback='dumper')
for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs:
new_kwargs = self.sampler.kwargs.copy()
del new_kwargs["n_live_points"]
new_kwargs[
"wrapped_params"
] = None # The dict comparison can't handle lists
new_kwargs['dump_callback'] = 'dumper' # Check this like Dynesty print_func
new_kwargs[equiv] = 123
self.sampler.kwargs = new_kwargs
self.assertDictEqual(expected, self.sampler.kwargs)
if __name__ == "__main__":
unittest.main()
|
def return_cst():
return 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Using VlcMovieStim to play a video file.
Requires:
* vlc, matching your python bitness
* pip install python-vlc
"""
from __future__ import absolute_import, division
from psychopy import visual, core, event, data, logging, monitors
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import time, os, sys, glob
videopath = r'C:\Users\fitch\downloads\huge.mp4'
if not os.path.exists(videopath):
raise RuntimeError("Video path could not be found: " + videopath)
win = visual.Window(
size=[1440, 900], fullscr=True, screen=0,
allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
experimenter_text = visual.TextStim(win, "IT'S A MOVIE", pos=(0, -450), units = 'pix')
mov = visual.VlcMovieStim(win, videopath,
units=None,
pos=(0, 0.15),
loop=False)
mov.size = (1.0, .5625)
# ------Prepare to start Routine "experimenter"-------
shouldflip = mov.play()
continueRoutine = True
while continueRoutine:
# Only flip when a new frame should be displayed.
if shouldflip:
# Movie has already been drawn, so just draw text stim and flip
experimenter_text.draw()
win.flip()
else:
# Give the OS a break if a flip is not needed
time.sleep(0.001)
shouldflip = mov.draw()
for key in event.getKeys():
if key in ['escape', 'q']:
# TODO: Let them select a different file?
win.close()
core.quit()
elif key in ['space']:
continueRoutine = False
|
from dataclasses import dataclass, asdict
from enum import Enum, auto
from typing import Type
import torch
import wandb
from datargs import argsclass
from torch import nn, Tensor
from preprocessor_meta import Preprocessor, filter_sig_meta
from gw_data import *
from models import HyperParameters, ModelManager
class RegressionHead(Enum):
LINEAR = auto()
AVG_LINEAR = auto()
def to_odd(i: int) -> int:
return (i // 2) * 2 + 1
@argsclass(name="sig_cnn")
@dataclass
class SigCnnHp(HyperParameters):
batch: int = 512
epochs: int = 10
lr: float = 0.0003
dtype: torch.dtype = torch.float32
conv1w: int = 105
conv1out: int = 150
conv1stride: int = 1
mp1w: int = 1
conv2w: int = 8
conv2out: int = 40
conv2stride: int = 1
mp2w: int = 4
conv3w: int = 8
conv3out: int = 50
conv3stride: int = 1
mp3w: int = 1
conv4w: int = 33
conv4out: int = 50
conv4stride: int = 1
mp4w: int = 3
head: RegressionHead = RegressionHead.AVG_LINEAR
lindrop: float = 0.22
@property
def manager_class(self) -> Type[ModelManager]:
return Manager
def __post_init__(self):
self.conv1w = to_odd(self.conv1w)
self.conv2w = to_odd(self.conv2w)
self.conv3w = to_odd(self.conv3w)
self.conv4w = to_odd(self.conv4w)
class ConvBlock(nn.Module):
def __init__(
self, w: int, in_channels: int, out_channels: int, stride: int, mpw: int
):
super().__init__()
self.conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(w,),
stride=(stride,),
padding=w // 2,
)
self.bn = nn.BatchNorm1d(out_channels)
self.activation = nn.ReLU()
self.mp = nn.MaxPool1d(kernel_size=mpw)
def forward(self, x: Tensor, use_activation: bool):
out = self.conv(x)
out = self.bn(out)
if use_activation:
out = self.activation(out)
out = self.mp(out)
return out
class SigCnn(nn.Module):
"""
Applies a CNN to the output of preprocess filter_sig and produces one logit as output.
input size: (batch_size, N_SIGNALS, SIGNAL_LEN)
output size: (batch_size, 1)
"""
def __init__(self, device: torch.device, hp: SigCnnHp):
super().__init__()
self.hp = hp
self.device = device
self.conv1 = ConvBlock(
w=hp.conv1w,
in_channels=N_SIGNALS,
out_channels=hp.conv1out,
stride=hp.conv1stride,
mpw=hp.mp1w,
)
self.conv2 = ConvBlock(
w=hp.conv2w,
in_channels=hp.conv1out,
out_channels=hp.conv2out,
stride=hp.conv2stride,
mpw=hp.mp2w,
)
self.conv3 = ConvBlock(
w=hp.conv3w,
in_channels=hp.conv2out,
out_channels=hp.conv3out,
stride=hp.conv3stride,
mpw=hp.mp3w,
)
self.conv4 = ConvBlock(
w=hp.conv4w,
in_channels=hp.conv3out,
out_channels=hp.conv4out,
stride=hp.conv4stride,
mpw=hp.mp4w,
)
self.outw = (
SIGNAL_LEN
// hp.conv1stride
// hp.mp1w
// hp.conv2stride
// hp.mp2w
// hp.conv3stride
// hp.mp3w
// hp.conv4stride
// hp.mp4w
)
if self.outw == 0:
raise ValueError("strides and maxpools took output width to zero")
self.linear_dropout = nn.Dropout(hp.lindrop)
linear_in_features = hp.conv4out * (
self.outw if hp.head == RegressionHead.LINEAR else 1
)
self.linear = nn.Linear(in_features=linear_in_features, out_features=1)
def forward(self, x: Tensor) -> Tensor:
batch_size = x.size()[0]
out = self.conv1(x, use_activation=True)
out = self.conv2(out, use_activation=True)
out = self.conv3(out, use_activation=True)
out = self.conv4(out, use_activation=False)
if self.hp.head == RegressionHead.LINEAR:
out = torch.flatten(out, start_dim=1)
else:
assert self.hp.head == RegressionHead.AVG_LINEAR
# Average across w, leaving (batch, channels)
out = torch.mean(out, dim=2)
out = self.linear_dropout(out)
out = self.linear(out)
assert out.size() == (batch_size, 1)
return out
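# --- Illustrative shape check (assumption: not part of the original module) ---
# The SigCnn docstring above states input (batch_size, N_SIGNALS, SIGNAL_LEN)
# and output (batch_size, 1); this helper sketches a CPU smoke test of those
# sizes under the default hyper-parameters.
def _smoke_test_sig_cnn() -> None:
    device = torch.device("cpu")
    model = SigCnn(device, SigCnnHp())
    x = torch.randn(2, N_SIGNALS, SIGNAL_LEN)
    logits = model(x)
    assert logits.size() == (2, 1)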
class Manager(ModelManager):
def train(
self,
data_dir: Path,
n: Optional[int],
device: torch.device,
hp: HyperParameters,
submission: bool,
):
if not isinstance(hp, SigCnnHp):
raise ValueError("wrong hyper-parameter class: {hp}")
wandb.init(project="g2net-" + __name__, entity="wisdom", config=asdict(hp))
self._train(
SigCnn(device, hp), device, data_dir, n, [filter_sig_meta], hp, submission
)
|
from hsm.api.openstack import wsgi
from hsm.api.views import versions as views_versions
VERSIONS = {
"v1.0": {
"id": "v1.0",
"status": "CURRENT",
"updated": "2012-01-04T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": "http://jorgew.github.com/block-storage-api/"
"content/os-block-storage-1.0.pdf",
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": "http://docs.rackspacecloud.com/"
"servers/api/v1.1/application.wadl",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.storage+xml;version=1",
},
{
"base": "application/json",
"type": "application/vnd.openstack.storage+json;version=1",
}
],
}
}
class Versions(wsgi.Resource):
def __init__(self):
super(Versions, self).__init__(None)
def index(self, req):
"""Return all versions."""
builder = views_versions.get_view_builder(req)
return builder.build_versions(VERSIONS)
@wsgi.response(300)
def multi(self, req):
"""Return multiple choices."""
builder = views_versions.get_view_builder(req)
return builder.build_choices(VERSIONS, req)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
args = {}
if request_environment['PATH_INFO'] == '/':
args['action'] = 'index'
else:
args['action'] = 'multi'
return args
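    # Illustrative only (not part of the original file): with the routing above, a
    # request to '/' dispatches to index() (list all versions) and any other path
    # dispatches to multi() (the 300 Multiple Choices response), e.g.
    #     self.get_action_args({'PATH_INFO': '/'})     # -> {'action': 'index'}
    #     self.get_action_args({'PATH_INFO': '/v1'})   # -> {'action': 'multi'}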
class HardwareVersionV1(object):
def show(self, req):
builder = views_versions.get_view_builder(req)
return builder.build_version(VERSIONS['v1.0'])
def create_resource():
return wsgi.Resource(HardwareVersionV1())
|
from urllib.parse import urlparse
import os
from archive_api.models import DataSet, MeasurementVariable, Site, Person, Plot, Author
from django.urls import resolve
from django.db import transaction
from rest_framework import serializers
from rest_framework.reverse import reverse
class StringToIntReadOnlyField(serializers.ReadOnlyField):
"""
    Read-only field that is represented as a string externally and stored as an integer internally
"""
def to_representation(self, obj):
return str(obj)
class StringToIntField(serializers.Field):
"""
    Field that is represented as a string externally and stored as an integer internally
"""
def to_internal_value(self, data):
return int(data)
def to_representation(self, obj):
return str(obj)
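# Small illustrative round trip (assumed usage, not from the original file):
# StringToIntField stores integers on the model but renders them as strings, e.g.
#     field = StringToIntField()
#     field.to_internal_value("2")   # -> 2
#     field.to_representation(2)     # -> "2"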
class AuthorsField(serializers.SerializerMethodField):
"""
Author objects are serialized and deserialized for reading and writing
"""
def __init__(self, **kwargs):
super(AuthorsField, self).__init__(**kwargs)
self.read_only = False
def to_internal_value(self, data):
"""
Resolve person Urls into Person objects
:param data: list of Person urls
:type data: list
:return: dict with 'authors' key containing a list of Person objects
"""
authors = []
for author in data:
path = urlparse(author).path
resolved_func, __, resolved_kwargs = resolve(path)
person = resolved_func.cls.queryset.get(pk=resolved_kwargs['pk'])
authors.append(person)
return {'authors': authors}
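# Sketch of the expected payload for AuthorsField (the host and pk in the URL are
# hypothetical): a list of person URLs is resolved back into Person instances, e.g.
#     field.to_internal_value(["https://example.org/api/v1/people/3/"])
#     # -> {'authors': [<Person: pk=3>]}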
class DataSetSerializer(serializers.HyperlinkedModelSerializer):
"""
DataSet serializer that converts models.DataSet
"""
managed_by = serializers.ReadOnlyField(source='managed_by.username')
modified_by = serializers.ReadOnlyField(source='modified_by.username')
archive_filename = serializers.SerializerMethodField()
submission_date = serializers.ReadOnlyField()
publication_date = serializers.ReadOnlyField()
authors = AuthorsField()
archive = serializers.SerializerMethodField()
status = StringToIntReadOnlyField()
qaqc_status = StringToIntField(required=False,allow_null=True)
access_level = StringToIntField(required=False, allow_null=True)
def get_archive_filename(self,instance):
if instance.archive:
base, filename = os.path.split(instance.archive.name)
return filename
else:
return None
def get_archive(self, instance):
""" Returns the archive access url"""
if instance.archive:
url_kwargs = {
'pk': instance.pk,
}
url = reverse('dataset-detail', kwargs=url_kwargs, request=self.context["request"])
url += "archive/"
return url
return None
def get_authors(self, instance):
"""
Serialize the authors. This should be an ordered list of authors
:param instance:
:return:
"""
# Get Authors in the specified order
author_order = Author.objects \
.filter(dataset_id=instance.id) \
.order_by('order')
# Put in a list
authors = [a.author for a in author_order]
# Return a list of person urls
        serialized = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data
        return [p["url"] for p in serialized]
class Meta:
model = DataSet
fields = ('url', 'data_set_id', 'name', 'version', 'status', 'description', 'status_comment',
'doi', 'start_date', 'end_date', 'qaqc_status', 'qaqc_method_description',
'ngee_tropics_resources', 'funding_organizations', 'doe_funding_contract_numbers',
'acknowledgement', 'reference', 'additional_reference_information',
'access_level', 'additional_access_information', 'originating_institution',
'submission_date', 'contact', 'sites', 'authors', 'plots', 'variables', 'archive',
'archive_filename',
'managed_by', 'created_date', 'modified_by', 'modified_date'
, 'cdiac_import', 'cdiac_submission_contact','publication_date')
read_only_fields = ('cdiac_import', 'cdiac_submission_contact',
'url', 'version', 'managed_by', 'created_date', 'modified_by', 'modified_date', 'status', 'archive',
'archive_filename',
'submission_date', 'data_set_id','publication_date')
def validate(self, data):
"""
Validate the fields.
"""
errors = dict()
# Validate the data range
        if ({'start_date', 'end_date'}.issubset(data.keys()) and data['start_date'] and data['end_date']
                and data['start_date'] > data['end_date']):
            errors["end_date"] = "Start date must come before end date"
# Validate the selected plots
if 'plots' in data.keys():
if 'sites' not in data.keys():
errors.setdefault('plots', [])
errors["plots"].append("A site must be selected.")
else:
for plot in data["plots"]:
if plot.site not in data["sites"]:
errors.setdefault('plots', [])
errors["plots"].append("Select the site corresponding to plot {}:{}".format(plot.plot_id, plot.name))
# If the dataset is approved or submitted there are an extra set of fields
# that are required
if self.instance and self.instance.status > DataSet.STATUS_DRAFT:
if not self.instance.archive:
errors.setdefault('missingRequiredFields', [])
errors['missingRequiredFields'].append("archive")
for field in ['sites', 'authors', 'name', 'description', 'contact', 'variables',
'ngee_tropics_resources', 'funding_organizations', 'originating_institution',
'access_level']: # Check for required fields
if field in data.keys():
if data[field] is None or (isinstance(data[field], (list, tuple, str)) and not data[field]):
errors.setdefault('missingRequiredFields', [])
errors['missingRequiredFields'].append(field)
else:
errors.setdefault('missingRequiredFields', [])
errors['missingRequiredFields'].append(field)
if len(errors) > 0:
raise serializers.ValidationError(errors)
return data
def create(self, validated_data):
"""
Override the serializer create method to handle Dataset and Author
creation in an atomic transaction
:param validated_data:
:return: dataset
"""
# Use an atomic transaction for managing dataset and authors
with transaction.atomic():
# Pop off authors data, if exists
author_data = []
sites_data = []
plots_data = []
variables_data = []
if "authors" in validated_data.keys():
author_data = validated_data.pop('authors')
if "sites" in validated_data.keys():
sites_data = validated_data.pop('sites')
if "plots" in validated_data.keys():
plots_data = validated_data.pop('plots')
if "variables" in validated_data.keys():
variables_data = validated_data.pop('variables')
# Create dataset first
dataset = DataSet.objects.create(**validated_data)
dataset.clean()
dataset.save()
# save the author data
self.add_authors(author_data, dataset)
for obj in sites_data:
dataset.sites.add(obj)
for obj in plots_data:
dataset.plots.add(obj)
for obj in variables_data:
dataset.variables.add(obj)
dataset.save()
return dataset
def update(self, instance, validated_data):
"""
Override the serializer update method to handle Dataset and Author
update in an atomic transaction
:param validated_data:
:return: dataset
"""
# Use an atomic transaction for managing dataset and authors
with transaction.atomic():
# pop off the authors data
if "authors" in validated_data.keys():
author_data = validated_data.pop('authors')
# remove the existing authors
Author.objects.filter(dataset_id=instance.id).delete() # delete first
self.add_authors(author_data, instance)
# Update Dataset metadata
            super(DataSetSerializer, self).update(instance=instance, validated_data=validated_data)
return instance
def add_authors(self, author_data, instance):
"""
Enumerate over author data and create ordered author objects
:param author_data: Person objects
:type author_data: list
:param instance: dataset to add authors to
:type instance: DataSet
"""
for idx, author in enumerate(author_data):
Author.objects.create(dataset=instance, order=idx, author=author)
class MeasurementVariableSerializer(serializers.HyperlinkedModelSerializer):
"""
    MeasurementVariable serializer that converts models.MeasurementVariable
"""
class Meta:
model = MeasurementVariable
fields = '__all__'
class SiteSerializer(serializers.HyperlinkedModelSerializer):
"""
Site serializer that converts models.Site
"""
class Meta:
model = Site
fields = '__all__'
class PersonSerializer(serializers.HyperlinkedModelSerializer):
"""
Person serializer that converts models.Person
"""
class Meta:
model = Person
fields = ('url', 'first_name', 'last_name', 'email', 'institution_affiliation', 'orcid')
class PlotSerializer(serializers.HyperlinkedModelSerializer):
"""
Plot serializer that converts models.Plot
"""
class Meta:
model = Plot
fields = '__all__'
|
#q.no.1
year=int(input("enter a year to check it is leap year or not:"))
if (year%4==0 and year%100!=0) or year%400==0:
print("it is a leap year")
else:
print("it is not a leap year")
#q.no.2
length=int(input("enter length:"))
breadth=int(input("enter breadth:"))
def dimensions(l,b):
if l==b:
return("dimensions of square")
else:
return ("dimensions of rectangle")
print(dimensions(length,breadth))
#q.no.3
p1=int(input("enter the first age of a person:"))
p2=int(input("enter the age of second person:"))
p3=int(input("enter the age of third person:"))
old_person=max(p1,p2,p3)
youngest_person=min(p1,p2,p3)
print("the oldest of the three person is:",old_person)
print("the youngest of the three person is:",youngest_person)
#q.no.4
emp_age=int(input("enter the age:"))
emp_sex = str(input("enter the sex(M or F):"))
emp_status = str(input("enter the martial status(Y or N):"))
if(emp_sex==('F')):
print("she will work in urban areas")
elif(emp_sex==('M'))and (20<=emp_age<40):
print("he work in anywhere")
elif(emp_sex==('M'))and (40<=emp_age<60):
print("he will work in urban areas")
else:
print("ERROR!!!!")
#Q.NO.5
qt=int(input("enter the quantity of items to buy:"))
cost=(qt*100)
if cost>1000:
discount=(cost)*.10
cost-=discount
print("the cost of the iem is:",cost)
else:
print("the cost of item is:",cost)
#Q.NO.6
x=[]
for i in range(10):
x.append(input("enter a number:"))
print(x,sep='\n')
#Q.NO.7
while True:
print("infinite loop")
#q.no.9
l=[1,"danish",3,5]
l1=[]
l2=[]
l3=[]
for i in l:
    if type(i)==int:
        l1.append(i)
    elif type(i)==str:
        l2.append(i)
    else:
        l3.append(i)
print(l1)
print(l2)
print(l3)
#Q.N0.10
l=[]
for i in range(1,101):
if i>1:
for j in range(2,i):
if i%j==0:
break
else:
l.append(i)
print(l)
#q.no.11
for i in range(1,5):
print("* *i")
#Q.NO.12
n=int(input("enter total number of elements:"))
l=[]
for i in range(n):
n1=int(input("enter number:"))
l.append(n1)
print(l)
n2=int(input("enter element to be searched:"))
for j in l:
    if j==n2:
        k=l.index(j)
        l.pop(k)
        break
print(l)
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from base.dependency.dependency_if import ModuleGrouper
from base.modules_if import ModuleListSupply
from commons.configurator import Configurator
from commons.config_util import ClassLoader
import logging
import sys
config_module_list_supply = ModuleListSupply()
config_module_grouper = ModuleGrouper
def main():
logging.basicConfig(stream=sys.stderr,level=logging.DEBUG)
Configurator().default()
if len(sys.argv) > 1:
module_grouper_class = ClassLoader.get_class(qualified_class_name = sys.argv[1])
else:
module_grouper_class = config_module_grouper
module_grouper = module_grouper_class(config_module_list_supply.get_module_list())
for prefix in sorted(module_grouper.node_group_prefixes()):
print "%s (%s)" % (prefix, list(module for module in config_module_list_supply.get_module_list() if prefix==module_grouper.get_node_group_prefix(module)))
if __name__ == "__main__":
main()
|
####
# EECS1015: Lab 5
# Name: Mahfuz Rahman
# Student ID: 217847518
# Email: mafu@my.yorku.ca
####
import random
names = ("Masha", "Kevin", "Ruigang", "Vlad", "Ramesh", \
"Aditi", "Caroline", "Panos", "Chuck", "Grani", \
"Rutha", "Stan", "Qiong", "Alexi", "Carlos")
cities = ("Toronto", "Ottawa", "Hamilton")
testDict = {"Richard": "Toronto", "Jia-Tao": "Toronto", "Justin": "Ottawa", "Lars": "Ottawa"}
def getRandomItem(x):
return x[random.randint(0, len(x)-1)] # Getting random item from the tuple passed
def createNameDictionary():
citizens = {}
for a in range(len(names)):
citizens[names[a]] = getRandomItem(cities) # Assigning cities to people randomly
return citizens
def fromCity(dict, city):
city_list = []
    for a in dict: # a iterates over the dictionary keys
if (dict[a] == city): # Checking the values of dictionary with the city name
city_list.append(a)
return city_list
def removePeopleFrom(dict, city):
dict_copy = dict.copy() # Making a copy of the dictionary for using in the loop as the dict parameter gets modified
for a in dict_copy:
if (dict[a] == city):
del dict[a]
def printNameDict(dict,city):
serial=1 # This variable will be used for numbering
for a in city: # Extracting the city name
print(f"People from {a}:")
if a in dict.values(): # Checking if the city is in the dictionary
for b in dict: # Extracting the dictionary key
if dict[b] == a: # Checking if value of extracted key(b) equals to the city(a)
print(f"{serial}.{b}")
serial=serial+1
serial=1
else: # If city is not in dictionary
print("*None*")
def main():
print("Part 1 - Testing functions with `testDict' ")
print("Testing getRandomItem() function")
print(cities)
print("Item = "+ getRandomItem(cities))
print("Testing fromCity() function")
print(fromCity(testDict, "Toronto"))
print(fromCity(testDict, "Ottawa"))
print("Testing printNameDict() function")
printNameDict(testDict, ("Toronto", "Ottawa"))
print("Testing removePeopleFrom() function")
removePeopleFrom(testDict, "Ottawa")
printNameDict(testDict, ("Toronto", "Ottawa"))
print("\nPart 2 - Main\n")
citizens = createNameDictionary()
printNameDict(citizens,cities)
for a in cities:
print(f"{a} List:")
print(fromCity(citizens,a))
print("Removing all people from Toronto...")
removePeopleFrom(citizens,"Toronto")
print("Removing all people from Hamilton...")
removePeopleFrom(citizens,"Hamilton")
printNameDict(citizens,cities)
input()
if __name__ == '__main__':
main()
|
from .views import ProductViewSet, UserAPIView
from django.urls import path
urlpatterns = [
path('products', ProductViewSet.as_view({
'get': 'getAllProducts',
'post': 'postProduct'
})),
path('products/<str:pk>', ProductViewSet.as_view({
'get': 'getProduct',
'put': 'updateProduct',
'delete': 'deleteProduct'
})),
path('user', UserAPIView.as_view()),
]
|
# -*- coding: utf-8 -*-
"""This is a module demonstrating a script
A collection of functions that print 'script'.
Example:
This is an example that exemplifies in the ``Example`` section::
$ python script.py
New Section.
Attributes:
module_variable (str): A variable mentioned in the ``Attributes`` section:
Todo:
* Something with your life.
.. _This is a link: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
"""
module_variable = 'script'
def printscript():
"""Prints 'script'
Args:
Returns:
str: The string returned
"""
print(module_variable)
return module_variable
def script_generator(n):
"""Generates repeated words.
Args:
n(int): The upper limit of the number of the range to generate
Yields:
str: the `module_variable`
Examples:
Doctest example.
>>> print([s for s in script_generator(3)])
['script', 'script', 'script']
"""
for i in range(n):
yield module_variable
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import torch
import torch.optim as optim
from sketch import SketchAlgorithm
import torch.nn.functional as F
import logging
import bagua.torch_api as bagua
from bagua.torch_api.algorithms import gradient_allreduce
USE_SKETCH = True
sketch_biases = False
def main():
torch.manual_seed(42)
torch.cuda.set_device(bagua.get_local_rank())
bagua.init_process_group()
logging.getLogger().setLevel(logging.INFO)
model = torch.nn.Sequential(torch.nn.Linear(200, 1)).cuda()
for m in model.modules():
if hasattr(m, "bias") and m.bias is not None:
m.bias.do_sketching = sketch_biases
if USE_SKETCH:
optimizer = optim.SGD(model.parameters(), lr=1)
algorithm = SketchAlgorithm(optimizer)
else:
optimizer = optim.SGD(model.parameters(), lr=0.01)
algorithm = gradient_allreduce.GradientAllReduceAlgorithm()
model = model.with_bagua(
[optimizer],
algorithm,
do_flatten=True,
)
model.train()
X = torch.randn(1000, 200).cuda()
y = torch.zeros(1000, 1).cuda()
for epoch in range(1, 101):
optimizer.zero_grad()
output = model(X)
loss = F.mse_loss(output, y)
loss.backward()
optimizer.step()
logging.info(f"it {epoch}, loss: {loss.item():.6f}")
if __name__ == "__main__":
main()
|
#vim: fileencoding=utf8
from __future__ import division, print_function
import sys, os.path, codecs, traceback
from rays import *
from rays.compat import *
from .base import *
import pytest
class TestEmbpy(Base): # {{{
def template(self, name):
return os.path.join(self.TEST_DIR, "templates", name)
def cache(self, name):
return os.path.join(self.TEST_DIR, "templates", "caches", name)
def clear_caches(self):
for v in os.listdir( os.path.join(self.TEST_DIR, "templates", "caches")):
os.remove(os.path.join(self.TEST_DIR, "templates", "caches", v))
def setup_method(self, method):
try:
os.mkdir(os.path.join(self.TEST_DIR, "templates", "caches"))
except:
pass
Base.setup_method(self, method)
self.clear_caches()
return
def teardown_method(self, method):
Base.teardown_method(self, method)
self.clear_caches()
try:
os.rmdir(os.path.join(self.TEST_DIR, "templates", "caches"))
except:
pass
return
def test_set_and_get_template_globals(self):
embpy = Embpy(codecs.open(self.template("index.html")),
self.cache("index.html"),
{"global_value": "global"})
assert {"u_":u_, "b_":b_, "n_":n_, "global_value":"global"} == embpy.template_globals
def test_render_without_filter(self):
embpy = Embpy(codecs.open(self.template("index.html")),
self.cache("index.html"))
assert u_("""<div>
値1
値2
</div>
<div>
<p>value1</p>
</div>
""") == embpy.render({"text_values": [u_("値1"), u_("値2")],
"html_values": [u_("<p>value1</p>")]})
return embpy
def test_cache(self):
embpy = self.test_render_without_filter()
assert embpy.is_cached()
self.test_render_without_filter()
data = open(self.template("index.html"), "rb").read()
open(self.template("index.html"), "wb").write(data)
assert (not embpy.is_cached())
self.test_render_without_filter()
def test_render_with_filter(self):
def filter(s):
return "filtered_"+s
embpy = Embpy(codecs.open(self.template("index.html")),
self.cache("index.html"), filter = filter)
assert u_("""<div>
filtered_値1
filtered_値2
</div>
<div>
<p>value1</p>
</div>
""") == embpy.render({"text_values": [u_("値1"), u_("値2")],
"html_values": [u_("<p>value1</p>")]})
def test_compound_statements(self):
embpy = Embpy(codecs.open(self.template("compound_statements.html")),
self.cache("compound_statements.html"), encoding="cp932")
assert u_(""" 表示される(SJIS)
while
""") == embpy.render()
def test_syntax_error(self):
embpy = Embpy(codecs.open(self.template("syntax_error.html")),
self.cache("syntax_error.html"))
try:
embpy.render({"text_values":[]})
assert False
except SyntaxError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
output = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
assert "File \"<string>\", line 6" in output
def test_occur_errors_in_rendering(self):
embpy = Embpy(codecs.open(self.template("index.html")),
self.cache("index.html"))
try:
            embpy.render() # <= template values are not given by the caller
except NameError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
output = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
print(output)
assert "File \"<string>\", line 6" in output
assert "NameError: name 'text_values' is not defined" in output
else:
assert False
# }}}
class TestRendererAndHelper(Base):
TEMPLATE_DIR = os.path.join(Base.TEST_DIR, "templates")
CACHE_DIR = os.path.join(Base.TEST_DIR, "templates", "caches")
def clear_caches(self):
if os.path.exists(self.CACHE_DIR):
for v in os.listdir(self.CACHE_DIR):
os.remove(os.path.join(self.CACHE_DIR, v))
else:
os.mkdir(self.CACHE_DIR)
def setup_method(self, method):
Base.setup_method(self, method)
self.clear_caches()
return
def teardown_method(self, method):
Base.teardown_method(self, method)
self.clear_caches()
return
def test_render_file(self):
renderer = Renderer(self.TEMPLATE_DIR, self.CACHE_DIR)
assert u_("""<div>
値1
値2
</div>
<div>
<p>value1</p>
</div>
""") == renderer.render_file("index", {"text_values": [u_("値1"), u_("値2")],
"html_values": [u_("<p>value1</p>")]})
def test_render_file_with_encoding(self):
renderer = Renderer(self.TEMPLATE_DIR, self.CACHE_DIR)
assert u_(""" 表示される(SJIS)
while
""") == renderer.render_file("compound_statements", encoding="cp932")
def test_render_string(self):
renderer = Renderer(self.TEMPLATE_DIR, self.CACHE_DIR)
assert u_("""文字:あ""") == renderer.render_string(u_("""文字:<%= v %>"""), {"v": u_("あ")})
def test_render_with_layouts(self):
renderer = Renderer(self.TEMPLATE_DIR, self.CACHE_DIR)
assert u_("""<body>
<h1>layout test</h1>
<div>body</div>
</body>
""") == renderer.contents({})
def test_capture(self):
renderer = Renderer(self.TEMPLATE_DIR, self.CACHE_DIR)
assert u_("""
連結
<div>
ok
</div>
""") == renderer.capture_outputs({})
def test_htmlquote(self):
h = Helper()
assert u_("&<>'"") == h.htmlquote(u_("&<>'\""))
assert u_("&<>'"") == Helper.htmlquote(u_("&<>'\""))
|
'''
Classifier : Decision Tree (a K Nearest Neighbour variant is left commented out in the code)
DataSet : Inbuilt Wine Predictor Dataset
Features : Alcohol, Malic acid , Ash, Alcalinity of ash , Magnesium ,Total phenols , Flavanoids ,
: Nonflavanoid phenols , Proanthocyanins , Color intensity, Hue , OD280/OD315 of diluted wines, Proline.
Labels : Class 1, Class 2, Class 3
Training Dataset : 50% of 178 Entries
Testing Dataset : 50% of 178 Entries
Author : Prasad Dangare
Function Name : Wine_Predictor
'''
# ********************
# Imports
# ********************
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import tree
# ********************
# ML Operation
# ********************
def Wine_Predictor():
wine = load_wine()
#print(wine.data)
print("\nTotal Volume Of Dataset..")
print(wine.data.shape)
print("\n")
#print(wine.target)
print("Grade Of Wines In Class..")
print(wine.target_names)
print("\n")
print("13 Features Of Wines...")
print(wine.feature_names)
print("\n")
Data = wine.data
Target = wine.target
Data_train, Data_test, Target_train, Target_test = train_test_split(Data, Target, test_size = 0.5)
print("Data Split For Training Is 50%")
print(Data_train.shape)
print("\n")
print("Data Split For Testing Is 50%")
print(Data_test.shape)
print("\n")
#k = 7
#cobj = KNeighborsClassifier(n_neighbors = k)
cobj = tree.DecisionTreeClassifier()
cobj.fit(Data_train, Target_train)
output = cobj.predict(Data_test)
Accuracy = accuracy_score(Target_test, output)
print("Accuracy Using Decision Tree is : ", Accuracy*100, "%")
#Test_Dataset = [[14.23,1.71,2.43,15.6,127,2.8,3.06,0.28,2.29,5.64,1.04,3.92,1065]] # step 3, 5 (class 0)
#Test_Dataset = [[12.37,0.94,1.36,10.6,88,1.98,0.57,0.28,0.42,1.95,1.05,1.82,520]] # (class 1)
Test_Dataset = [[12.86,1.35,2.32,18,122,1.51,1.25,0.21,0.94,4.1,0.76,1.29,630]] # (class 2)
Result = cobj.predict(Test_Dataset)
print("Wine Grade Is : ", wine.target_names [Result])
print("\n")
# ********************
# Entry Point
# ********************
def main():
print("\t\t__________________Wine Predictor___________________")
Wine_Predictor()
# ********************
# Starter
# ********************
if __name__ == "__main__":
main()
|
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import healpy as hp
from scipy.stats import binned_statistic
from ipywidgets import widgets
from IPython.display import display
import time
# LSST libraries, MAF metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.db as db
from lsst.sims.featureScheduler import utils as schedUtils
import lsst.sims.maf.metricBundles as mb
import lsst.sims.maf.plots as plots
import lsst.sims.maf.batches as batches
|
class IdentifierManager(object):
def __init__(self, start_id = 0):
self.next_id = start_id
def next(self):
self.next_id += 1
return self.next_id - 1
def next_n(self, n):
ret = xrange(self.next_id, self.next_id + n)
self.next_id += n
return ret
def get_identifiers(self):
return set(xrange(self.next_id))
def all_ids(self):
return set(xrange(self.next_id))
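# Illustrative usage (not part of the original module): identifiers are handed out
# sequentially starting from start_id, and next_n reserves a contiguous block.
#     mgr = IdentifierManager()
#     mgr.next()            # -> 0
#     mgr.next()            # -> 1
#     list(mgr.next_n(3))   # -> [2, 3, 4]
#     mgr.all_ids()         # -> {0, 1, 2, 3, 4}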
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorboard.plugins.mesh.summary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.mesh import summary
from tensorboard.plugins.mesh import metadata
from tensorboard.plugins.mesh import plugin_data_pb2
from tensorboard.plugins.mesh import test_utils
class MeshSummaryTest(tf.test.TestCase):
def pb_via_op(self, summary_op):
"""Parses pb proto."""
actual_pbtxt = summary_op.eval()
actual_proto = summary_pb2.Summary()
actual_proto.ParseFromString(actual_pbtxt)
return actual_proto
def get_components(self, proto):
return metadata.parse_plugin_metadata(
proto.metadata.plugin_data.content
).components
def verify_proto(self, proto, name):
"""Validates proto."""
self.assertEqual(3, len(proto.value))
self.assertEqual("%s_VERTEX" % name, proto.value[0].tag)
self.assertEqual("%s_FACE" % name, proto.value[1].tag)
self.assertEqual("%s_COLOR" % name, proto.value[2].tag)
self.assertEqual(14, self.get_components(proto.value[0]))
self.assertEqual(14, self.get_components(proto.value[1]))
self.assertEqual(14, self.get_components(proto.value[2]))
def test_get_tensor_summary(self):
"""Tests proper creation of tensor summary with mesh plugin
metadata."""
name = "my_mesh"
display_name = "my_display_name"
description = "my mesh is the best of meshes"
tensor_data = test_utils.get_random_mesh(100)
components = 14
with tf.compat.v1.Graph().as_default():
tensor_summary = summary._get_tensor_summary(
name,
display_name,
description,
tensor_data.vertices,
plugin_data_pb2.MeshPluginData.VERTEX,
components,
"",
None,
)
with self.test_session():
proto = self.pb_via_op(tensor_summary)
self.assertEqual("%s_VERTEX" % name, proto.value[0].tag)
self.assertEqual(
metadata.PLUGIN_NAME,
proto.value[0].metadata.plugin_data.plugin_name,
)
self.assertEqual(
components, self.get_components(proto.value[0])
)
def test_op(self):
"""Tests merged summary with different types of data."""
name = "my_mesh"
tensor_data = test_utils.get_random_mesh(
100, add_faces=True, add_colors=True
)
config_dict = {"foo": 1}
with tf.compat.v1.Graph().as_default():
tensor_summary = summary.op(
name,
tensor_data.vertices,
faces=tensor_data.faces,
colors=tensor_data.colors,
config_dict=config_dict,
)
with self.test_session() as sess:
proto = self.pb_via_op(tensor_summary)
self.verify_proto(proto, name)
plugin_metadata = metadata.parse_plugin_metadata(
proto.value[0].metadata.plugin_data.content
)
self.assertEqual(
json.dumps(config_dict, sort_keys=True),
plugin_metadata.json_config,
)
def test_pb(self):
"""Tests merged summary protobuf with different types of data."""
name = "my_mesh"
tensor_data = test_utils.get_random_mesh(
100, add_faces=True, add_colors=True
)
config_dict = {"foo": 1}
proto = summary.pb(
name,
tensor_data.vertices,
faces=tensor_data.faces,
colors=tensor_data.colors,
config_dict=config_dict,
)
self.verify_proto(proto, name)
plugin_metadata = metadata.parse_plugin_metadata(
proto.value[0].metadata.plugin_data.content
)
self.assertEqual(
json.dumps(config_dict, sort_keys=True), plugin_metadata.json_config
)
if __name__ == "__main__":
tf.test.main()
|
from .anvilGoogleAuth import *
|
import pytest
from fileDownloader import FileDownloader
import requests
import os
from customErrors import DirectoryNotFoundError
import logging
from zipfile import ZipFile
class TestFileDownloader:
def test_validUrl(self, tmpdir):
with pytest.raises(requests.exceptions.MissingSchema):
FileDownloader('dakfldfjasfk', str(tmpdir))
@pytest.mark.webtest
def test_onlineUrl(self, tmpdir):
with pytest.raises(requests.exceptions.HTTPError):
#url is a valid test url that returns a 404
FileDownloader('https://reqres.in/api/users/23', str(tmpdir))
def test_validSaveDir(self, tmpdir):
validUrl = r'https://reqres.in/api/users?page=2'
invalidDir = os.path.join(str(tmpdir), 'nonExistent')
with pytest.raises(DirectoryNotFoundError):
FileDownloader(validUrl, invalidDir)
@pytest.mark.webtest
def test_downloadZip(self, tmpdir):
fileUrl = r'https://www.stats.govt.nz/assets/Uploads/Business-financial-data/Business-financial-data-March-2021-quarter/Download-data/business-financial-data-march-2021-quarter-csv.zip'
fileName = r'business-financial-data-march-2021-quarter-csv.zip'
downloader = FileDownloader(fileUrl, str(tmpdir))
try:
path = downloader.download()
except Exception as e:
logging.exception(e)
assert False
else:
assert path == os.path.join(str(tmpdir), fileName)
#asserting that the zipfile is valid and uncorrupted
assert ZipFile(path).testzip() is None
|
import discord
from discord.ext import commands
import cogs
import settings
import random
bot = commands.Bot(command_prefix='=')
bot.add_cog(cogs.Moderation(bot))
bot.add_cog(cogs.Fun(bot))
bot.add_cog(cogs.Utility(bot))
@bot.event
async def on_ready():
activity = discord.Activity(name=bot.command_prefix+'help', type=discord.ActivityType.watching)
await bot.change_presence(activity = activity)
print('Logged in as '+str(bot.user)+'\n---\nGuilds: '+str(len(bot.guilds)))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, discord.ext.commands.MissingPermissions):
await ctx.send("You don't have the required permissions to use that command.\nMissing permission(s): `"+', '.join(error.missing_perms)+'`.')
if isinstance(error, discord.ext.commands.BotMissingPermissions):
await ctx.send("I don't have permission to do that here!\nAsk a server owner to give me the `"+', '.join(error.missing_perms)+"` permission(s).")
if isinstance(error, discord.ext.commands.MissingRequiredArgument):
await ctx.send("Looks like you're missing a required argument there.")
if isinstance(error, discord.ext.commands.BadArgument):
await ctx.send("Invalid argument(s) provided.")
print(str(error))
class AutoArchHelpCommand(commands.MinimalHelpCommand):
def get_command_signature(self, command):
return '{0.clean_prefix}{1.qualified_name} {1.signature}'.format(self, command)
async def send_bot_help(self, mapping):
ctx = self.context
help_embed = discord.Embed(
title = 'Commands',
description = self.get_opening_note(),
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
for cog in mapping.keys():
name = 'Other'
if cog != None:
name = cog.qualified_name
if hasattr(cog, 'custom_icon'):
name = cog.custom_icon + ' ' + name
help_embed.add_field(name = name, value = ', '.join([command.name for command in mapping[cog]]))
await self.get_destination().send(embed = help_embed)
async def send_command_help(self, command):
ctx = self.context
help_embed = discord.Embed(
title = self.get_command_signature(command),
description = command.help,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
async def send_cog_help(self, cog):
ctx = self.context
commands = ', '.join([command.qualified_name for command in cog.get_commands()])
help_embed = discord.Embed(
title = cog.qualified_name,
description = commands,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
async def send_group_help(self, group):
ctx = self.context
commands = ', '.join([command.qualified_name for command in group.commands])
help_embed = discord.Embed(
title = group.qualified_name + ' Subcommands',
description = commands,
colour = discord.Colour.gold()
)
help_embed.set_footer(text = random.choice(settings.quotes_short))
help_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await self.get_destination().send(embed = help_embed)
@bot.command(help = 'Link to the bot\'s GitHub repo.')
async def source(ctx):
source_embed = discord.Embed(
title = 'Bot Source',
description = 'The full source code is available at\n'+settings.source+'\nand is licensed under '+settings.license+'.',
colour = discord.Colour.gold()
)
source_embed.set_thumbnail(url = ctx.bot.user.avatar_url)
source_embed.set_footer(text = random.choice(settings.quotes_short))
source_embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
await ctx.send(embed = source_embed)
bot.help_command = AutoArchHelpCommand()
bot.run(settings.token)
|
daftar = ['jeruk','mangga','apel']
while True:
    cari = raw_input('Search fruit: ')
    if not cari:
        print "done"
        break
    if cari in daftar:
        print "Found at index",
        print 1 + daftar.index(cari)
    else:
        print "not found"
|
import os
import requests
import logging
import json
from time import time
from glob import glob
from .file_system_cache import FileSystemCache
DEFAULT_ENDPOINT = 'https://pangeabio.io'
logger = logging.getLogger('pangea_api') # Same name as calling module
logger.addHandler(logging.NullHandler()) # No output unless configured by calling program
def clean_url(url):
if url[-1] == '/':
url = url[:-1]
return url
class TokenAuth(requests.auth.AuthBase):
"""Attaches MetaGenScope Token Authentication to the given Request object."""
def __init__(self, token):
self.token = token
def __call__(self, request):
"""Add authentication header to request."""
request.headers['Authorization'] = f'Token {self.token}'
return request
def __str__(self):
"""Return string representation of TokenAuth."""
return self.token
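# Minimal illustration (the token value and request path are made up): passing a
# TokenAuth instance to requests adds the header 'Authorization: Token <token>' to
# each outgoing request, e.g.
#     requests.get(f'{DEFAULT_ENDPOINT}/api/samples', auth=TokenAuth('my-token'))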
class PangeaGeneralError(requests.exceptions.HTTPError):
pass
class PangeaNotFoundError(PangeaGeneralError):
pass
class PangeaForbiddenError(PangeaGeneralError):
pass
class PangeaInternalError(PangeaGeneralError):
pass
class PangeaOtherError(PangeaGeneralError):
pass
class Knex:
def __init__(self, endpoint_url=DEFAULT_ENDPOINT):
self.endpoint_url = endpoint_url
self.endpoint_url += '/api'
self.auth = None
self.headers = {'Accept': 'application/json'}
self.cache = FileSystemCache()
def _logging_info(self, **kwargs):
base = {'endpoint_url': self.endpoint_url, 'headers': self.headers}
base.update(kwargs)
return base
def _clean_url(self, url, url_options={}):
url = clean_url(url)
url = url.replace(self.endpoint_url, '')
if url[0] == '/':
url = url[1:]
if url_options:
opts = [f'{key}={val}' for key, val in url_options.items()]
opts = '&'.join(opts)
if '?' in url:
url += '&' + opts
else:
url += '?' + opts
return url
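    # Worked example of the URL normalisation above (the path and option values are
    # illustrative): the endpoint prefix and leading slash are stripped, and
    # url_options are appended as a query string with '&' when one already exists.
    #     self._clean_url('/samples/123?fields=name', {'page': 2})
    #     # -> 'samples/123?fields=name&page=2'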
def add_auth_token(self, token):
self.auth = TokenAuth(token)
def login(self, username, password):
d = self._logging_info(email=username, password='*' * len(password))
logger.debug(f'Sending log in request. {d}')
blob = self.cache.get_cached_blob(username)
if not blob:
response = requests.post(
f'{self.endpoint_url}/auth/token/login',
headers=self.headers,
json={
'email': username,
'password': password,
}
)
response.raise_for_status()
logger.debug(f'Received log in response. {response.json()}')
blob = response.json()
self.cache.cache_blob(username, blob)
self.add_auth_token(blob['auth_token'])
return self
def _handle_response(self, response, json_response=True):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if response.status_code == 403:
raise PangeaForbiddenError(e)
if response.status_code == 404:
raise PangeaNotFoundError(e)
if response.status_code == 500:
raise PangeaInternalError(e)
raise PangeaOtherError(e)
except Exception as e:
logger.debug(f'Request failed. {response}\n{response.content}')
raise
if json_response:
return response.json()
return response
def get(self, url, url_options={}, **kwargs):
url = self._clean_url(url, url_options=url_options)
d = self._logging_info(url=url, auth_token=self.auth)
logger.debug(f'Sending GET request. {d}')
response = requests.get(
f'{self.endpoint_url}/{url}',
headers=self.headers,
auth=self.auth,
)
return self._handle_response(response, **kwargs)
def post(self, url, json={}, url_options={}, **kwargs):
url = self._clean_url(url, url_options=url_options)
d = self._logging_info(url=url, auth_token=self.auth, json=json)
logger.debug(f'Sending POST request. {d}')
response = requests.post(
f'{self.endpoint_url}/{url}',
headers=self.headers,
auth=self.auth,
json=json
)
return self._handle_response(response, **kwargs)
def put(self, url, json={}, url_options={}, **kwargs):
url = self._clean_url(url, url_options=url_options)
d = self._logging_info(url=url, auth_token=self.auth, json=json)
logger.debug(f'Sending PUT request. {d}')
response = requests.put(
f'{self.endpoint_url}/{url}',
headers=self.headers,
auth=self.auth,
json=json
)
return self._handle_response(response, **kwargs)
def patch(self, url, json={}, url_options={}, **kwargs):
url = self._clean_url(url, url_options=url_options)
d = self._logging_info(url=url, auth_token=self.auth, json=json)
logger.debug(f'Sending PATCH request. {d}')
response = requests.patch(
f'{self.endpoint_url}/{url}',
headers=self.headers,
auth=self.auth,
json=json
)
return self._handle_response(response, **kwargs)
def delete(self, url, url_options={}, **kwargs):
url = self._clean_url(url, url_options=url_options)
d = self._logging_info(url=url, auth_token=self.auth)
logger.debug(f'Sending DELETE request. {d}')
response = requests.delete(
f'{self.endpoint_url}/{url}',
headers=self.headers,
auth=self.auth,
)
logger.debug(f'DELETE request response:\n{response}')
return self._handle_response(response, json_response=False, **kwargs)
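# A minimal end-to-end sketch (the credentials and the 'organizations' route are
# assumptions; nothing here is executed on import):
#     knex = Knex('https://pangeabio.io')
#     knex.login('user@example.com', 'password')        # caches the auth token on disk
#     orgs = knex.get('organizations', url_options={'page': 1})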
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/billing/budgets_v1beta1/proto/budget_model.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.type import money_pb2 as google_dot_type_dot_money__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/billing/budgets_v1beta1/proto/budget_model.proto",
package="google.cloud.billing.budgets.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n(com.google.cloud.billing.budgets.v1beta1P\001ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgets"
),
serialized_pb=_b(
'\n=google/cloud/billing/budgets_v1beta1/proto/budget_model.proto\x12$google.cloud.billing.budgets.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x17google/type/money.proto"\xde\x03\n\x06\x42udget\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12H\n\rbudget_filter\x18\x03 \x01(\x0b\x32,.google.cloud.billing.budgets.v1beta1.FilterB\x03\xe0\x41\x01\x12G\n\x06\x61mount\x18\x04 \x01(\x0b\x32\x32.google.cloud.billing.budgets.v1beta1.BudgetAmountB\x03\xe0\x41\x02\x12Q\n\x0fthreshold_rules\x18\x05 \x03(\x0b\x32\x33.google.cloud.billing.budgets.v1beta1.ThresholdRuleB\x03\xe0\x41\x02\x12S\n\x10\x61ll_updates_rule\x18\x06 \x01(\x0b\x32\x34.google.cloud.billing.budgets.v1beta1.AllUpdatesRuleB\x03\xe0\x41\x01\x12\x11\n\x04\x65tag\x18\x07 \x01(\tB\x03\xe0\x41\x01:]\xea\x41Z\n$billingbudgets.googleapis.com/Budget\x12\x32\x62illingAccounts/{billing_account}/budgets/{budget}"\xa5\x01\n\x0c\x42udgetAmount\x12.\n\x10specified_amount\x18\x01 \x01(\x0b\x32\x12.google.type.MoneyH\x00\x12T\n\x12last_period_amount\x18\x02 \x01(\x0b\x32\x36.google.cloud.billing.budgets.v1beta1.LastPeriodAmountH\x00\x42\x0f\n\rbudget_amount"\x12\n\x10LastPeriodAmount"\xcd\x01\n\rThresholdRule\x12\x1e\n\x11threshold_percent\x18\x01 \x01(\x01\x42\x03\xe0\x41\x02\x12S\n\x0bspend_basis\x18\x02 \x01(\x0e\x32\x39.google.cloud.billing.budgets.v1beta1.ThresholdRule.BasisB\x03\xe0\x41\x01"G\n\x05\x42\x61sis\x12\x15\n\x11\x42\x41SIS_UNSPECIFIED\x10\x00\x12\x11\n\rCURRENT_SPEND\x10\x01\x12\x14\n\x10\x46ORECASTED_SPEND\x10\x02"H\n\x0e\x41llUpdatesRule\x12\x19\n\x0cpubsub_topic\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1b\n\x0eschema_version\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x90\x02\n\x06\x46ilter\x12\x15\n\x08projects\x18\x01 \x03(\tB\x03\xe0\x41\x01\x12\x66\n\x16\x63redit_types_treatment\x18\x04 \x01(\x0e\x32\x41.google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatmentB\x03\xe0\x41\x01\x12\x15\n\x08services\x18\x03 \x03(\tB\x03\xe0\x41\x01"p\n\x14\x43reditTypesTreatment\x12&\n"CREDIT_TYPES_TREATMENT_UNSPECIFIED\x10\x00\x12\x17\n\x13INCLUDE_ALL_CREDITS\x10\x01\x12\x17\n\x13\x45XCLUDE_ALL_CREDITS\x10\x02\x42y\n(com.google.cloud.billing.budgets.v1beta1P\x01ZKgoogle.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1;budgetsb\x06proto3'
),
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_type_dot_money__pb2.DESCRIPTOR,
],
)
_THRESHOLDRULE_BASIS = _descriptor.EnumDescriptor(
name="Basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.Basis",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="BASIS_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="CURRENT_SPEND", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FORECASTED_SPEND",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=992,
serialized_end=1063,
)
_sym_db.RegisterEnumDescriptor(_THRESHOLDRULE_BASIS)
_FILTER_CREDITTYPESTREATMENT = _descriptor.EnumDescriptor(
name="CreditTypesTreatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.CreditTypesTreatment",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CREDIT_TYPES_TREATMENT_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="INCLUDE_ALL_CREDITS",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="EXCLUDE_ALL_CREDITS",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1300,
serialized_end=1412,
)
_sym_db.RegisterEnumDescriptor(_FILTER_CREDITTYPESTREATMENT)
_BUDGET = _descriptor.Descriptor(
name="Budget",
full_name="google.cloud.billing.budgets.v1beta1.Budget",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.cloud.billing.budgets.v1beta1.Budget.display_name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="budget_filter",
full_name="google.cloud.billing.budgets.v1beta1.Budget.budget_filter",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="amount",
full_name="google.cloud.billing.budgets.v1beta1.Budget.amount",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="threshold_rules",
full_name="google.cloud.billing.budgets.v1beta1.Budget.threshold_rules",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="all_updates_rule",
full_name="google.cloud.billing.budgets.v1beta1.Budget.all_updates_rule",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="etag",
full_name="google.cloud.billing.budgets.v1beta1.Budget.etag",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b(
"\352AZ\n$billingbudgets.googleapis.com/Budget\0222billingAccounts/{billing_account}/budgets/{budget}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=189,
serialized_end=667,
)
_BUDGETAMOUNT = _descriptor.Descriptor(
name="BudgetAmount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="specified_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.specified_amount",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_period_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.last_period_amount",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="budget_amount",
full_name="google.cloud.billing.budgets.v1beta1.BudgetAmount.budget_amount",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=670,
serialized_end=835,
)
_LASTPERIODAMOUNT = _descriptor.Descriptor(
name="LastPeriodAmount",
full_name="google.cloud.billing.budgets.v1beta1.LastPeriodAmount",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=837,
serialized_end=855,
)
_THRESHOLDRULE = _descriptor.Descriptor(
name="ThresholdRule",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="threshold_percent",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.threshold_percent",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="spend_basis",
full_name="google.cloud.billing.budgets.v1beta1.ThresholdRule.spend_basis",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_THRESHOLDRULE_BASIS],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=858,
serialized_end=1063,
)
_ALLUPDATESRULE = _descriptor.Descriptor(
name="AllUpdatesRule",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pubsub_topic",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="schema_version",
full_name="google.cloud.billing.budgets.v1beta1.AllUpdatesRule.schema_version",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\002"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1065,
serialized_end=1137,
)
_FILTER = _descriptor.Descriptor(
name="Filter",
full_name="google.cloud.billing.budgets.v1beta1.Filter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="projects",
full_name="google.cloud.billing.budgets.v1beta1.Filter.projects",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="credit_types_treatment",
full_name="google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment",
index=1,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="services",
full_name="google.cloud.billing.budgets.v1beta1.Filter.services",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\001"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_FILTER_CREDITTYPESTREATMENT],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1140,
serialized_end=1412,
)
_BUDGET.fields_by_name["budget_filter"].message_type = _FILTER
_BUDGET.fields_by_name["amount"].message_type = _BUDGETAMOUNT
_BUDGET.fields_by_name["threshold_rules"].message_type = _THRESHOLDRULE
_BUDGET.fields_by_name["all_updates_rule"].message_type = _ALLUPDATESRULE
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].message_type = google_dot_type_dot_money__pb2._MONEY
_BUDGETAMOUNT.fields_by_name["last_period_amount"].message_type = _LASTPERIODAMOUNT
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["specified_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"specified_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_BUDGETAMOUNT.oneofs_by_name["budget_amount"].fields.append(
_BUDGETAMOUNT.fields_by_name["last_period_amount"]
)
_BUDGETAMOUNT.fields_by_name[
"last_period_amount"
].containing_oneof = _BUDGETAMOUNT.oneofs_by_name["budget_amount"]
_THRESHOLDRULE.fields_by_name["spend_basis"].enum_type = _THRESHOLDRULE_BASIS
_THRESHOLDRULE_BASIS.containing_type = _THRESHOLDRULE
_FILTER.fields_by_name[
"credit_types_treatment"
].enum_type = _FILTER_CREDITTYPESTREATMENT
_FILTER_CREDITTYPESTREATMENT.containing_type = _FILTER
DESCRIPTOR.message_types_by_name["Budget"] = _BUDGET
DESCRIPTOR.message_types_by_name["BudgetAmount"] = _BUDGETAMOUNT
DESCRIPTOR.message_types_by_name["LastPeriodAmount"] = _LASTPERIODAMOUNT
DESCRIPTOR.message_types_by_name["ThresholdRule"] = _THRESHOLDRULE
DESCRIPTOR.message_types_by_name["AllUpdatesRule"] = _ALLUPDATESRULE
DESCRIPTOR.message_types_by_name["Filter"] = _FILTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Budget = _reflection.GeneratedProtocolMessageType(
"Budget",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGET,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A budget is a plan that describes what you expect to spend on Cloud
projects, plus the rules to execute as spend is tracked against that
plan, (for example, send an alert when 90% of the target spend is met).
Currently all plans are monthly budgets so the usage period(s) tracked
are implied (calendar months of usage back-to-back).
Attributes:
name:
Output only. Resource name of the budget. The resource name
implies the scope of a budget. Values are of the form
``billingAccounts/{billingAccountId}/budgets/{budgetId}``.
display_name:
User data for display name in UI. Validation: <= 60 chars.
budget_filter:
Optional. Filters that define which resources are used to
compute the actual spend against the budget.
amount:
Required. Budgeted amount.
threshold_rules:
Required. Rules that trigger alerts (notifications of
thresholds being crossed) when spend exceeds the specified
percentages of the budget.
all_updates_rule:
Optional. Rules to apply to all updates to the actual spend,
regardless of the thresholds set in ``threshold_rules``.
etag:
Optional. Etag to validate that the object is unchanged for a
read-modify-write operation. An empty etag will cause an
update to overwrite other changes.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Budget)
),
)
_sym_db.RegisterMessage(Budget)
BudgetAmount = _reflection.GeneratedProtocolMessageType(
"BudgetAmount",
(_message.Message,),
dict(
DESCRIPTOR=_BUDGETAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""The budgeted amount for each usage period.
Attributes:
budget_amount:
Specification for what amount to use as the budget.
specified_amount:
A specified amount to use as the budget. ``currency_code`` is
optional. If specified, it must match the currency of the
billing account. The ``currency_code`` is provided on output.
last_period_amount:
Use the last period's actual spend as the budget for the
present period.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.BudgetAmount)
),
)
_sym_db.RegisterMessage(BudgetAmount)
LastPeriodAmount = _reflection.GeneratedProtocolMessageType(
"LastPeriodAmount",
(_message.Message,),
dict(
DESCRIPTOR=_LASTPERIODAMOUNT,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""Describes a budget amount targeted to last period's spend. At this time,
the amount is automatically 100% of last period's spend; that is, there
are no other options yet. Future configuration will be described here
(for example, configuring a percentage of last period's spend).
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.LastPeriodAmount)
),
)
_sym_db.RegisterMessage(LastPeriodAmount)
ThresholdRule = _reflection.GeneratedProtocolMessageType(
"ThresholdRule",
(_message.Message,),
dict(
DESCRIPTOR=_THRESHOLDRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""ThresholdRule contains a definition of a threshold which triggers an
alert (a notification of a threshold being crossed) to be sent when
spend goes above the specified amount. Alerts are automatically e-mailed
to users with the Billing Account Administrator role or the Billing
Account User role. The thresholds here have no effect on notifications
sent to anything configured under ``Budget.all_updates_rule``.
Attributes:
threshold_percent:
Required. Send an alert when this threshold is exceeded. This
is a 1.0-based percentage, so 0.5 = 50%. Validation: non-
negative number.
spend_basis:
Optional. The type of basis used to determine if spend has
passed the threshold. Behavior defaults to CURRENT\_SPEND if
not set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.ThresholdRule)
),
)
_sym_db.RegisterMessage(ThresholdRule)
AllUpdatesRule = _reflection.GeneratedProtocolMessageType(
"AllUpdatesRule",
(_message.Message,),
dict(
DESCRIPTOR=_ALLUPDATESRULE,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""AllUpdatesRule defines notifications that are sent on every update to
the billing account's spend, regardless of the thresholds defined using
threshold rules.
Attributes:
pubsub_topic:
Required. The name of the Cloud Pub/Sub topic where budget
related messages will be published, in the form
``projects/{project_id}/topics/{topic_id}``. Updates are sent
at regular intervals to the topic. The topic needs to be
created before the budget is created; see
https://cloud.google.com/billing/docs/how-to/budgets#manage-
notifications for more details. Caller is expected to have
``pubsub.topics.setIamPolicy`` permission on the topic when
it's set for a budget, otherwise, the API call will fail with
PERMISSION\_DENIED. See
https://cloud.google.com/pubsub/docs/access-control for more
details on Pub/Sub roles and permissions.
schema_version:
Required. The schema version of the notification. Only "1.0"
is accepted. It represents the JSON schema as defined in
https://cloud.google.com/billing/docs/how-
to/budgets#notification\_format
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.AllUpdatesRule)
),
)
_sym_db.RegisterMessage(AllUpdatesRule)
Filter = _reflection.GeneratedProtocolMessageType(
"Filter",
(_message.Message,),
dict(
DESCRIPTOR=_FILTER,
__module__="google.cloud.billing.budgets_v1beta1.proto.budget_model_pb2",
__doc__="""A filter for a budget, limiting the scope of the cost to calculate.
Attributes:
projects:
Optional. A set of projects of the form
``projects/{project_id}``, specifying that usage from only
this set of projects should be included in the budget. If
omitted, the report will include all usage for the billing
account, regardless of which project the usage occurred on.
Only zero or one project can be specified currently.
credit_types_treatment:
Optional. If not set, default behavior is
``INCLUDE_ALL_CREDITS``.
services:
Optional. A set of services of the form
``services/{service_id}``, specifying that usage from only
this set of services should be included in the budget. If
omitted, the report will include usage for all the services.
The service names are available through the Catalog API:
https://cloud.google.com/billing/v1/how-tos/catalog-api.
""",
# @@protoc_insertion_point(class_scope:google.cloud.billing.budgets.v1beta1.Filter)
),
)
_sym_db.RegisterMessage(Filter)
DESCRIPTOR._options = None
_BUDGET.fields_by_name["name"]._options = None
_BUDGET.fields_by_name["budget_filter"]._options = None
_BUDGET.fields_by_name["amount"]._options = None
_BUDGET.fields_by_name["threshold_rules"]._options = None
_BUDGET.fields_by_name["all_updates_rule"]._options = None
_BUDGET.fields_by_name["etag"]._options = None
_BUDGET._options = None
_THRESHOLDRULE.fields_by_name["threshold_percent"]._options = None
_THRESHOLDRULE.fields_by_name["spend_basis"]._options = None
_ALLUPDATESRULE.fields_by_name["pubsub_topic"]._options = None
_ALLUPDATESRULE.fields_by_name["schema_version"]._options = None
_FILTER.fields_by_name["projects"]._options = None
_FILTER.fields_by_name["credit_types_treatment"]._options = None
_FILTER.fields_by_name["services"]._options = None
# @@protoc_insertion_point(module_scope)
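# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the protoc output):
# constructing a Budget message with the classes registered above. Field names
# follow the docstrings in this module; the project id, amount and thresholds
# are made-up values, and google.type.money_pb2 is assumed to be importable
# (it backs BudgetAmount.specified_amount).
if __name__ == "__main__":
    from google.type import money_pb2

    example_budget = Budget(
        display_name="Monthly infra budget",
        budget_filter=Filter(projects=["projects/my-project-id"]),
        amount=BudgetAmount(
            specified_amount=money_pb2.Money(currency_code="USD", units=1000)
        ),
        threshold_rules=[
            ThresholdRule(threshold_percent=0.5),  # alert at 50% of the budget
            ThresholdRule(threshold_percent=0.9),  # alert at 90% of the budget
        ],
    )
    print(example_budget)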
|
# ABC167B - Easy Linear Programming
def main():
# input
A, B, C, K = map(int, input().split())
    # compute: greedily take the A cards worth 1 first, then the B cards worth 0,
    # and only dip into the -1 cards for whatever remains
    kotae = min(A, K) - max(0, K - A - B)
# output
print(kotae)
if __name__ == '__main__':
main()
|
import re
import asyncio
import sys
import os
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from utils import *
from instaloader import Profile
from pyrogram import Client, filters
from config import Config
chat_idd = int(Config.chat_idd)
USER = Config.USER
OWNER = Config.OWNER
HOME_TEXT = Config.HOME_TEXT
HOME_TEXT_OWNER = Config.HOME_TEXT_OWNER
HELP = Config.HELP
session = f"./{USER}"
STATUS = Config.STATUS
insta = Config.L
buttons = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Shnider_Bots'),
InlineKeyboardButton("🤖Other Bots", url="https://t.me/Shnider_Bots"),
],
[
InlineKeyboardButton("🔗Source Code", url="https://t.me/Shnider_Bots"),
InlineKeyboardButton("🧩Deploy Own Bot", url="https://t.me/Shnider_Bots")
],
[
InlineKeyboardButton("👨🏼🦯How To Use?", callback_data="help#subin"),
InlineKeyboardButton("⚙️Update Channel", url="https://t.me/Shnider_Bots")
]
]
)
@Client.on_message(filters.command("start") & filters.private)
async def account(bot, message):
if str(message.from_user.id) != OWNER:
await message.reply_text(
HOME_TEXT.format(message.from_user.first_name, message.from_user.id, USER, USER, USER, int(OWNER)),
disable_web_page_preview=True,
            reply_markup=buttons
)
return
if 1 in STATUS:
m = await message.reply_text("Getting Your data")
try:
profile = Profile.own_profile(insta.context)
mediacount = profile.mediacount
name = profile.full_name
bio = profile.biography
profilepic = profile.profile_pic_url
username = profile.username
igtvcount = profile.igtvcount
followers = profile.followers
following = profile.followees
reply_markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton("تحميل المنشورات المحفوضة 📥 ", callback_data=f"saved#{username}")
],
[
InlineKeyboardButton(" ", callback_data="0")
],
]
)
await m.delete()
await bot.send_photo(
chat_id=message.from_user.id,
photo=profilepic,
caption=f"🏷 **Name**: {name}\n🔖 **Username**: {profile.username}\n📝**Bio**: {bio}\n📍 **Account Type**: {acc_type(profile.is_private)}\n🏭 **Is Business Account?**: {yes_or_no(profile.is_business_account)}\n👥 **Total Followers**: {followers}\n👥 **Total Following**: {following}\n📸 **Total Posts**: {mediacount}\n📺 **IGTV Videos**: {igtvcount}",
reply_markup=reply_markup
)
except Exception as e:
await m.edit(e)
else:
await message.reply_text("You must login first by /login")
@Client.on_message(filters.command("saved") & filters.private)
async def saved(bot, message):
if str(message.from_user.id) != OWNER:
await message.reply_text(
HOME_TEXT.format(message.from_user.first_name, message.from_user.id, USER, USER, USER, OWNER),
reply_markup=buttons,
disable_web_page_preview=True
)
return
text = message.text
username = USER
if 1 not in STATUS:
await message.reply_text("يجب عليك تسجيل الدخول أولا / تسجيل الدخول")
return
count = None
if " " in text:
cmd, count = text.split(' ')
m = await message.reply_text(f"جاري تنزيل المحفوضات 🔃 .")
chat_id = message.from_user.id
dir = f"{chat_id}/{username}"
await m.edit("بدء التنزيل ..\nقد يستغرق هذا وقتًا أطول اعتمادًا على عدد المنشورات ✅✅ .")
if count:
command = [
"instaloader",
"--no-metadata-json",
"--no-compress-json",
"--no-profile-pic",
"--no-posts",
"--no-captions",
"--no-video-thumbnails",
"--login", USER,
"-f", session,
"--dirname-pattern", dir,
":saved",
"--count", count
]
else:
command = [
"instaloader",
"--no-metadata-json",
"--no-compress-json",
"--no-profile-pic",
"--no-posts",
"--no-captions",
"--no-video-thumbnails",
"--login", USER,
"-f", session,
"--dirname-pattern", dir,
":saved"
]
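    # For reference (hedged illustration, not in the original source): with USER
    # set to e.g. "someuser" and no count given, the list above is equivalent to
    # running
    #   instaloader --no-metadata-json --no-compress-json --no-profile-pic \
    #       --no-posts --no-captions --no-video-thumbnails \
    #       --login someuser -f ./someuser --dirname-pattern <chat_id>/someuser :saved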
await download_insta(command, m, dir)
await upload(m, bot, chat_id, chat_idd, dir)
@Client.on_message(filters.command("help") & filters.private)
async def help(bot, cmd):
await cmd.reply_text(
HELP,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Shnider_Bots'),
InlineKeyboardButton("🤖Other Bots", url="https://t.me/Shnider_Bots")
],
[
InlineKeyboardButton("⚙️Update Channel", url="https://t.me/Shnider_Bots")
]
]
)
)
@Client.on_message(filters.command("restart") & filters.private)
async def stop(bot, cmd):
if str(cmd.from_user.id) != OWNER:
await cmd.reply_text(
HOME_TEXT.format(cmd.from_user.first_name, cmd.from_user.id, USER, USER, USER, OWNER),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Shnider_Bots'),
InlineKeyboardButton("🤖Other Bots", url="https://t.me/Shnider_Bots")
],
[
InlineKeyboardButton("⚙️Update Channel", url="https://t.me/Shnider_Bots")
]
]
)
)
return
msg = await bot.send_message(
text="Restarting your bot..",
chat_id=cmd.from_user.id
)
await asyncio.sleep(2)
await msg.edit("All Processes Stopped and Restarted")
os.execl(sys.executable, sys.executable, *sys.argv)
@Client.on_message(filters.text & filters.private & filters.incoming)
async def start(bot, cmd):
text = cmd.text
if text == "المطور" and str(cmd.from_user.id) != OWNER:
await cmd.reply_text(
HOME_TEXT.format(cmd.from_user.first_name, cmd.from_user.id, USER, USER, USER, OWNER),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Shnider_Bots'),
InlineKeyboardButton("🤖Other Bots", url="https://t.me/Shnider_Bots")
],
[
InlineKeyboardButton("⚙️Update Channel", url="https://t.me/Shnider_Bots")
]
]
)
)
|
# Ticket-query class: writes the tickets it finds to a file so the user can pick one
import json
import re
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning  # used to silence the warning below
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)  # suppress InsecureRequestWarning for verify=False requests
class Crawl:
def __init__(self, departure_date, from_station_chs, to_station_chs):
"""此处为整个查询系统需要用到的属性"""
self.__departure_date = departure_date # 出发日期
self.__from_station_CHS = from_station_chs # 出发地的火车站
self.__to_station_CHS = to_station_chs # 目的地火车站
# 读取当前目录下的城市字典 CityCode.txt
with open('CityCode1.txt', 'r', encoding='gb18030') as city_code:
self.__city = city_code.read()
self.__from_station = re.findall(self.__from_station_CHS + '\|(.+)', self.__city)[0] # 将出发地改为相应的城市代码
self.__to_station = re.findall(self.__to_station_CHS + '\|(.+)', self.__city)[0] # 将目的地地改为相应的城市代码
def getTicketsList(self, js):
"""格式化列车的信息"""
print('正在获取今日车次信息列表')
today_list = [] # 用来保存获取到的火车票信息字典
i = 0 # 火车票的序号,方便选择车次
for item in js['data']['result']:
ticket = item.split('|')[1:] # 以字符 | 分割字符串,得到火车票的信息列表
if ticket[0] != '预订': continue # 此处防止备注栏不是'预订',目的是使序号与 12306buyer 中的余票
ticket_list = {
'序号': str(i),
'车次': ticket[2],
'车次状态': ticket[0],
                '列车始发地': re.findall(r'(.*?)\|' + ticket[3], self.__city)[0],  # convert the origin-station code back to its Chinese city name
                '列车终点站': re.findall(r'(.*?)\|' + ticket[4], self.__city)[0],  # same as above, for the terminus
'出发时间': ticket[7],
'到达时间': ticket[8],
'时间花费': ticket[9],
'商务座': ticket[-3],
'特等座': ticket[-10],
'一等座': ticket[-4],
'二等座': ticket[-5],
'高级卧铺': ticket[-14],
'软卧': ticket[-12],
'硬卧': ticket[-7],
'软座': ticket[-11],
'硬座': ticket[-6],
'无座': ticket[-9],
'其他': ticket[-8]
            }  # the assembled ticket-info dict, ready to be written to a file
            today_list.append(ticket_list)  # collect the dicts so the whole list can be written out at once
i += 1
return today_list
    @staticmethod  # does not use any attributes of the instance itself, so it can be static
    def storeText(today_list):
        """Write the collected ticket-info dicts to a file."""
with open('choice.txt', 'w') as out_file:
for tic in today_list:
for t in tic.keys():
out_file.write(t + ' : ' + tic[t] + '\n')
out_file.write('\n')
def getTicketsJson(self):
"""获取 departure_date 在 12306 网站的 json 数据"""
url = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=' + self.__departure_date + '&le' + \
'ftTicketDTO.from_station=' + self.__from_station + '&leftTicketDTO.to_station=' + self.__to_station + \
              '&purpose_codes=ADULT'  # build the query URL
        html = requests.get(url, verify=False).text  # fetch the train listing for departure_date from 12306
        return json.loads(html)  # parse the JSON response
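    # Example of the URL built above (hedged; BJP/SHH are only illustrative station codes):
    # https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-05-28
    #   &leftTicketDTO.from_station=BJP&leftTicketDTO.to_station=SHH&purpose_codes=ADULT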
def getData(self):
"""对于今日余票列表的获取函数"""
js = self.getTicketsJson() # 接受从 getTicketJson 函数返回的 json 对象
today_list = self.getTicketsList(js) # 获取 getTicketsList 函数返回的 今日车次信息
self.storeText(today_list) # 调用储存函数
return today_list
def getSpecificTicket(self):
"""对于抢票脚本专门设计的函数,用来获取专门的车次信息从而提高效率"""
pass
if __name__ == '__main__':
    Crawl('2017-05-28', '出发地', '目的地').getData()  # placeholders: departure city / destination city (in Chinese)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import tokens
from ..compat import str
from ..nodes import (Argument, Comment, ExternalLink, Heading, HTMLEntity, Tag,
Template, Text, Wikilink)
from ..nodes.extras import Attribute, Parameter
from ..smart_list import SmartList
from ..wikicode import Wikicode
__all__ = ["Builder"]
class Builder(object):
"""Combines a sequence of tokens into a tree of ``Wikicode`` objects.
To use, pass a list of :py:class:`~.Token`\ s to the :py:meth:`build`
method. The list will be exhausted as it is parsed and a
:py:class:`~.Wikicode` object will be returned.
"""
def __init__(self):
self._tokens = []
self._stacks = []
def _wrap(self, nodes):
"""Properly wrap a list of nodes in a ``Wikicode`` object."""
return Wikicode(SmartList(nodes))
def _push(self):
"""Push a new node list onto the stack."""
self._stacks.append([])
def _pop(self, wrap=True):
"""Pop the current node list off of the stack.
If *wrap* is ``True``, we will call :py:meth:`_wrap` on the list.
"""
if wrap:
return self._wrap(self._stacks.pop())
return self._stacks.pop()
def _write(self, item):
"""Append a node to the current node list."""
self._stacks[-1].append(item)
def _handle_parameter(self, default):
"""Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
"""
key = None
showkey = False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamEquals):
key = self._pop()
showkey = True
self._push()
elif isinstance(token, (tokens.TemplateParamSeparator,
tokens.TemplateClose)):
self._tokens.append(token)
value = self._pop()
if key is None:
key = self._wrap([Text(str(default))])
return Parameter(key, value, showkey)
else:
self._write(self._handle_token(token))
def _handle_template(self):
"""Handle a case where a template is at the head of the tokens."""
params = []
default = 1
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamSeparator):
if not params:
name = self._pop()
param = self._handle_parameter(default)
params.append(param)
if not param.showkey:
default += 1
elif isinstance(token, tokens.TemplateClose):
if not params:
name = self._pop()
return Template(name, params)
else:
self._write(self._handle_token(token))
def _handle_argument(self):
"""Handle a case where an argument is at the head of the tokens."""
name = None
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.ArgumentSeparator):
name = self._pop()
self._push()
elif isinstance(token, tokens.ArgumentClose):
if name is not None:
return Argument(name, self._pop())
return Argument(self._pop())
else:
self._write(self._handle_token(token))
def _handle_wikilink(self):
"""Handle a case where a wikilink is at the head of the tokens."""
title = None
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.WikilinkSeparator):
title = self._pop()
self._push()
elif isinstance(token, tokens.WikilinkClose):
if title is not None:
return Wikilink(title, self._pop())
return Wikilink(self._pop())
else:
self._write(self._handle_token(token))
def _handle_external_link(self, token):
"""Handle when an external link is at the head of the tokens."""
brackets, url = token.brackets, None
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.ExternalLinkSeparator):
url = self._pop()
self._push()
elif isinstance(token, tokens.ExternalLinkClose):
if url is not None:
return ExternalLink(url, self._pop(), brackets)
return ExternalLink(self._pop(), brackets=brackets)
else:
self._write(self._handle_token(token))
def _handle_entity(self):
"""Handle a case where an HTML entity is at the head of the tokens."""
token = self._tokens.pop()
if isinstance(token, tokens.HTMLEntityNumeric):
token = self._tokens.pop()
if isinstance(token, tokens.HTMLEntityHex):
text = self._tokens.pop()
self._tokens.pop() # Remove HTMLEntityEnd
return HTMLEntity(text.text, named=False, hexadecimal=True,
hex_char=token.char)
self._tokens.pop() # Remove HTMLEntityEnd
return HTMLEntity(token.text, named=False, hexadecimal=False)
self._tokens.pop() # Remove HTMLEntityEnd
return HTMLEntity(token.text, named=True, hexadecimal=False)
def _handle_heading(self, token):
"""Handle a case where a heading is at the head of the tokens."""
level = token.level
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.HeadingEnd):
title = self._pop()
return Heading(title, level)
else:
self._write(self._handle_token(token))
def _handle_comment(self):
"""Handle a case where an HTML comment is at the head of the tokens."""
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.CommentEnd):
contents = self._pop()
return Comment(contents)
else:
self._write(self._handle_token(token))
def _handle_attribute(self, start):
"""Handle a case where a tag attribute is at the head of the tokens."""
name, quoted = None, False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TagAttrEquals):
name = self._pop()
self._push()
elif isinstance(token, tokens.TagAttrQuote):
quoted = True
elif isinstance(token, (tokens.TagAttrStart, tokens.TagCloseOpen,
tokens.TagCloseSelfclose)):
self._tokens.append(token)
if name:
value = self._pop()
else:
name, value = self._pop(), None
return Attribute(name, value, quoted, start.pad_first,
start.pad_before_eq, start.pad_after_eq)
else:
self._write(self._handle_token(token))
def _handle_tag(self, token):
"""Handle a case where a tag is at the head of the tokens."""
close_tokens = (tokens.TagCloseSelfclose, tokens.TagCloseClose)
implicit, attrs, contents, closing_tag = False, [], None, None
wiki_markup, invalid = token.wiki_markup, token.invalid or False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TagAttrStart):
attrs.append(self._handle_attribute(token))
elif isinstance(token, tokens.TagCloseOpen):
padding = token.padding or ""
tag = self._pop()
self._push()
elif isinstance(token, tokens.TagOpenClose):
contents = self._pop()
self._push()
elif isinstance(token, close_tokens):
if isinstance(token, tokens.TagCloseSelfclose):
tag = self._pop()
self_closing = True
padding = token.padding or ""
implicit = token.implicit or False
else:
self_closing = False
closing_tag = self._pop()
return Tag(tag, contents, attrs, wiki_markup, self_closing,
invalid, implicit, padding, closing_tag)
else:
self._write(self._handle_token(token))
def _handle_token(self, token):
"""Handle a single token."""
if isinstance(token, tokens.Text):
return Text(token.text)
elif isinstance(token, tokens.TemplateOpen):
return self._handle_template()
elif isinstance(token, tokens.ArgumentOpen):
return self._handle_argument()
elif isinstance(token, tokens.WikilinkOpen):
return self._handle_wikilink()
elif isinstance(token, tokens.ExternalLinkOpen):
return self._handle_external_link(token)
elif isinstance(token, tokens.HTMLEntityStart):
return self._handle_entity()
elif isinstance(token, tokens.HeadingStart):
return self._handle_heading(token)
elif isinstance(token, tokens.CommentStart):
return self._handle_comment()
elif isinstance(token, tokens.TagOpenOpen):
return self._handle_tag(token)
def build(self, tokenlist):
"""Build a Wikicode object from a list tokens and return it."""
self._tokens = tokenlist
self._tokens.reverse()
self._push()
while self._tokens:
node = self._handle_token(self._tokens.pop())
self._write(node)
return self._pop()
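# Usage sketch (added; not part of the original module). In normal use the token
# list comes from the package's tokenizer, but Builder can be driven directly,
# e.g. with hand-made Text tokens:
#
#     builder = Builder()
#     code = builder.build([tokens.Text(text="Hello, "), tokens.Text(text="world!")])
#     print(code)   # -> Hello, world!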
|
from datetime import datetime
from unittest.mock import patch
import pytz
from django.core.management import call_command
from django.test import TestCase
from registrations.models import JembiSubmission
class JembiSubmitEventsTests(TestCase):
@patch("registrations.management.commands.jembi_submit_events.push_to_jembi_api")
def test_since_filter(self, task):
"""
Should not submit any events before the "since" date
"""
s = JembiSubmission.objects.create(request_data={})
s.timestamp = datetime(2016, 1, 1, tzinfo=pytz.UTC)
s.save()
call_command(
"jembi_submit_events", since=datetime(2016, 1, 2), submit=True, all=True
)
task.delay.assert_not_called()
@patch("registrations.management.commands.jembi_submit_events.push_to_jembi_api")
def test_until_filter(self, task):
"""
Should not submit any events after the "until" date
"""
s = JembiSubmission.objects.create(request_data={})
s.timestamp = datetime(2016, 1, 2, tzinfo=pytz.UTC)
s.save()
call_command(
"jembi_submit_events", until=datetime(2016, 1, 1), submit=True, all=True
)
task.delay.assert_not_called()
@patch("registrations.management.commands.jembi_submit_events.push_to_jembi_api")
def test_all_filter(self, task):
"""
Should not submit already submitted events if "--all" is not specified
"""
JembiSubmission.objects.create(request_data={}, submitted=True)
call_command("jembi_submit_events", submit=True, all=False)
task.delay.assert_not_called()
@patch("registrations.management.commands.jembi_submit_events.push_to_jembi_api")
def test_submit_filter(self, task):
"""
If submit isn't specified, then we should not submit the events
"""
JembiSubmission.objects.create(request_data={})
call_command("jembi_submit_events")
task.delay.assert_not_called()
@patch("registrations.management.commands.jembi_submit_events.push_to_jembi_api")
def test_submit_event(self, task):
"""
If the event should be submitted, then we should submit it
"""
s = JembiSubmission.objects.create(path="test", request_data={"test": "data"})
call_command("jembi_submit_events", submit=True)
task.delay.assert_called_once_with((s.id, "test", {"test": "data"}))
|
from __future__ import print_function, absolute_import
from .image import ImageSoftmaxEngine, ImageTripletEngine
from .image import ImageTripletEngine_DG, ImageQAConvEngine
from .video import VideoSoftmaxEngine, VideoTripletEngine
from .engine import Engine
|
################################################################################
#
# This file implements the interface of different kinds of
# data modules.
#
# Author(s): Nik Vaessen
################################################################################
from abc import abstractmethod, ABCMeta
from typing import List
from pytorch_lightning import LightningDataModule
from src.evaluation.speaker.evaluation import EvaluationPair
################################################################################
# implement a data module for speaker recognition data
class SpeakerLightningDataModule(LightningDataModule):
@property
@abstractmethod
def num_speakers(self) -> int:
pass
@property
@abstractmethod
def val_pairs(self) -> List[EvaluationPair]:
pass
@property
@abstractmethod
def test_pairs(self) -> List[EvaluationPair]:
pass
@property
@abstractmethod
def test_names(self) -> List[str]:
pass
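################################################################################
# Minimal sketch (added for illustration) of a concrete implementation of the
# interface above. The constructor arguments are hypothetical; a real module
# would also define the usual LightningDataModule dataloader hooks.
class _ExampleSpeakerDataModule(SpeakerLightningDataModule):
    def __init__(
        self,
        num_speakers: int,
        val_pairs: List[EvaluationPair],
        test_pairs: List[EvaluationPair],
        test_names: List[str],
    ):
        super().__init__()
        self._num_speakers = num_speakers
        self._val_pairs = val_pairs
        self._test_pairs = test_pairs
        self._test_names = test_names

    @property
    def num_speakers(self) -> int:
        return self._num_speakers

    @property
    def val_pairs(self) -> List[EvaluationPair]:
        return self._val_pairs

    @property
    def test_pairs(self) -> List[EvaluationPair]:
        return self._test_pairs

    @property
    def test_names(self) -> List[str]:
        return self._test_names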
|
'''
Created on 15 Sep 2017
@author: gavin
'''
from .basehandler import TcpHandler
class netbios(TcpHandler):
NAME = "NetBIOS Session"
DESCRIPTION = '''Responds to NetBIOS session requests. To be used along side SMB handler.'''
CONFIG = {}
def __init__(self, *args):
'''
Constructor
'''
TcpHandler.__init__(self, *args)
def base_handle(self):
        data = self.handle_request()
        # 0x81 marks a NetBIOS "session request"; answer with 0x82, a positive session response
        if data[:1] == b"\x81":
            self.send_response(b"\x82\x00\x00\x00")
|
# Generated by Django 3.2.5 on 2021-08-14 10:16
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.contrib.routable_page.models
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0001_app_separation'),
('wagtailimages', '0023_add_choose_permissions'),
('wagtailcore', '0062_comment_models_and_pagesubscription'),
]
operations = [
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
('banner_title', models.CharField(max_length=100, null=True)),
('banner_subtitle', wagtail.core.fields.RichTextField()),
('banner_cta_link_url', models.URLField(blank=True, help_text="Leave empty if link leads to one of this site's pages.", max_length=500)),
('banner_cta_text', models.TextField()),
('banner_cta_link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page')),
('banner_image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
],
options={
'verbose_name': 'Home Page',
'verbose_name_plural': 'Home Pages',
},
bases=(wagtail.contrib.routable_page.models.RoutablePageMixin, 'wagtailcore.page'),
),
migrations.CreateModel(
name='AlbumItems',
fields=[
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('linked_page', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='blog.gameblogpage')),
('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
('root_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='album_images', to='home.homepage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
# Copyright © 2021 Dar Gilboa, Ari Pakman and Thibault Vatter
# This file is part of the mdma library and licensed under the terms of the MIT license.
# For a copy, see the LICENSE file in the root directory.
from mdma import fit
import torch as t
import numpy as np
from experiments.UCI.gas import GAS
from experiments.UCI.hepmass import HEPMASS
from experiments.UCI.miniboone import MINIBOONE
from experiments.UCI.power import POWER
def fit_uci():
h = fit.get_default_h()
data = load_dataset(h)
outs = fit.fit_mdma(h, data)
def load_dataset(h):
if h.dataset == 'gas':
dataset = GAS(h.data_dir + '/gas/ethylene_CO.pickle')
elif h.dataset == 'hepmass':
dataset = HEPMASS(h.data_dir + '/hepmass')
elif h.dataset == 'miniboone':
dataset = MINIBOONE(h.data_dir + '/miniboone/data.npy')
elif h.dataset == 'power':
dataset = POWER(h.data_dir + '/power/data.npy')
else:
raise RuntimeError()
if h.missing_data_pct > 0:
# create missing data masks
mask = np.random.rand(*dataset.trn.x.shape) > h.missing_data_pct
data_and_mask = np.array([dataset.trn.x, mask]).swapaxes(0, 1)
dataset_train = t.utils.data.TensorDataset(t.tensor(data_and_mask).float())
val_mask = np.random.rand(*dataset.val.x.shape) > h.missing_data_pct
val_data_and_mask = np.array([dataset.val.x, val_mask]).swapaxes(0, 1)
dataset_valid = t.utils.data.TensorDataset(
t.tensor(val_data_and_mask).float())
else:
dataset_train = t.utils.data.TensorDataset(
t.tensor(np.expand_dims(dataset.trn.x, 1)).float())
dataset_valid = t.utils.data.TensorDataset(
t.tensor(np.expand_dims(dataset.val.x, 1)).float())
data_loader_train = t.utils.data.DataLoader(dataset_train,
batch_size=h.batch_size,
shuffle=True)
data_loader_valid = t.utils.data.DataLoader(dataset_valid,
batch_size=h.batch_size,
shuffle=False)
dataset_test = t.utils.data.TensorDataset(t.tensor(dataset.tst.x).float())
data_loader_test = t.utils.data.DataLoader(dataset_test,
batch_size=h.batch_size,
shuffle=False)
h.d = dataset.n_dims
h.M = len(dataset_train)
h.M_val = len(dataset_valid)
return [data_loader_train, data_loader_valid, data_loader_test]
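# Note on the missing-data branch above (added explanation): when
# h.missing_data_pct > 0 every example is stored as a (2, d) array, row 0 holding
# the observed values and row 1 the boolean keep-mask produced by the random
# draw; fit.fit_mdma is assumed to consume that stacked layout.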
if __name__ == '__main__':
fit_uci()
|
import argparse
from io import StringIO
import os
from pathlib import Path
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from utils.annealing import Step, TReciprocal, ExponentialDecay
from utils.logger import Logger
from utils.path import get_run_path
from utils.visualisation import Dashboard
from agent import CartPoleAgent
parser = argparse.ArgumentParser(description='Train a Q-Learning agent on the CartPole problem.')
parser.add_argument('--n-episodes', type=int, default=100, help='the number of episodes to run.')
parser.add_argument('--checkpoint-rate', type=int, default=500, help='how often the logs and model should be checkpointed (in episodes). \
Set to -1 to disable checkpoints')
parser.add_argument('--render', action='store_true', help='flag to indicate the training should be rendered.')
parser.add_argument('--live-plot', action='store_true', help='flag to indicate the training data should be plotted in real-time.')
parser.add_argument('--no-plot', action='store_true', help='flag to indicate the no plot should be shown.')
parser.add_argument('--plot-update-rate', type=int, default=100, help='how often the live-plot should be updated.')
parser.add_argument('--log-verbosity', type=int, default=Logger.Verbosity.MINIMAL, choices=Logger.Verbosity.ALL,
help='the verbosity level of the logger.')
parser.add_argument('--model-name', type=str, default='RoleyPoley', help='the name of the model. Used as the filename when saving the model.')
parser.add_argument('--model-path', type=str, help='the path to a previous model. If this is set the designated model will be used for training.')
args = parser.parse_args()
if args.no_plot:
args.live_plot = False
if not args.no_plot:
dashboard = Dashboard(ema_alpha=1e-2, real_time=args.live_plot)
# Setup logger
logger = Logger(verbosity=args.log_verbosity, filename_prefix=args.model_name)
logger.log('episode_info', 'episode,timesteps')
logger.log('learning_rate', 'learning_rate')
logger.log('exploration_rate', 'exploration_rate')
# Load OpenAI Gym and agent.
env = gym.make('CartPole-v0')
model_filename = args.model_name + '.q'
checkpoint_filename_format = args.model_name + '-checkpoint-{:03d}.q'
if args.model_path:
agent = CartPoleAgent.load(args.model_path)
args.model_name = Path(args.model_path).name
agent.model_path = get_run_path(prefix='data/')
else:
agent = CartPoleAgent(env.action_space, env.observation_space,
n_buckets=6, learning_rate=1, learning_rate_annealing=ExponentialDecay(k=1e-3),
exploration_rate=1, exploration_rate_annealing=Step(k=2e-2, step_after=100),
discount_factor=0.9, input_mask=[0, 1, 1, 1])
start = time.time()
for i_episode in range(args.n_episodes):
# per episode setup
observation = env.reset()
prev_observation = []
prev_action = 0
episode_start = time.time()
# logging
episode = '[Episode {}]'.format(i_episode)
logger.log('observations', episode)
logger.log('rewards', episode)
logger.log('actions', episode)
if agent.learning_rate_annealing:
logger.log('learning_rate', agent.learning_rate_annealing(agent.learning_rate, i_episode))
else:
logger.log('learning_rate', agent.learning_rate)
if agent.exploration_rate_annealing:
logger.log('exploration_rate', agent.exploration_rate_annealing(agent.exploration_rate, i_episode))
else:
logger.log('exploration_rate', agent.exploration_rate)
# checkpointing
    if args.checkpoint_rate > 0 and i_episode % args.checkpoint_rate == 0:
checkpoint = i_episode // args.checkpoint_rate
logger.print('Checkpoint #{}'.format(checkpoint))
logger.print('Total elapsed time: {:02.4f}s'.format(time.time() - start))
agent.save(checkpoint_filename_format.format(checkpoint))
if not args.live_plot:
logger.write(mode='a')
logger.clear()
else:
logger.write(mode='w')
if args.live_plot and i_episode == 1:
dashboard.warmup(logger, agent.q_table)
if args.live_plot and (i_episode > 0 and i_episode % args.plot_update_rate == 0):
dashboard.draw(logger, agent.q_table)
cumulative_reward = 0
for t in range(200):
action = agent.get_action(observation, i_episode)
logger.print('Observation:\n{}\nAction:\n{}\n'.format(observation, action), Logger.Verbosity.FULL)
prev_observation = observation
prev_action = action
observation, reward, done, info = env.step(action)
cumulative_reward += reward
logger.print('Reward for last observation: {}'.format(cumulative_reward), Logger.Verbosity.FULL)
agent.update(prev_observation, prev_action, cumulative_reward, observation, i_episode)
logger.log('observations', observation)
logger.log('rewards', reward)
logger.log('actions', action)
if args.render:
env.render()
if done:
msg = "Episode {:02d} finished after {:02d} timesteps in {:02.4f}s".format(i_episode, t + 1, time.time() - episode_start)
logger.log('episode_info', '{:02d}, {:02d}'.format(i_episode, t + 1))
logger.print(msg, Logger.Verbosity.MINIMAL)
break
env.close()
logger.write(mode='w' if args.live_plot else 'a')
agent.save(model_filename)
if args.live_plot or not args.no_plot:
if args.live_plot:
dashboard.draw(logger, agent.q_table)
else:
dashboard.warmup(logger, agent.q_table)
dashboard.keep_on_screen()
dashboard.close()
|
import locale
import warnings
from django.test import TestCase
from django_celery_beat.models import DAYS
import snitch
from snitch.models import Event
from snitch.schedules.models import Schedule
from snitch.schedules.tasks import clean_scheduled_tasks, execute_schedule_task
from tests.app.events import (
ACTIVATED_EVENT,
CONFIRMED_EVENT,
DUMMY_EVENT,
EVERY_HOUR,
SMALL_EVENT,
ActivatedHandler,
ConfirmedHandler,
DummyHandler,
)
from tests.app.factories import (
ActorFactory,
OtherStuffFactory,
StuffFactory,
TargetFactory,
TriggerFactory,
)
from tests.app.helpers import dispatch_dummy_event, dispatch_explicit_dummy_event
from tests.app.models import Notification
from tests.factories import UserFactory
class SnitchTestCase(TestCase):
def test_swappable_notification_model(self):
notification_model_class = snitch.get_notification_model()
self.assertTrue(issubclass(notification_model_class, Notification))
def test_register_event(self):
# This test assume that there is an events.py file in the testing app
self.assertIn(ACTIVATED_EVENT, snitch.manager._verbs)
self.assertIn(ACTIVATED_EVENT, snitch.manager._registry)
self.assertTrue(
issubclass(snitch.manager._registry[ACTIVATED_EVENT], snitch.EventHandler)
)
def test_dispatch_event(self):
self.assertEqual(0, Event.objects.filter(verb=ACTIVATED_EVENT).count())
stuff = StuffFactory()
stuff.activate()
self.assertEqual(1, Event.objects.filter(verb=ACTIVATED_EVENT).count())
event = Event.objects.filter(verb=ACTIVATED_EVENT).first()
self.assertTrue(isinstance(event.handler(), ActivatedHandler))
handler = event.handler()
self.assertEqual(ActivatedHandler.title, handler.get_title())
def test_dispatch_event_with_backends(self):
users = UserFactory.create_batch(size=5)
self.assertEqual(0, Event.objects.filter(verb=CONFIRMED_EVENT).count())
stuff = StuffFactory()
stuff.confirm()
self.assertEqual(1, Event.objects.filter(verb=CONFIRMED_EVENT).count())
event = Event.objects.filter(verb=CONFIRMED_EVENT).first()
self.assertTrue(isinstance(event.handler(), ConfirmedHandler))
handler = event.handler()
self.assertEqual(ConfirmedHandler.title, handler.get_title())
self.assertTrue(event.notified)
self.assertEqual(len(users), Notification.objects.all().count())
notification_handler = Notification.objects.first().handler()
self.assertIsNotNone(notification_handler.notification)
def test_dispatch_event_from_function(self):
self.assertEqual(0, Event.objects.filter(verb=DUMMY_EVENT).count())
dispatch_dummy_event(
actor=ActorFactory(), target=TargetFactory(), trigger=TriggerFactory()
)
self.assertEqual(1, Event.objects.filter(verb=DUMMY_EVENT).count())
event = Event.objects.filter(verb=DUMMY_EVENT).first()
self.assertTrue(isinstance(event.handler(), DummyHandler))
self.assertIsNotNone(event.target)
self.assertIsNotNone(event.trigger)
def test_dispatch_event_from_function_explicit(self):
self.assertEqual(0, Event.objects.filter(verb=DUMMY_EVENT).count())
dispatch_explicit_dummy_event(
actor=ActorFactory(), target=TargetFactory(), trigger=TriggerFactory()
)
self.assertEqual(1, Event.objects.filter(verb=DUMMY_EVENT).count())
event = Event.objects.filter(verb=DUMMY_EVENT).first()
self.assertTrue(isinstance(event.handler(), DummyHandler))
self.assertIsNotNone(event.target)
self.assertIsNotNone(event.trigger)
def test_dispatch_event_from_function_bad_attributes(self):
self.assertEqual(0, Event.objects.filter(verb=DUMMY_EVENT).count())
dispatch_dummy_event(ActorFactory(), TargetFactory(), TriggerFactory())
self.assertEqual(0, Event.objects.filter(verb=DUMMY_EVENT).count())
def test_create_other_stuff(self):
schedules = Schedule.objects.all().count()
self.assertEqual(0, schedules)
OtherStuffFactory()
self.assertEqual(1, Schedule.objects.filter(verb=DUMMY_EVENT).count())
self.assertTrue(Schedule.objects.filter(verb=DUMMY_EVENT).exists())
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
self.assertEqual(DAYS, schedule.period)
self.assertEqual(2, schedule.every)
self.assertEqual(1, schedule.limit)
def test_create_other_stuff_every_hour(self):
schedules = Schedule.objects.all().count()
self.assertEqual(0, schedules)
other_stuff = OtherStuffFactory()
self.assertEqual(1, Schedule.objects.filter(verb=EVERY_HOUR).count())
self.assertTrue(Schedule.objects.filter(verb=EVERY_HOUR).exists())
schedule = Schedule.objects.filter(verb=EVERY_HOUR).first()
self.assertEqual(str(other_stuff.created.minute), schedule.minute)
self.assertEqual("*/1", schedule.hour)
def test_execute_schedule_task(self):
OtherStuffFactory()
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
self.assertEqual(0, schedule.counter)
schedule_pk = execute_schedule_task(schedule.pk)
self.assertEqual(schedule_pk, schedule.pk)
schedule.refresh_from_db()
self.assertEqual(1, schedule.counter)
def test_execute_schedule_task_not_found(self):
OtherStuffFactory()
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
self.assertEqual(0, schedule.counter)
schedule_pk = execute_schedule_task(0)
self.assertIsNone(schedule_pk)
schedule.refresh_from_db()
self.assertEqual(0, schedule.counter)
def test_clean_scheduled_tasks(self):
OtherStuffFactory()
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
execute_schedule_task(schedule.pk)
clean_scheduled_tasks()
self.assertEqual(0, Schedule.objects.filter(verb=DUMMY_EVENT).count())
def test_scheduled_task_without_actor(self):
OtherStuffFactory()
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
schedule.actor = None
schedule.save()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
schedule.run()
self.assertEqual(1, len(warns))
self.assertEqual(warns[-1].category, UserWarning)
self.assertEqual(0, Event.objects.filter(verb=DUMMY_EVENT).count())
def test_clean_scheduled_tasks_run(self):
OtherStuffFactory()
schedule = Schedule.objects.filter(verb=DUMMY_EVENT).first()
schedule.run()
self.assertEqual(1, Event.objects.filter(verb=DUMMY_EVENT).count())
def test_ephemeral_event(self):
self.assertEqual(0, Event.objects.filter(verb=SMALL_EVENT).count())
stuff = StuffFactory()
stuff.small()
self.assertEqual(1, Event.objects.filter(verb=SMALL_EVENT).count())
self.assertEqual(
0, Notification.objects.filter(event__verb=SMALL_EVENT).count()
)
|
from ...hek.defs.swat import *
|
from event_model import unpack_event_page
from qtpy.QtGui import QStandardItemModel, QStandardItem
from qtpy.QtWidgets import QTableView, QWidget, QVBoxLayout
class BaselineModel(QStandardItemModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setHorizontalHeaderLabels(['Before', 'After'])
def __call__(self, name, doc):
if name == 'event_page':
for event in unpack_event_page(doc):
self.__call__('event', event)
elif name == 'event':
column = doc['seq_num'] - 1
for row, val in enumerate(val for _, val in sorted(doc['data'].items())):
self.setItem(row, column, QStandardItem(str(val)))
            # use the same sorted key order as the rows filled in above
            self.setVerticalHeaderLabels(sorted(doc['data'].keys()))
class BaselineWidget(QTableView):
...
class BaselineFactory:
def __init__(self, add_tab):
container = QWidget()
self.layout = QVBoxLayout()
container.setLayout(self.layout)
add_tab(container, 'Baseline')
def __call__(self, name, start_doc):
def subfactory(name, descriptor_doc):
if descriptor_doc.get('name') == 'baseline':
baseline_widget = BaselineWidget()
baseline_model = BaselineModel()
baseline_widget.setModel(baseline_model)
self.layout.addWidget(baseline_widget)
return [baseline_model]
else:
return []
return [], [subfactory]
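# Note (added, hedged): BaselineFactory follows a two-level factory convention.
# __call__ is invoked once per run with the start document and returns
# (callbacks, subfactories); each subfactory is then invoked per descriptor
# document and returns the callbacks (here the BaselineModel) that should
# receive the events of that stream, so only the 'baseline' stream gets a table.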
|
# Author info:
# Github | rlyonheart
# Twitter| rly0nheart
# Instagram | rlyonheart
# import the colour/formatting helpers
import termcolor
from termcolor import colored, cprint
# import the random module
import random
# import the time module (only used to pace the output)
import time
# import the sys module (used to terminate the program)
import sys
cprint("\t\t --P4SSW0RD G3N3RAT0R-- \n\n",'red',attrs=['bold','underline'])
cprint("Generate password:\n",'white',attrs=['bold','underline'])
if __name__ == '__main__':
    ui = input().lower()
    chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    length = 10  # avoid shadowing the built-in len()
    # note: random.sample never repeats a character; for real passwords the secrets module would be preferable
    x = ''.join(random.sample(chars, length))
if "passgen" in ui:
time.sleep(1)
cprint("\nPlease Wait",'blue',attrs=['bold'])
time.sleep(1)
cprint(".",'cyan',attrs=['bold'])
time.sleep(1)
cprint(".",'red',attrs=['bold'])
time.sleep(1)
cprint(".",'green',attrs=['bold'])
time.sleep(1)
cprint(".",'blue',attrs=['bold'])
time.sleep(1)
cprint(".",'yellow',attrs=['bold'])
time.sleep(1)
cprint("Password generated!..",'yellow',attrs=['bold'])
time.sleep(2)
cprint("\nyour new password is: ",'red',attrs=['bold'])
time.sleep(0.7)
print("\t\t\t",x)
time.sleep(1)
        cprint('''\n\n\n\nTip: Do not share this password
with anyone if you are going to use it.\n\n''', 'green', attrs=['bold'])
else:
time.sleep(1)
cprint("\nProcessing",'blue',attrs=['bold'])
time.sleep(1)
cprint(".",'cyan',attrs=['bold'])
time.sleep(1)
cprint(".",'yellow',attrs=['bold'])
time.sleep(1)
cprint(".",'green',attrs=['bold'])
time.sleep(1)
cprint(".",'blue',attrs=['bold'])
time.sleep(1)
cprint(".",'red',attrs=['bold'])
cprint("\nINVALID INPUT!",'red',attrs=['bold'])
time.sleep(1.5)
sys.exit(colored("program terminated due to invalid input by user.",'white','on_red',attrs=['bold']))
# ©2020
# Thank you for using Password Generator!
|
from src.harness.agentThread import AgentThread
class DefaultName(AgentThread):
def __init__(self, config, motion_config):
super(DefaultName, self).__init__(config, motion_config)
def initialize_vars(self):
self.locals = {}
self.locals['added'] = False
self.locals['finalsum'] = None
self.create_aw_var('sum', int, 0)
self.create_aw_var('numadded', int, 0)
self.initialize_lock('adding')
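    # Protocol summary (added comment): each agent takes the 'adding' lock once,
    # adds pid() * 2 to the shared 'sum' and increments 'numadded'; once
    # 'numadded' equals num_agents(), every agent copies the final sum into
    # 'finalsum', logs it and stops.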
def loop_body(self):
if not self.locals['added']:
if not self.lock('adding'):
return
self.write_to_shared('sum', None, self.read_from_shared('sum', None) + self.pid() * 2)
self.write_to_shared('numadded', None, self.read_from_shared('numadded', None) + 1)
self.locals['added'] = True
self.unlock('adding')
return
if self.read_from_shared('numadded', None) == self.num_agents():
self.locals['finalsum'] = self.read_from_shared('sum', None)
self.log(self.locals['finalsum'])
self.stop()
return
|
#!/usr/bin/env python3
from linkedlist import LinkedList, Node
import random
random.seed(42)
class IntersectingLinkedList(LinkedList):
def __init__(self, data_a: list, data_b: list):
self.head_a = self.__build_list(data_a)
self.head_b = self.__build_list(data_b)
self.intersect()
def intersect(self):
intersection_point = 5
current_b = self.head_b
for i in range(intersection_point):
current_b = current_b.next_node
current_a = self.head_a
while current_a.next_node != None:
current_a = current_a.next_node
current_a.next_node = current_b
def __build_list(self, data: list):
head = None
for item in data:
if head == None:
head = Node(item)
current = head
else:
current.next_node = Node(item)
current = current.next_node
current.next_node = None
return head
def get(self, head=None):
if head == None:
return
elif head == "a":
current = self.head_a
elif head == "b":
current = self.head_b
def recurse(node, string=""):
if node.next_node == None:
string += f"{node.data} -> END"
return string
else:
string += f"{node.data} -> "
return recurse(node.next_node, string)
return recurse(current)
class Detector:
def __init__(self, head_a: Node, head_b: Node):
self.head_a = head_a
self.head_b = head_b
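    # Algorithm note (added comment): detect() first walks both lists to find
    # their lengths and tail nodes; if the tails are different nodes the lists
    # cannot intersect. Otherwise it advances a pointer into the longer list by
    # the length difference and then steps both pointers together until they hit
    # the same node object, the intersection point. O(len_a + len_b) time, O(1) space.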
def detect(self):
current_a = self.head_a
len_a = 1
while current_a.next_node != None:
len_a += 1
current_a = current_a.next_node
current_b = self.head_b
len_b = 1
while current_b.next_node != None:
len_b += 1
current_b = current_b.next_node
if current_a != current_b:
return f"No Intersections"
ptr_a = self.head_a
ptr_b = self.head_b
if len_a > len_b:
moves = len_a - len_b
for move in range(moves):
ptr_a = ptr_a.next_node
elif len_b > len_a:
moves = len_b - len_a
for move in range(moves):
ptr_b = ptr_b.next_node
while id(ptr_a) != id(ptr_b):
ptr_a = ptr_a.next_node
ptr_b = ptr_b.next_node
return f"Intersects at: {ptr_a}"
def main():
data_a = [num for num in range(11, 18)]
data_b = [random.randint(0, 10) for num in range(8)]
print(f"{data_a}\n{data_b}")
myll = IntersectingLinkedList(data_a, data_b)
print(myll.get("a"))
print(myll.get("b"))
d = Detector(myll.head_a, myll.head_b)
print(d.detect())
myll2 = LinkedList([i for i in range(5)])
myll3 = LinkedList([i for i in range(5)])
d = Detector(myll2.head, myll3.head)
print(d.detect())
if __name__ == "__main__":
main()
|
"""
Based on Scipy's cookbook:
http://scipy-cookbook.readthedocs.io/items/bundle_adjustment.html
"""
import math
import sys
import logging
from functools import lru_cache
import numpy as np
import quaternion
from scipy.sparse import lil_matrix
from scipy.optimize import least_squares
from visnav.algo import tools
# logger = logging.getLogger(__name__)
CHECK_JACOBIAN = 0
USE_WORLD_CAM_FRAME = 0
USE_OWN_PSEUDO_HUBER_LOSS = 1
def bundle_adj(poses: np.ndarray, pts3d: np.ndarray, pts2d: np.ndarray,
cam_idxs: np.ndarray, pt3d_idxs: np.ndarray, K: np.ndarray, px_err_sd: np.ndarray=None,
dtype=np.float64, log_writer=None, max_nfev=None, skip_pose_n=1, poses_only=False, huber_coef=False):
"""
Returns the bundle adjusted parameters, in this case the optimized rotation and translation vectors.
basepose with shape (6,) is the pose of the first frame that is used as an anchor
poses with shape (n_cameras, 6) contains initial estimates of parameters for all cameras.
First 3 components in each row form a rotation vector,
next 3 components form a translation vector
pts3d with shape (n_points, 3)
contains initial estimates of point coordinates in the world frame.
pts2d with shape (n_observations, 2)
contains measured 2-D coordinates of points projected on images in each observations.
cam_idxs with shape (n_observations,)
contains indices of cameras (from 0 to n_cameras - 1) involved in each observation.
pt3d_idxs with shape (n_observations,)
contains indices of points (from 0 to n_points - 1) involved in each observation.
"""
assert len(poses.shape) == 2 and poses.shape[1] == 6, 'wrong shape poses: %s' % (poses.shape,)
assert len(pts3d.shape) == 2 and pts3d.shape[1] == 3, 'wrong shape pts3d: %s' % (pts3d.shape,)
assert len(pts2d.shape) == 2 and pts2d.shape[1] == 2, 'wrong shape pts2d: %s' % (pts2d.shape,)
assert len(cam_idxs.shape) == 1, 'wrong shape cam_idxs: %s' % (cam_idxs.shape,)
assert len(pt3d_idxs.shape) == 1, 'wrong shape pt3d_idxs: %s' % (pt3d_idxs.shape,)
assert K.shape == (3, 3), 'wrong shape K: %s' % (K.shape,)
n_cams = poses.shape[0]
n_pts = pts3d.shape[0]
pose0 = poses[0:skip_pose_n].ravel()
x0 = [poses[skip_pose_n:].ravel()]
if poses_only:
fixed_pt3d = pts3d
else:
fixed_pt3d = np.zeros((0, 3))
x0.append(pts3d.ravel())
x0 = np.hstack(x0)
I = np.stack((np.ones((len(poses) - skip_pose_n, 3), dtype=bool),
np.zeros((len(poses) - skip_pose_n, 3), dtype=bool)), axis=1).ravel()
x0 = Manifold(x0.shape, buffer=x0.astype(dtype), dtype=dtype)
x0.set_so3_groups(I)
px_err_sd = np.ones((len(pts2d),)) if px_err_sd is None else px_err_sd
m = cam_idxs.size * 2
orig_dtype = pts2d.dtype
if dtype != orig_dtype:
pose0, fixed_pt3d, pts2d, K, px_err_sd = map(
lambda x: x.astype(dtype), (pose0, fixed_pt3d, pts2d, K, px_err_sd))
    if 0:
        err = _costfun(x0, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd)
        print('ERR: %.4e' % (np.sum(err**2)/2))
if CHECK_JACOBIAN:
if 1:
jac = _jacobian(x0, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd).toarray()
jac_ = numerical_jacobian(lambda x: _costfun(x, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs,
pts2d, K, px_err_sd), x0, 1e-4)
else:
jac = _rot_pt_jac(x0, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd)
jac_ = numerical_jacobian(lambda x: _rotated_points(x, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs,
pt3d_idxs, pts2d, K, px_err_sd), x0, 1e-4)
import matplotlib.pyplot as plt
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].imshow(np.sign(jac) * np.abs(jac) ** (1/8))
axs[0].set_title('analytical')
axs[1].imshow(np.sign(jac_) * np.abs(jac_) ** (1/8))
axs[1].set_title('numerical')
plt.show()
@lru_cache(1)
def _cfun(x):
return _costfun(x, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd)
def cfun(x):
err = _cfun(tuple(x))
if USE_OWN_PSEUDO_HUBER_LOSS:
sqrt_phl_err = np.sqrt(tools.pseudo_huber_loss(err, huber_coef)) if huber_coef is not False else err
else:
sqrt_phl_err = err
return sqrt_phl_err
def jac(x):
# apply pseudo huber loss derivative
J = _jacobian(x, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd)
if USE_OWN_PSEUDO_HUBER_LOSS and huber_coef is not False:
## derivative of pseudo huber loss can be applied afterwards by multiplying with the err derivative
# NOTE: need extra (.)**0.5 term as least_squares applies extra (.)**2 on top of our cost function
# - d (weight * phl(e))**0.5/dksi = d (weight * phl(e))**0.5/d phl(e) * d phl(e)/de * de/dksi
# - d (weight * phl(e))**0.5/d phl(e) = weight**0.5 * 0.5 * phl(e)**(-0.5)
# - d phl(e)/de = e/sqrt(1+e**2/delta**2)
# - de/dksi is solved next
err = _cfun(tuple(x))
phl_err = tools.pseudo_huber_loss(err, huber_coef)
dhuber = 0.5 * np.sqrt(1 / phl_err) * err / np.sqrt(1 + err ** 2 / huber_coef ** 2)
J = J.multiply(dhuber.reshape((-1, 1)))
return J
tmp = sys.stdout
if log_writer is not None:
sys.stdout = log_writer
res = least_squares(cfun, x0, verbose=2, ftol=1e-4, xtol=1e-5, method='trf',
x_scale='jac', jac=jac, max_nfev=max_nfev,
loss='linear' if USE_OWN_PSEUDO_HUBER_LOSS or huber_coef is None else 'huber',
f_scale=huber_coef or 1.0)
sys.stdout = tmp
# TODO (1): return res.fun somewhat processed (per frame: mean px err, meas errs),
# so that can follow the errors and notice/debug problems
assert isinstance(res.x, Manifold), 'somehow manifold lost during optimization'
new_poses, new_pts3d = _unpack(res.x.to_array(), n_cams - skip_pose_n, 0 if poses_only else n_pts)
if new_poses.dtype != orig_dtype:
new_poses = new_poses.astype(orig_dtype)
new_pts3d = new_pts3d.astype(orig_dtype)
return new_poses, new_pts3d
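# Call sketch (added, hedged): shapes follow the bundle_adj docstring above, the
# numbers are made up. Two cameras each observing the same ten world points:
#
#   poses     = np.zeros((2, 6))                 # [rot_vec | translation] per camera
#   pts3d     = np.random.rand(10, 3)            # world-frame points
#   cam_idxs  = np.repeat(np.arange(2), 10)      # camera index per observation
#   pt3d_idxs = np.tile(np.arange(10), 2)        # point index per observation
#   pts2d     = np.random.rand(20, 2)            # measured pixel coordinates
#   K         = np.array([[500., 0, 320], [0, 500, 240], [0, 0, 1]])
#   new_poses, new_pts3d = bundle_adj(poses, pts3d, pts2d, cam_idxs, pt3d_idxs, K)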
def _costfun(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
"""
Compute residuals.
`params` contains camera parameters and 3-D coordinates.
"""
if isinstance(params, (tuple, list)):
params = np.array(params)
params = np.hstack((pose0, params))
poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
points_3d = fixed_pt3d if len(pts3d) == 0 else pts3d
points_proj = _project(points_3d[pt3d_idxs], poses[cam_idxs], K)
px_err = ((pts2d - points_proj) / px_err_sd[:, None]).ravel()
return px_err
def _jacobian(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
"""
cost function jacobian, from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6891346/
"""
params = np.hstack((pose0, params))
poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
poses_only = len(fixed_pt3d) > 0
points_3d = fixed_pt3d if poses_only else pts3d
points_3d_rot = _rotate(points_3d[pt3d_idxs], poses[cam_idxs, :3])
points_3d_cf = points_3d_rot + poses[cam_idxs, 3:6]
# error term count (2d reprojection errors)
m = cam_idxs.size * 2
# parameter count (6d poses, n x 3d keypoint locations, 1d time offset)
n1 = n_cams * 6
n2 = (0 if poses_only else n_pts * 3)
n = n1 + n2
J = lil_matrix((m, n), dtype=np.float32)
i = np.arange(cam_idxs.size)
# poses affect reprojection error terms
########################################
# - NOTE: for the following to work, need the poses in cam-to-world, i.e. rotation of world origin first,
# then translation in rotated coordinates
# - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6891346/ equation (11)
# - also https://ingmec.ual.es/~jlblanco/papers/jlblanco2010geometry3D_techrep.pdf, section 10.3.5, eq 10.23
# - ksi: first three location, second three rotation
# - de/dksi = de/dU' * dU'/dksi
# - de/dU' = -[[fx/Zc, 0, -fx*Xc/Zc**2],
# [0, fy/Zc, -fy*Yc/Zc**2]]
# - dU'/dw = [I3 | -[U']^] = [[1, 0, 0, 0, Zr, -Yr],
# [0, 1, 0, -Zr, 0, Xr],
# [0, 0, 1, Yr, -Xr, 0]]
# - -[[fx/Zc, 0, -Xc*fx/Zc**2, | -Xc*Yr*fx/Zc**2, Xc*Xr*fx/Zc**2 + fx*Zr/Zc, -Yr*fx/Zc],
# [0, fy/Zc, -Yc*fy/Zc**2, | -Yc*Yr*fy/Zc**2 - fy*Zr/Zc, Xr*Yc*fy/Zc**2, Xr*fy/Zc]] / px_err_sd
fx, fy = K[0, 0], K[1, 1]
Xr, Yr, Zr = points_3d_rot[:, 0], points_3d_rot[:, 1], points_3d_rot[:, 2]
Xc, Yc, iZc = points_3d_cf[:, 0], points_3d_cf[:, 1], 1 / points_3d_cf[:, 2]
px_err_sd = px_err_sd.flatten()
# camera location
J[2 * i + 0, cam_idxs*6 + 3] = -(fx*iZc) / px_err_sd
J[2 * i + 0, cam_idxs*6 + 5] = -(-Xc*fx*iZc**2) / px_err_sd
J[2 * i + 1, cam_idxs*6 + 4] = -(fy*iZc) / px_err_sd
J[2 * i + 1, cam_idxs*6 + 5] = -(-Yc*fy*iZc**2) / px_err_sd
# camera rotation
J[2 * i + 0, cam_idxs*6 + 0] = -(-Xc*Yr*fx*iZc**2) / px_err_sd
J[2 * i + 0, cam_idxs*6 + 1] = -(Xc*Xr*fx*iZc**2 + fx*Zr*iZc) / px_err_sd
J[2 * i + 0, cam_idxs*6 + 2] = -(-Yr*fx*iZc) / px_err_sd
J[2 * i + 1, cam_idxs*6 + 0] = -(-Yc*Yr*fy*iZc**2 - fy*Zr*iZc) / px_err_sd
J[2 * i + 1, cam_idxs*6 + 1] = -(Xr*Yc*fy*iZc**2) / px_err_sd
J[2 * i + 1, cam_idxs*6 + 2] = -(Xr*fy*iZc) / px_err_sd
# keypoint locations affect reprojection error terms
####################################################
# similar to above, first separate de/dU => de/dU' * dU'/dU,
# first one (de/dU') is [[fx/Z', 0, -fx*X'/Z'**2],
# [0, fy/Z', -fy*Y'/Z'**2]]
# second one (dU'/dU => d/dU (RU + P) = R) is the camera rotation matrix R
# => i.e. rotate de/dU' by R^-1
if not poses_only:
dEu = -np.stack((fx * iZc, np.zeros((len(iZc),)), -fx * Xc * iZc**2), axis=1) / px_err_sd[:, None]
dEv = -np.stack((np.zeros((len(iZc),)), fy * iZc, -fy * Yc * iZc**2), axis=1) / px_err_sd[:, None]
dEuc = _rotate(dEu, -poses[cam_idxs, :3])
dEvc = _rotate(dEv, -poses[cam_idxs, :3])
J[2 * i + 0, n1 + pt3d_idxs * 3 + 0] = dEuc[:, 0]
J[2 * i + 0, n1 + pt3d_idxs * 3 + 1] = dEuc[:, 1]
J[2 * i + 0, n1 + pt3d_idxs * 3 + 2] = dEuc[:, 2]
J[2 * i + 1, n1 + pt3d_idxs * 3 + 0] = dEvc[:, 0]
J[2 * i + 1, n1 + pt3d_idxs * 3 + 1] = dEvc[:, 1]
J[2 * i + 1, n1 + pt3d_idxs * 3 + 2] = dEvc[:, 2]
# maybe skip first poses
if pose0.size > 0:
J = J[:, pose0.size:]
# for debugging
if 0:
import matplotlib.pyplot as plt
plt.figure(2)
plt.imshow((J.toarray() != 0).astype(int))
plt.show()
return J.tocsr()
def numerical_jacobian(fun, x0, eps=1e-4):
if 1:
from tqdm import tqdm
y0 = fun(x0)
J = np.zeros((len(y0), len(x0)))
for j in tqdm(range(len(x0))):
d = np.zeros((len(x0),))
d[j] = eps
x1 = x0 + d
y1 = fun(x1)
yd = (y1 - y0) / eps
J[:, j] = yd
elif 0:
from scipy.optimize._numdiff import approx_derivative
J = approx_derivative(fun, x0, method='2-point')
    else:
        from scipy.optimize import approx_fprime
        from tqdm import tqdm
        y0 = fun(x0)
        J = np.zeros((len(y0), len(x0)))
        for i in tqdm(range(10)):  # reduced from range(len(y0)) for speed
            J[i, :] = approx_fprime(x0, lambda x: fun(x)[i], epsilon=eps)
return J
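# Hedged sanity check for numerical_jacobian (an illustrative sketch, not part
# of the original pipeline; it assumes tqdm is installed, as imported above).
def _demo_numerical_jacobian():
    # For f(x) = x**2 elementwise, the exact Jacobian is diag(2*x); the
    # forward-difference estimate should agree to roughly the step size.
    x0 = np.array([1.0, 2.0, 3.0])
    J = numerical_jacobian(lambda x: x ** 2, x0, eps=1e-6)
    assert np.allclose(J, np.diag(2 * x0), atol=1e-4)
    return J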
def _rotated_points(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
"""
Compute residuals.
`params` contains camera parameters and 3-D coordinates.
"""
if isinstance(params, (tuple, list)):
params = np.array(params)
params = np.hstack((pose0, params))
    poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
points_3d = fixed_pt3d if len(pts3d) == 0 else pts3d
points_3d_cf = _rotate(points_3d[pt3d_idxs], poses[cam_idxs, :3]) + poses[cam_idxs, 3:6]
return points_3d_cf.flatten()
def _rot_pt_jac(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
params = np.hstack((pose0, params))
    poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
poses_only = len(fixed_pt3d) > 0
points_3d = fixed_pt3d if poses_only else pts3d
points_3d_cf = _rotate(points_3d[pt3d_idxs], poses[cam_idxs, :3]) #+ poses[cam_idxs, 3:6]
# output term count (rotated coords)
m = cam_idxs.size * 3
# parameter count (6d poses)
n = n_cams * 6
J = np.zeros((m, n))
i = np.arange(cam_idxs.size)
# poses affect reprojection error terms
########################################
# - NOTE: for the following to work, need the poses in cam-to-world, i.e. rotation of world origin first,
# then translation in rotated coordinates
# - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6891346/ equation (11)
# - also https://ingmec.ual.es/~jlblanco/papers/jlblanco2010geometry3D_techrep.pdf, section 10.3.5, eq 10.23
# - ksi: first three location, second three rotation
# - de/dksi = de/dU' * dU'/dksi
# - de/dU' = -[[fx/Zc, 0, -fx*Xc/Zc**2],
# [0, fy/Zc, -fy*Yc/Zc**2]]
# - dU'/dw = [I3 | -[U']^] = [[1, 0, 0, 0, Zc, -Yc],
# [0, 1, 0, -Zc, 0, Xc],
# [0, 0, 1, Yc, -Xc, 0]]
# - -[[fx/Zc, 0, -Xc*fx/Zc**2, | -Xc*Yc*fx/Zc**2, Xc**2*fx/Zc**2 + fx, -Yc*fx/Zc],
# [0, fy/Zc, -Yc*fy/Zc**2, | -Yc**2*fy/Zc**2 - fy, Xc*Yc*fy/Zc**2, Xc*fy/Zc]] / px_err_sd
Xc, Yc, Zc = points_3d_cf[:, 0], points_3d_cf[:, 1], points_3d_cf[:, 2]
# camera location
J[3 * i + 0, cam_idxs*6 + 3] = 1
J[3 * i + 0, cam_idxs*6 + 4] = 0
J[3 * i + 0, cam_idxs*6 + 5] = 0
J[3 * i + 1, cam_idxs*6 + 3] = 0
J[3 * i + 1, cam_idxs*6 + 4] = 1
J[3 * i + 1, cam_idxs*6 + 5] = 0
J[3 * i + 2, cam_idxs*6 + 3] = 0
J[3 * i + 2, cam_idxs*6 + 4] = 0
J[3 * i + 2, cam_idxs*6 + 5] = 1
# camera rotation
J[3 * i + 0, cam_idxs*6 + 0] = 0
J[3 * i + 0, cam_idxs*6 + 1] = Zc
J[3 * i + 0, cam_idxs*6 + 2] = -Yc
J[3 * i + 1, cam_idxs*6 + 0] = -Zc
J[3 * i + 1, cam_idxs*6 + 1] = 0
J[3 * i + 1, cam_idxs*6 + 2] = Xc
J[3 * i + 2, cam_idxs*6 + 0] = Yc
J[3 * i + 2, cam_idxs*6 + 1] = -Xc
J[3 * i + 2, cam_idxs*6 + 2] = 0
return J
def _unpack(params, n_cams, n_pts):
"""
Retrieve camera parameters and 3-D coordinates.
"""
a = n_cams * 6
b = a + n_pts * 3
poses = params[:a].reshape((n_cams, 6))
# poses[:, :3] = poses[:, :3]/180*np.pi
pts3d = params[a:b].reshape((n_pts, 3))
return poses, pts3d
def _rotate(points, rot_vecs):
"""Rotate points by given rotation vectors.
Rodrigues' rotation formula is used.
"""
# return _cached_rotate(tuple(points.flatten()), tuple(rot_vecs.flatten()))
#
#@lru_cache(maxsize=1)
# def _cached_rotate(points, rot_vecs):
# points = np.array(points).reshape((-1, 3))
# rot_vecs = np.array(rot_vecs).reshape((-1, 3))
theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
with np.errstate(invalid='ignore'):
k = rot_vecs / theta
k = np.nan_to_num(k)
dot = np.sum(points * k, axis=1)[:, np.newaxis]
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rot_points = cos_theta * points + sin_theta * np.cross(k, points) + dot * (1 - cos_theta) * k
return rot_points
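# Hedged self-check for _rotate (illustrative only; assumes scipy is available
# and that each row of rot_vecs is an axis-angle vector, as described above).
def _demo_rotate_check():
    from scipy.spatial.transform import Rotation
    pts = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.5]])
    rvs = np.array([[0.0, 0.0, np.pi / 2], [0.3, -0.1, 0.2]])
    # rotate each point by the corresponding rotation vector with scipy
    expected = np.stack([Rotation.from_rotvec(v).apply(p) for p, v in zip(pts, rvs)])
    assert np.allclose(_rotate(pts, rvs), expected)
    return expected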
def _rotate_rotations(rot_vecs_adj, rot_vecs_base):
"""Rotate rotation vectors by given rotation vectors.
Rodrigues' rotation formula is used, from
https://math.stackexchange.com/questions/382760/composition-of-two-axis-angle-rotations
"""
a = np.linalg.norm(rot_vecs_adj, axis=1)[:, np.newaxis]
b = np.linalg.norm(rot_vecs_base, axis=1)[:, np.newaxis]
with np.errstate(invalid='ignore'):
sin_a, cos_a = np.sin(0.5 * a), np.cos(0.5 * a)
sin_b, cos_b = np.sin(0.5 * b), np.cos(0.5 * b)
va = rot_vecs_adj / a
vb = rot_vecs_base / b
va = np.nan_to_num(va)
vb = np.nan_to_num(vb)
c = 2 * np.arccos(np.clip(cos_a * cos_b - sin_a * sin_b * np.sum(va * vb, axis=1)[:, None], -1, 1))
sin_c = np.sin(0.5 * c)
with np.errstate(invalid='ignore'):
res = (c / sin_c) * (sin_a * cos_b * va +
cos_a * sin_b * vb +
sin_a * sin_b * np.cross(va, vb))
res = np.nan_to_num(res)
return res
def _project(pts3d, poses, K):
"""Convert 3-D points to 2-D by projecting onto images."""
if USE_WORLD_CAM_FRAME:
pts3d_rot = _rotate(pts3d - poses[:, 3:6], -poses[:, 0:3])
else:
pts3d_rot = _rotate(pts3d, poses[:, 0:3]) + poses[:, 3:6]
pts2d_proj = K.dot(pts3d_rot.T).T # own addition
pts2d_proj = pts2d_proj[:, 0:2] / pts2d_proj[:, 2:3] # own addition
return pts2d_proj
def project(pts3d, poses, K):
return _project(pts3d, poses, K)
def wedge(w):
is_mx = len(w.shape) == 2 and w.shape[1] != 1
w_ = w.reshape((-1, 3))
zero = np.zeros((len(w_),))
what = np.array([[zero, -w_[:, 2], w_[:, 1]],
[w_[:, 2], zero, -w_[:, 0]],
[-w_[:, 1], w_[:, 0], zero]]).transpose((2, 0, 1))
return what if is_mx else what.squeeze()
def vee(R):
R_ = R.reshape((-1, 3, 3))
x = (R_[:, 2, 1] - R_[:, 1, 2]) / 2
y = (R_[:, 0, 2] - R_[:, 2, 0]) / 2
z = (R_[:, 1, 0] - R_[:, 0, 1]) / 2
w = np.array([x, y, z]).T
R_ -= wedge(w)
# assert np.any(np.isclose(np.linalg.norm(R_, axis=(1, 2)) / np.linalg.norm(w, axis=1), 0))
return w if len(R.shape) == 3 else w.squeeze()
def logR(R):
ax1, ax2 = list(range(len(R.shape)))[-2:]
c = (np.trace(R, axis1=ax1, axis2=ax2) - 1) / 2
s, th = np.sqrt(1 - c ** 2), np.arccos(c)
with np.errstate(invalid='ignore'):
tmp = (0.5 * th / s).reshape((-1, *((1,) * ax2)))
tmp = np.nan_to_num(tmp)
log_R = tmp * (R - R.swapaxes(ax1, ax2))
w = vee(log_R)
return w
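# Hedged self-check for wedge/vee/logR (illustrative sketch; assumes scipy is
# available with Rotation.as_matrix, i.e. a reasonably recent version).
def _demo_so3_log_check():
    from scipy.spatial.transform import Rotation
    w = np.array([0.1, -0.2, 0.3])
    # vee is the inverse of wedge on skew-symmetric matrices
    assert np.allclose(vee(wedge(w)), w)
    # the matrix log of a rotation built from w should recover w
    R = Rotation.from_rotvec(w).as_matrix()
    assert np.allclose(logR(R), w)
    return w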
def dlogR_dR(R):
# from section 10.3.2, eq 10.11, https://ingmec.ual.es/~jlblanco/papers/jlblanco2010geometry3D_techrep.pdf
assert R.shape == (3, 3), 'wrong size rotation matrix'
c = (np.trace(R) - 1) / 2
if np.isclose(c, 1):
S1 = np.array([[0, 0, 0],
[0, 0, -.5],
[0, .5, 0]])
S2 = np.array([[0, 0, .5],
[0, 0, 0],
[-.5, 0, 0]])
S3 = np.array([[0, -.5, 0],
[.5, 0, 0],
[0, 0, 0]])
return np.hstack((S1, S2, S3))
s, th = np.sqrt(1 - c**2), np.arccos(c)
a1, a2, a3 = np.array([R[2, 1] - R[1, 2],
R[0, 2] - R[2, 0],
R[1, 0] - R[0, 1]]) * (0.25 * (th*c - s) / s**3)
b = 0.5 * th / s
S1 = np.array([[a1, 0, 0],
[a2, 0, -b],
[a3, b, 0]])
S2 = np.array([[0, a1, b],
[0, a2, 0],
[-b, a3, 0]])
S3 = np.array([[0, -b, a1],
[b, 0, a2],
[0, 0, a3]])
return np.hstack((S1, S2, S3))
class Manifold(np.ndarray):
def __new__(cls, *args, **kwargs):
self = super().__new__(cls, *args, **kwargs)
# for some strange reason, super().__init__ is called without specifying it
self.so3 = None
self.not_so3 = None
return self
def set_so3_groups(self, so3):
assert isinstance(so3, np.ndarray) and len(so3.shape) == 1 and so3.dtype == bool, \
'invalid index array type, must be 1-d bool ndarray'
self.so3 = so3.copy()
self.not_so3 = np.logical_not(self.so3)
def copy(self, order='C'):
new = super(Manifold, self).copy(order)
new.so3 = self.so3.copy()
new.not_so3 = self.not_so3.copy()
return new
def __add__(self, other):
assert type(other) == np.ndarray, 'Can only sum an ndarray to this Manifold implementation'
new = self.to_array(new=True)
new[self.not_so3] += other[self.not_so3]
new[self.so3] = _rotate_rotations(other[self.so3].reshape((-1, 3)), new[self.so3].reshape((-1, 3))).flatten()
new = Manifold(new.shape, buffer=new, dtype=self.dtype)
new.set_so3_groups(self.so3)
return new
def __sub__(self, other):
assert type(other) in (np.ndarray, Manifold)
if type(other) == Manifold:
assert np.all(self.so3 == other.so3)
return self.__add__(-other.to_array())
else:
return self.__add__(-other)
def to_array(self, new=False):
return np.frombuffer(np.copy(self) if new else self, dtype=self.dtype)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
if dtype in (float, np.float32, np.float64):
return self
return self.to_array().astype(dtype, order, casting, subok, copy)
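# Hedged usage sketch for the Manifold class above (illustrative only): the
# masked entries are treated as an so(3) rotation vector and composed via
# _rotate_rotations, the remaining entries are updated by plain addition.
def _demo_manifold_update():
    x = np.array([0.1, -0.2, 0.3, 1.0, 2.0, 3.0])
    m = Manifold(x.shape, buffer=x, dtype=x.dtype)
    m.set_so3_groups(np.array([True, True, True, False, False, False]))
    delta = np.array([0.05, 0.0, 0.0, 0.5, 0.5, 0.5])
    updated = m + delta
    # the Euclidean block behaves like ordinary vector addition
    assert np.allclose(updated.to_array()[3:], [1.5, 2.5, 3.5])
    return updated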
|
import json
from typing import TYPE_CHECKING, Type, TypedDict, cast
import jwt
import requests
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.utils import timezone
from requests.auth import HTTPBasicAuth
if TYPE_CHECKING:
from apps.users import models
User = cast(Type["models.User"], get_user_model())
CLIENT_ID = settings.DATAPORTEN_ID
UserInfo = TypedDict(
"UserInfo",
{
"sub": str,
"name": str,
"email": str,
"email_verified": bool,
"dataporten-userid_sec": list[str],
"connect-userid_sec": list[str],
},
)
class TokenResponse(TypedDict):
access_token: str
id_token: str
class DataportenAuth:
"""
Class implementing the backend part of authenticating a user with Dataporten.
Upon receiving an authorization code from frontend
this class completes the authentication by obtaining an access token from Dataporten,
which can then be used to access user data at Dataporten.
"""
@staticmethod
def complete_dataporten_auth(code: str) -> TokenResponse:
"""
https://docs.feide.no/service_providers/openid_connect/feide_obtaining_tokens.html
"""
params = {
"code": code,
"grant_type": "authorization_code",
"redirect_uri": settings.DATAPORTEN_REDIRECT_URI,
"client_id": CLIENT_ID,
}
try:
response = requests.post(
"https://auth.dataporten.no/oauth/token",
params,
auth=HTTPBasicAuth(
CLIENT_ID,
settings.DATAPORTEN_SECRET,
),
)
# Raises exceptions upon HTTP errors
response.raise_for_status()
except requests.exceptions.RequestException:
raise RuntimeError("En feil oppstod under fullføring av Dataporten-autentisering.")
return response.json()
@staticmethod
def validate_response(token_response: TokenResponse):
"""
https://docs.feide.no/reference/oauth_oidc/openid_connect_details.html
"""
id_token = token_response["id_token"]
# Collect available public keys, mapping each key's ID to its parsed representation
try:
response = requests.get("https://auth.dataporten.no/openid/jwks")
response.raise_for_status()
jwks = response.json()
except requests.exceptions.RequestException:
raise RuntimeError("En systemfeil oppstod under validering av brukeren.")
public_keys = {}
for jwk in jwks["keys"]:
kid = jwk["kid"]
public_keys[kid] = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(jwk))
# look up the public key corresponding to the private key with which the token was signed
kid = jwt.get_unverified_header(id_token)["kid"]
key = public_keys[kid]
try:
jwt.decode(
id_token,
key=key,
algorithms=["RS256"],
issuer="https://auth.dataporten.no",
audience=CLIENT_ID,
)
except jwt.PyJWTError:
raise ValidationError("Kunne ikke validere brukeren.")
@staticmethod
def confirm_indok_enrollment(access_token: str) -> bool:
params = {
"Authorization": f"Bearer {access_token}",
}
try:
response = requests.get(
"https://groups-api.dataporten.no/groups/me/groups/fc:fs:fs:prg:ntnu.no:MTIØT",
headers=params,
)
response.raise_for_status()
except requests.exceptions.RequestException:
return False
data = response.json()
enrolled = False
if "basic" in data and "active" in data:
enrolled = data["basic"] == "member" and data["active"]
return enrolled
@staticmethod
def get_user(access_token: str) -> "models.User":
"""
https://docs.feide.no/service_providers/openid_connect/oidc_authentication.html
"""
params = {
"Authorization": f"Bearer {access_token}",
}
try:
response = requests.get("https://auth.dataporten.no/openid/userinfo", headers=params)
response.raise_for_status()
except requests.exceptions.RequestException:
raise Exception("Kunne ikke hente brukerinfo fra Dataporten.")
data: UserInfo = response.json()
feide_userid = data["sub"]
first_name, last_name = data["name"].rsplit(" ", 1)
username = next(
(id for id in data["dataporten-userid_sec"] if id.endswith("@ntnu.no")), data["dataporten-userid_sec"][0]
)
username = username[username.index(":") + 1 : username.index("@")]
email = data["email"]
try:
return User.objects.get(feide_userid=feide_userid)
except User.DoesNotExist:
return User(
first_name=first_name,
last_name=last_name,
feide_userid=feide_userid,
email=email,
username=username,
feide_email=email,
)
@classmethod
def authenticate_and_get_user(cls, code: str) -> "models.User":
# Complete authentication of user
response = cls.complete_dataporten_auth(code)
cls.validate_response(response)
access_token = response["access_token"]
# Fetch user info from Dataporten
user = cls.get_user(access_token)
if not user.pk:
user.is_indok = cls.confirm_indok_enrollment(access_token)
user.last_login = timezone.now()
user.save()
return user
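# Hedged usage sketch (hypothetical Django view, not part of this module's
# API): the frontend passes the authorization code it received from Dataporten,
# and the backend completes the flow and returns the matching user.
def example_dataporten_callback(request):
    code = request.GET["code"]  # "code" query parameter name is an assumption
    return DataportenAuth.authenticate_and_get_user(code)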
|
import json
import logging
class FwtSpec:
def __init__(self, spec_json):
try:
spec_json_obj = json.load(spec_json)
self.column_names = spec_json_obj['ColumnNames']
self.offsets = spec_json_obj['Offsets']
self.include_header = spec_json_obj['IncludeHeader']
self.encoding_format_fwt = spec_json_obj['FixedWidthEncoding']
self.encoding_format_del = spec_json_obj['DelimitedEncoding']
self.offsets_int = [int(i) for i in self.offsets]
if len(self.offsets) == len(self.column_names):
self.spec_dict = dict(zip(self.column_names, self.offsets_int))
else:
raise ValueError("The number of columns do not match the number of offsets in the spec.")
if self.include_header not in ["True", "False"]:
logging.warning("Invalid value for the key 'IncludeHeader', expected one of [\"True\", \"False\"]. "
"Setting 'Include Header' = True.")
self.include_header = True
except AttributeError:
logging.error("Invalid config param, expected : File Pointer.")
except (KeyError, ValueError) as e:
logging.error("Invalid config specification.")
logging.exception("Exception:", exc_info=True)
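# Hedged usage sketch (illustrative only): FwtSpec expects an open file-like
# object with the JSON spec, so an in-memory StringIO is enough for a demo.
def _demo_fwt_spec():
    import io
    spec_file = io.StringIO(json.dumps({
        "ColumnNames": ["id", "name"],
        "Offsets": ["4", "10"],
        "IncludeHeader": "True",
        "FixedWidthEncoding": "utf-8",
        "DelimitedEncoding": "utf-8",
    }))
    fwt = FwtSpec(spec_file)
    assert fwt.spec_dict == {"id": 4, "name": 10}
    return fwt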
|
"""
Database Initialization and storage handling module.
See docs for how to set up the database.
Logging messages to a log file is not needed here as this is run directly
as a command-line script. The detailed creation of places might be good to
move to a log file with only summary level data printed to the console.
"""
import os
from sqlobject import SQLObjectNotFound
from sqlobject.dberrors import DuplicateEntryError
import models
from etc import base_data
from lib import locations
from lib.config import AppConf
# Make model objects available on the database module.
from models import * # noqa: F403
from models.connection import conn # noqa: F401
conf = AppConf()
def _getModelClasses():
return [getattr(models, tableName) for tableName in models.__all__]
def _dropTables(verbose=True):
"""
Drop all tables.
"""
modelsList = _getModelClasses()
if verbose:
print("Dropping tables...")
for m in modelsList:
if verbose:
print("-> Dropping {0}".format(m.__name__))
m.dropTable(ifExists=True, cascade=True)
return None
def _createTables(verbose=True):
"""
Create all tables which do not already exist.
"""
modelsList = _getModelClasses()
if verbose:
print("Creating tables...")
for m in modelsList:
if verbose:
print("-> Creating {0}".format(m.__name__))
m.createTable(ifNotExists=True)
return None
def addWorldAndContinents():
"""
Insert default data into the database. This should be called when the
database is initialized.
"""
# Create the world as a Place.
woeid = 1
name = "Worldwide"
try:
world = Supername(woeid=woeid, name=name) # noqa: F405
print("Created - Supername: `{}`.".format(name))
except DuplicateEntryError:
world = Supername.byWoeid(1) # noqa: F405
print("Exists - Supername: `{}`.".format(name))
# Create the continents as Places, with the world as the parent.
for woeid, name in base_data.continentBase.items():
try:
Continent(woeid=woeid, name=name, supernameID=world.id) # noqa: F405
print("Created - Continent: `{}`.".format(name))
except DuplicateEntryError:
print("Exists - Continent: `{}`.".format(name))
def addTownsAndCountries(maxTowns=None):
"""
Add Town and Country level data extracted from Twitter API to the database.
The function in locations will get the sample location file provided with
the repo but can also reference a custom JSON.
:param maxTowns: When running tests, you can set this optionally to an
integer for a maximum number of towns to insert into the DB. The total
is usually around 400.
:return: None
"""
# Load from JSON file of Twitter locations. This is a generator so we don't
# store it as a variable. Otherwise the 2nd time we iterate it is finished.
for loc in locations.getJSON():
if loc["placeType"]["name"].lower() == "country":
woeid = loc["woeid"]
name = loc["name"]
countryCode = loc["countryCode"]
try:
Country(woeid=woeid, name=name, countryCode=countryCode) # noqa: F405
print("Country - created: {}.".format(name))
except DuplicateEntryError:
print("Country - exists: {}.".format(name))
townCount = 0
for loc in locations.getJSON():
if maxTowns is not None and townCount == maxTowns:
break
        if loc["placeType"]["name"].lower() == "town":
            # Increment on both new and existing towns.
            townCount += 1
try:
parentCountryID = Country.byWoeid(loc["parentid"]).id # noqa: F405
except SQLObjectNotFound as e:
parentCountryID = None
msg = (
"Unable to find parent country in DB with WOEID {woeid}"
" for town {name}.".format(woeid=loc["parentid"], name=loc["name"])
)
print("ERROR {type}. {msg}".format(type=type(e).__name__, msg=msg))
woeid = loc["woeid"]
name = loc["name"]
try:
Town(woeid=woeid, name=name, countryID=parentCountryID) # noqa: F405
print("Town - created: {}.".format(name))
except DuplicateEntryError:
print("Town - exists: {}.".format(name))
def mapCountriesToContinents():
"""
Iterate through the countries in the database and ensure they have a
parent continent set.
:return: None
"""
for c in Country.select(): # noqa: F405
# If Continent is not already set for the Country, then iterate
# through our mapping to find the appropriate Continent name.
if not c.continent:
for continent, countries in base_data.continentMapping.items():
# Check if the country name in the DB falls in the countries
# list we have mapped to the current continent.
if c.name in countries:
# We have found the right continent.
break
else:
raise ValueError(
"Continent could not be found for country: {}".format(c)
)
# Lookup Continent object. Returns as None if no match.
# Use order by to avoid ambiguity error on id.
continentResults = Continent.selectBy(name=continent).orderBy( # noqa: F405
"place.id"
)
if continentResults:
# Update the country object with the continent we found.
continentRecord = continentResults.getOne()
c.continentID = continentRecord.id
print(
"Link - created: {continent:15} <-- {country:15}".format(
continent=continentRecord.name, country=c.name
)
)
else:
print(
"Link - exists: {continent:15} <-- {country:15}".format(
continent=c.continent.name, country=c.name
)
)
def addLocationData(maxTowns=None):
"""
Add location data and associations to database. Using preset data and
also JSON extracted from Twitter API.
Any existing data is skipped and is not overwritten and should not raise
errors. In development and testing, set maxTowns to a low integer to save
time rather than adding about 400 towns to the db.
    TODO: Improve these with bulk insert statements, built either in Python
        or directly in SQLite. This currently takes too long when setting up
        the application.
:return: None
"""
addWorldAndContinents()
addTownsAndCountries(maxTowns)
mapCountriesToContinents()
def _path():
print(conf.dbPath)
def _checkDBexists():
status = os.path.exists(conf.dbPath)
print(conf.dbPath)
print("DB file exists." if status else "DB file not created yet.")
print()
return status
def _baseLabels():
print("Inserting all base labels...")
categoryKeys = ("fetchProfiles", "influencers", "search", "lookupTweets")
campaignKeys = ("fetchTweets", "search", "lookupTweets")
for key in categoryKeys:
label = conf.get("Labels", key)
try:
categoryRec = Category(name=label) # noqa: F405
print("Created category: {0}".format(categoryRec.name))
except DuplicateEntryError:
print("Skipped category: {0}".format(label))
for key in campaignKeys:
label = conf.get("Labels", key)
try:
campaignRec = Campaign(name=label, searchQuery=None) # noqa: F405
print("Created campaign: {0}".format(campaignRec.name))
except DuplicateEntryError:
print("Skipped campaign: {0}".format(label))
def _populate(maxTowns=None):
# TODO Make this and the internal calls not verbose for tests.
print("Adding default data...")
if isinstance(maxTowns, int):
addLocationData(maxTowns)
else:
addLocationData()
print("-> Added fixtures data.\n")
return maxTowns
|
"""
Work on hashtags
"""
import re
HASHTAG_PATTERN = re.compile(r'(^|\s)#([^\d\s]\w+\b)')
def extract_hashtags(content):
"""
Extract twitter-like hashtags from a given content
"""
return [
matches[1]
for matches in HASHTAG_PATTERN.findall(content)
]
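def _demo_extract_hashtags():
    """
    Hedged example (illustrative only): hashtags must start with a non-digit
    character and be preceded by whitespace or the start of the string.
    """
    tags = extract_hashtags("Loving #Python and #opensource, but not#this or #42")
    assert tags == ["Python", "opensource"]
    return tags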
|
# -*- coding: utf-8 -*-
"""
This script calculates SEMA score between two AMRs
"""
import argparse
import codecs
import logging
from amr import AMR
class Sema:
"""
SEMA: an Extended Semantic Metric Evaluation for AMR.
"""
def __init__(self):
"""
Constructor. Initiates the variables
"""
# general values
self.m, self.t, self.c = 0.0, 0.0, 0.0
# top values
self.top_m, self.top_t, self.top_c = 0.0, 0.0, 0.0
# concept values
self.concept_m, self.concept_t, self.concept_c = 0.0, 0.0, 0.0
# relation values
self.relation_m, self.relation_t, self.relation_c = 0.0, 0.0, 0.0
def compute_sema(self, amr_test, amr_gold):
"""
Calculates sema metric. Evaluates the test AMR against reference AMR
:param amr_test: test AMR
:param amr_gold: reference AMR
:return:
"""
test_concepts, test_attributes, test_relations = amr_test.get_triples()
gold_concepts, gold_attributes, gold_relations = amr_gold.get_triples()
visited = []
# remove TOP relation
test_attributes, gold_attributes = self.remove_top_relation(test_attributes, gold_attributes)
# test_relations, gold_relations = self.remove_duplicate_relation(test_relations, gold_relations)
self.compute_root(test_concepts[0][2], gold_concepts[0][2], visited)
self.compute_relations(test_relations, gold_relations, test_concepts, gold_concepts, visited)
self.compute_attributes(test_attributes, gold_attributes, test_concepts, gold_concepts, visited)
self.c += self._get_total_triples(test_concepts, test_relations, test_attributes)
self.t += self._get_total_triples(gold_concepts, gold_relations, gold_attributes)
@staticmethod
def remove_duplicate_relation(test_relations, gold_relations):
"""
Removes duplicate relations (Not used)
:param test_relations: test relations
:param gold_relations: reference relations
:return: relations without duplicates
"""
return list(set(test_relations)), list(set(gold_relations))
@staticmethod
def _get_total_triples(concepts, relations, attributes):
"""
Gets the total number of triples
:param concepts: concepts
:param relations: relations
:param attributes: attributes
:return: total number of triples
"""
return len(concepts) + len(relations) + len(attributes)
@staticmethod
def _get_previous_node(pre_g, gold_concepts):
"""
Gets the previous node
:param pre_g: previous gold node
:param gold_concepts: gold concepts
:return: gold node
"""
node_g = [(rel, var, nod) for rel, var, nod in gold_concepts if var == pre_g][0]
return node_g
@staticmethod
def _check_dependence(pre_t, pre_g, test_concepts, gold_concepts):
"""
        Checks whether the nodes of the test and reference AMRs are dependent
:param pre_t: previous test node
:param pre_g: previous reference node
:param test_concepts: test AMR concepts
:param gold_concepts: reference AMR concepts
:return: true if nodes are equal
"""
node_t = [(rel, var, nod) for rel, var, nod in test_concepts if var == pre_t][0][2]
node_g = [(rel, var, nod) for rel, var, nod in gold_concepts if var == pre_g][0][2]
if node_t == node_g:
return True
return False
@staticmethod
def _get_neighbors(pos_t=None, pos_g=None, test_concepts=None, gold_concepts=None):
"""
Gets neighbors from node
:param pos_t: posterior test node
:param pos_g: posterior reference node
:param test_concepts: test concepts
:param gold_concepts: reference concepts
        :return: All neighbor nodes
"""
test_concept = [(rel, var, nod) for rel, var, nod in test_concepts if var == pos_t]
gold_concept = [(rel, var, nod) for rel, var, nod in gold_concepts if var == pos_g]
return [test_concept[0][2]], gold_concept[0], [gold_concept[0][2]]
@staticmethod
def remove_top_relation(test_attributes, gold_attributes):
"""
Removes :TOP relation from test and reference AMRs
:param test_attributes: test attributes
:param gold_attributes: reference attributes
:return: attributes without :TOP relation
"""
index1 = [y[0] for y in test_attributes].index('TOP')
index2 = [y[0] for y in gold_attributes].index('TOP')
del test_attributes[index1]
del gold_attributes[index2]
return test_attributes, gold_attributes
def compute_root(self, test_root, gold_root, visited):
"""
Computes the root node
:param test_root: test root
:param gold_root: reference root
:param visited: visited triples
:return:
"""
if test_root == gold_root:
self.m += 1
self.top_m += 1
visited.append(('instance', 'b0', gold_root))
def compute_relations(self, test_relations, gold_relations, test_concepts, gold_concepts, visited):
"""
        Computes relation triples, taking their dependence into account
:param test_relations: test relations
:param gold_relations: reference relations
:param test_concepts: test concepts
:param gold_concepts: reference concepts
:param visited: visited triples
:return:
"""
for rel_t, pre_t, pos_t in test_relations:
for rel_g, pre_g, pos_g in gold_relations:
# if relations are equal
if rel_t == rel_g:
# if previous concepts are equal
if self._check_dependence(pre_t, pre_g, test_concepts, gold_concepts):
# gets neighbors
test_concept, current_node, gold_concept = self._get_neighbors(pos_t=pos_t, pos_g=pos_g,
test_concepts=test_concepts,
gold_concepts=gold_concepts)
relation = (rel_g, pre_g, pos_g)
previous_node = self._get_previous_node(pre_g, gold_concepts)
self.compute_concepts(test_concept, gold_concept, current_node, relation, previous_node,
visited)
def compute_concepts(self, test_concepts, gold_concepts, current_node, relation, previous_node, visited):
"""
        Computes the concepts, taking their dependence into account
:param test_concepts: test concepts
:param gold_concepts: reference concepts
:param current_node: current node
:param relation: previous relation
:param previous_node: previous node
:param visited: visited triples
:return:
"""
for test_node in test_concepts:
for gold_node in gold_concepts:
if test_node == gold_node:
if current_node not in visited and relation not in visited:
self.m += 2
visited.append(current_node)
visited.append(relation)
if previous_node not in visited:
self.m += 1
visited.append(previous_node)
if current_node in visited and previous_node in visited and relation not in visited:
flag = False
for triple in visited:
if relation[1] == triple[1] and relation[2] == triple[2]:
flag = True
if not flag:
self.m += 1
visited.append(relation)
def compute_attributes(self, test_attributes, gold_attributes, test_concepts, gold_concepts, visited):
"""
Computes attributes test triples against attributes reference triples
:param test_attributes: test attributes
:param gold_attributes: reference attributes
:param test_concepts: test concepts
:param gold_concepts: reference concepts
:param visited: visited triples
:return:
"""
for rel_t, pre_t, pos_t in test_attributes:
for rel_g, pre_g, pos_g in gold_attributes:
# if relation and constant are equal to reference values
if self._check_dependence(pre_t, pre_g, test_concepts, gold_concepts):
if rel_t == rel_g and pos_t == pos_g and (rel_g, pre_g, pos_g) not in visited:
self.m += 1
visited.append((rel_g, pre_g, pos_g))
def get_sema_value(self):
"""
Calculates precision, recall, and f-score
precision = correct triples / produced triples
recall = correct triples / total triples
:return: precision, recall, and f-score
"""
try:
precision = self.m / self.c
recall = self.m / self.t
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
except ZeroDivisionError:
return 0, 0, 0
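def _demo_sema_value():
    """
    Hedged worked example (illustrative only) of the precision/recall/F1
    arithmetic above: 8 matched triples out of 10 produced and 12 reference
    triples give P = 0.8, R ~= 0.667 and F1 ~= 0.727.
    """
    sema = Sema()
    sema.m, sema.c, sema.t = 8.0, 10.0, 12.0
    precision, recall, f1 = sema.get_sema_value()
    assert abs(precision - 0.8) < 1e-9 and abs(recall - 8.0 / 12.0) < 1e-9
    return precision, recall, f1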
def main(data):
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
test = codecs.open(data.test, 'r', 'utf-8')
gold = codecs.open(data.gold, 'r', 'utf-8')
flag = False
sema = Sema()
while True:
cur_amr1 = AMR.get_amr_line(test)
cur_amr2 = AMR.get_amr_line(gold)
if cur_amr1 == '' and cur_amr2 == '':
break
if cur_amr1 == '':
logger.error('Error: File 1 has less AMRs than file 2')
logger.error('Ignoring remaining AMRs')
flag = True
break
if cur_amr2 == '':
logger.error('Error: File 2 has less AMRs than file 1')
logger.error('Ignoring remaining AMRs')
flag = True
break
try:
amr1 = AMR.parse_AMR_line(cur_amr1)
except Exception as e:
logger.error('Error in parsing amr 1: %s' % cur_amr1)
logger.error("Please check if the AMR is ill-formatted. Ignoring remaining AMRs")
logger.error("Error message: %s" % str(e))
flag = True
break
try:
amr2 = AMR.parse_AMR_line(cur_amr2)
except Exception as e:
logger.error("Error in parsing amr 2: %s" % cur_amr2)
logger.error("Please check if the AMR is ill-formatted. Ignoring remaining AMRs")
logger.error("Error message: %s" % str(e))
flag = True
break
prefix_test = 'a'
prefix_gold = 'b'
amr1.rename_node(prefix_test)
amr2.rename_node(prefix_gold)
sema.compute_sema(amr1, amr2)
if not flag:
precision, recall, f1 = sema.get_sema_value()
print(f'SEMA: P {precision:.2f} R {recall:.2f} F1 {f1:.2f}')
if __name__ == '__main__':
args = argparse.ArgumentParser(description='SEMA metric',
epilog='Usage: python sema.py -t parsedAMR.txt -g referenceAMR.txt')
args.add_argument('-t', '--test', help='test AMR', required=True)
args.add_argument('-g', '--gold', help='Reference AMR', required=True)
main(args.parse_args())
|
try:
import Tkinter as tk
import tkMessageBox as tkmb
except ImportError:
import tkinter as tk
from tkinter import messagebox as tkmb
def show_error(title, message):
root = tk.Tk()
root.withdraw()
tkmb.showerror(title=title, message=message)
def show_message(title, message):
root = tk.Tk()
root.withdraw()
tkmb.showinfo(title=title, message=message)
if __name__ == '__main__':
import math
show_message('Informative Message', 'You are about to see several\ndemonstrations '
'of tkinter\'s messagebox\nshow_error and show_message methods.')
try:
x = 1
y = 0
print(x/y)
except ZeroDivisionError as e:
show_error('Error', e)
try:
print(math.log(y))
except ValueError as e:
show_error("Error", e)
try:
percent_effort_given = 101
if percent_effort_given < 50:
show_message('Inspirational Message', 'Try harder!')
elif percent_effort_given > 100:
raise ValueError('You can\'t give more than 100%')
except ValueError as e:
show_error('Error', e)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tools for the impurity calculation plugin and its workflows
"""
#use print('message') instead of print 'message' in python 2.7 as well:
from __future__ import print_function
# redefine raw_input for python 3/2.7 compatibility
from __future__ import absolute_import
from builtins import object
from sys import version_info
from six.moves import range
from six.moves import input
if version_info[0] >= 3:
    def raw_input(msg):
        return input(msg)
__copyright__ = (u"Copyright (c), 2018, Forschungszentrum Jülich GmbH,"
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.3"
__contributors__ = u"Philipp Rüßmann"
class modify_potential(object):
"""
Class for old modify potential script, ported from modify_potential script, initially by D. Bauer
"""
def _check_potstart(self, str1, mode='pot', shape_ver='new'):
if mode=='shape':
if shape_ver=='new':
check1='Shape number' in str1
else:
check1= (len(str1)==11)
else:
check1='exc:' in str1
return check1
def _read_input(self, filepath):
#print(filepath)
with open(filepath) as file:
data = file.readlines()
if 'shapefun' in filepath:
mode = 'shape'
else:
mode = 'pot'
#print(mode, len(data))
# read file
index1=[];index2=[]
for i in range(len(data)):
if self._check_potstart(data[i], mode=mode):
index1.append(i)
if len(index1)>1: index2.append(i-1)
index2.append(i)
# read shapefun if old style is used
if mode=='shape' and len(index1)<1:
index1=[];index2=[]
for i in range(len(data)):
if self._check_potstart(data[i], mode=mode, shape_ver='old'):
index1.append(i)
if len(index1)>1: index2.append(i-1)
index2.append(i)
"""
print(index1)
print(index2)
print('Potential file read')
print('found %i potentials in file'%len(index1))
print('')
"""
return index1, index2, data
def shapefun_from_scoef(self, scoefpath, shapefun_path, atom2shapes, shapefun_new):
"""
Read shapefun and create impurity shapefun using scoef info and shapes array
:param scoefpath: absolute path to scoef file
:param shapefun_path: absolute path to input shapefun file
:param shapes: shapes array for mapping between atom index and shapefunction index
:param shapefun_new: absolute path to output shapefun file to which the new shapefunction will be written
"""
index1, index2, data = self._read_input(shapefun_path)
order=list(range(len(index1)))
natomtemp = int(open(scoefpath).readlines()[0])
filedata=open(scoefpath).readlines()[1:natomtemp+1]
listnew=[]
for line in filedata:
if (len(line.split())>1):
listnew.append(atom2shapes[int(line.split()[3])-1]-1)
order = listnew
datanew=[]
for i in range(len(order)):
for ii in range(index1[order[i]], index2[order[i]]+1 ):
datanew.append(data[ii])
# add header to shapefun_new
tmp = datanew
datanew = []
datanew.append(' %i\n' %(len(order)))
datanew.append(' 1.000000000000E+00\n')
datanew += tmp
with open(shapefun_new,'w') as f:
f.writelines(datanew)
def neworder_potential(self, potfile_in, potfile_out, neworder, potfile_2=None, replace_from_pot2=None):
"""
        Read a potential file and write a new potential using a list describing the order of the new potential.
        If a second potential is given as input together with an index list, then the corresponding positions of
        the output potential are overwritten with positions from the second input potential.
:param potfile_in: absolute path to input potential
:type potfile_in: str
:param potfile_out: absolute path to output potential
:type potfile_out: str
:param neworder: list after which output potential is constructed from input potential
:type neworder: list
:param potfile_2: optional, absolute path to second potential file if
positions in new list of potentials shall be replaced by positions of
            second potential, requires *replace_from_pot2* to be given as well
:type potfile_2: str
        :param replace_from_pot2: optional, list containing tuples of (position
in newlist that is to be replaced, position in pot2 with which position
is replaced)
        :type replace_from_pot2: list
:usage:
1. modify_potential().neworder_potential(<path_to_input_pot>, <path_to_output_pot>, [])
"""
from numpy import array, shape
index1, index2, data = self._read_input(potfile_in)
if potfile_2 is not None:
index12, index22, data2 = self._read_input(potfile_2)
# check if also replace_from_pot2 is given correctly
if replace_from_pot2 is None:
raise ValueError('replace_from_pot2 not given')
else:
replace_from_pot2 = array(replace_from_pot2)
if shape(replace_from_pot2)[1]!=2:
raise ValueError('replace_from_pot2 needs to be a 2D array!')
else:
if replace_from_pot2 is not None:
raise ValueError('replace_from_pot2 given but potfile_2 not given')
# set order in which potential file is written
# ensure that numbers are integers:
order = [int(i) for i in neworder]
datanew=[]
for i in range(len(order)):
# check if new position is replaced with position from old pot
if replace_from_pot2 is not None and i in replace_from_pot2[:,0]:
replace_index = replace_from_pot2[replace_from_pot2[:,0]==i][0][1]
for ii in range(index12[replace_index], index22[replace_index]+1 ):
datanew.append(data2[ii])
            else: # otherwise take new potential according to input list
for ii in range(index1[order[i]], index2[order[i]]+1 ):
datanew.append(data[ii])
# write out new potential
with open(potfile_out,'w') as f:
f.writelines(datanew)
|
from functools import wraps
from os import PathLike
from typing import Callable, Union
__all__ = [
'read_lines', 'read_json'
]
def read_lines(path: str, *, filter_empty=True):
from typing import List
def decorator(func: Callable[..., List[str]]):
@wraps(func)
def wrapper(*args, **kwargs):
with open(path, 'r') as f:
lines = f.readlines()
if filter_empty:
lines = [line for line in lines if line.strip()]
wrapper.lines = lines
return func(*args, **kwargs)
return wrapper
return decorator
def read_json(path: Union[str, bytes, PathLike, int]):
from typing import Dict
def decorator(func: Callable[..., Dict]):
@wraps(func)
def wrapper(*args, **kwargs):
import json
with open(path, 'r') as f:
jsn = json.loads(f.read())
wrapper.json = jsn
return func(*args, **kwargs)
return wrapper
return decorator
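# Hedged usage sketch ('notes.txt' is a hypothetical path, not part of this
# module): the decorator attaches the file's lines to the wrapper, so the
# wrapped function can read them via its own attribute.
@read_lines('notes.txt')
def count_nonempty_lines():
    return len(count_nonempty_lines.lines)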
|
import datetime
import unittest
import genetic
def get_fitness(genes):
return genes.count(1)
def display(candidate, startTime):
timeDiff = datetime.datetime.now() - startTime
print(
"{0}...{1}\t{2:3.2f}\t{3}".format(
"".join(map(str, candidate.Genes[:15])),
"".join(map(str, candidate.Genes[-15:])),
candidate.Fitness,
str(timeDiff),
)
)
    result = []
    for gene in candidate.Genes:
        result.append(str(gene))
    return result
class OneMaxTests(unittest.TestCase):
def test_benchmark(self):
genetic.Benchmark.run(lambda: self.test(4000))
def test(self, length=100):
geneset = [0, 1]
startTime = datetime.datetime.now()
def fnDisplay(candidate):
display(candidate, startTime)
def fnGetFitness(genes):
return get_fitness(genes)
optimalFitness = length
best = genetic.get_best(
fnGetFitness, length, optimalFitness, geneset, fnDisplay
)
self.assertEqual(best.Fitness, optimalFitness)
# EOF
|
# -*- coding: utf-8 -*-
r"""
Bending of focusing mirror
--------------------------
Uses :mod:`shadow` backend.
File: `\\examples\\withShadow\\04_06\\05_VFM_bending.py`
Pictures at the sample position,
:ref:`type 1 of global normalization<globalNorm>`
+---------+---------+---------+---------+
| |VFMR1| | |VFMR2| | |VFMR3| | |
+---------+---------+---------+ |VFMR4| |
| |VFMR7| | |VFMR6| | |VFMR5| | |
+---------+---------+---------+---------+
.. |VFMR1| image:: _images/VFM_R0317731_norm1.*
:scale: 35 %
.. |VFMR2| image:: _images/VFM_R0363711_norm1.*
:scale: 35 %
.. |VFMR3| image:: _images/VFM_R0416345_norm1.*
:scale: 35 %
.. |VFMR4| image:: _images/VFM_R0476597_norm1.*
:scale: 35 %
:align: middle
.. |VFMR5| image:: _images/VFM_R0545567_norm1.*
:scale: 35 %
.. |VFMR6| image:: _images/VFM_R0624518_norm1.*
:scale: 35 %
.. |VFMR7| image:: _images/VFM_R0714895_norm1.*
:scale: 35 %
"""
__author__ = "Konstantin Klementiev"
__date__ = "1 Mar 2012"
import sys
sys.path.append(r"c:\Alba\Ray-tracing\with Python")
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.shadow as shadow
def main():
plot1 = xrtp.XYCPlot('star.04')
plot1.caxis.offset = 6000
plot1.xaxis.limits = [-1, 1]
plot1.yaxis.limits = [-1, 1]
plot1.yaxis.factor *= -1
textPanel1 = plot1.fig.text(0.86, 0.8, '', transform=plot1.fig.transFigure,
size=12, color='r', ha='center')
#==========================================================================
threads = 4
#==========================================================================
start01 = shadow.files_in_tmp_subdirs('start.01', threads)
start04 = shadow.files_in_tmp_subdirs('start.04', threads)
rmirr0 = 744680.
shadow.modify_input(start01, ('RMIRR', str(rmirr0)))
angle = 4.7e-3
tIncidence = 90 - angle * 180 / np.pi
shadow.modify_input(
start01, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
shadow.modify_input(
start04, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
rmaj0 = 476597.0
def plot_generator():
for rmaj in np.logspace(-1, 1, 7, base=1.5) * rmaj0:
shadow.modify_input(start04, ('R_MAJ', str(rmaj)))
filename = 'VFM_R%07i' % rmaj
plot1.title = filename
plot1.saveName = [filename + '.pdf', filename + '.png']
textToSet = 'focusing mirror\nmeridional radius\n$R =$ %.1f km'\
% (rmaj * 1e-5)
textPanel1.set_text(textToSet)
yield
xrtr.run_ray_tracing(
plot1, repeats=640, updateEvery=2, threads=threads,
energyRange=[5998, 6002], generator=plot_generator, globalNorm=True,
backend='shadow')
#this is necessary to use multiprocessing in Windows, otherwise the new Python
#contexts cannot be initialized:
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Mikhail Kyosev <mialygk@gmail.com>
# License: BSD - 2-clause
from __future__ import print_function
from collections import OrderedDict
import re
import glob
import time
import os
import datetime
import functools
import curses
from subprocess import Popen, PIPE
class Readings:
def __init__(self):
self.start_time = int(time.time())
self.delta_t = {}
def run(self):
current = {}
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
try:
while True:
now = int(time.time()) - int(self.start_time)
self.save_data(current, self.fetch_data())
stdscr.clear()
stdscr.refresh()
self.output(current, "C")
print("\rRunning from: {0:10}".format(str(datetime.timedelta(seconds = now))))
print("\rPress CTRL + C to exit...\r\n")
time.sleep(1)
except KeyboardInterrupt:
pass
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
def fetch_data(self):
readings = {}
sensors = sorted(glob.glob("/sys/class/hwmon/hwmon*/"))
for sensor in sensors:
name = self.__readfile(sensor + "name")
readings[name] = {}
temps = sorted(glob.glob(sensor + "temp*_input"))
for temp in temps:
t = int(int(self.__readfile(temp)) / 1000)
index = re.match(r'temp([0-9]+)_input', os.path.basename(temp)).group(1)
try:
label = self.__readfile(sensor + "temp" + index + "_label")
except:
label = "Item #" + index
readings[name][label] = {
'type': 'temp',
'value': t
}
fans = sorted(glob.glob(sensor + "fan*_input"))
for fan in fans:
t = int(self.__readfile(fan))
index = re.match(r'fan([0-9]+)_input', os.path.basename(fan)).group(1)
try:
label = self.__readfile(sensor + "fan" + index + "_label")
except:
label = "Fan #" + index
readings[name][label] = {
'type': "rotary",
'value': t
}
voltage = sorted(glob.glob(sensor + "in*_input"))
for volt in voltage:
t = int(self.__readfile(volt))
index = re.match(r'in([0-9]+)_input', os.path.basename(volt)).group(1)
try:
label = self.__readfile(sensor + "in" + index + "_label")
except:
label = "Voltage #" + index
readings[name][label] = {
'type': 'voltage',
'value': t / 1000.0
}
try:
readings.setdefault("nvidia", {})
readings["nvidia"]["Core"] = {
'type': 'temp',
'value': self.nvidia_temp()
}
except:
pass
try:
readings.setdefault("nvidia", {})
readings['nvidia']['Fan'] = {
'type': 'rotary',
'value': self.nvidia_fan()
}
except:
pass
if not len(readings["nvidia"].keys()):
del(readings["nvidia"])
return readings
def save_data(self, current, new):
for s in new:
if not s in current:
current[s] = {}
for i in new[s]:
val = new[s][i]['value']
typ = new[s][i]['type']
reading = i + "_" + s
if not reading in self.delta_t:
self.delta_t[reading] = {}
if not val in self.delta_t[reading]:
self.delta_t[reading][val] = 1
else:
self.delta_t[reading][val] += 1
if not i in current[s]:
current[s][i] = [
('cur', val),
('min', val),
('max', val),
('avg', val),
('type', typ)
]
else:
current[s][i] = [
('cur', val),
('min', val if val < current[s][i][1][1] else current[s][i][1][1]),
('max', val if val > current[s][i][2][1] else current[s][i][2][1]),
('avg', self.__avg_delta(reading)),
('type', typ)
]
return current
def output(self, readings, scale = 'C'):
# sort sensors
rkeys = readings.keys()
rkeys = sorted(rkeys)
rkeys = sorted(rkeys, key=len, reverse=True)
fmt_string = u"\r{0:>25s}{1:>13s}{2:>13s}{3:>13s}{4:>15s}"
for s in rkeys:
print(fmt_string.format(u"Sensor '" + s + u"'", "Current", "Min", "Max", "Avg"))
print("\r" + ("-" * 80))
# sort labels
keys = sorted(readings[s].items(), key=functools.cmp_to_key(self.__sort_temp))
keys = enumerate(OrderedDict(keys).keys())
last_type = 'temp'
for index, i in keys:
typ = readings[s][i][4][1]
if typ == 'temp':
cur = self.degree(readings[s][i][0][1], scale)
min = self.degree(readings[s][i][1][1], scale)
max = self.degree(readings[s][i][2][1], scale)
avg = self.degree(readings[s][i][3][1], scale, digits=1)
elif typ == "rotary":
cur = self.rpm(readings[s][i][0][1])
min = self.rpm(readings[s][i][1][1])
max = self.rpm(readings[s][i][2][1])
avg = self.rpm(readings[s][i][3][1], digits=1)
elif typ == 'voltage':
cur = self.voltage(readings[s][i][0][1])
min = self.voltage(readings[s][i][1][1])
max = self.voltage(readings[s][i][2][1])
avg = self.voltage(readings[s][i][3][1])
else:
continue
if index and last_type != typ:
print('\n', end='')
print(fmt_string.format(i, cur, min, max, avg))
last_type = typ
print('\n', end='')
def degree(self, temp, scale='C', digits=0):
sign = u'\N{DEGREE SIGN}'
if scale == 'K':
temp = temp + 273.15
sign = ""
elif scale == 'F':
temp = temp * 9/5.0 + 32
else:
scale = "C"
return self.rounding(temp, digits) + ' ' + sign + scale
def rpm(self, value, digits=0):
return self.rounding(value, digits) + ' rpm'
def voltage(self, value, digits=2):
sign = '+'
if value < 0:
sign = '-'
return sign + self.rounding(value, digits) + ' V'
def rounding(self, value, digits=0):
fmt = u"{0:." + str(digits) + "f}"
return (fmt.format(round(value + 0.0, digits)))
def nvidia_temp(self):
try:
process = Popen(['nvidia-settings', '-q', 'gpucoretemp', '-t'], stdout=PIPE, stderr=PIPE)
temp, stderr = process.communicate()
if not temp:
raise Exception("None reading")
return int(temp)
except:
try:
process = Popen(['nvidia-smi', '-q', '-d', 'TEMPERATURE'], stdout=PIPE, stderr=PIPE)
temp, stderr = process.communicate()
temp = int(re.search(r': ([0-9]+) C', temp, re.M | re.I).group(1))
return temp
except:
raise Exception("Invalid reading")
def nvidia_fan(self):
process = Popen(['nvidia-settings', '-q', 'GPUCurrentFanSpeedRPM', '-t'], stdout=PIPE, stderr=PIPE)
val, stderr = process.communicate()
if not val:
raise Exception("None reading")
return int(val)
def __readfile(self, filename):
with open(filename, 'r') as f:
return f.readline().strip()
def __avg_delta(self, name):
total = 0
count = 0
for key, value in self.delta_t[name].items():
count += value
total += key * value
return total / (count + 0.0)
def __sort_temp(self, a, b):
if a[1][4][1] != b[1][4][1]:
return len(a[1][4][1]) - len(b[1][4][1])
# asc sort by names
if len(a[0]) == len(b[0]):
return -1 if a[0] < b[0] else 1 if a[0] > b[0] else 0
# desc sort by length
return len(b[0]) - len(a[0])
if __name__ == "__main__":
app = Readings()
app.run()
|
import logging
import click
from bson.json_util import dumps
from flask.cli import with_appcontext
from scout.commands.utils import builds_option
from scout.export.gene import export_genes
from scout.server.extensions import store
from .utils import build_option, json_option
LOG = logging.getLogger(__name__)
@click.command("genes", short_help="Export genes")
@build_option
@json_option
@with_appcontext
def genes(build, json):
"""Export all genes from a build"""
LOG.info("Running scout export genes")
adapter = store
result = adapter.all_genes(build=build)
gene_list = list(result)
if build == "GRCh38":
for gene_obj in gene_list:
if gene_obj["chromosome"] == "MT":
gene_obj["chromosome"] = "M"
gene_obj["chromosome"] = "".join(["chr", gene_obj["chromosome"]])
if json:
click.echo(dumps(gene_list))
return
gene_string = "{0}\t{1}\t{2}\t{3}\t{4}"
click.echo("#Chromosome\tStart\tEnd\tHgnc_id\tHgnc_symbol")
for gene_obj in gene_list:
click.echo(
gene_string.format(
gene_obj["chromosome"],
gene_obj["start"],
gene_obj["end"],
gene_obj["hgnc_id"],
gene_obj["hgnc_symbol"],
)
)
|
from setuptools import setup, find_packages, Extension
from io import open
from os import path
import pathlib
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# automatically captured required modules for install_requires in requirements.txt and as well as configure dependency links
with open(path.join(HERE, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and (
not x.startswith('#')) and (not x.startswith('-'))]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs
if 'git+' not in x]
setup(
name='photofitter',
description='Fit your photos into canvases for printing',
version='1.0.1',
packages=find_packages(), # list of all packages
install_requires=install_requires,
    python_requires='>=3.6', # requires Python 3.6 or newer
entry_points='''
[console_scripts]
photofitter=photofitter.__main__:main
''',
author="Miguel Ángel (@mianfg)",
keyword="photofitter, photos, canvas, rendering, fitting",
long_description=README,
long_description_content_type="text/markdown",
license='MIT',
url='https://go.mianfg.me/photofitter',
download_url='https://go.mianfg.me/photofitter/download',
dependency_links=dependency_links,
author_email='hello@mianfg.me',
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
]
)
|
""" The Flask Server for the PWA """
import sys
import os
import requests
import cv2
import socket
import numpy as np
import logging
from sys import stdout
from utils import *
from camera import Camera
from flask import Flask, session, render_template, request, url_for, flash, redirect, jsonify, Response
from flask_socketio import SocketIO, emit
sys.path.append("../")
sys.path.append("./")
from Instance_Segementation import live_seg
import time
from ESPNet import test_seg
from Priority import priority
# Initialize Flask
app = Flask(__name__)
app.config["SECRET_KEY"] = 'ks2334'
socketio = SocketIO(app)
# Load ESPNet
modelESPNet, device = test_seg.load_model()
# Load MaskRCNN
modelInstance = live_seg.load_instance_model()
# Load Depth
depth = None
globals()["depth"] = __import__("Depth_Model.depth")
globals()["objectmotion"] = __import__("Depth_Model.objectmotion")
# Load Depth Model
sess, inference_model = depth.depth.load_depth_model(
"../Trained-Models/depth-model/model-199160")
@app.route("/")
def index():
return render_template('index.html')
@socketio.on('connect', namespace='/videoStream')
def videoStream_connect():
print("Client Connected")
# Socket code for connecting to the client for video streaming from client-server
import time
@socketio.on('inputImage', namespace='/videoStream')
def videoStream_message(input):
start = time.time()
print("Received Frame")
input = input.split(",")[1]
# Decoding the base64 image
input_img = base64_to_pil_image(input).convert('RGB')
cvImg = np.array(input_img)
# Convert RGB to BGR
cvImg = cvImg[:, :, ::-1].copy()
cv2.imshow("Received Frame", cvImg)
# Semantic Segmentation
segmented_img = test_seg.evaluate(modelESPNet, cvImg, device)
# Instance Segmentation
instance_img, instance_img_colored = live_seg.run_instance_model(
modelInstance, cvImg)
cv2.imshow("Instance Mask", instance_img_colored)
# Depth and Object Motion
color_map = depth.depth.run_inference(
sess=sess, model=inference_model, frame=cvImg)
color_map = (color_map*255).astype(np.uint8)
color_map = cv2.cvtColor(color_map, cv2.COLOR_RGB2BGR)
cv2.imshow("Depth", color_map)
closest_message = priority.get_the_closest(
color_map, instance_img, instance_img_colored)
path_message = priority.get_the_path_message(segmented_img)
final_message = ""
print("priorityOutput", closest_message + path_message)
emit("priorityOutput", closest_message + path_message)
print("Inference time:", time.time() - start)
if cv2.waitKey(1) == 27:
return
if __name__ == '__main__':
host = os.system("hostname -I")
socketio.run(app, debug=True, host="0.0.0.0", port=5000, keyfile="./key.pem", certfile="./cert.pem", use_reloader=False)
    # Code for older library versions, if you get an error for the keyfile and certfile syntax
# socketio.run(app, debug=True, host="192.168.43.165", port=5000,
# ssl_context=('./cert.pem', './key.pem'), use_reloader=False)
|
import calendar
import copy
import datetime
import time
from django.conf import settings
from django.db.models.signals import post_save
from django.contrib import auth
from django.contrib.auth.models import User
COOKIE_USER_DATA_KEY = '_UD'
COOKIE_USER_DATA_TS_KEY = '_UDT'
# Defaults to 4 hours in seconds
COOKIE_USER_REFRESH_TIME = getattr(settings, 'COOKIE_USER_REFRESH_TIME',
14400)
auth_get_user = copy.deepcopy(auth.get_user)
def datetime_to_timestamp(dt):
ts = calendar.timegm(dt.timetuple()) + (dt.microsecond / 1000000.0)
return long(ts * 1000000.0)
def timestamp_to_datetime(ts):
return datetime.datetime.utcfromtimestamp(ts / 1000000.0)
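def _demo_timestamp_roundtrip():
    # Hedged sanity check (Python 2 is assumed, matching the use of long()
    # above): the two helpers should round-trip a UTC datetime; a .5 second
    # fraction keeps the float arithmetic exact.
    dt = datetime.datetime(2014, 5, 1, 12, 30, 45, 500000)
    assert timestamp_to_datetime(datetime_to_timestamp(dt)) == dt
    return dt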
def cookie_set_user(request, force=False):
user = request.user
data = [
user.username,
user.first_name,
user.last_name,
user.email,
user.password,
user.is_staff,
user.is_active,
user.is_superuser,
datetime_to_timestamp(user.last_login),
datetime_to_timestamp(user.date_joined),
]
if force or data != request.session.get(COOKIE_USER_DATA_KEY):
request.session[COOKIE_USER_DATA_KEY] = data
request.session[COOKIE_USER_DATA_TS_KEY] = time.time()
def cookie_get_user(request):
# If it's been more than COOKIE_USER_REFRESH_TIME since the last time that
# we set the user data stuff, then pull the user from the backend rather
# than from the cookie.
if COOKIE_USER_DATA_TS_KEY in request.session:
diff = time.time() - request.session[COOKIE_USER_DATA_TS_KEY]
if diff > COOKIE_USER_REFRESH_TIME:
request._force_update_user = True
return auth_get_user(request)
user_id = request.session.get(auth.SESSION_KEY)
if not user_id:
return auth_get_user(request)
data = request.session.get(COOKIE_USER_DATA_KEY)
if not data:
return auth_get_user(request)
user = User()
try:
user.id = user_id
user.username = data[0]
user.first_name = data[1]
user.last_name = data[2]
user.email = data[3]
user.password = data[4]
user.is_staff = data[5]
user.is_active = data[6]
user.is_superuser = data[7]
user.last_login = timestamp_to_datetime(data[8])
user.date_joined = timestamp_to_datetime(data[9])
except (IndexError, TypeError):
return auth_get_user(request)
return user
class SessionUserMiddleware(object):
def __init__(self):
"""
It makes me sad that ``auth.get_user`` can't be customized, but instead we
have to monkeypatch it.
"""
auth.get_user = cookie_get_user
def process_request(self, request):
if getattr(request, 'user', None) is None:
return None
# If the user isn't authenticated, then there's no way that the
# "current user" could see any user attribute changes, since there is
# no "current user"
if not request.user.is_authenticated():
# Set a property we can check later to see if the user logged in
request._force_update_user = True
return None
# We create a function that's has a closure on the current request
def post_user_save(sender, instance, **kwargs):
# If the saved user is different from the current user, bail early
if instance.id != request.user.id:
return None
# Otherwise, replace the request's user with the newly-saved user
request.user = instance
# And then we actually save a reference to it on the request
request.post_user_save = post_user_save
# Connect the signal
post_save.connect(post_user_save, sender=User)
def process_response(self, request, response):
if getattr(request, 'user', None) is None:
return response
# If there's a post_user_save function on the request, disconnect it
post_user_save = getattr(request, 'post_user_save', None)
if post_user_save:
post_save.disconnect(post_user_save, sender=User)
# If the user is now logged in, make sure the cookie matches the data
# that's stored on the request's user object
if request.user.is_authenticated():
# If we've explicitly set the force flag, make it True
force = getattr(request, '_force_update_user', False)
cookie_set_user(request, force=force)
return response
|
#!/usr/bin/python
# poll the AWS Trusted Advisor for resource limits and posts an SNS message to a topic with AZ and resource information
# Import the SDK
import boto3
import uuid
import json
import ec2Limits
import rdsLimits
import cloudformationLimits
import ConfigParser
# Instantiate a new client for AWS Support API and SNS.
config = ConfigParser.ConfigParser()
config_dict = {}
regions = []
config.read("default.config")
options = config.options('Settings')
for option in options:
config_dict[option] = config.get('Settings', option)
sns_arn = config_dict['arn']
regions = config_dict['regions'].split(',')
support_client = boto3.client('support', region_name='us-east-1')
sns_client = boto3.client('sns', region_name='us-west-2')
# SNS ARN. Replace this with the ARN of the topic that you want to publish to.
# sns_arn = "arn:aws:sns:us-west-2:141820633316:AWS-Limits"
# Configure the regions that you want to poll in the list below.
# regions = ['us-east-1', 'us-west-1', 'us-west-2', 'ap-northeast-1']
def limitPoll():
# call trusted advisor for the limit checks
response = support_client.describe_trusted_advisor_check_result(
checkId='eW7HH0l7J9',
language='en'
)
print "Contacting Trusted Advisor..."
return response;
def publishSNS(warn_list, sns_client, sns_arn):
# if warn_list has data, publish a message to the SNS_ARN
if not warn_list:
print "All systems green!"
else:
print "Publishing message to SNS topic..."
sns_client.publish(
TargetArn=sns_arn,
Message=makeMessage(warn_list)
)
return;
def makeMessage(warn_list):
#make the message that we send to the SNS topic
sns_message = 'You have limits approaching their upper threshold. Please take action accordingly. \n'
sns_message += '\n Region - Resource:'
sns_message += '\n ------------------------'
for rs in warn_list:
sns_message += '\n' + rs
sns_message += '\n'
sns_message += '\n EC2 Usage:'
sns_message += '\n ------------------------'
for rgn in regions:
# if (float(ec2Limits.get_actual(rgn)) / float(ec2Limits.get_limit(rgn)) >= 0.8):
sns_message += "\n Region: " + rgn
sns_message += "\n Instance Limit: "
sns_message += ec2Limits.get_limit(rgn)
sns_message += "\n Actual Usage: "
sns_message += ec2Limits.get_actual(rgn)
sns_message += "\n"
# else:
# sns_message += "\n Region: " + rgn
# sns_message += "\n All Green \n"
sns_message += '\n'
sns_message += '\n RDS Usage:'
sns_message += '\n ------------------------'
for rgn in regions:
rdsLimit, rdsActual = rdsLimits.get_limit(rgn)
sns_message += "\n Region: " + rgn
sns_message += "\n Instance Limit: "
sns_message += str(rdsLimit)
sns_message += "\n Actual Usage: "
sns_message += str(rdsActual)
sns_message += "\n"
sns_message += '\n'
sns_message += '\n Cloudformation Usage:'
sns_message += '\n -------------------------'
for rgn in regions:
# if (int(cloudformationLimits.get_actual(rgn)) / int(cloudformationLimits.get_limit(rgn)) >= 0.8):
sns_message += "\n Region: " + rgn
sns_message += "\n Stack Limit: "
sns_message += cloudformationLimits.get_limit(rgn)
sns_message += "\n Actual Stacks: "
sns_message += cloudformationLimits.get_actual(rgn)
sns_message += "\n"
# else:
# sns_message += "\n Region: " + rgn
# sns_message += "\n All Green \n"
print sns_message
return sns_message;
def lambda_handler(event, context):
response = limitPoll();
# parse the json and find flagged resources that are in warning mode
flag_list = response['result']['flaggedResources']
warn_list=[]
for fr in flag_list:
if fr['metadata'][5] != "Green":
warn_list.append(fr['metadata'][0]+' - '+fr['metadata'][2])
print json.dumps(fr, indent=4, separators=(',', ': '))
publishSNS(warn_list, sns_client, sns_arn);
return;
#test mode
#warn_list = []
#makeMessage(warn_list)
|
from colorama import Fore, init
init(autoreset=True)
def leiaint(numero_inteiro):
    """
    :param numero_inteiro: integer value typed by the user
    :return: int(param) else ERROR
    """
    while True:
        try:
            return int(numero_inteiro)
        except Exception:
            print(Fore.RED + 'ERROR! Enter a valid integer!')
            numero_inteiro = input('Enter an integer again: ')
def leiafloat(numero_real):
    """
    :param numero_real: real (floating-point) value typed by the user
    :return: float(param) else ERROR
    """
    while True:
        try:
            return float(numero_real)
        except Exception:
            print(Fore.RED + 'ERROR! Enter a valid real number!')
            numero_real = input('Enter a real number again: ')
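# Example usage (a sketch; the prompt text is illustrative, not part of the module):
# n = leiaint(input('Enter an integer: '))
# x = leiafloat(input('Enter a real number: '))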
|
import numpy as np
import math
import sys
#
# this run control defines 8 parameters, with total number of expected instances
# of 1 * 1 * 1 * 1 = 1
#
# fidelity levels, 1 * 1 * 1 = 1
f_rxtr = 0
f_fc = 2
f_loc = 0
# reactor params, 1 = 1
n_rxtr = 1
r_t_f = 1.0
r_th_pu = 1.0
# supplier params, 3 = 3
r_s_th = 0.08 # 2 plants per 25 reactors
r_s_mox_uox = 0.4 # ratio used but uox supports are thrown away
r_s_mox = 0.2 # 1 plant per 5 reactors
r_s_thox = 0.2 # 1 plant per 5 reactors
d_th = [0, 1, 0] # th mox assem
d_f_mox = [0, 0, 1, 0] # f mox assem
d_f_thox = [0, 0, 0, 1] # f thox assem
|
/home/runner/.cache/pip/pool/f6/94/02/3149708d154718ac19c6fc1de1b5b9a7ef6fed55dc7e510b1089878358
|
import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self,input_channels, output_channels):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 3, stride = 1, padding=1),
nn.BatchNorm2d(output_channels),
nn.PReLU(),
nn.Conv2d(output_channels, output_channels, 3, stride = 1, padding =1),
nn.BatchNorm2d(output_channels),
nn.PReLU()
)
def forward(self, x):
x = self.block(x) + x
return x
class Generator(nn.Module):
def __init__(self,in_channels):
super().__init__()
self.l1 = nn.Sequential(
nn.Conv2d(in_channels,64, 9, stride =1, padding = 4),
nn.PReLU()
)
self.r1 = ResidualBlock(64,64)
self.r2 = ResidualBlock(64,64)
self.r3 = ResidualBlock(64,64)
self.r4 = ResidualBlock(64,64)
self.r5 = ResidualBlock(64,64)
self.l2 = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding =1),
nn.BatchNorm2d(64)
)
self.l3 = nn.Sequential(
nn.Conv2d(64,256, 3, stride =1, padding = 1),
nn.PixelShuffle(2),
nn.PReLU(),
nn.Conv2d(64,256, 3, stride =1, padding = 1),
nn.PixelShuffle(2),
nn.PReLU(),
nn.Conv2d(64, in_channels, 9, stride =1, padding =4)
)
def forward(self, x):
x = self.l1(x)
x = self.r1(x)
x = self.r2(x)
x = self.r3(x)
x = self.r4(x)
x = self.r5(x)
x = x + self.l2(x)
x = self.l3(x)
return x
class Discriminator(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(in_channels, 64, 3, stride =1, padding = 1),
nn.LeakyReLU(0.2),
nn.Conv2d(64,64,3,stride = 2, padding =1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2),
nn.Conv2d(64,128,3,stride = 1, padding =1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128,128,3,stride = 2, padding =1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128,256,3,stride = 1, padding =1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256,256,3,stride = 2, padding =1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256,512,3,stride = 1, padding =1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.Conv2d(512,512,3,stride = 2, padding =1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(512,1024,1),
nn.LeakyReLU(0.2),
nn.Conv2d(1024, 1, 1),
nn.Sigmoid()
)
def forward(self,x):
return self.model(x)
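# Quick shape check (a sketch, not part of the original module): push a dummy
# low-resolution batch through the generator and score the result with the
# discriminator. The 4x upscaling comes from the two PixelShuffle(2) stages in l3.
if __name__ == "__main__":
    lr = torch.randn(1, 3, 24, 24)   # fake low-resolution RGB batch
    g, d = Generator(3), Discriminator(3)
    sr = g(lr)                        # expected shape: (1, 3, 96, 96)
    score = d(sr)                     # expected shape: (1, 1, 1, 1)
    print(sr.shape, score.shape)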
|
from django.db import models
from apps.base.models import Timestampable
class Transaction(Timestampable):
address = models.CharField(max_length=255)
vhost = models.CharField(max_length=255, default='default')
agent_ping_time = models.DateTimeField(blank=True)
time_avg = models.FloatField(default=999, blank=True)
class Meta:
db_table = 'transactions'
def __str__(self):
return self.address
|
class Log:
@staticmethod
    def log(message, app, type='INFO'):
        # Route to the logger method matching ``type`` (e.g. INFO, DEBUG); fall back to info.
        getattr(app.logger, type.lower(), app.logger.info)(message)
        return 'LOG:' + message
@staticmethod
def info(message, app):
_message = ' >> ' + message
app.logger.info(_message)
return _message
@staticmethod
def debug(message, app):
_message = ' >> ' + message
app.logger.debug(_message)
return _message
|
# -*- coding: utf-8 -*-
"""Plots and EDA_v1
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Y0k-MwXaSWQIZ64sOD2wDa14P3xRNZTe
"""
!pip install yfinance
import pandas_datareader as web
from pandas_datareader import data as pdr
import yfinance as yfin
import pandas as pd
yfin.pdr_override()
BTC = pdr.get_data_yahoo("BTC-USD", start="2014-01-01", end="2021-10-31")
ETH = pdr.get_data_yahoo("ETH-USD", start="2014-01-01", end="2021-10-31")
DOGE = pdr.get_data_yahoo("DOGE-USD", start="2014-01-01", end="2021-10-31")
SP5 = pdr.get_data_yahoo("^GSPC", start="2014-01-01", end="2021-10-31")
BTC.reset_index(level=0, inplace=True)
ETH.reset_index(level=0, inplace=True)
DOGE.reset_index(level=0, inplace=True)
SP5.reset_index(level=0, inplace=True)
print(BTC.head(1))
print(ETH.head(1))
print(DOGE.head(1))
print(SP5.head(1))
Merged = SP5.merge(ETH, how = 'outer', on = "Date")
Merged = Merged.merge(DOGE, how = 'outer', on = "Date")
Merged = Merged.merge(BTC, how = 'outer', on = "Date")
Merged.head(10)
SP5.describe()
Merged.columns = ['Date', 'Open_SP5', 'High_SP5', 'Low_SP5', 'Close_SP5', 'Adj_Close_SP5', 'Volume_SP5', 'Open_ETH', 'High_ETH', 'Low_ETH', 'Close_ETH', 'Adj_Close_ETH', 'Volume_ETH', 'Open_DOGE', 'High_DOGE', 'Low_DOGE', 'Close_DOGE', 'Adj_Close_DOGE', 'Volume_DOGE', 'Open_BTC', 'High_BTC', 'Low_BTC', 'Close_BTC', 'Adj_Close_BTC', 'Volume_BTC']
Merged.head(10)
#Testing Append Merge
BTC2=BTC.copy()
ETH2=ETH.copy()
DOGE2=DOGE.copy()
SP52=SP5.copy()
BTC2['asset']='BTC'
ETH2['asset']='ETH'
DOGE2['asset']='DOGE'
SP52['asset']='SP5'
Merged2 = SP52.append(ETH2, ignore_index=True)
Merged2 = Merged2.append(DOGE2, ignore_index=True)
Merged2=Merged2.append(BTC2, ignore_index=True)
Merged2.head()
Merged2['delta']=(Merged2['Close']-Merged2['Open'])/Merged2['Open']
Merged2
piv_m2= Merged2.pivot(index='Date', columns='asset',values='delta')
piv_m2
piv_m2.corr()
#Making a data frame of the closing prices so we can compute percent changes. c1 is the change dataframe.
Merged.columns
c1_col=['Date','Adj_Close_SP5','Adj_Close_ETH','Adj_Close_DOGE','Adj_Close_BTC']
c1=Merged[c1_col]
c1['Date']=pd.to_datetime(c1.Date)
#printing c1 to see if it is converting to date.
c1.sort_values('Date')
#Setting the index to date.
c1 = c1.set_index(Merged['Date'])
c1
c1.plot()
#made new percent change df to limit constant recalculation.
pc1= c1.pct_change()
pc1=pc1*100
pc1.plot()
#calculating correlation of the percent change columns.
pc1.corr()
#Creating multiple histograms. Can see from the plots below that eth and BTC
#have significantly more volatility.
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(8,6))
plt.hist(pc1['Adj_Close_SP5'], bins=30, alpha=.5, label='Adj_Close_SP5');
plt.hist(pc1['Adj_Close_BTC'], bins=30, alpha=.5, label='Adj_Close_BTC');
plt.hist(pc1['Adj_Close_ETH'], bins=30, alpha=.5, label='Adj_Close_ETH');
plt.legend()
#plt.hist(pc1['Adj Close_DOGE'], bins=30, alpha=.5, label='Adj Close_DOGE');
#took out DOGE because it distorted the plot.
#asking Max if a power transformation would be appropriate here.
#daily variance is much higher for ETH, BTC and DOGE: approx 6-10 percent.
pc1.describe()
pc1.shape
"""Section below is in progress to subset the overall data set when the SP500 has a significant negative movement. """
#Subsetting the SP5 data for when there is a significant drop. This helps evaluate whether ETH and BTC are really
#counter-cyclical. A move 3 standard deviations below the mean would be considered a very large drop.
SP_sdev=-3*pc1['Adj_Close_SP5'].std()
pc1[pc1.Adj_Close_SP5 < SP_sdev]
pc2=pc1
pc2['num']=range(0,pc2.shape[0])
neg_days= pc2.num[pc2.Adj_Close_SP5<SP_sdev]
def twendays(x):
l1=list(range(x,x+20))
return(l1)
neg_days_value =neg_days.apply(twendays)
import itertools
neg_days_index=list(itertools.chain.from_iterable(neg_days_value))
#Subsetting for when the sp 500 is 3 standard deviations negative.
pc_s =pc2.iloc[neg_days_index]
#Dropping the number column for correlation. Correlation is much greater when
#the SP 500 has a significant drop than over the full period.
pc_s.drop('num',axis=1,inplace=True)
pc_s.corr()
plt.figure(figsize=(8,6))
plt.hist(pc_s['Adj_Close_SP5'], bins=30, alpha=.5, label='Adj_Close_SP5');
plt.hist(pc_s['Adj_Close_BTC'], bins=30, alpha=.5, label='Adj_Close_BTC');
plt.hist(pc_s['Adj_Close_ETH'], bins=30, alpha=.5, label='Adj_Close_ETH');
plt.legend()
#looks like more negative downside present in the tails of eth and btc distributions.
"""General Time series predictions
Rolling Averages over x days
Using days as a hyper parameter and finding how long it takes the markets to react to a large change
Taking recency into account as well, How does correlation look within the last year vs last 5 year
Outside parameters
"""
|
#!/usr/bin/python
# coding=utf-8
import numpy as np
import pytest
import socket
from http.server import HTTPServer
from threading import Thread
from app import MyHandler
from service import MNIST
@pytest.fixture(scope="session")
def mnist_class():
return MNIST()
@pytest.fixture(scope='session')
def port_number():
return 5050
@pytest.fixture(scope="session")
def json_out(mnist_class):
return mnist_class.format_payload(index=0, y_pred=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
@pytest.fixture(scope="session")
def test_image():
return np.zeros(shape=(28, 28))
@pytest.fixture(scope='session')
def mock_port():
"""Find a free port to run mock server on."""
test_socket = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
test_socket.bind(('localhost', 0))
_, port = test_socket.getsockname()
test_socket.close()
return port
@pytest.fixture(scope='session')
def mock_thread(mock_port):
"""Set up mock server and run on a new thread.
:param get_free_port: port to start server on
:return: None
"""
mock_server = HTTPServer(('', mock_port), MyHandler)
mock_thread = Thread(target=mock_server.serve_forever)
    mock_thread.daemon = True
mock_thread.start()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import logging
import re
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
import sqlalchemy.sql
import whois_alt
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.dialects.postgresql import insert
from ..db.model import async_engine, all_domains, dangerous_domains, registrars
logger = logging.getLogger(__name__)
# Executor is needed to run blocking logging operations in a separate thread
executor = ThreadPoolExecutor(max_workers=1)
REG_RU_NAMES = ["registrar of domain names reg.ru llc",
"registrar of domain names reg.ru, llc",
"regru-ru"]
RUCENTER_NAMES = ["regional network information center, jsc dba ru-center",
"ru-center-ru", "ru-center-rf"]
def prepare_url(url: str):
"""Drop 'http://' or 'https://' in the beginning of a url so that it
could be passed to whois API"""
try:
url_match = re.match(r"https?://(.+)", url)
main_url_part = url_match.group(1)
return main_url_part
except AttributeError:
executor.submit(
logger.error, f"Regexp did not find a match for url: {url}"
)
return url
def get_whois_record(url: str) -> dict:
"""Get whois information on url.
:return a dict containing four parameters of a retrieved whois record:
'domain_name', 'owner_name', 'registrar_name', 'abuse_emails'
"""
prepared_url = prepare_url(url)
try:
whois_record = whois_alt.get_whois(prepared_url)
registrar_name = whois_record["registrar"][0] if (
"registrar" in whois_record and whois_record["registrar"]) else ""
abuse_emails = ", ".join(whois_record["emails"]) if (
"emails" in whois_record and whois_record["emails"]) else ""
if "contacts" in whois_record and whois_record["contacts"]:
owner_name = whois_record["contacts"]["registrant"]
owner_name = owner_name["organization"] if (
owner_name and "organization" in owner_name) else ""
else:
owner_name = ""
return {"domain_name": url, "owner_name": owner_name,
"registrar_name": registrar_name, "abuse_emails": abuse_emails}
except whois_alt.shared.WhoisException as e:
executor.submit(
logger.info,
f"Have not found whois record for {prepared_url}. "
f"Error message: {e}")
return {"domain_name": url, "owner_name": "", "registrar_name": "",
"abuse_emails": ""}
async def find_domain_id(domain_name: str) -> Optional[int]:
select_stmt = (
select(all_domains.c.domain_id).
where(all_domains.c.url == domain_name)
)
try:
async with async_engine.begin() as conn:
result = await conn.execute(select_stmt)
domain_id = result.fetchone()
result.close()
if domain_id:
domain_id = domain_id[0]
return domain_id
except Exception as e:
executor.submit(logger.error, f"Unexpected error occurred: {e}")
return sqlalchemy.sql.null()
async def find_registrar_id(registrar_name: str) -> Optional[int]:
select_stmt = select(registrars.c.registrar_id). \
where(registrars.c.registrar_name == registrar_name)
try:
async with async_engine.begin() as conn:
result = await conn.execute(select_stmt)
registrar_id = result.fetchone()
result.close()
if registrar_id:
registrar_id = registrar_id[0]
return registrar_id
except Exception as e:
logger.error(f"Unexpected error occurred: {e}")
async def save_registrar_info(registrar_name: str, abuse_emails: str):
insert_stmt = (
insert(registrars).
values(registrar_name=registrar_name, abuse_emails=abuse_emails)
)
try:
async with async_engine.begin() as conn:
await conn.execute(insert_stmt)
except SQLAlchemyError as e:
logger.error(f"SQL error occurred: {e}")
async def save_domain_info(domain_id: int, owner_name: str,
registrar_id: Optional[int]):
now = datetime.datetime.utcnow()
insert_stmt = (
insert(dangerous_domains).
values(domain_id=domain_id, owner_name=owner_name,
registrar_id=registrar_id, last_updated=now)
)
update_stmt = insert_stmt.on_conflict_do_update(
constraint="pk__dangerous_domains",
set_=dict(owner_name=owner_name, registrar_id=registrar_id,
last_updated=now)
)
async with async_engine.begin() as conn:
await conn.execute(update_stmt)
async def save_whois_record(whois_record: dict):
domain_id = await find_domain_id(whois_record["domain_name"])
registrar_name = whois_record["registrar_name"].lower().strip()
abuse_emails = whois_record["abuse_emails"]
owner_name = whois_record["owner_name"]
registrar_id = sqlalchemy.sql.null()
if registrar_name:
if registrar_name in REG_RU_NAMES:
registrar_name = "regru-ru"
if registrar_name in RUCENTER_NAMES:
registrar_name = "rucenter-ru"
registrar_id = await find_registrar_id(registrar_name)
if not registrar_id:
await save_registrar_info(registrar_name, abuse_emails)
registrar_id = await find_registrar_id(registrar_name)
await save_domain_info(domain_id, owner_name, registrar_id)
logger.info(
f"Saved whois record for {whois_record['domain_name']} to database"
)
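# Example driver (a sketch; the asyncio entry point and URL below are illustrative only):
# import asyncio
# asyncio.run(save_whois_record(get_whois_record("https://example.com")))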
|
# -*- coding: utf-8 -*-
'''
Stochastic bandits: introduction, algorithms and experiments
Bachelor's thesis (TFG) in Computer Science
Section 7.1
Figures 3, 4 and 5
Author: Marcos Herrero Agustín
'''
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def regretEFFavsFirst(n,m,arms,gaps):
rwds = 2*[0]
for i in range(m):
rwds[0] += arms[0].rvs()
rwds[1] += arms[1].rvs()
bestarm = min([i for i in range(2) if rwds[i] == max(rwds)])
return m*sum(gaps)+(n-2*m)*gaps[bestarm]
def regretEFFavsSecond(n,m,arms,gaps):
rwds = 2*[0]
for i in range(m):
rwds[0] += arms[0].rvs()
rwds[1] += arms[1].rvs()
bestarm = max([i for i in range(2) if rwds[i] == max(rwds)])
return m*sum(gaps)+(n-2*m)*gaps[bestarm]
n = 1000
nsamples = 100
ms = np.array(range(0,201,2))
numms = len(ms)
arms = 2*[None]
arms[0] = stats.norm(0,1)
arms[1] = stats.norm(-0.1,1)
gaps = 2*[0]
gaps[0] = 0
gaps[1] = 0.1
regretFavsFirst = numms*[0]
regretFavsSecond = numms*[0]
for i in range(numms):
for k in range(nsamples):
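        # Incremental (streaming) mean: after k+1 samples the stored value equals the
        # average of the simulated regrets without keeping every sample in memory.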
regretFavsFirst[i] = k/(k+1)*regretFavsFirst[i] + 1/(k+1)*regretEFFavsFirst(n,ms[i],arms,gaps)
regretFavsSecond[i] = k/(k+1)*regretFavsSecond[i] + 1/(k+1)*regretEFFavsSecond(n,ms[i],arms,gaps)
colors = ['tab:blue','tab:red']
legend = ['Favours arm 1','Favours arm 2']
'''
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(legend, loc = 'center left', bbox_to_anchor=(1, 0.5))
fig.savefig('EFDesempate.pdf',format='pdf')
plt.show()
'''
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(ms,regretFavsFirst,color = colors[0])
ax.plot(ms,regretFavsSecond, color = colors[1])
plt.xlabel('m')
plt.ylabel('Expected regret')
plt.legend(legend,loc = 'upper right')
fig.savefig('EFDesempate.pdf',format='pdf')
plt.show()
|
# Generated by Django 2.0.5 on 2018-05-06 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patients', '0004_auto_20180506_1612'),
]
operations = [
migrations.AlterField(
model_name='patient',
name='profile_image',
field=models.ImageField(blank=True, height_field='image_height', null=True, upload_to='patients/', width_field='image_width'),
),
]
|
import os
import logging
from abc import ABCMeta, abstractmethod
from pyfasta import Fasta, FastaRecord
from pyfasta.fasta import FastaNotFound
from .helpers import fasta_filepath
from .helpers import supported_build
from .helpers import parse_build
from .md5 import calculate_md5
from .. import SUPPORTED_BUILDS
from .. import SOURCE_DATA_MD5
from .. import MD5_CHECK_FAIL
from .. import errors
logger = logging.getLogger('veppy')
class FastaFile(object):
__metaclass__ = ABCMeta
# -- Util methods
@classmethod
def load_build(klass, build):
try:
translated_build = supported_build(build)
if not klass.BUILD_CACHE.get(translated_build):
klass.BUILD_CACHE[translated_build] = \
klass(build, klass.filepath_for_build(translated_build))
except (FastaNotFound, errors.FastaFileException), e:
logger.fatal(
'Error loading FastA file for build %s: %s' % (build, e)
)
raise
@classmethod
def for_build(klass, build):
_build = supported_build(build)
if not _build:
raise errors.VeppyFileException(
'Unsupported build \'%s\'.' % build)
klass.load_build(_build)
return klass.BUILD_CACHE[_build]
@classmethod
@abstractmethod
def filepath_for_build(klass, build):
pass
def index(self, reindex=False):
return self.get_sequence('1', 1, 1)
# ---
def __init__(self, build, filepath, index=False):
self.build = build
self.filepath = filepath
# check for FastA file...
if not os.path.exists(self.filepath):
raise errors.FastaFileException(
'FastA file \'%s\' not found!' % self.filepath
)
# check for FastA file... index files FIRST (!!) because Fasta(...)
# __init__ checks for indexes and greedily creates
# them if they don't exist...
if not os.path.exists(self.filepath + '.flat') or \
not os.path.exists(self.filepath + '.gdx'):
logger.warn(
'FASTA indexes not found. Indexing...\'%s\'. '
'This may take 10 minutes or more.' % self.filepath
)
self._fasta = Fasta(self.filepath, record_class=FastaRecord,
flatten_inplace=True)
# Ensure the MD5s are correct
# TODO: This is slow (3s per file). Move somewhere else.
files = [self.filepath,
self.filepath + '.flat',
self.filepath + '.gdx']
for f in files:
calculated = calculate_md5(f)
expected = SOURCE_DATA_MD5.get(os.path.basename(f))
if calculated != expected:
message = \
'FASTA file {} did not pass MD5 check. ' \
'Expected: {} Found: {}' \
.format(f, expected, calculated)
if MD5_CHECK_FAIL:
raise errors.FastaFileException(message)
else:
logger.warn(message)
else:
logger.info('Fasta file {} passed MD5 check.'
.format(f))
# TODO: eventually, this should be dynamic and file-specific
self.chromosomes = \
map(lambda x: str(x), range(1, 23)) + ['MT', 'X', 'Y']
def get(self, chromosome, start, stop):
fasta_chromosome = self.get_chromosome(chromosome)
if not fasta_chromosome:
logger.warning('Invalid chromosome requested in FASTA reader: {}'
.format(chromosome))
return None
# protect pyfasta from itself...if start > stop, PyFasta will
# happily fetch the wrapping range of [stop:] + [:start]
# and more than likely OOM you into oblivion in the process.
if start > stop:
logger.debug(
'Invalid range found. chr: %s, range: [%s, %s].' %
(chromosome, start, stop)
)
return ''
return fasta_chromosome[start - 1:stop - 1 + 1]
# backwards compat w/ Sequence API
def get_sequence(self, chromosome, start, stop):
return self.get(chromosome, start, stop)
def get_slice(self, chromosome, _slice):
return self.get(chromosome, _slice.start, _slice.stop - 1)
@abstractmethod
def get_chromosome(self, chromosome):
pass
@abstractmethod
def get_chromosome_accession(self, chromosome):
pass
@staticmethod
def supported_chromosome_accession(chromosome_accession):
return chromosome_accession.upper() in \
REFSEQ_CHROMOSOME_ACCESSIONS
@staticmethod
def get_build_from_chromosome_accession(chromosome_accession):
if FastaFile.supported_chromosome_accession(chromosome_accession):
return REFSEQ_CHROMOSOME_ACCESSIONS[
chromosome_accession.upper()]['build']
return None
@staticmethod
def get_chromosome_from_chromosome_accession(chromosome_accession):
if FastaFile.supported_chromosome_accession(chromosome_accession):
return REFSEQ_CHROMOSOME_ACCESSIONS[
chromosome_accession.upper()]['chromosome']
return None
# Genbank sequence Fasta file
class GenbankFastaFile(FastaFile):
BUILD_CACHE = {k: None for k in SUPPORTED_BUILDS}
FASTA_CHROMOSOME_ACCESSIONS = {
'GRCH37': {
'CM000663.1': 'NC_000001.10',
'CM000664.1': 'NC_000002.11',
'CM000665.1': 'NC_000003.11',
'CM000666.1': 'NC_000004.11',
'CM000667.1': 'NC_000005.9',
'CM000668.1': 'NC_000006.11',
'CM000669.1': 'NC_000007.13',
'CM000670.1': 'NC_000008.10',
'CM000671.1': 'NC_000009.11',
'CM000672.1': 'NC_000010.10',
'CM000673.1': 'NC_000011.9',
'CM000674.1': 'NC_000012.11',
'CM000675.1': 'NC_000013.10',
'CM000676.1': 'NC_000014.8',
'CM000677.1': 'NC_000015.9',
'CM000678.1': 'NC_000016.9',
'CM000679.1': 'NC_000017.10',
'CM000680.1': 'NC_000018.9',
'CM000681.1': 'NC_000019.9',
'CM000682.1': 'NC_000020.10',
'CM000683.1': 'NC_000021.8',
'CM000684.1': 'NC_000022.10',
'CM000685.1': 'NC_000023.10',
'CM000686.1': 'NC_000024.9'
}
}
@classmethod
def filepath_for_build(klass, build):
(release, version) = parse_build(build)
logger.info(
'Loading build %s (release: %s, version: %s).' %
(build, release, version or 'None')
)
return fasta_filepath(build, 'genbank')
# chromosomes entries in *.fa files look like:
# 'gi|224384768|gb|CM000663.1| Homo sapiens chromosome 1, GRC primary reference assembly' # noqa
# given an input chromosome 'chrN', strip leading 'ch(r)' and lookup based
# on keys that match 'N '
def _get_fasta_key_for_chromosome(self, chromosome):
chr_clean = chromosome.upper().lstrip('CHR')
for k in self._fasta.keys():
if k.split(', ')[0].upper().endswith(' %s' % chr_clean):
return k
return None
def get_chromosome(self, chromosome):
k = self._get_fasta_key_for_chromosome(chromosome)
return self._fasta.get(k) if k else None
def get_chromosome_accessions(self):
        return self.FASTA_CHROMOSOME_ACCESSIONS[self.build.upper()]
def get_chromosome_accession(self, chromosome):
fasta_key = self._get_fasta_key_for_chromosome(chromosome)
if fasta_key:
chr_accession = fasta_key.split('|')[3]
return self.FASTA_CHROMOSOME_ACCESSIONS[self.build.upper()] \
.get(chr_accession)
return None
REFSEQ_CHROMOSOME_ACCESSIONS = {
'NC_000001.10': {
'chromosome': '1',
'build': 'GRCh37'
},
'NC_000002.11': {
'chromosome': '2',
'build': 'GRCh37'
},
'NC_000003.11': {
'chromosome': '3',
'build': 'GRCh37'
},
'NC_000004.11': {
'chromosome': '4',
'build': 'GRCh37'
},
'NC_000005.9': {
'chromosome': '5',
'build': 'GRCh37'
},
'NC_000006.11': {
'chromosome': '6',
'build': 'GRCh37'
},
'NC_000007.13': {
'chromosome': '7',
'build': 'GRCh37'
},
'NC_000008.10': {
'chromosome': '8',
'build': 'GRCh37'
},
'NC_000009.11': {
'chromosome': '9',
'build': 'GRCh37'
},
'NC_000010.10': {
'chromosome': '10',
'build': 'GRCh37'
},
'NC_000011.9': {
'chromosome': '11',
'build': 'GRCh37'
},
'NC_000012.11': {
'chromosome': '12',
'build': 'GRCh37'
},
'NC_000013.10': {
'chromosome': '13',
'build': 'GRCh37'
},
'NC_000014.8': {
'chromosome': '14',
'build': 'GRCh37'
},
'NC_000015.9': {
'chromosome': '15',
'build': 'GRCh37'
},
'NC_000016.9': {
'chromosome': '16',
'build': 'GRCh37'
},
'NC_000017.10': {
'chromosome': '17',
'build': 'GRCh37'
},
'NC_000018.9': {
'chromosome': '18',
'build': 'GRCh37'
},
'NC_000019.9': {
'chromosome': '19',
'build': 'GRCh37'
},
'NC_000020.10': {
'chromosome': '20',
'build': 'GRCh37'
},
'NC_000021.8': {
'chromosome': '21',
'build': 'GRCh37'
},
'NC_000022.10': {
'chromosome': '22',
'build': 'GRCh37'
},
'NC_000023.10': {
'chromosome': 'X',
'build': 'GRCh37'
},
'NC_000024.9': {
'chromosome': 'Y',
'build': 'GRCh37'
}
}
|
import django.conf
import django.shortcuts
import django.utils.cache
import wagtail.core.hooks
import wagtail.documents.models
from wagtail_storages import backends, utils
HOOK_ORDER = getattr(
django.conf.settings,
'WAGTAIL_STORAGES_DOCUMENT_HOOK_ORDER',
100
)
@wagtail.core.hooks.register('before_serve_document', order=HOOK_ORDER)
def serve_document_from_s3(document, request):
# Skip this hook if not using django-storages boto3 backend.
if not utils.is_s3_boto3_storage_used():
return
# Send document_served signal.
wagtail.documents.models.document_served.send(
sender=wagtail.documents.models.get_document_model(),
instance=document,
request=request
)
# If document has restrictions, generate a signed URL, otherwise
# return its public URL.
if document.collection.get_view_restrictions():
backend = backends.S3Boto3StorageForWagtailDocument()
file_url = backend.url(document.file.name)
else:
file_url = document.file.url
# Generate redirect response and add never_cache headers.
response = django.shortcuts.redirect(file_url)
del response['Cache-control']
django.utils.cache.add_never_cache_headers(response)
return response
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'AgentPropertiesArgs',
'ArgumentArgs',
'AuthInfoArgs',
'BaseImageTriggerArgs',
'CredentialsArgs',
'CustomRegistryCredentialsArgs',
'DockerBuildRequestArgs',
'EncodedTaskRunRequestArgs',
'ExportPipelineTargetPropertiesArgs',
'FileTaskRunRequestArgs',
'IPRuleArgs',
'IdentityPropertiesArgs',
'ImportPipelineSourcePropertiesArgs',
'LoggingPropertiesArgs',
'NetworkRuleSetArgs',
'OverrideTaskStepPropertiesArgs',
'ParentPropertiesArgs',
'PipelineRunRequestArgs',
'PipelineRunSourcePropertiesArgs',
'PipelineRunTargetPropertiesArgs',
'PipelineSourceTriggerPropertiesArgs',
'PipelineTriggerPropertiesArgs',
'PlatformPropertiesArgs',
'PoliciesArgs',
'PrivateEndpointArgs',
'PrivateLinkServiceConnectionStateArgs',
'QuarantinePolicyArgs',
'RetentionPolicyArgs',
'SecretObjectArgs',
'SetValueArgs',
'SkuArgs',
'SourceControlAuthInfoArgs',
'SourcePropertiesArgs',
'SourceRegistryCredentialsArgs',
'SourceRepositoryPropertiesArgs',
'SourceTriggerArgs',
'StorageAccountPropertiesArgs',
'SyncPropertiesArgs',
'TaskRunRequestArgs',
'TaskStepPropertiesArgs',
'TimerTriggerArgs',
'TokenCertificateArgs',
'TokenCredentialsPropertiesArgs',
'TokenPasswordArgs',
'TriggerPropertiesArgs',
'TrustPolicyArgs',
'UserIdentityPropertiesArgs',
'VirtualNetworkRuleArgs',
]
@pulumi.input_type
class AgentPropertiesArgs:
def __init__(__self__, *,
cpu: Optional[pulumi.Input[int]] = None):
"""
The properties that determine the run agent configuration.
:param pulumi.Input[int] cpu: The CPU configuration in terms of number of cores required for the run.
"""
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
@property
@pulumi.getter
def cpu(self) -> Optional[pulumi.Input[int]]:
"""
The CPU configuration in terms of number of cores required for the run.
"""
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu", value)
@pulumi.input_type
class ArgumentArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
is_secret: Optional[pulumi.Input[bool]] = None):
"""
The properties of a run argument.
:param pulumi.Input[str] name: The name of the argument.
:param pulumi.Input[str] value: The value of the argument.
:param pulumi.Input[bool] is_secret: Flag to indicate whether the argument represents a secret and want to be removed from build logs.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if is_secret is None:
is_secret = False
if is_secret is not None:
pulumi.set(__self__, "is_secret", is_secret)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the argument.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the argument.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="isSecret")
def is_secret(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to indicate whether the argument represents a secret and want to be removed from build logs.
"""
return pulumi.get(self, "is_secret")
@is_secret.setter
def is_secret(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secret", value)
@pulumi.input_type
class AuthInfoArgs:
def __init__(__self__, *,
token: pulumi.Input[str],
token_type: pulumi.Input[Union[str, 'TokenType']],
expires_in: Optional[pulumi.Input[int]] = None,
refresh_token: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[str]] = None):
"""
The authorization properties for accessing the source code repository.
:param pulumi.Input[str] token: The access token used to access the source control provider.
:param pulumi.Input[Union[str, 'TokenType']] token_type: The type of Auth token.
:param pulumi.Input[int] expires_in: Time in seconds that the token remains valid
:param pulumi.Input[str] refresh_token: The refresh token used to refresh the access token.
:param pulumi.Input[str] scope: The scope of the access token.
"""
pulumi.set(__self__, "token", token)
pulumi.set(__self__, "token_type", token_type)
if expires_in is not None:
pulumi.set(__self__, "expires_in", expires_in)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
if scope is not None:
pulumi.set(__self__, "scope", scope)
@property
@pulumi.getter
def token(self) -> pulumi.Input[str]:
"""
The access token used to access the source control provider.
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: pulumi.Input[str]):
pulumi.set(self, "token", value)
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> pulumi.Input[Union[str, 'TokenType']]:
"""
The type of Auth token.
"""
return pulumi.get(self, "token_type")
@token_type.setter
def token_type(self, value: pulumi.Input[Union[str, 'TokenType']]):
pulumi.set(self, "token_type", value)
@property
@pulumi.getter(name="expiresIn")
def expires_in(self) -> Optional[pulumi.Input[int]]:
"""
Time in seconds that the token remains valid
"""
return pulumi.get(self, "expires_in")
@expires_in.setter
def expires_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expires_in", value)
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> Optional[pulumi.Input[str]]:
"""
The refresh token used to refresh the access token.
"""
return pulumi.get(self, "refresh_token")
@refresh_token.setter
def refresh_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "refresh_token", value)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input[str]]:
"""
The scope of the access token.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scope", value)
@pulumi.input_type
class BaseImageTriggerArgs:
def __init__(__self__, *,
base_image_trigger_type: pulumi.Input[Union[str, 'BaseImageTriggerType']],
name: pulumi.Input[str],
status: Optional[pulumi.Input[Union[str, 'TriggerStatus']]] = None):
"""
The trigger based on base image dependency.
:param pulumi.Input[Union[str, 'BaseImageTriggerType']] base_image_trigger_type: The type of the auto trigger for base image dependency updates.
:param pulumi.Input[str] name: The name of the trigger.
:param pulumi.Input[Union[str, 'TriggerStatus']] status: The current status of trigger.
"""
pulumi.set(__self__, "base_image_trigger_type", base_image_trigger_type)
pulumi.set(__self__, "name", name)
if status is None:
status = 'Enabled'
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="baseImageTriggerType")
def base_image_trigger_type(self) -> pulumi.Input[Union[str, 'BaseImageTriggerType']]:
"""
The type of the auto trigger for base image dependency updates.
"""
return pulumi.get(self, "base_image_trigger_type")
@base_image_trigger_type.setter
def base_image_trigger_type(self, value: pulumi.Input[Union[str, 'BaseImageTriggerType']]):
pulumi.set(self, "base_image_trigger_type", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the trigger.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'TriggerStatus']]]:
"""
The current status of trigger.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'TriggerStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class CredentialsArgs:
def __init__(__self__, *,
custom_registries: Optional[pulumi.Input[Mapping[str, pulumi.Input['CustomRegistryCredentialsArgs']]]] = None,
source_registry: Optional[pulumi.Input['SourceRegistryCredentialsArgs']] = None):
"""
The parameters that describes a set of credentials that will be used when a run is invoked.
:param pulumi.Input[Mapping[str, pulumi.Input['CustomRegistryCredentialsArgs']]] custom_registries: Describes the credential parameters for accessing other custom registries. The key
for the dictionary item will be the registry login server (myregistry.azurecr.io) and
the value of the item will be the registry credentials for accessing the registry.
:param pulumi.Input['SourceRegistryCredentialsArgs'] source_registry: Describes the credential parameters for accessing the source registry.
"""
if custom_registries is not None:
pulumi.set(__self__, "custom_registries", custom_registries)
if source_registry is not None:
pulumi.set(__self__, "source_registry", source_registry)
@property
@pulumi.getter(name="customRegistries")
def custom_registries(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['CustomRegistryCredentialsArgs']]]]:
"""
Describes the credential parameters for accessing other custom registries. The key
for the dictionary item will be the registry login server (myregistry.azurecr.io) and
the value of the item will be the registry credentials for accessing the registry.
"""
return pulumi.get(self, "custom_registries")
@custom_registries.setter
def custom_registries(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['CustomRegistryCredentialsArgs']]]]):
pulumi.set(self, "custom_registries", value)
@property
@pulumi.getter(name="sourceRegistry")
def source_registry(self) -> Optional[pulumi.Input['SourceRegistryCredentialsArgs']]:
"""
Describes the credential parameters for accessing the source registry.
"""
return pulumi.get(self, "source_registry")
@source_registry.setter
def source_registry(self, value: Optional[pulumi.Input['SourceRegistryCredentialsArgs']]):
pulumi.set(self, "source_registry", value)
@pulumi.input_type
class CustomRegistryCredentialsArgs:
def __init__(__self__, *,
identity: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input['SecretObjectArgs']] = None,
user_name: Optional[pulumi.Input['SecretObjectArgs']] = None):
"""
Describes the credentials that will be used to access a custom registry during a run.
:param pulumi.Input[str] identity: Indicates the managed identity assigned to the custom credential. If a user-assigned identity
this value is the Client ID. If a system-assigned identity, the value will be `system`. In
the case of a system-assigned identity, the Client ID will be determined by the runner. This
identity may be used to authenticate to key vault to retrieve credentials or it may be the only
source of authentication used for accessing the registry.
:param pulumi.Input['SecretObjectArgs'] password: The password for logging into the custom registry. The password is a secret
object that allows multiple ways of providing the value for it.
:param pulumi.Input['SecretObjectArgs'] user_name: The username for logging into the custom registry.
"""
if identity is not None:
pulumi.set(__self__, "identity", identity)
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input[str]]:
"""
Indicates the managed identity assigned to the custom credential. If a user-assigned identity
this value is the Client ID. If a system-assigned identity, the value will be `system`. In
the case of a system-assigned identity, the Client ID will be determined by the runner. This
identity may be used to authenticate to key vault to retrieve credentials or it may be the only
source of authentication used for accessing the registry.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input['SecretObjectArgs']]:
"""
The password for logging into the custom registry. The password is a secret
object that allows multiple ways of providing the value for it.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input['SecretObjectArgs']]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input['SecretObjectArgs']]:
"""
The username for logging into the custom registry.
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input['SecretObjectArgs']]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class DockerBuildRequestArgs:
def __init__(__self__, *,
docker_file_path: pulumi.Input[str],
platform: pulumi.Input['PlatformPropertiesArgs'],
type: pulumi.Input[str],
agent_configuration: Optional[pulumi.Input['AgentPropertiesArgs']] = None,
agent_pool_name: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]] = None,
credentials: Optional[pulumi.Input['CredentialsArgs']] = None,
image_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
is_archive_enabled: Optional[pulumi.Input[bool]] = None,
is_push_enabled: Optional[pulumi.Input[bool]] = None,
log_template: Optional[pulumi.Input[str]] = None,
no_cache: Optional[pulumi.Input[bool]] = None,
source_location: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[int]] = None):
"""
The parameters for a docker quick build.
:param pulumi.Input[str] docker_file_path: The Docker file path relative to the source location.
:param pulumi.Input['PlatformPropertiesArgs'] platform: The platform properties against which the run has to happen.
:param pulumi.Input[str] type: The type of the run request.
Expected value is 'DockerBuildRequest'.
:param pulumi.Input['AgentPropertiesArgs'] agent_configuration: The machine configuration of the run agent.
:param pulumi.Input[str] agent_pool_name: The dedicated agent pool for the run.
:param pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]] arguments: The collection of override arguments to be used when executing the run.
:param pulumi.Input['CredentialsArgs'] credentials: The properties that describes a set of credentials that will be used when this run is invoked.
:param pulumi.Input[Sequence[pulumi.Input[str]]] image_names: The fully qualified image names including the repository and tag.
:param pulumi.Input[bool] is_archive_enabled: The value that indicates whether archiving is enabled for the run or not.
:param pulumi.Input[bool] is_push_enabled: The value of this property indicates whether the image built should be pushed to the registry or not.
:param pulumi.Input[str] log_template: The template that describes the repository and tag information for run log artifact.
:param pulumi.Input[bool] no_cache: The value of this property indicates whether the image cache is enabled or not.
:param pulumi.Input[str] source_location: The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository.
If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API.
:param pulumi.Input[str] target: The name of the target build stage for the docker build.
:param pulumi.Input[int] timeout: Run timeout in seconds.
"""
pulumi.set(__self__, "docker_file_path", docker_file_path)
pulumi.set(__self__, "platform", platform)
pulumi.set(__self__, "type", 'DockerBuildRequest')
if agent_configuration is not None:
pulumi.set(__self__, "agent_configuration", agent_configuration)
if agent_pool_name is not None:
pulumi.set(__self__, "agent_pool_name", agent_pool_name)
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if image_names is not None:
pulumi.set(__self__, "image_names", image_names)
if is_archive_enabled is None:
is_archive_enabled = False
if is_archive_enabled is not None:
pulumi.set(__self__, "is_archive_enabled", is_archive_enabled)
if is_push_enabled is None:
is_push_enabled = True
if is_push_enabled is not None:
pulumi.set(__self__, "is_push_enabled", is_push_enabled)
if log_template is not None:
pulumi.set(__self__, "log_template", log_template)
if no_cache is None:
no_cache = False
if no_cache is not None:
pulumi.set(__self__, "no_cache", no_cache)
if source_location is not None:
pulumi.set(__self__, "source_location", source_location)
if target is not None:
pulumi.set(__self__, "target", target)
if timeout is None:
timeout = 3600
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter(name="dockerFilePath")
def docker_file_path(self) -> pulumi.Input[str]:
"""
The Docker file path relative to the source location.
"""
return pulumi.get(self, "docker_file_path")
@docker_file_path.setter
def docker_file_path(self, value: pulumi.Input[str]):
pulumi.set(self, "docker_file_path", value)
@property
@pulumi.getter
def platform(self) -> pulumi.Input['PlatformPropertiesArgs']:
"""
The platform properties against which the run has to happen.
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: pulumi.Input['PlatformPropertiesArgs']):
pulumi.set(self, "platform", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of the run request.
Expected value is 'DockerBuildRequest'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="agentConfiguration")
def agent_configuration(self) -> Optional[pulumi.Input['AgentPropertiesArgs']]:
"""
The machine configuration of the run agent.
"""
return pulumi.get(self, "agent_configuration")
@agent_configuration.setter
def agent_configuration(self, value: Optional[pulumi.Input['AgentPropertiesArgs']]):
pulumi.set(self, "agent_configuration", value)
@property
@pulumi.getter(name="agentPoolName")
def agent_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The dedicated agent pool for the run.
"""
return pulumi.get(self, "agent_pool_name")
@agent_pool_name.setter
def agent_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_pool_name", value)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]]:
"""
The collection of override arguments to be used when executing the run.
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:
"""
The properties that describes a set of credentials that will be used when this run is invoked.
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input['CredentialsArgs']]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter(name="imageNames")
def image_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The fully qualified image names including the repository and tag.
"""
return pulumi.get(self, "image_names")
@image_names.setter
def image_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "image_names", value)
@property
@pulumi.getter(name="isArchiveEnabled")
def is_archive_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value that indicates whether archiving is enabled for the run or not.
"""
return pulumi.get(self, "is_archive_enabled")
@is_archive_enabled.setter
def is_archive_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_archive_enabled", value)
@property
@pulumi.getter(name="isPushEnabled")
def is_push_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value of this property indicates whether the image built should be pushed to the registry or not.
"""
return pulumi.get(self, "is_push_enabled")
@is_push_enabled.setter
def is_push_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_push_enabled", value)
@property
@pulumi.getter(name="logTemplate")
def log_template(self) -> Optional[pulumi.Input[str]]:
"""
The template that describes the repository and tag information for run log artifact.
"""
return pulumi.get(self, "log_template")
@log_template.setter
def log_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_template", value)
@property
@pulumi.getter(name="noCache")
def no_cache(self) -> Optional[pulumi.Input[bool]]:
"""
The value of this property indicates whether the image cache is enabled or not.
"""
return pulumi.get(self, "no_cache")
@no_cache.setter
def no_cache(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "no_cache", value)
@property
@pulumi.getter(name="sourceLocation")
def source_location(self) -> Optional[pulumi.Input[str]]:
"""
The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository.
If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API.
"""
return pulumi.get(self, "source_location")
@source_location.setter
def source_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_location", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
"""
The name of the target build stage for the docker build.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
"""
Run timeout in seconds.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class EncodedTaskRunRequestArgs:
def __init__(__self__, *,
encoded_task_content: pulumi.Input[str],
platform: pulumi.Input['PlatformPropertiesArgs'],
type: pulumi.Input[str],
agent_configuration: Optional[pulumi.Input['AgentPropertiesArgs']] = None,
agent_pool_name: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input['CredentialsArgs']] = None,
encoded_values_content: Optional[pulumi.Input[str]] = None,
is_archive_enabled: Optional[pulumi.Input[bool]] = None,
log_template: Optional[pulumi.Input[str]] = None,
source_location: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[int]] = None,
values: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]] = None):
"""
The parameters for a quick task run request.
:param pulumi.Input[str] encoded_task_content: Base64 encoded value of the template/definition file content.
:param pulumi.Input['PlatformPropertiesArgs'] platform: The platform properties against which the run has to happen.
:param pulumi.Input[str] type: The type of the run request.
Expected value is 'EncodedTaskRunRequest'.
:param pulumi.Input['AgentPropertiesArgs'] agent_configuration: The machine configuration of the run agent.
:param pulumi.Input[str] agent_pool_name: The dedicated agent pool for the run.
:param pulumi.Input['CredentialsArgs'] credentials: The properties that describes a set of credentials that will be used when this run is invoked.
:param pulumi.Input[str] encoded_values_content: Base64 encoded value of the parameters/values file content.
:param pulumi.Input[bool] is_archive_enabled: The value that indicates whether archiving is enabled for the run or not.
:param pulumi.Input[str] log_template: The template that describes the repository and tag information for run log artifact.
:param pulumi.Input[str] source_location: The URL(absolute or relative) of the source context. It can be an URL to a tar or git repository.
If it is relative URL, the relative path should be obtained from calling listBuildSourceUploadUrl API.
:param pulumi.Input[int] timeout: Run timeout in seconds.
:param pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]] values: The collection of overridable values that can be passed when running a task.
"""
pulumi.set(__self__, "encoded_task_content", encoded_task_content)
pulumi.set(__self__, "platform", platform)
pulumi.set(__self__, "type", 'EncodedTaskRunRequest')
if agent_configuration is not None:
pulumi.set(__self__, "agent_configuration", agent_configuration)
if agent_pool_name is not None:
pulumi.set(__self__, "agent_pool_name", agent_pool_name)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if encoded_values_content is not None:
pulumi.set(__self__, "encoded_values_content", encoded_values_content)
if is_archive_enabled is None:
is_archive_enabled = False
if is_archive_enabled is not None:
pulumi.set(__self__, "is_archive_enabled", is_archive_enabled)
if log_template is not None:
pulumi.set(__self__, "log_template", log_template)
if source_location is not None:
pulumi.set(__self__, "source_location", source_location)
if timeout is None:
timeout = 3600
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter(name="encodedTaskContent")
def encoded_task_content(self) -> pulumi.Input[str]:
"""
Base64 encoded value of the template/definition file content.
"""
return pulumi.get(self, "encoded_task_content")
@encoded_task_content.setter
def encoded_task_content(self, value: pulumi.Input[str]):
pulumi.set(self, "encoded_task_content", value)
@property
@pulumi.getter
def platform(self) -> pulumi.Input['PlatformPropertiesArgs']:
"""
The platform properties against which the run has to happen.
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: pulumi.Input['PlatformPropertiesArgs']):
pulumi.set(self, "platform", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of the run request.
Expected value is 'EncodedTaskRunRequest'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="agentConfiguration")
def agent_configuration(self) -> Optional[pulumi.Input['AgentPropertiesArgs']]:
"""
The machine configuration of the run agent.
"""
return pulumi.get(self, "agent_configuration")
@agent_configuration.setter
def agent_configuration(self, value: Optional[pulumi.Input['AgentPropertiesArgs']]):
pulumi.set(self, "agent_configuration", value)
@property
@pulumi.getter(name="agentPoolName")
def agent_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The dedicated agent pool for the run.
"""
return pulumi.get(self, "agent_pool_name")
@agent_pool_name.setter
def agent_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_pool_name", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:
"""
The properties that describes a set of credentials that will be used when this run is invoked.
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input['CredentialsArgs']]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter(name="encodedValuesContent")
def encoded_values_content(self) -> Optional[pulumi.Input[str]]:
"""
Base64 encoded value of the parameters/values file content.
"""
return pulumi.get(self, "encoded_values_content")
@encoded_values_content.setter
def encoded_values_content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encoded_values_content", value)
@property
@pulumi.getter(name="isArchiveEnabled")
def is_archive_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value that indicates whether archiving is enabled for the run or not.
"""
return pulumi.get(self, "is_archive_enabled")
@is_archive_enabled.setter
def is_archive_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_archive_enabled", value)
@property
@pulumi.getter(name="logTemplate")
def log_template(self) -> Optional[pulumi.Input[str]]:
"""
The template that describes the repository and tag information for run log artifact.
"""
return pulumi.get(self, "log_template")
@log_template.setter
def log_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_template", value)
@property
@pulumi.getter(name="sourceLocation")
def source_location(self) -> Optional[pulumi.Input[str]]:
"""
The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
If it is a relative URL, the relative path should be obtained from calling the listBuildSourceUploadUrl API.
"""
return pulumi.get(self, "source_location")
@source_location.setter
def source_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_location", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
"""
Run timeout in seconds.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]:
"""
The collection of overridable values that can be passed when running a task.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ExportPipelineTargetPropertiesArgs:
def __init__(__self__, *,
key_vault_uri: pulumi.Input[str],
type: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
The properties of the export pipeline target.
:param pulumi.Input[str] key_vault_uri: The key vault secret URI to obtain the target storage SAS token.
:param pulumi.Input[str] type: The type of target for the export pipeline.
:param pulumi.Input[str] uri: The target uri of the export pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName"
"""
pulumi.set(__self__, "key_vault_uri", key_vault_uri)
if type is not None:
pulumi.set(__self__, "type", type)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="keyVaultUri")
def key_vault_uri(self) -> pulumi.Input[str]:
"""
The key vault secret URI to obtain the target storage SAS token.
"""
return pulumi.get(self, "key_vault_uri")
@key_vault_uri.setter
def key_vault_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "key_vault_uri", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of target for the export pipeline.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The target uri of the export pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName"
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class FileTaskRunRequestArgs:
def __init__(__self__, *,
platform: pulumi.Input['PlatformPropertiesArgs'],
task_file_path: pulumi.Input[str],
type: pulumi.Input[str],
agent_configuration: Optional[pulumi.Input['AgentPropertiesArgs']] = None,
agent_pool_name: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input['CredentialsArgs']] = None,
is_archive_enabled: Optional[pulumi.Input[bool]] = None,
log_template: Optional[pulumi.Input[str]] = None,
source_location: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[int]] = None,
values: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]] = None,
values_file_path: Optional[pulumi.Input[str]] = None):
"""
The request parameters for a scheduling run against a task file.
:param pulumi.Input['PlatformPropertiesArgs'] platform: The platform properties against which the run has to happen.
:param pulumi.Input[str] task_file_path: The template/definition file path relative to the source.
:param pulumi.Input[str] type: The type of the run request.
Expected value is 'FileTaskRunRequest'.
:param pulumi.Input['AgentPropertiesArgs'] agent_configuration: The machine configuration of the run agent.
:param pulumi.Input[str] agent_pool_name: The dedicated agent pool for the run.
:param pulumi.Input['CredentialsArgs'] credentials: The properties that describe a set of credentials that will be used when this run is invoked.
:param pulumi.Input[bool] is_archive_enabled: The value that indicates whether archiving is enabled for the run or not.
:param pulumi.Input[str] log_template: The template that describes the repository and tag information for run log artifact.
:param pulumi.Input[str] source_location: The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
If it is a relative URL, the relative path should be obtained from calling the listBuildSourceUploadUrl API.
:param pulumi.Input[int] timeout: Run timeout in seconds.
:param pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]] values: The collection of overridable values that can be passed when running a task.
:param pulumi.Input[str] values_file_path: The values/parameters file path relative to the source.
"""
pulumi.set(__self__, "platform", platform)
pulumi.set(__self__, "task_file_path", task_file_path)
pulumi.set(__self__, "type", 'FileTaskRunRequest')
if agent_configuration is not None:
pulumi.set(__self__, "agent_configuration", agent_configuration)
if agent_pool_name is not None:
pulumi.set(__self__, "agent_pool_name", agent_pool_name)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if is_archive_enabled is None:
is_archive_enabled = False
if is_archive_enabled is not None:
pulumi.set(__self__, "is_archive_enabled", is_archive_enabled)
if log_template is not None:
pulumi.set(__self__, "log_template", log_template)
if source_location is not None:
pulumi.set(__self__, "source_location", source_location)
if timeout is None:
timeout = 3600
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if values is not None:
pulumi.set(__self__, "values", values)
if values_file_path is not None:
pulumi.set(__self__, "values_file_path", values_file_path)
@property
@pulumi.getter
def platform(self) -> pulumi.Input['PlatformPropertiesArgs']:
"""
The platform properties against which the run has to happen.
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: pulumi.Input['PlatformPropertiesArgs']):
pulumi.set(self, "platform", value)
@property
@pulumi.getter(name="taskFilePath")
def task_file_path(self) -> pulumi.Input[str]:
"""
The template/definition file path relative to the source.
"""
return pulumi.get(self, "task_file_path")
@task_file_path.setter
def task_file_path(self, value: pulumi.Input[str]):
pulumi.set(self, "task_file_path", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of the run request.
Expected value is 'FileTaskRunRequest'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="agentConfiguration")
def agent_configuration(self) -> Optional[pulumi.Input['AgentPropertiesArgs']]:
"""
The machine configuration of the run agent.
"""
return pulumi.get(self, "agent_configuration")
@agent_configuration.setter
def agent_configuration(self, value: Optional[pulumi.Input['AgentPropertiesArgs']]):
pulumi.set(self, "agent_configuration", value)
@property
@pulumi.getter(name="agentPoolName")
def agent_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The dedicated agent pool for the run.
"""
return pulumi.get(self, "agent_pool_name")
@agent_pool_name.setter
def agent_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_pool_name", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:
"""
The properties that describe a set of credentials that will be used when this run is invoked.
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input['CredentialsArgs']]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter(name="isArchiveEnabled")
def is_archive_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value that indicates whether archiving is enabled for the run or not.
"""
return pulumi.get(self, "is_archive_enabled")
@is_archive_enabled.setter
def is_archive_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_archive_enabled", value)
@property
@pulumi.getter(name="logTemplate")
def log_template(self) -> Optional[pulumi.Input[str]]:
"""
The template that describes the repository and tag information for run log artifact.
"""
return pulumi.get(self, "log_template")
@log_template.setter
def log_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_template", value)
@property
@pulumi.getter(name="sourceLocation")
def source_location(self) -> Optional[pulumi.Input[str]]:
"""
The URL (absolute or relative) of the source context. It can be a URL to a tar or git repository.
If it is a relative URL, the relative path should be obtained from calling the listBuildSourceUploadUrl API.
"""
return pulumi.get(self, "source_location")
@source_location.setter
def source_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_location", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
"""
Run timeout in seconds.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]:
"""
The collection of overridable values that can be passed when running a task.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]):
pulumi.set(self, "values", value)
@property
@pulumi.getter(name="valuesFilePath")
def values_file_path(self) -> Optional[pulumi.Input[str]]:
"""
The values/parameters file path relative to the source.
"""
return pulumi.get(self, "values_file_path")
@values_file_path.setter
def values_file_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "values_file_path", value)
@pulumi.input_type
class IPRuleArgs:
def __init__(__self__, *,
i_p_address_or_range: pulumi.Input[str],
action: Optional[pulumi.Input[Union[str, 'Action']]] = None):
"""
IP rule with specific IP or IP range in CIDR format.
:param pulumi.Input[str] i_p_address_or_range: Specifies the IP or IP range in CIDR format. Only an IPv4 address is allowed.
:param pulumi.Input[Union[str, 'Action']] action: The action of IP ACL rule.
"""
pulumi.set(__self__, "i_p_address_or_range", i_p_address_or_range)
if action is None:
action = 'Allow'
if action is not None:
pulumi.set(__self__, "action", action)
@property
@pulumi.getter(name="iPAddressOrRange")
def i_p_address_or_range(self) -> pulumi.Input[str]:
"""
Specifies the IP or IP range in CIDR format. Only an IPv4 address is allowed.
"""
return pulumi.get(self, "i_p_address_or_range")
@i_p_address_or_range.setter
def i_p_address_or_range(self, value: pulumi.Input[str]):
pulumi.set(self, "i_p_address_or_range", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[Union[str, 'Action']]]:
"""
The action of IP ACL rule.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[Union[str, 'Action']]]):
pulumi.set(self, "action", value)
@pulumi.input_type
class IdentityPropertiesArgs:
def __init__(__self__, *,
principal_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input['ResourceIdentityType']] = None,
user_assigned_identities: Optional[pulumi.Input[Mapping[str, pulumi.Input['UserIdentityPropertiesArgs']]]] = None):
"""
Managed identity for the resource.
:param pulumi.Input[str] principal_id: The principal ID of resource identity.
:param pulumi.Input[str] tenant_id: The tenant ID of resource.
:param pulumi.Input['ResourceIdentityType'] type: The identity type.
:param pulumi.Input[Mapping[str, pulumi.Input['UserIdentityPropertiesArgs']]] user_assigned_identities: The list of user identities associated with the resource. The user identity
dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
if principal_id is not None:
pulumi.set(__self__, "principal_id", principal_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> Optional[pulumi.Input[str]]:
"""
The principal ID of resource identity.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The tenant ID of resource.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['UserIdentityPropertiesArgs']]]]:
"""
The list of user identities associated with the resource. The user identity
dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/
providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
return pulumi.get(self, "user_assigned_identities")
@user_assigned_identities.setter
def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['UserIdentityPropertiesArgs']]]]):
pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class ImportPipelineSourcePropertiesArgs:
def __init__(__self__, *,
key_vault_uri: pulumi.Input[str],
type: Optional[pulumi.Input[Union[str, 'PipelineSourceType']]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
The properties of the import pipeline source.
:param pulumi.Input[str] key_vault_uri: The key vault secret URI to obtain the source storage SAS token.
:param pulumi.Input[Union[str, 'PipelineSourceType']] type: The type of source for the import pipeline.
:param pulumi.Input[str] uri: The source uri of the import pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName"
"""
pulumi.set(__self__, "key_vault_uri", key_vault_uri)
if type is None:
type = 'AzureStorageBlobContainer'
if type is not None:
pulumi.set(__self__, "type", type)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="keyVaultUri")
def key_vault_uri(self) -> pulumi.Input[str]:
"""
The key vault secret URI to obtain the source storage SAS token.
"""
return pulumi.get(self, "key_vault_uri")
@key_vault_uri.setter
def key_vault_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "key_vault_uri", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'PipelineSourceType']]]:
"""
The type of source for the import pipeline.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'PipelineSourceType']]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The source uri of the import pipeline.
When 'AzureStorageBlob': "https://accountName.blob.core.windows.net/containerName/blobName"
When 'AzureStorageBlobContainer': "https://accountName.blob.core.windows.net/containerName"
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class LoggingPropertiesArgs:
def __init__(__self__, *,
audit_log_status: Optional[pulumi.Input[Union[str, 'AuditLogStatus']]] = None,
log_level: Optional[pulumi.Input[Union[str, 'LogLevel']]] = None):
"""
The logging properties of the connected registry.
:param pulumi.Input[Union[str, 'AuditLogStatus']] audit_log_status: Indicates whether audit logs are enabled on the connected registry.
:param pulumi.Input[Union[str, 'LogLevel']] log_level: The verbosity of logs persisted on the connected registry.
"""
if audit_log_status is None:
audit_log_status = 'Disabled'
if audit_log_status is not None:
pulumi.set(__self__, "audit_log_status", audit_log_status)
if log_level is None:
log_level = 'Information'
if log_level is not None:
pulumi.set(__self__, "log_level", log_level)
@property
@pulumi.getter(name="auditLogStatus")
def audit_log_status(self) -> Optional[pulumi.Input[Union[str, 'AuditLogStatus']]]:
"""
Indicates whether audit logs are enabled on the connected registry.
"""
return pulumi.get(self, "audit_log_status")
@audit_log_status.setter
def audit_log_status(self, value: Optional[pulumi.Input[Union[str, 'AuditLogStatus']]]):
pulumi.set(self, "audit_log_status", value)
@property
@pulumi.getter(name="logLevel")
def log_level(self) -> Optional[pulumi.Input[Union[str, 'LogLevel']]]:
"""
The verbosity of logs persisted on the connected registry.
"""
return pulumi.get(self, "log_level")
@log_level.setter
def log_level(self, value: Optional[pulumi.Input[Union[str, 'LogLevel']]]):
pulumi.set(self, "log_level", value)
@pulumi.input_type
class NetworkRuleSetArgs:
def __init__(__self__, *,
default_action: pulumi.Input[Union[str, 'DefaultAction']],
ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IPRuleArgs']]]] = None,
virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]] = None):
"""
The network rule set for a container registry.
:param pulumi.Input[Union[str, 'DefaultAction']] default_action: The default action of allow or deny when no other rules match.
:param pulumi.Input[Sequence[pulumi.Input['IPRuleArgs']]] ip_rules: The IP ACL rules.
:param pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]] virtual_network_rules: The virtual network rules.
"""
if default_action is None:
default_action = 'Allow'
pulumi.set(__self__, "default_action", default_action)
if ip_rules is not None:
pulumi.set(__self__, "ip_rules", ip_rules)
if virtual_network_rules is not None:
pulumi.set(__self__, "virtual_network_rules", virtual_network_rules)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> pulumi.Input[Union[str, 'DefaultAction']]:
"""
The default action of allow or deny when no other rules match.
"""
return pulumi.get(self, "default_action")
@default_action.setter
def default_action(self, value: pulumi.Input[Union[str, 'DefaultAction']]):
pulumi.set(self, "default_action", value)
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IPRuleArgs']]]]:
"""
The IP ACL rules.
"""
return pulumi.get(self, "ip_rules")
@ip_rules.setter
def ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IPRuleArgs']]]]):
pulumi.set(self, "ip_rules", value)
@property
@pulumi.getter(name="virtualNetworkRules")
def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]:
"""
The virtual network rules.
"""
return pulumi.get(self, "virtual_network_rules")
@virtual_network_rules.setter
def virtual_network_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]):
pulumi.set(self, "virtual_network_rules", value)
@pulumi.input_type
class OverrideTaskStepPropertiesArgs:
def __init__(__self__, *,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]] = None,
context_path: Optional[pulumi.Input[str]] = None,
file: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None,
update_trigger_token: Optional[pulumi.Input[str]] = None,
values: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]] arguments: Gets or sets the collection of override arguments to be used when
executing a build step.
:param pulumi.Input[str] context_path: The source context against which run has to be queued.
:param pulumi.Input[str] file: The file against which run has to be queued.
:param pulumi.Input[str] target: The name of the target build stage for the docker build.
:param pulumi.Input[str] update_trigger_token: Base64 encoded update trigger token that will be attached to the base image trigger webhook.
:param pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]] values: The collection of overridable values that can be passed when running a Task.
"""
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if context_path is not None:
pulumi.set(__self__, "context_path", context_path)
if file is not None:
pulumi.set(__self__, "file", file)
if target is not None:
pulumi.set(__self__, "target", target)
if update_trigger_token is not None:
pulumi.set(__self__, "update_trigger_token", update_trigger_token)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]]:
"""
Gets or sets the collection of override arguments to be used when
executing a build step.
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ArgumentArgs']]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter(name="contextPath")
def context_path(self) -> Optional[pulumi.Input[str]]:
"""
The source context against which run has to be queued.
"""
return pulumi.get(self, "context_path")
@context_path.setter
def context_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "context_path", value)
@property
@pulumi.getter
def file(self) -> Optional[pulumi.Input[str]]:
"""
The file against which run has to be queued.
"""
return pulumi.get(self, "file")
@file.setter
def file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
"""
The name of the target build stage for the docker build.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target", value)
@property
@pulumi.getter(name="updateTriggerToken")
def update_trigger_token(self) -> Optional[pulumi.Input[str]]:
"""
Base64 encoded update trigger token that will be attached to the base image trigger webhook.
"""
return pulumi.get(self, "update_trigger_token")
@update_trigger_token.setter
def update_trigger_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_trigger_token", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]:
"""
The collection of overridable values that can be passed when running a Task.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SetValueArgs']]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ParentPropertiesArgs:
def __init__(__self__, *,
sync_properties: pulumi.Input['SyncPropertiesArgs'],
id: Optional[pulumi.Input[str]] = None):
"""
The properties of the connected registry parent.
:param pulumi.Input['SyncPropertiesArgs'] sync_properties: The sync properties of the connected registry with its parent.
:param pulumi.Input[str] id: The resource ID of the parent to which the connected registry will be associated.
"""
pulumi.set(__self__, "sync_properties", sync_properties)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="syncProperties")
def sync_properties(self) -> pulumi.Input['SyncPropertiesArgs']:
"""
The sync properties of the connected registry with its parent.
"""
return pulumi.get(self, "sync_properties")
@sync_properties.setter
def sync_properties(self, value: pulumi.Input['SyncPropertiesArgs']):
pulumi.set(self, "sync_properties", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the parent to which the connected registry will be associated.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class PipelineRunRequestArgs:
def __init__(__self__, *,
artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
catalog_digest: Optional[pulumi.Input[str]] = None,
pipeline_resource_id: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input['PipelineRunSourcePropertiesArgs']] = None,
target: Optional[pulumi.Input['PipelineRunTargetPropertiesArgs']] = None):
"""
The request properties provided for a pipeline run.
:param pulumi.Input[Sequence[pulumi.Input[str]]] artifacts: List of source artifacts to be transferred by the pipeline.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123').
:param pulumi.Input[str] catalog_digest: The digest of the tar used to transfer the artifacts.
:param pulumi.Input[str] pipeline_resource_id: The resource ID of the pipeline to run.
:param pulumi.Input['PipelineRunSourcePropertiesArgs'] source: The source properties of the pipeline run.
:param pulumi.Input['PipelineRunTargetPropertiesArgs'] target: The target properties of the pipeline run.
"""
if artifacts is not None:
pulumi.set(__self__, "artifacts", artifacts)
if catalog_digest is not None:
pulumi.set(__self__, "catalog_digest", catalog_digest)
if pipeline_resource_id is not None:
pulumi.set(__self__, "pipeline_resource_id", pipeline_resource_id)
if source is not None:
pulumi.set(__self__, "source", source)
if target is not None:
pulumi.set(__self__, "target", target)
@property
@pulumi.getter
def artifacts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of source artifacts to be transferred by the pipeline.
Specify an image by repository ('hello-world'). This will use the 'latest' tag.
Specify an image by tag ('hello-world:latest').
Specify an image by sha256-based manifest digest ('hello-world@sha256:abc123').
"""
return pulumi.get(self, "artifacts")
@artifacts.setter
def artifacts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "artifacts", value)
@property
@pulumi.getter(name="catalogDigest")
def catalog_digest(self) -> Optional[pulumi.Input[str]]:
"""
The digest of the tar used to transfer the artifacts.
"""
return pulumi.get(self, "catalog_digest")
@catalog_digest.setter
def catalog_digest(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "catalog_digest", value)
@property
@pulumi.getter(name="pipelineResourceId")
def pipeline_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the pipeline to run.
"""
return pulumi.get(self, "pipeline_resource_id")
@pipeline_resource_id.setter
def pipeline_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_resource_id", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['PipelineRunSourcePropertiesArgs']]:
"""
The source properties of the pipeline run.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['PipelineRunSourcePropertiesArgs']]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input['PipelineRunTargetPropertiesArgs']]:
"""
The target properties of the pipeline run.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: Optional[pulumi.Input['PipelineRunTargetPropertiesArgs']]):
pulumi.set(self, "target", value)
@pulumi.input_type
class PipelineRunSourcePropertiesArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'PipelineRunSourceType']]] = None):
"""
:param pulumi.Input[str] name: The name of the source.
:param pulumi.Input[Union[str, 'PipelineRunSourceType']] type: The type of the source.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if type is None:
type = 'AzureStorageBlob'
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the source.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'PipelineRunSourceType']]]:
"""
The type of the source.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'PipelineRunSourceType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class PipelineRunTargetPropertiesArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'PipelineRunTargetType']]] = None):
"""
:param pulumi.Input[str] name: The name of the target.
:param pulumi.Input[Union[str, 'PipelineRunTargetType']] type: The type of the target.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if type is None:
type = 'AzureStorageBlob'
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the target.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'PipelineRunTargetType']]]:
"""
The type of the target.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'PipelineRunTargetType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class PipelineSourceTriggerPropertiesArgs:
def __init__(__self__, *,
status: pulumi.Input[Union[str, 'TriggerStatus']]):
"""
:param pulumi.Input[Union[str, 'TriggerStatus']] status: The current status of the source trigger.
"""
if status is None:
status = 'Enabled'
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def status(self) -> pulumi.Input[Union[str, 'TriggerStatus']]:
"""
The current status of the source trigger.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[Union[str, 'TriggerStatus']]):
pulumi.set(self, "status", value)
@pulumi.input_type
class PipelineTriggerPropertiesArgs:
def __init__(__self__, *,
source_trigger: Optional[pulumi.Input['PipelineSourceTriggerPropertiesArgs']] = None):
"""
:param pulumi.Input['PipelineSourceTriggerPropertiesArgs'] source_trigger: The source trigger properties of the pipeline.
"""
if source_trigger is not None:
pulumi.set(__self__, "source_trigger", source_trigger)
@property
@pulumi.getter(name="sourceTrigger")
def source_trigger(self) -> Optional[pulumi.Input['PipelineSourceTriggerPropertiesArgs']]:
"""
The source trigger properties of the pipeline.
"""
return pulumi.get(self, "source_trigger")
@source_trigger.setter
def source_trigger(self, value: Optional[pulumi.Input['PipelineSourceTriggerPropertiesArgs']]):
pulumi.set(self, "source_trigger", value)
@pulumi.input_type
class PlatformPropertiesArgs:
def __init__(__self__, *,
os: pulumi.Input[Union[str, 'OS']],
architecture: Optional[pulumi.Input[Union[str, 'Architecture']]] = None,
variant: Optional[pulumi.Input[Union[str, 'Variant']]] = None):
"""
The platform properties against which the run has to happen.
:param pulumi.Input[Union[str, 'OS']] os: The operating system type required for the run.
:param pulumi.Input[Union[str, 'Architecture']] architecture: The OS architecture.
:param pulumi.Input[Union[str, 'Variant']] variant: Variant of the CPU.
"""
pulumi.set(__self__, "os", os)
if architecture is not None:
pulumi.set(__self__, "architecture", architecture)
if variant is not None:
pulumi.set(__self__, "variant", variant)
@property
@pulumi.getter
def os(self) -> pulumi.Input[Union[str, 'OS']]:
"""
The operating system type required for the run.
"""
return pulumi.get(self, "os")
@os.setter
def os(self, value: pulumi.Input[Union[str, 'OS']]):
pulumi.set(self, "os", value)
@property
@pulumi.getter
def architecture(self) -> Optional[pulumi.Input[Union[str, 'Architecture']]]:
"""
The OS architecture.
"""
return pulumi.get(self, "architecture")
@architecture.setter
def architecture(self, value: Optional[pulumi.Input[Union[str, 'Architecture']]]):
pulumi.set(self, "architecture", value)
@property
@pulumi.getter
def variant(self) -> Optional[pulumi.Input[Union[str, 'Variant']]]:
"""
Variant of the CPU.
"""
return pulumi.get(self, "variant")
@variant.setter
def variant(self, value: Optional[pulumi.Input[Union[str, 'Variant']]]):
pulumi.set(self, "variant", value)
@pulumi.input_type
class PoliciesArgs:
def __init__(__self__, *,
quarantine_policy: Optional[pulumi.Input['QuarantinePolicyArgs']] = None,
retention_policy: Optional[pulumi.Input['RetentionPolicyArgs']] = None,
trust_policy: Optional[pulumi.Input['TrustPolicyArgs']] = None):
"""
The policies for a container registry.
:param pulumi.Input['QuarantinePolicyArgs'] quarantine_policy: The quarantine policy for a container registry.
:param pulumi.Input['RetentionPolicyArgs'] retention_policy: The retention policy for a container registry.
:param pulumi.Input['TrustPolicyArgs'] trust_policy: The content trust policy for a container registry.
"""
if quarantine_policy is not None:
pulumi.set(__self__, "quarantine_policy", quarantine_policy)
if retention_policy is not None:
pulumi.set(__self__, "retention_policy", retention_policy)
if trust_policy is not None:
pulumi.set(__self__, "trust_policy", trust_policy)
@property
@pulumi.getter(name="quarantinePolicy")
def quarantine_policy(self) -> Optional[pulumi.Input['QuarantinePolicyArgs']]:
"""
The quarantine policy for a container registry.
"""
return pulumi.get(self, "quarantine_policy")
@quarantine_policy.setter
def quarantine_policy(self, value: Optional[pulumi.Input['QuarantinePolicyArgs']]):
pulumi.set(self, "quarantine_policy", value)
@property
@pulumi.getter(name="retentionPolicy")
def retention_policy(self) -> Optional[pulumi.Input['RetentionPolicyArgs']]:
"""
The retention policy for a container registry.
"""
return pulumi.get(self, "retention_policy")
@retention_policy.setter
def retention_policy(self, value: Optional[pulumi.Input['RetentionPolicyArgs']]):
pulumi.set(self, "retention_policy", value)
@property
@pulumi.getter(name="trustPolicy")
def trust_policy(self) -> Optional[pulumi.Input['TrustPolicyArgs']]:
"""
The content trust policy for a container registry.
"""
return pulumi.get(self, "trust_policy")
@trust_policy.setter
def trust_policy(self, value: Optional[pulumi.Input['TrustPolicyArgs']]):
pulumi.set(self, "trust_policy", value)
@pulumi.input_type
class PrivateEndpointArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
The Private Endpoint resource.
:param pulumi.Input[str] id: This is the private endpoint resource created with the Microsoft.Network resource provider.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
This is the private endpoint resource created with the Microsoft.Network resource provider.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[Union[str, 'ActionsRequired']]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'ConnectionStatus']]] = None):
"""
The state of a private link service connection.
:param pulumi.Input[Union[str, 'ActionsRequired']] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The description for the connection status. For example, if the connection is rejected, it can indicate the reason for rejection.
:param pulumi.Input[Union[str, 'ConnectionStatus']] status: The private link service connection status.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[Union[str, 'ActionsRequired']]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[Union[str, 'ActionsRequired']]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description for the connection status. For example, if the connection is rejected, it can indicate the reason for rejection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'ConnectionStatus']]]:
"""
The private link service connection status.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'ConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class QuarantinePolicyArgs:
def __init__(__self__, *,
status: Optional[pulumi.Input[Union[str, 'PolicyStatus']]] = None):
"""
The quarantine policy for a container registry.
:param pulumi.Input[Union[str, 'PolicyStatus']] status: The value that indicates whether the policy is enabled or not.
"""
if status is None:
status = 'disabled'
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PolicyStatus']]]:
"""
The value that indicates whether the policy is enabled or not.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PolicyStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class RetentionPolicyArgs:
def __init__(__self__, *,
days: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[Union[str, 'PolicyStatus']]] = None):
"""
The retention policy for a container registry.
:param pulumi.Input[int] days: The number of days to retain an untagged manifest after which it gets purged.
:param pulumi.Input[Union[str, 'PolicyStatus']] status: The value that indicates whether the policy is enabled or not.
"""
if days is None:
days = 7
if days is not None:
pulumi.set(__self__, "days", days)
if status is None:
status = 'disabled'
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def days(self) -> Optional[pulumi.Input[int]]:
"""
The number of days to retain an untagged manifest after which it gets purged.
"""
return pulumi.get(self, "days")
@days.setter
def days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "days", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PolicyStatus']]]:
"""
The value that indicates whether the policy is enabled or not.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PolicyStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class SecretObjectArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[Union[str, 'SecretObjectType']]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Describes the properties of a secret object value.
:param pulumi.Input[Union[str, 'SecretObjectType']] type: The type of the secret object which determines how the value of the secret object has to be
interpreted.
:param pulumi.Input[str] value: The value of the secret. The format of this value will be determined
based on the type of the secret object. If the type is Opaque, the value will be
used as is without any modification.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'SecretObjectType']]]:
"""
The type of the secret object which determines how the value of the secret object has to be
interpreted.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'SecretObjectType']]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the secret. The format of this value will be determined
based on the type of the secret object. If the type is Opaque, the value will be
used as is without any modification.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SetValueArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
is_secret: Optional[pulumi.Input[bool]] = None):
"""
The properties of an overridable value that can be passed to a task template.
:param pulumi.Input[str] name: The name of the overridable value.
:param pulumi.Input[str] value: The overridable value.
:param pulumi.Input[bool] is_secret: Flag to indicate whether the value represents a secret or not.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if is_secret is None:
is_secret = False
if is_secret is not None:
pulumi.set(__self__, "is_secret", is_secret)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the overridable value.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The overridable value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="isSecret")
def is_secret(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to indicate whether the value represents a secret or not.
"""
return pulumi.get(self, "is_secret")
@is_secret.setter
def is_secret(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secret", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[Union[str, 'SkuName']]):
"""
The SKU of a container registry.
:param pulumi.Input[Union[str, 'SkuName']] name: The SKU name of the container registry. Required for registry creation.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'SkuName']]:
"""
The SKU name of the container registry. Required for registry creation.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'SkuName']]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SourceControlAuthInfoArgs:
def __init__(__self__, *,
token: pulumi.Input[str],
expires_in: Optional[pulumi.Input[int]] = None,
refresh_token: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[str]] = None,
token_type: Optional[pulumi.Input[Union[str, 'TokenType']]] = None):
"""
The authorization properties for accessing the source code repository.
:param pulumi.Input[str] token: The access token used to access the source control provider.
:param pulumi.Input[int] expires_in: Time in seconds that the token remains valid
:param pulumi.Input[str] refresh_token: The refresh token used to refresh the access token.
:param pulumi.Input[str] scope: The scope of the access token.
:param pulumi.Input[Union[str, 'TokenType']] token_type: The type of Auth token.
"""
pulumi.set(__self__, "token", token)
if expires_in is not None:
pulumi.set(__self__, "expires_in", expires_in)
if refresh_token is not None:
pulumi.set(__self__, "refresh_token", refresh_token)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if token_type is not None:
pulumi.set(__self__, "token_type", token_type)
@property
@pulumi.getter
def token(self) -> pulumi.Input[str]:
"""
The access token used to access the source control provider.
"""
return pulumi.get(self, "token")
@token.setter
def token(self, value: pulumi.Input[str]):
pulumi.set(self, "token", value)
@property
@pulumi.getter(name="expiresIn")
def expires_in(self) -> Optional[pulumi.Input[int]]:
"""
Time in seconds that the token remains valid
"""
return pulumi.get(self, "expires_in")
@expires_in.setter
def expires_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expires_in", value)
@property
@pulumi.getter(name="refreshToken")
def refresh_token(self) -> Optional[pulumi.Input[str]]:
"""
The refresh token used to refresh the access token.
"""
return pulumi.get(self, "refresh_token")
@refresh_token.setter
def refresh_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "refresh_token", value)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input[str]]:
"""
The scope of the access token.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> Optional[pulumi.Input[Union[str, 'TokenType']]]:
"""
The type of Auth token.
"""
return pulumi.get(self, "token_type")
@token_type.setter
def token_type(self, value: Optional[pulumi.Input[Union[str, 'TokenType']]]):
pulumi.set(self, "token_type", value)
@pulumi.input_type
class SourcePropertiesArgs:
def __init__(__self__, *,
repository_url: pulumi.Input[str],
source_control_type: pulumi.Input[Union[str, 'SourceControlType']],
branch: Optional[pulumi.Input[str]] = None,
source_control_auth_properties: Optional[pulumi.Input['AuthInfoArgs']] = None):
"""
The properties of the source code repository.
:param pulumi.Input[str] repository_url: The full URL to the source code repository
:param pulumi.Input[Union[str, 'SourceControlType']] source_control_type: The type of source control service.
:param pulumi.Input[str] branch: The branch name of the source code.
:param pulumi.Input['AuthInfoArgs'] source_control_auth_properties: The authorization properties for accessing the source code repository and to set up
webhooks for notifications.
"""
pulumi.set(__self__, "repository_url", repository_url)
pulumi.set(__self__, "source_control_type", source_control_type)
if branch is not None:
pulumi.set(__self__, "branch", branch)
if source_control_auth_properties is not None:
pulumi.set(__self__, "source_control_auth_properties", source_control_auth_properties)
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> pulumi.Input[str]:
"""
The full URL to the source code repository
"""
return pulumi.get(self, "repository_url")
@repository_url.setter
def repository_url(self, value: pulumi.Input[str]):
pulumi.set(self, "repository_url", value)
@property
@pulumi.getter(name="sourceControlType")
def source_control_type(self) -> pulumi.Input[Union[str, 'SourceControlType']]:
"""
The type of source control service.
"""
return pulumi.get(self, "source_control_type")
@source_control_type.setter
def source_control_type(self, value: pulumi.Input[Union[str, 'SourceControlType']]):
pulumi.set(self, "source_control_type", value)
@property
@pulumi.getter
def branch(self) -> Optional[pulumi.Input[str]]:
"""
The branch name of the source code.
"""
return pulumi.get(self, "branch")
@branch.setter
def branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "branch", value)
@property
@pulumi.getter(name="sourceControlAuthProperties")
def source_control_auth_properties(self) -> Optional[pulumi.Input['AuthInfoArgs']]:
"""
The authorization properties for accessing the source code repository and to set up
webhooks for notifications.
"""
return pulumi.get(self, "source_control_auth_properties")
@source_control_auth_properties.setter
def source_control_auth_properties(self, value: Optional[pulumi.Input['AuthInfoArgs']]):
pulumi.set(self, "source_control_auth_properties", value)
@pulumi.input_type
class SourceRegistryCredentialsArgs:
def __init__(__self__, *,
login_mode: Optional[pulumi.Input[Union[str, 'SourceRegistryLoginMode']]] = None):
"""
Describes the credential parameters for accessing the source registry.
:param pulumi.Input[Union[str, 'SourceRegistryLoginMode']] login_mode: The authentication mode which determines the source registry login scope. The credentials for the source registry
will be generated using the given scope. These credentials will be used to log in to
the source registry during the run.
"""
if login_mode is not None:
pulumi.set(__self__, "login_mode", login_mode)
@property
@pulumi.getter(name="loginMode")
def login_mode(self) -> Optional[pulumi.Input[Union[str, 'SourceRegistryLoginMode']]]:
"""
The authentication mode which determines the source registry login scope. The credentials for the source registry
will be generated using the given scope. These credentials will be used to log in to
the source registry during the run.
"""
return pulumi.get(self, "login_mode")
@login_mode.setter
def login_mode(self, value: Optional[pulumi.Input[Union[str, 'SourceRegistryLoginMode']]]):
pulumi.set(self, "login_mode", value)
@pulumi.input_type
class SourceRepositoryPropertiesArgs:
def __init__(__self__, *,
repository_url: pulumi.Input[str],
source_control_type: pulumi.Input[Union[str, 'SourceControlType']],
is_commit_trigger_enabled: Optional[pulumi.Input[bool]] = None,
source_control_auth_properties: Optional[pulumi.Input['SourceControlAuthInfoArgs']] = None):
"""
The properties of the source code repository.
:param pulumi.Input[str] repository_url: The full URL to the source code repository
:param pulumi.Input[Union[str, 'SourceControlType']] source_control_type: The type of source control service.
:param pulumi.Input[bool] is_commit_trigger_enabled: The value of this property indicates whether the source control commit trigger is enabled or not.
:param pulumi.Input['SourceControlAuthInfoArgs'] source_control_auth_properties: The authorization properties for accessing the source code repository.
"""
pulumi.set(__self__, "repository_url", repository_url)
pulumi.set(__self__, "source_control_type", source_control_type)
if is_commit_trigger_enabled is None:
is_commit_trigger_enabled = False
if is_commit_trigger_enabled is not None:
pulumi.set(__self__, "is_commit_trigger_enabled", is_commit_trigger_enabled)
if source_control_auth_properties is not None:
pulumi.set(__self__, "source_control_auth_properties", source_control_auth_properties)
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> pulumi.Input[str]:
"""
The full URL to the source code repository
"""
return pulumi.get(self, "repository_url")
@repository_url.setter
def repository_url(self, value: pulumi.Input[str]):
pulumi.set(self, "repository_url", value)
@property
@pulumi.getter(name="sourceControlType")
def source_control_type(self) -> pulumi.Input[Union[str, 'SourceControlType']]:
"""
The type of source control service.
"""
return pulumi.get(self, "source_control_type")
@source_control_type.setter
def source_control_type(self, value: pulumi.Input[Union[str, 'SourceControlType']]):
pulumi.set(self, "source_control_type", value)
@property
@pulumi.getter(name="isCommitTriggerEnabled")
def is_commit_trigger_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value of this property indicates whether the source control commit trigger is enabled or not.
"""
return pulumi.get(self, "is_commit_trigger_enabled")
@is_commit_trigger_enabled.setter
def is_commit_trigger_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_commit_trigger_enabled", value)
@property
@pulumi.getter(name="sourceControlAuthProperties")
def source_control_auth_properties(self) -> Optional[pulumi.Input['SourceControlAuthInfoArgs']]:
"""
The authorization properties for accessing the source code repository.
"""
return pulumi.get(self, "source_control_auth_properties")
@source_control_auth_properties.setter
def source_control_auth_properties(self, value: Optional[pulumi.Input['SourceControlAuthInfoArgs']]):
pulumi.set(self, "source_control_auth_properties", value)
@pulumi.input_type
class SourceTriggerArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
source_repository: pulumi.Input['SourcePropertiesArgs'],
source_trigger_events: pulumi.Input[Sequence[pulumi.Input[Union[str, 'SourceTriggerEvent']]]],
status: Optional[pulumi.Input[Union[str, 'TriggerStatus']]] = None):
"""
The properties of a source based trigger.
:param pulumi.Input[str] name: The name of the trigger.
:param pulumi.Input['SourcePropertiesArgs'] source_repository: The properties that describe the source (code) for the task.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'SourceTriggerEvent']]]] source_trigger_events: The source event corresponding to the trigger.
:param pulumi.Input[Union[str, 'TriggerStatus']] status: The current status of the trigger.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "source_repository", source_repository)
pulumi.set(__self__, "source_trigger_events", source_trigger_events)
if status is None:
status = 'Enabled'
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the trigger.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sourceRepository")
def source_repository(self) -> pulumi.Input['SourcePropertiesArgs']:
"""
The properties that describe the source (code) for the task.
"""
return pulumi.get(self, "source_repository")
@source_repository.setter
def source_repository(self, value: pulumi.Input['SourcePropertiesArgs']):
pulumi.set(self, "source_repository", value)
@property
@pulumi.getter(name="sourceTriggerEvents")
def source_trigger_events(self) -> pulumi.Input[Sequence[pulumi.Input[Union[str, 'SourceTriggerEvent']]]]:
"""
The source event corresponding to the trigger.
"""
return pulumi.get(self, "source_trigger_events")
@source_trigger_events.setter
def source_trigger_events(self, value: pulumi.Input[Sequence[pulumi.Input[Union[str, 'SourceTriggerEvent']]]]):
pulumi.set(self, "source_trigger_events", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'TriggerStatus']]]:
"""
The current status of the trigger.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'TriggerStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class StorageAccountPropertiesArgs:
def __init__(__self__, *,
id: pulumi.Input[str]):
"""
The properties of a storage account for a container registry. Only applicable to Classic SKU.
:param pulumi.Input[str] id: The resource ID of the storage account.
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
The resource ID of the storage account.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@pulumi.input_type
class SyncPropertiesArgs:
def __init__(__self__, *,
message_ttl: pulumi.Input[str],
token_id: pulumi.Input[str],
schedule: Optional[pulumi.Input[str]] = None,
sync_window: Optional[pulumi.Input[str]] = None):
"""
The sync properties of the connected registry with its parent.
:param pulumi.Input[str] message_ttl: The period of time for which a message is available to sync before it is expired. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
:param pulumi.Input[str] token_id: The resource ID of the ACR token used to authenticate the connected registry to its parent during sync.
:param pulumi.Input[str] schedule: The cron expression indicating the schedule that the connected registry will sync with its parent.
:param pulumi.Input[str] sync_window: The time window during which sync is enabled for each schedule occurrence. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S as per ISO8601.
"""
pulumi.set(__self__, "message_ttl", message_ttl)
pulumi.set(__self__, "token_id", token_id)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
if sync_window is not None:
pulumi.set(__self__, "sync_window", sync_window)
@property
@pulumi.getter(name="messageTtl")
def message_ttl(self) -> pulumi.Input[str]:
"""
        The period of time for which a message is available to sync before it expires. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S, as per ISO 8601.
"""
return pulumi.get(self, "message_ttl")
@message_ttl.setter
def message_ttl(self, value: pulumi.Input[str]):
pulumi.set(self, "message_ttl", value)
@property
@pulumi.getter(name="tokenId")
def token_id(self) -> pulumi.Input[str]:
"""
The resource ID of the ACR token used to authenticate the connected registry to its parent during sync.
"""
return pulumi.get(self, "token_id")
@token_id.setter
def token_id(self, value: pulumi.Input[str]):
pulumi.set(self, "token_id", value)
@property
@pulumi.getter
def schedule(self) -> Optional[pulumi.Input[str]]:
"""
        The cron expression indicating the schedule on which the connected registry will sync with its parent.
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule", value)
@property
@pulumi.getter(name="syncWindow")
def sync_window(self) -> Optional[pulumi.Input[str]]:
"""
        The time window during which sync is enabled for each schedule occurrence. Specify the duration using the format P[n]Y[n]M[n]DT[n]H[n]M[n]S, as per ISO 8601.
"""
return pulumi.get(self, "sync_window")
@sync_window.setter
def sync_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sync_window", value)
@pulumi.input_type
class TaskRunRequestArgs:
def __init__(__self__, *,
task_id: pulumi.Input[str],
type: pulumi.Input[str],
agent_pool_name: Optional[pulumi.Input[str]] = None,
is_archive_enabled: Optional[pulumi.Input[bool]] = None,
log_template: Optional[pulumi.Input[str]] = None,
override_task_step_properties: Optional[pulumi.Input['OverrideTaskStepPropertiesArgs']] = None):
"""
The parameters for a task run request.
        :param pulumi.Input[str] task_id: The resource ID of the task against which the run has to be queued.
:param pulumi.Input[str] type: The type of the run request.
Expected value is 'TaskRunRequest'.
:param pulumi.Input[str] agent_pool_name: The dedicated agent pool for the run.
:param pulumi.Input[bool] is_archive_enabled: The value that indicates whether archiving is enabled for the run or not.
        :param pulumi.Input[str] log_template: The template that describes the repository and tag information for the run log artifact.
:param pulumi.Input['OverrideTaskStepPropertiesArgs'] override_task_step_properties: Set of overridable parameters that can be passed when running a Task.
"""
pulumi.set(__self__, "task_id", task_id)
pulumi.set(__self__, "type", 'TaskRunRequest')
if agent_pool_name is not None:
pulumi.set(__self__, "agent_pool_name", agent_pool_name)
if is_archive_enabled is None:
is_archive_enabled = False
if is_archive_enabled is not None:
pulumi.set(__self__, "is_archive_enabled", is_archive_enabled)
if log_template is not None:
pulumi.set(__self__, "log_template", log_template)
if override_task_step_properties is not None:
pulumi.set(__self__, "override_task_step_properties", override_task_step_properties)
@property
@pulumi.getter(name="taskId")
def task_id(self) -> pulumi.Input[str]:
"""
        The resource ID of the task against which the run has to be queued.
"""
return pulumi.get(self, "task_id")
@task_id.setter
def task_id(self, value: pulumi.Input[str]):
pulumi.set(self, "task_id", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of the run request.
Expected value is 'TaskRunRequest'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="agentPoolName")
def agent_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The dedicated agent pool for the run.
"""
return pulumi.get(self, "agent_pool_name")
@agent_pool_name.setter
def agent_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_pool_name", value)
@property
@pulumi.getter(name="isArchiveEnabled")
def is_archive_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
The value that indicates whether archiving is enabled for the run or not.
"""
return pulumi.get(self, "is_archive_enabled")
@is_archive_enabled.setter
def is_archive_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_archive_enabled", value)
@property
@pulumi.getter(name="logTemplate")
def log_template(self) -> Optional[pulumi.Input[str]]:
"""
        The template that describes the repository and tag information for the run log artifact.
"""
return pulumi.get(self, "log_template")
@log_template.setter
def log_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_template", value)
@property
@pulumi.getter(name="overrideTaskStepProperties")
def override_task_step_properties(self) -> Optional[pulumi.Input['OverrideTaskStepPropertiesArgs']]:
"""
Set of overridable parameters that can be passed when running a Task.
"""
return pulumi.get(self, "override_task_step_properties")
@override_task_step_properties.setter
def override_task_step_properties(self, value: Optional[pulumi.Input['OverrideTaskStepPropertiesArgs']]):
pulumi.set(self, "override_task_step_properties", value)
@pulumi.input_type
class TaskStepPropertiesArgs:
def __init__(__self__, *,
context_access_token: Optional[pulumi.Input[str]] = None,
context_path: Optional[pulumi.Input[str]] = None):
"""
Base properties for any task step.
        :param pulumi.Input[str] context_access_token: The token (Git PAT or SAS token of the storage account blob) associated with the context for a step.
        :param pulumi.Input[str] context_path: The URL (absolute or relative) of the source context for the task step.
"""
if context_access_token is not None:
pulumi.set(__self__, "context_access_token", context_access_token)
if context_path is not None:
pulumi.set(__self__, "context_path", context_path)
@property
@pulumi.getter(name="contextAccessToken")
def context_access_token(self) -> Optional[pulumi.Input[str]]:
"""
        The token (Git PAT or SAS token of the storage account blob) associated with the context for a step.
"""
return pulumi.get(self, "context_access_token")
@context_access_token.setter
def context_access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "context_access_token", value)
@property
@pulumi.getter(name="contextPath")
def context_path(self) -> Optional[pulumi.Input[str]]:
"""
        The URL (absolute or relative) of the source context for the task step.
"""
return pulumi.get(self, "context_path")
@context_path.setter
def context_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "context_path", value)
@pulumi.input_type
class TimerTriggerArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
schedule: pulumi.Input[str],
status: Optional[pulumi.Input[Union[str, 'TriggerStatus']]] = None):
"""
The properties of a timer trigger.
:param pulumi.Input[str] name: The name of the trigger.
        :param pulumi.Input[str] schedule: The CRON expression for the task schedule.
        :param pulumi.Input[Union[str, 'TriggerStatus']] status: The current status of the trigger.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "schedule", schedule)
if status is None:
status = 'Enabled'
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the trigger.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def schedule(self) -> pulumi.Input[str]:
"""
        The CRON expression for the task schedule.
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: pulumi.Input[str]):
pulumi.set(self, "schedule", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'TriggerStatus']]]:
"""
        The current status of the trigger.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'TriggerStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class TokenCertificateArgs:
def __init__(__self__, *,
encoded_pem_certificate: Optional[pulumi.Input[str]] = None,
expiry: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[Union[str, 'TokenCertificateName']]] = None,
thumbprint: Optional[pulumi.Input[str]] = None):
"""
The properties of a certificate used for authenticating a token.
        :param pulumi.Input[str] encoded_pem_certificate: Base64-encoded string of the public certificate, in PEM format, that will be used for authenticating the token.
        :param pulumi.Input[str] expiry: The expiry datetime of the certificate.
        :param pulumi.Input[Union[str, 'TokenCertificateName']] name: The name of the certificate.
        :param pulumi.Input[str] thumbprint: The thumbprint of the certificate.
"""
if encoded_pem_certificate is not None:
pulumi.set(__self__, "encoded_pem_certificate", encoded_pem_certificate)
if expiry is not None:
pulumi.set(__self__, "expiry", expiry)
if name is not None:
pulumi.set(__self__, "name", name)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="encodedPemCertificate")
def encoded_pem_certificate(self) -> Optional[pulumi.Input[str]]:
"""
        Base64-encoded string of the public certificate, in PEM format, that will be used for authenticating the token.
"""
return pulumi.get(self, "encoded_pem_certificate")
@encoded_pem_certificate.setter
def encoded_pem_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encoded_pem_certificate", value)
@property
@pulumi.getter
def expiry(self) -> Optional[pulumi.Input[str]]:
"""
The expiry datetime of the certificate.
"""
return pulumi.get(self, "expiry")
@expiry.setter
def expiry(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiry", value)
@property
@pulumi.getter
    def name(self) -> Optional[pulumi.Input[Union[str, 'TokenCertificateName']]]:
        """
        The name of the certificate.
        """
        return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'TokenCertificateName']]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
The thumbprint of the certificate.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
@pulumi.input_type
class TokenCredentialsPropertiesArgs:
def __init__(__self__, *,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input['TokenCertificateArgs']]]] = None,
passwords: Optional[pulumi.Input[Sequence[pulumi.Input['TokenPasswordArgs']]]] = None):
"""
        The properties of the credentials that can be used for authenticating the token.
        :param pulumi.Input[Sequence[pulumi.Input['TokenCertificateArgs']]] certificates: The certificates used for authenticating the token.
        :param pulumi.Input[Sequence[pulumi.Input['TokenPasswordArgs']]] passwords: The passwords used for authenticating the token.
"""
if certificates is not None:
pulumi.set(__self__, "certificates", certificates)
if passwords is not None:
pulumi.set(__self__, "passwords", passwords)
@property
@pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TokenCertificateArgs']]]]:
        """
        The certificates used for authenticating the token.
        """
        return pulumi.get(self, "certificates")
@certificates.setter
def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TokenCertificateArgs']]]]):
pulumi.set(self, "certificates", value)
@property
@pulumi.getter
    def passwords(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TokenPasswordArgs']]]]:
        """
        The passwords used for authenticating the token.
        """
        return pulumi.get(self, "passwords")
@passwords.setter
def passwords(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TokenPasswordArgs']]]]):
pulumi.set(self, "passwords", value)
@pulumi.input_type
class TokenPasswordArgs:
def __init__(__self__, *,
creation_time: Optional[pulumi.Input[str]] = None,
expiry: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[Union[str, 'TokenPasswordName']]] = None):
"""
The password that will be used for authenticating the token of a container registry.
:param pulumi.Input[str] creation_time: The creation datetime of the password.
:param pulumi.Input[str] expiry: The expiry datetime of the password.
        :param pulumi.Input[Union[str, 'TokenPasswordName']] name: The password name, "password1" or "password2".
"""
if creation_time is not None:
pulumi.set(__self__, "creation_time", creation_time)
if expiry is not None:
pulumi.set(__self__, "expiry", expiry)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> Optional[pulumi.Input[str]]:
"""
The creation datetime of the password.
"""
return pulumi.get(self, "creation_time")
@creation_time.setter
def creation_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_time", value)
@property
@pulumi.getter
def expiry(self) -> Optional[pulumi.Input[str]]:
"""
The expiry datetime of the password.
"""
return pulumi.get(self, "expiry")
@expiry.setter
def expiry(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiry", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[Union[str, 'TokenPasswordName']]]:
"""
        The password name, "password1" or "password2".
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[Union[str, 'TokenPasswordName']]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class TriggerPropertiesArgs:
def __init__(__self__, *,
base_image_trigger: Optional[pulumi.Input['BaseImageTriggerArgs']] = None,
source_triggers: Optional[pulumi.Input[Sequence[pulumi.Input['SourceTriggerArgs']]]] = None,
timer_triggers: Optional[pulumi.Input[Sequence[pulumi.Input['TimerTriggerArgs']]]] = None):
"""
The properties of a trigger.
:param pulumi.Input['BaseImageTriggerArgs'] base_image_trigger: The trigger based on base image dependencies.
:param pulumi.Input[Sequence[pulumi.Input['SourceTriggerArgs']]] source_triggers: The collection of triggers based on source code repository.
:param pulumi.Input[Sequence[pulumi.Input['TimerTriggerArgs']]] timer_triggers: The collection of timer triggers.
"""
if base_image_trigger is not None:
pulumi.set(__self__, "base_image_trigger", base_image_trigger)
if source_triggers is not None:
pulumi.set(__self__, "source_triggers", source_triggers)
if timer_triggers is not None:
pulumi.set(__self__, "timer_triggers", timer_triggers)
@property
@pulumi.getter(name="baseImageTrigger")
def base_image_trigger(self) -> Optional[pulumi.Input['BaseImageTriggerArgs']]:
"""
The trigger based on base image dependencies.
"""
return pulumi.get(self, "base_image_trigger")
@base_image_trigger.setter
def base_image_trigger(self, value: Optional[pulumi.Input['BaseImageTriggerArgs']]):
pulumi.set(self, "base_image_trigger", value)
@property
@pulumi.getter(name="sourceTriggers")
def source_triggers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SourceTriggerArgs']]]]:
"""
The collection of triggers based on source code repository.
"""
return pulumi.get(self, "source_triggers")
@source_triggers.setter
def source_triggers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SourceTriggerArgs']]]]):
pulumi.set(self, "source_triggers", value)
@property
@pulumi.getter(name="timerTriggers")
def timer_triggers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TimerTriggerArgs']]]]:
"""
The collection of timer triggers.
"""
return pulumi.get(self, "timer_triggers")
@timer_triggers.setter
def timer_triggers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TimerTriggerArgs']]]]):
pulumi.set(self, "timer_triggers", value)
@pulumi.input_type
class TrustPolicyArgs:
def __init__(__self__, *,
status: Optional[pulumi.Input[Union[str, 'PolicyStatus']]] = None,
type: Optional[pulumi.Input[Union[str, 'TrustPolicyType']]] = None):
"""
The content trust policy for a container registry.
:param pulumi.Input[Union[str, 'PolicyStatus']] status: The value that indicates whether the policy is enabled or not.
:param pulumi.Input[Union[str, 'TrustPolicyType']] type: The type of trust policy.
"""
if status is None:
status = 'disabled'
if status is not None:
pulumi.set(__self__, "status", status)
if type is None:
type = 'Notary'
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PolicyStatus']]]:
"""
The value that indicates whether the policy is enabled or not.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PolicyStatus']]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'TrustPolicyType']]]:
"""
The type of trust policy.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'TrustPolicyType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class UserIdentityPropertiesArgs:
def __init__(__self__, *,
client_id: Optional[pulumi.Input[str]] = None,
principal_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] client_id: The client id of user assigned identity.
:param pulumi.Input[str] principal_id: The principal id of user assigned identity.
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if principal_id is not None:
pulumi.set(__self__, "principal_id", principal_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The client id of user assigned identity.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> Optional[pulumi.Input[str]]:
"""
The principal id of user assigned identity.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_id", value)
@pulumi.input_type
class VirtualNetworkRuleArgs:
def __init__(__self__, *,
virtual_network_resource_id: pulumi.Input[str],
action: Optional[pulumi.Input[Union[str, 'Action']]] = None):
"""
Virtual network rule.
:param pulumi.Input[str] virtual_network_resource_id: Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
        :param pulumi.Input[Union[str, 'Action']] action: The action of the virtual network rule.
"""
pulumi.set(__self__, "virtual_network_resource_id", virtual_network_resource_id)
if action is None:
action = 'Allow'
if action is not None:
pulumi.set(__self__, "action", action)
@property
@pulumi.getter(name="virtualNetworkResourceId")
def virtual_network_resource_id(self) -> pulumi.Input[str]:
"""
Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}.
"""
return pulumi.get(self, "virtual_network_resource_id")
@virtual_network_resource_id.setter
def virtual_network_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "virtual_network_resource_id", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[Union[str, 'Action']]]:
"""
        The action of the virtual network rule.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[Union[str, 'Action']]]):
pulumi.set(self, "action", value)
|
"""init"""
NAME = "test01"
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add volume_targets table
Revision ID: 1a59178ebdf6
Revises: daa1ba02d98
Create Date: 2016-02-25 11:25:29.836535
"""
# revision identifiers, used by Alembic.
revision = '1a59178ebdf6'
down_revision = 'daa1ba02d98'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('volume_targets',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=True),
sa.Column('volume_type', sa.String(length=64),
nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('boot_index', sa.Integer(), nullable=True),
sa.Column('volume_id',
sa.String(length=36), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('node_id', 'boot_index',
name='uniq_volumetargets0node_id0'
'boot_index'),
sa.UniqueConstraint('uuid',
name='uniq_volumetargets0uuid'),
mysql_charset='utf8',
mysql_engine='InnoDB')
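

# No downgrade is defined for this migration. If one were needed, a minimal
# sketch (assuming no data in the table needs preserving) would be:
#
#     def downgrade():
#         op.drop_table('volume_targets')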
|
#!/usr/bin/env python
"""
_SiblingSubscriptionsComplete_
Oracle implementation of Subscription.SiblingSubscriptionsComplete
"""
from WMCore.WMBS.MySQL.Subscriptions.SiblingSubscriptionsComplete import \
SiblingSubscriptionsComplete as SiblingCompleteMySQL
class SiblingSubscriptionsComplete(SiblingCompleteMySQL):
pass
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from pathlib import Path
from typing import List
class _MarkdownLink:
"""Handle to a markdown link, for easy existence test and printing
(external links are not tested)
"""
def __init__(self, root: Path, file: Path, string: str, link: str) -> None:
self._root = root
self._file = file
self._string = string
self._link = link
def exists(self) -> bool:
if self._link.startswith("http"):
# We don't check external urls.
return True
link = self._link.split("#")[0]
if not link:
return False
fullpath = self._root / self._file.parent / link
return fullpath.exists()
def __repr__(self) -> str:
return f"[{self._link}]({self._string}) in file {self._file}"
def _get_root() -> Path:
root = Path(__file__).parent.parent.absolute()
assert (root / "setup.py").exists(), f"Wrong root folder: {root}"
return root
def _get_markdown_files(root: Path) -> List[Path]:
return [md for pattern in ("*.md", "submitit/**/*.md", "docs/**/*.md") for md in root.glob(pattern)]
def _get_all_markdown_links(root: Path, files: List[Path]) -> List[_MarkdownLink]:
"""Returns a list of all existing markdown links"""
pattern = re.compile(r"\[(?P<string>.+?)\]\((?P<link>\S+?)\)")
links = []
for file in files:
for match in pattern.finditer(file.read_text()):
links.append(_MarkdownLink(root, file, match.group("string"), match.group("link")))
return links
def test_assert_markdown_links_not_broken() -> None:
root = _get_root()
files = _get_markdown_files(root)
assert len(files) > 3
links = _get_all_markdown_links(root, files)
assert len(links) > 5, "There should be several hyperlinks!"
broken_links = [l for l in links if not l.exists()]
assert not broken_links
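

if __name__ == "__main__":
    # Convenience sketch: run the link check directly (python <this file>) instead
    # of through pytest; it simply invokes the test defined above.
    test_assert_markdown_links_not_broken()
    print("All markdown links resolved.")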
|