content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_csi_snapshot_with_invalid_param(client, volume_name):  # NOQA
    """
    Context:
    After deploy the CSI snapshot CRDs, Controller at
    https://longhorn.io/docs/1.2.3/snapshots-and-backups/
    csi-snapshot-support/enable-csi-snapshot-support/
    Create VolumeSnapshotClass with type=invalid
    - invalid (type=invalid)
    Test the extend CSI snapshot type=invalid behavior to Longhorn snapshot
    Steps:
    0. Create Longhorn volume test-vol
       - Size 5GB
       - Create PV/PVC for the Longhorn volume
       - Write data into volume
       - Setup backup store
    1. Test create CSI snapshot
       - Create VolumeSnapshot with class invalid
       - Verify that the volumesnapshot object is not ready
    """
    # NOTE(review): no implementation is visible in this chunk -- the test
    # body appears to be truncated by the export; confirm against the
    # original file before relying on this test.
def ScreenToMouse(pt):
    """Convert a point in screen coordinates to mouse coordinates.

    Mouse coordinates (as consumed by the Windows absolute-mouse APIs) are
    specified as a fraction of the screen dimensions normalized to 16 bits:
    0 represents the far left/top of the screen, 65535 the far right/bottom.
    The screen size is read once on first use and cached; it is assumed not
    to change afterwards.

    Args:
        pt: (x, y) point in screen (pixel) coordinates.

    Returns:
        (x, y) tuple of ints in normalized 16-bit mouse coordinates.
    """
    # Initialize the screen dimensions lazily on the first call.  getattr()
    # avoids an AttributeError when the cache attribute was never seeded at
    # module load time.
    if not getattr(ScreenToMouse, '_SCREEN_DIMENSIONS', None):
        desktop = win32gui.GetClientRect(win32gui.GetDesktopWindow())
        ScreenToMouse._SCREEN_DIMENSIONS = (desktop[2], desktop[3])
    # Floor division: the Windows mouse APIs expect integer coordinates, and
    # true division would return floats on Python 3.
    return ((65535 * pt[0]) // ScreenToMouse._SCREEN_DIMENSIONS[0],
            (65535 * pt[1]) // ScreenToMouse._SCREEN_DIMENSIONS[1])
def triadic_closure_algorithm():
    """Return an HTML-rendered explanation of how to perform triadic closure."""
    explanation = """
I would suggest the following strategy:
1. Pick a node
1. For every pair of neighbors:
1. If neighbors are not connected,
then this is a potential triangle to close.
This strategy gives you potential triadic closures
given a "center" node `n`.
The other way is to trace out a path two degrees out
and ask whether the terminal node is a neighbor
of the starting node.
If not, then we have another triadic closure to make.
"""
    return render_html(explanation)
def export_reaction_subsystem_and_pathways(model, csv_fn):
    """
    Write a semicolon-separated csv-file used to curate subsystem annotation.

    One row is written per reaction, with the columns "Reaction ID",
    "Reaction name", one column per annotation key below, and two empty
    columns ("curated pathway" / "curated subsystem") to be filled in by
    hand.

    Parameters
    ----------
    model :
        Model whose ``reactions`` are exported.  Each reaction must expose
        ``id``, ``name`` and an ``annotation`` mapping.
    csv_fn : str
        Path of the csv-file to write.
    """
    annotation_keys = ["kegg.reaction", "biocyc", "kegg.pathway", "kegg.subsystem",
                       "biocyc.subsystem1", "biocyc.subsystem2", "subsystem"]
    reactions_list = []
    for reaction in model.reactions:
        row = [reaction.id, reaction.name]
        for key in annotation_keys:
            try:
                ann = reaction.annotation[key]
            except KeyError:
                row.append(None)
            else:
                # Annotation values may be a single string or a list of strings.
                row.append(ann if isinstance(ann, str) else ", ".join(ann))
        reactions_list.append(row)
    df = pd.DataFrame(reactions_list,
                      columns=["Reaction ID", "Reaction name"] + annotation_keys)
    # Add empty columns for manual curation.
    df["curated pathway"] = None
    df["curated subsystem"] = None
    # Debug print of df.head() removed; the function's contract is the file.
    df.to_csv(csv_fn, sep=";")
def _stat_to_tarinfo(
    base_path: str, arch_path: str, *, umask: Optional[int] = None, follow_link=True
) -> tarfile.TarInfo:
    """
    Convert a stat_result into a TarInfo structure.

    The entry is normalized for reproducible archives: ownership is forced to
    root/root, mtime is zeroed, and the archive name is made absolute.

    Parameters
    ----------
    base_path:
        Directory the file lives under on disk.
    arch_path:
        Path of the entry relative to ``base_path``; also used (normalized)
        as the entry name inside the archive.
    umask:
        Optional umask applied to the stat mode via ``_apply_umask``.
    follow_link:
        When True ``os.stat`` is used (symlinks are followed); otherwise
        ``os.lstat`` records the link itself.

    Raises
    ------
    TplBuildException
        If the file is none of: regular file, directory, FIFO, symlink,
        character device, block device (e.g. a socket).
    """
    tarinfo = tarfile.TarInfo()
    if follow_link:
        statres = os.stat(os.path.join(base_path, arch_path))
    else:
        statres = os.lstat(os.path.join(base_path, arch_path))
    linkname = ""
    stmd = statres.st_mode
    # Map the stat file type onto the corresponding tar entry type.
    if stat.S_ISREG(stmd):
        typ = tarfile.REGTYPE
    elif stat.S_ISDIR(stmd):
        typ = tarfile.DIRTYPE
    elif stat.S_ISFIFO(stmd):
        typ = tarfile.FIFOTYPE
    elif stat.S_ISLNK(stmd):
        typ = tarfile.SYMTYPE
        # Only symlink entries carry a link target.
        linkname = os.readlink(os.path.join(base_path, arch_path))
    elif stat.S_ISCHR(stmd):
        typ = tarfile.CHRTYPE
    elif stat.S_ISBLK(stmd):
        typ = tarfile.BLKTYPE
    else:
        raise TplBuildException("Unsupported file mode in context")
    # Normalize the archive name to an absolute path rooted at "/".
    if arch_path == ".":
        tarinfo.name = "/"
    elif arch_path.startswith("./"):
        tarinfo.name = arch_path[1:]
    else:
        tarinfo.name = "/" + arch_path
    tarinfo.mode = _apply_umask(stmd, umask)
    # Force deterministic ownership regardless of the on-disk owner.
    tarinfo.uid = 0
    tarinfo.gid = 0
    tarinfo.uname = "root"
    tarinfo.gname = "root"
    # Only regular files report a payload size.
    if typ == tarfile.REGTYPE:
        tarinfo.size = statres.st_size
    else:
        tarinfo.size = 0
    # Zeroed mtime keeps archives reproducible across rebuilds.
    tarinfo.mtime = 0
    tarinfo.type = typ
    tarinfo.linkname = linkname
    if typ in (tarfile.CHRTYPE, tarfile.BLKTYPE):
        # os.major/os.minor are not available on every platform.
        if hasattr(os, "major") and hasattr(os, "minor"):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
def parsed_args_gen():
    """Return a factory that builds an emulated parsed_args object.

    The returned callable accepts arbitrary keyword arguments and wraps them
    in an ``AttrDict`` so that options can be accessed as attributes, like an
    ``argparse`` namespace.

    Returns:
        callable: ``generator(**kwargs) -> AttrDict``
    """
    def generator(**kwargs):
        # kwargs is already a fresh dict of the supplied options; there is no
        # need to copy it key by key.
        return AttrDict(kwargs)
    return generator
def floordiv(a, b):
    """Compute the floordiv of two expressions.

    Parameters
    ----------
    a : PrimExpr
        The left hand operand
    b : PrimExpr
        The right hand operand

    Returns
    -------
    res : PrimExpr
        The result expression.
    """
    # Thin wrapper delegating to the FFI-registered operator implementation.
    return _ffi_api._OpFloorDiv(a, b)
def optional(converter: typing.Callable) -> typing.Any:
    """
    Wrap *converter* so that ``None`` and ``MISSING`` pass through untouched.

    A modified version of the attrs ``optional`` decorator supporting both
    ``None`` and ``MISSING`` sentinels.  Type annotations are inferred from
    the wrapped converter's signature when it has one.

    args:
        converter: The convertor that is used for the non-None or MISSING
    """
    def optional_converter(val) -> typing.Any:
        # Sentinel values are returned unchanged.
        if val is None or val is MISSING:
            return val
        return converter(val)

    try:
        sig = inspect.signature(converter)
    except (ValueError, TypeError):  # inspect failed
        sig = None
    if sig:
        params = list(sig.parameters.values())
        first = params[0] if params else None
        if first is not None and first.annotation is not inspect.Parameter.empty:
            optional_converter.__annotations__["val"] = typing.Optional[first.annotation]
        if sig.return_annotation is not inspect.Signature.empty:
            optional_converter.__annotations__["return"] = typing.Optional[sig.return_annotation]
    return optional_converter
def convert_symbol(mpl_symbol):
    """Convert an mpl marker symbol to the equivalent plotly symbol.

    Args:
        mpl_symbol: A matplotlib marker symbol, or a list of them.

    Returns:
        The plotly symbol name, or a list of plotly symbol names when a list
        was given.  Symbols without a mapping fall back to ``'dot'``.
    """
    if isinstance(mpl_symbol, list):
        # Convert each element recursively so nested lists also work.
        return [convert_symbol(s) for s in mpl_symbol]
    # Single lookup with a default instead of membership test + indexing.
    return SYMBOL_MAP.get(mpl_symbol, 'dot')
def remove_none_dict(input_dict: Dict) -> Dict:
    """
    Build a copy of *input_dict* with every ``None``-valued entry dropped.

    :param input_dict: any dictionary in the world is OK
    :return: same dictionary but without None values
    """
    filtered = {}
    for key, value in input_dict.items():
        if value is not None:
            filtered[key] = value
    return filtered
def get_host(request, host_id, segment_id):
    """Return a single host.

    Looks up *host_id* within segment *segment_id* using the OpenStack
    connection built from *request*.
    """
    return openstack_connection(request).get_host(host_id, segment_id)
def object_main():
    """OBJECT DETECTION APP -- Streamlit entry point.

    Configures the page, injects CSS/HTML chrome (hidden footer, navbar,
    GitHub badge), then dispatches on the sidebar radio selection to the
    detection demo, the documentation view, the source-code view or the
    mAP score chart.

    NOTE(review): indentation was lost in the export of this file; the
    nesting below is reconstructed from Python grammar and should be
    confirmed against the original.
    """
    # Favicon
    favpath=os.path.join(os.path.dirname( __file__ ),'images','icons8-coronavirus-16.png')
    img1=Image.open(favpath)
    #st.set_page_config(layout='wide')
    st.set_page_config(layout='wide',page_title='Object detection',page_icon=img1,initial_sidebar_state = 'auto')
    #components.iframe("https://docs.streamlit.io/en/latest")
    # CSS snippet that hides the default Streamlit footer.
    hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
    # Collapse the main container's vertical padding.
    st.markdown("""
<style>
.css-18e3th9{
position: relative;
padding-bottom: 0px;
padding-top: 0px;
}
</style>""",unsafe_allow_html=True)
    st.markdown(hide_streamlit_style, unsafe_allow_html=True)
    st.markdown("""
<style>
div.css-18e3th9{
position: relative;
padding-bottom: 0px;
padding-top: 0px;
}
</style>
""",unsafe_allow_html=True)
    # Navbar styling: hover effects plus sliding underline indicators.
    st.markdown("""
<style>
nav{
position: relative;
display: flex;
width: 640px;
margin: 4rem auto;
}
nav.navbar a{
display: block;
width: 20%;
padding: .65rem 0;
color:rgb(255, 75, 75);
text-decoration: none;
text-align: center;
text-transform: uppercase;
}
.nav-underline, .nav-underline2{
position: absolute;
left: 0;
bottom: -1px;
width: 18%;
height: 2px;
background: #fff;
transition: all .3s ease-in-out;
}
.nav-underline2{
top: -1px !important;
}
nav a:hover{
font-size: 20px;
font-weight: 900;
transition: font-size .1s linear,
font-weight .1s linear;
color: rgb(220,20,60);
}
nav a:nth-child(1).current ~ .nav-underline{
left: 0;
}
nav a:nth-child(2).current ~ .nav-underline{
left: 20%;
}
nav a:nth-child(3).current ~ .nav-underline{
left: 40%;
}
nav a:nth-child(4).current ~ .nav-underline{
left: 60%;
}
nav a:nth-child(5).current ~ .nav-underline{
left: 80%;
}
nav a:nth-child(1):hover ~ .nav-underline{
left: 0;
}
nav a:nth-child(2):hover ~ .nav-underline{
left: 20%;
}
nav a:nth-child(3):hover ~ .nav-underline{
left: 40%;
}
nav a:nth-child(4):hover ~ .nav-underline{
left: 60%;
}
nav a:nth-child(5):hover ~ .nav-underline{
left: 80%;
}
nav a:nth-child(1).current ~ .nav-underline2{
left: 0;
}
nav a:nth-child(2).current ~ .nav-underline2{
left: 20%;
}
nav a:nth-child(3).current ~ .nav-underline2{
left: 40%;
}
nav a:nth-child(4).current ~ .nav-underline2{
left: 60%;
}
nav a:nth-child(5).current ~ .nav-underline2{
left: 80%;
}
nav a:nth-child(1):hover ~ .nav-underline2{
left: 0;
}
nav a:nth-child(2):hover ~ .nav-underline2{
left: 20%;
}
nav a:nth-child(3):hover ~ .nav-underline2{
left: 40%;
}
nav a:nth-child(4):hover ~ .nav-underline2{
left: 60%;
}
nav a:nth-child(5):hover ~ .nav-underline2{
left: 80%;
}
</style>
""",unsafe_allow_html=True)
    # The navbar itself (external links open in a new tab).
    st.markdown("""
<nav class="navbar">
<a href="#">Home</a>
<a href="https://github.com/Kaushal000/Streamlit-coronavirus-detection-app/wiki" target="_blank">WIKI</a>
<a href="#" class="current">Thesis</a>
<a href="https://github.com/Kaushal000/Streamlit-coronavirus-detection-app/tree/main/src" target="_blank">Source</a>
<a href="https://colab.research.google.com/drive/1KszU9b3t-T_Ia5GNjiy_uuktOnydlEID#scrollTo=O2w9w1Ye_nk1" target="_blank">Train</a>
<div class="nav-underline"></div>
<div class="nav-underline2"></div>
</nav>""",unsafe_allow_html=True)
    # Bare string rendered by Streamlit "magic" as a GitHub badge.
    """
[![GitHub][github_badge]][github_link]
[github_badge]: https://badgen.net/badge/icon/GitHub?icon=github&color=black&label
[github_link]: https://github.com/Kaushal000/Streamlit-coronavirus-detection-app
"""
    st.title("Coronavirus detection app")
    opt=st.sidebar.radio("Choose what to do",("Run the app","View documentation","View source code","Show mAP% score"))
    if opt=="Run the app":
        st.header("Object Detection")
        st.write("Object detection is a central algorithm in computer vision. The algorithm implemented below is YOLO (You Only Look Once), a state-of-the-art algorithm trained to identify thousands of objects types. It extracts objects from images and identifies them using OpenCV and Yolo. This task involves Deep Neural Networks(DNN), yolo trained model, yolo configuration and a dataset to detect objects.")
        # Detection thresholds are user-tunable from the sidebar.
        score_threshold = st.sidebar.slider("Confidence Threshold", 0.00,1.00,0.5,0.01)
        nms_threshold = st.sidebar.slider("NMS Threshold", 0.00, 1.00, 0.4, 0.01)
        choice = st.radio("", ("Show Demo", "Upload image and detect coronaviruses from image" ,"Upload video and detect coronaviruses form video"))
        st.write()
        if choice == "Upload image and detect coronaviruses from image":
            st.set_option('deprecation.showfileUploaderEncoding', False)
            image_file = st.file_uploader("Upload Image", type=['jpg','png','jpeg'])
            if image_file is not None:
                our_image = Image.open(image_file)
                st.info('Image uploaded')
                with st.spinner('Detecting objects and generating confidence scores...'):
                    time.sleep(5)
                    detect_objects(our_image,score_threshold,nms_threshold)
        elif choice== "Upload video and detect coronaviruses form video" :
            st.write()
            f=st.file_uploader("Upload Video",type='mp4')
            col1, col2, col3 = st.columns([5,20,1])
            if f is not None:
                # Spool the upload to a named temp file so OpenCV can open it
                # by path.
                tfile = tempfile.NamedTemporaryFile(delete=False)
                tfile.write(f.read())
                nm=tfile.name
                with col1:
                    st.write("")
                with col2:
                    getvideo(nm,score_threshold,nms_threshold)
                with col3:
                    st.write("")
        else :
            # "Show Demo": run detection on the bundled sample image.
            path=os.path.join(os.path.dirname( __file__ ),'images','coronavirus.jpg')
            our_image = Image.open(path)
            detect_objects(our_image,score_threshold,nms_threshold)
    # embed streamlit docs in a streamlit app
    elif opt=="View documentation":
        with st.spinner('Fetching documentation from github..'):
            time.sleep(5)
            content=requests.get('https://raw.githubusercontent.com/Kaushal000/Streamlit-coronavirus-detection-app/main/README.md').text
            st.markdown(content,unsafe_allow_html=True)
    elif opt=="View source code":
        pth=os.path.join(os.path.dirname( __file__ ),'app.py')
        p=Path(pth).read_text()
        st.code(p,language='python')
    else:
        # "Show mAP% score": training curve image with colored captions.
        col1,col2,col3=st.columns([11,20,10])
        st.markdown("""
<style>
.Red{
color:red;
}
.Blue{
color:blue;
}
</style>
""",unsafe_allow_html=True)
        with col1:
            st.markdown("""<br><br><br><br><br><br><br><br><br><br><br><br><br><br><br><br>
<div>
<strong>The <span class="Blue">blue</span> line indicates average loss percentage</strong> 👉
</div>
""",unsafe_allow_html=True)
        with col2:
            p=os.path.join(os.path.dirname(__file__),'images','cov.png')
            chart=Image.open(p)
            st.image(chart,use_column_width='auto')
        with col3:
            st.markdown(
"""<br><br><br><br>
<div>
👈 <strong>The <span class="Red">red</span> line indicates mAP% score</strong>
</div>
<br><br><br><br>
""",unsafe_allow_html=True)
    # NOTE(review): with the export's indentation lost, this sidebar block may
    # belong inside the preceding else-branch instead of the function body --
    # confirm against the original.
    st.sidebar.markdown(
"""<br><br>
<style>
.center {
margin: auto;
width: 50%;
padding: 10px;
color: rgb(255, 75, 75);
}
</style>
<h3 class="center">Presentation</h3>
<iframe src="https://onedrive.live.com/embed?resid=CC721E0634E29AC8%2113676&authkey=%21AJqlhggJ3vIp8MA&em=2&wdAr=1.7777777777777777" width="300px" height="263px" frameborder="0">This is an embedded <a target="_blank" href="https://office.com">Microsoft Office</a></iframe>
""",unsafe_allow_html=True)
def _GetAttachedDevices(blacklist_file, test_device):
    """Get all attached devices.

    Args:
        blacklist_file: Path of the device blacklist file, or a falsy value
            for no blacklist.
        test_device: Name of a specific device to use, or a falsy value to
            return every healthy attached device.

    Returns:
        A list of attached devices (a single-element list when test_device
        is given).

    Raises:
        device_errors.DeviceUnreachableError: if test_device was requested
            but is not attached.
        device_errors.NoDevicesError: if no devices are attached at all.
    """
    blacklist = (device_blacklist.Blacklist(blacklist_file)
                 if blacklist_file
                 else None)
    attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
    if test_device:
        # BUG FIX: the original rebound `test_device` to the filtered list
        # before formatting the error message, so the message always printed
        # "[]" instead of the requested device name.
        matching_devices = [d for d in attached_devices if d == test_device]
        if not matching_devices:
            raise device_errors.DeviceUnreachableError(
                'Did not find device %s among attached device. Attached devices: %s'
                % (test_device, ', '.join(attached_devices)))
        return matching_devices
    else:
        if not attached_devices:
            raise device_errors.NoDevicesError()
        return sorted(attached_devices)
def li_dong_2016_load_uy_profiles():
    """ Load and return the y-velocity profiles (digitized from Fig. 4b).

    Returns
    -------
    numpy.ndarray
        y-velocity values as a 1D array of floats (first CSV column).
    numpy.ndarray
        y positions as a 1D array of floats (second CSV column).

    NOTE(review): the order above matches the ``return uy, y`` statement;
    the original docstring listed the y positions first.  Confirm the column
    meaning against the digitized CSV.
    """
    filepath = DATADIR / 'li_dong_2016_fig4b.csv'
    # unpack=True transposes the two CSV columns into the two arrays.
    with open(filepath, 'r') as infile:
        uy, y = numpy.loadtxt(infile, delimiter=',', unpack=True)
    return uy, y
def msg_queue_mode(params):
    """
    Generate outgoing messages for `queue_mode_...` commands. The supported option is ``set``.

    Parameters
    ----------
    params : list
        List of parameters of the command. The first two elements of the list are expected to
        be ``mode`` and ``set`` keywords; the remaining elements are an
        alternating key/value list describing the queue mode.

    Returns
    -------
    str
        Name of the method from RE Manager API
    dict
        Dictionary of the method parameters

    Raises
    ------
    ValueError
        If the first parameter is not ``mode``.
    CommandParameterError
        If too few parameters are supplied or the key/value list has an odd
        number of elements.
    """
    # Check if the function was called for the appropriate command
    command = "queue"
    expected_p0 = "mode"
    if params[0] != expected_p0:
        raise ValueError(f"Incorrect parameter value '{params[0]}'. Expected value: '{expected_p0}'")
    # Make sure that there is a sufficient number of parameters to start processing
    if len(params) < 2:
        raise CommandParameterError(f"Item type and options are not specified '{command} {params[0]}'")
    p_item_type = params[1]
    if p_item_type != "set":
        raise_request_not_supported([command, params[0], params[1]])
    try:
        if p_item_type == "set":
            params_mode = params[2:]
            if len(params_mode) % 2:
                raise CommandParameterError(
                    f"The list of queue mode parameters must have even number of elements: {params_mode}"
                )
            # Pair up consecutive elements as key -> value.
            queue_mode = {params_mode[i]: params_mode[i + 1] for i in range(0, len(params_mode), 2)}
            for k in queue_mode.keys():
                # Attempt to evaluate key parameters (e.g. "True" should become boolean True)
                # If a parameter fails to evaluate, it should remain a string.
                # NOTE(review): eval() on CLI-supplied text -- globals/locals are
                # emptied, but ast.literal_eval would be safer; confirm the
                # input is trusted before keeping eval here.
                try:
                    queue_mode[k] = eval(queue_mode[k], {}, {})
                except Exception:
                    pass
            cmd_prms = {"mode": queue_mode}
        else:
            # This indicates a bug in the program.
            raise ValueError(f"Unknown item type: {p_item_type}")
    except IndexError:
        raise CommandParameterError(f"The command '{params}' contain insufficient number of parameters")
    # E.g. "queue_mode_set".
    method = f"{command}_{params[0]}_{params[1]}"
    prms = cmd_prms
    return method, prms
def read_tiff(path, pages=None):
    """
    Read in a tiff stack.

    :param path: Full path to file
    :param pages: list or numpy array of pages to load; an int selects a
        single page, and None loads every page in the file.
    :return: height x width array for a single page, otherwise a
        height x width x num_pages array of image data
    :raises TypeError: if pages is not None, an int, a list or an array
    :raises IndexError: if more pages are requested than the tiff contains
    """
    # Normalize the requested pages and count them.
    if pages is None:
        num_pages = 1
    elif type(pages) is int:
        num_pages = 1
        pages = [pages]
    elif type(pages) is list or type(pages) is np.ndarray:
        num_pages = len(pages)
    else:
        raise TypeError('Pages is type {}, but must be a list, int, or array'.format(type(pages).__name__))
    with TiffFile(path) as f:
        # Get number of pages in the actual tiff.
        num_tiff_pages = len(f.pages)
        if num_pages > num_tiff_pages:
            raise IndexError("Too many pages requested. Requested {} pages but only {} pages in tiff"
                             .format(num_pages, num_tiff_pages))
        if pages is None:
            # BUG FIX: previously `pages` was only populated when the tiff had
            # more than one page, so a single-page file with pages=None crashed
            # when iterating below.  Warn only in the multi-page case.
            if num_tiff_pages > 1:
                warnings.warn("No specific pages requested, so returning all pages ({})"
                              .format(num_tiff_pages))
            # range() replaces the Python 2-only xrange(), which is a NameError
            # on Python 3.
            pages = range(num_tiff_pages)
            num_pages = num_tiff_pages
        # Initialize the output array from the first page's dimensions.
        tiff_shape = f.pages[0].shape
        tiff_array = np.empty(shape=(tiff_shape[0], tiff_shape[1], num_pages))
        # Load each requested page and store it.
        for ind, page in enumerate(pages):
            tiff_array[:, :, ind] = f.pages[page].asarray()
    # Compress to 2D if only one page was loaded.
    if tiff_array.shape[2] == 1:
        tiff_array = tiff_array.squeeze(axis=2)
    return tiff_array
def parse_file_buffer_to_seldon_request(file):
    """
    Reads file buffer and parse to seldon request.

    The payload shape depends on the content:
    - parseable delimited text -> tabular ``data`` payload (names + ndarray)
    - binary content (UnicodeDecodeError) -> base64 ``binData`` payload with
      a guessed MIME type
    - text the csv sniffer rejects (csv.Error) -> plain ``strData``

    Parameters
    ----------
    file : dict
        Spooled temporary file.

    Returns
    -------
    dict
        Seldon API request

    Raises
    ------
    BadRequest
        When `file` has no header.
        NOTE(review): BadRequest is documented but not raised in this
        function body -- confirm where it actually originates.
    """
    try:
        # sep=None lets the python engine sniff the delimiter.
        df = pandas.read_csv(file._file, sep=None, engine='python')
        df = df.to_dict('split')
        return {
            "data": {
                "names": df['columns'],
                "ndarray": df['data'],
            }
        }
    except UnicodeDecodeError:
        # Not text: ship the raw bytes base64-encoded with a sniffed MIME type.
        file.seek(0)
        content = file.read()
        bin_data = base64.b64encode(content).decode("utf-8")
        kind = filetype.guess(content)
        content_type = "application/octet-stream"
        if kind is not None:
            content_type = kind.mime
        return {
            "binData": bin_data,
            "meta": {
                "content-type": content_type,
            },
        }
    except csv.Error:
        # Text, but not delimiter-separated: return it verbatim.
        file.seek(0)
        return {
            "strData": file.read().decode("utf-8")
        }
def is_pro():
    """Return True when running in PRO (env var VTASKS_ENV equals "True")."""
    env_value = os.environ.get("VTASKS_ENV", "False")
    return env_value == "True"
def test_get_invalid_chunks(harness):
    """
    Attempts:
    requesting data with invalid chunk numbers, which are:
    - 0 as numbering start with 1
    - chunk count + 1 (or any more) as it exceeds maximum no of chunks
    requesting data with invalid rxID:
    - rxID != current rxID transfer
    Every invalid request is expected to fail with Status.BadRequest.
    """
    ret = FsInitGet(sysUserLogsPath, osLogFileName).run(harness)
    assert ret.fileSize != 0
    # Ceiling division: number of chunks needed to cover the whole file.
    totalChunks = int(((ret.fileSize + ret.chunkSize - 1) / ret.chunkSize))
    log.info(f"totalChunks #: {totalChunks}")
    # Chunk number below the valid range (numbering starts at 1).
    with pytest.raises(TransactionError, match=r".*" + str(Status.BadRequest.value) + ".*"):
        FsGetChunk(ret.rxID, 0).run(harness)
    # Chunk number above the valid range.
    with pytest.raises(TransactionError, match=r".*" + str(Status.BadRequest.value) + ".*"):
        FsGetChunk(ret.rxID, totalChunks + 1).run(harness)
    # Wrong rxID: one below and one above the active transfer id.
    with pytest.raises(TransactionError, match=r".*" + str(Status.BadRequest.value) + ".*"):
        FsGetChunk(ret.rxID - 1, totalChunks + 1).run(harness)
    with pytest.raises(TransactionError, match=r".*" + str(Status.BadRequest.value) + ".*"):
        FsGetChunk(ret.rxID + 1, totalChunks + 1).run(harness)
def recursive_file_discover(folder: str) -> "Iterator[Tuple[str, List[str]]]":
    """Generator based approach to discovering files in the specified folder.

    Args:
        folder (str): The folder to search in (walked recursively).

    Yields:
        tuple: The root path as the first element, and a list of the files
            directly inside that root as the second.
    """
    # The middle element (subdirectory names) of each os.walk triple is unused.
    for root, dirs, files in os.walk(folder):
        yield root, files
def test_get_no_session(
    resource_config: dict[str, str], static_files: "Path", requests_mock: "Mocker"
) -> None:
    """Test the `get()` method - session is `None`."""
    from optimade.adapters import Structure
    from oteapi_optimade.models.strategies.resource import OPTIMADEResourceSession
    from oteapi_optimade.strategies.resource import OPTIMADEResourceStrategy
    # Serve a canned OPTIMADE response for the configured access URL.
    sample_file = static_files / "optimade_response.json"
    requests_mock.get(resource_config["accessUrl"], content=sample_file.read_bytes())
    session = OPTIMADEResourceStrategy(resource_config).get()
    assert isinstance(session, OPTIMADEResourceSession)
    assert session.optimade_config is None
    assert session.optimade_resource_model == f"{Structure.__module__}:Structure"
    assert session.optimade_resources
    # Every returned resource must be parseable as an OPTIMADE Structure.
    for resource in session.optimade_resources:
        assert Structure(resource)
def load(data_dir, config, use_feature_transform=False, numeric=False, categorical=False):
    """
    Load a specific dataset.

    Args:
        data_dir (str): path to the dataset directory.
        config (dict): general dict with settings; ``config['data.dataset']``
            selects the dataset to load.
        use_feature_transform (bool): apply dense feature transform or not.
        numeric (bool): forwarded to the dataset loader.
        categorical (bool): forwarded to the dataset loader.

    Returns:
        dict: tensorflow Dataset objects and features.

    Raises:
        ValueError: if ``config['data.dataset']`` names an unknown dataset.
    """
    if config['data.dataset'] == "heart":
        ret = load_heart(data_dir, config,
                         use_feature_transform=use_feature_transform,
                         numeric=numeric, categorical=categorical)
    else:
        # Typo fixed in the message: "Unknow" -> "Unknown".
        raise ValueError(f"Unknown dataset: {config['data.dataset']}")
    return ret
def generate_computer_vis_task(config, logger, device, data_dir):
    """Generate a computer vision datahandler.

    Args:
        config: Command-line arguments.
        logger: The logger.
        device: The cuda device.
        data_dir (str): The data directory.

    Returns:
        (....): See docstring of function `generate_task`.
    """
    transform = None
    if config.dataset in ['mnist', 'mnist_autoencoder']:
        # Downloading MNIST from the page of Yann Lecun can give errors. This
        # problem is solved in torchvision version 0.9.1 but for earlier versions
        # the following fix can be used.
        if torchvision.__version__ != '0.9.1':
            datasets.MNIST.resources = [
                ('https://ossci-datasets.s3.amazonaws.com/mnist/train' +
                 '-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
                ('https://ossci-datasets.s3.amazonaws.com/mnist/train' +
                 '-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'),
                ('https://ossci-datasets.s3.amazonaws.com/mnist/t10k' +
                 '-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'),
                ('https://ossci-datasets.s3.amazonaws.com/mnist/t10k' +
                 '-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')
            ]
        if config.dataset == 'mnist':
            logger.info('Loading MNIST dataset.')
            from datahandlers.mnist_data import MNISTData as CVData
            # BUG FIX: train_val_split was never assigned for plain MNIST,
            # causing a NameError at random_split below whenever a validation
            # set is used.  55000/5000 matches the autoencoder variant
            # (MNIST has 60000 training images) -- confirm intended split.
            train_val_split = [55000, 5000]
        elif config.dataset == 'mnist_autoencoder':
            logger.info('Loading MNIST autoencoder dataset.')
            from datahandlers.mnist_auto_data import MNISTAutoData as CVData
            train_val_split = [55000, 5000]
    elif config.dataset == 'fashion_mnist':
        logger.info('Loading Fashion MNIST dataset.')
        from datahandlers.fashionmnist_data import FashionMNISTData as CVData
        train_val_split = [55000, 5000]
    elif config.dataset == 'cifar10':
        logger.info('Loading CIFAR-10 dataset.')
        from datahandlers.cifar10_data import CIFAR10Data as CVData
        train_val_split = [45000, 5000]
    ### Load the testing data.
    testset = CVData(data_dir, device, train=False, download=True,
                     double_precision=config.double_precision,
                     target_class_value=config.target_class_value)
    test_loader = DataLoader(testset, batch_size=config.batch_size)
    ### Load the training data and split with validation if necessary.
    trainset = CVData(data_dir, device, train=True, download=True,
                      double_precision=config.double_precision,
                      target_class_value=config.target_class_value)
    val_loader = None
    if not config.no_val_set:
        trainset, valset = torch.utils.data.random_split(trainset,
                                                         train_val_split)
        val_loader = DataLoader(valset, batch_size=config.batch_size,
                                shuffle=False)
    train_loader = DataLoader(trainset, batch_size=config.batch_size,
                              shuffle=True)
    ### Create the dataset.
    ds = DatasetWrapper(train_loader, test_loader, valset=val_loader,
                        name=config.dataset, in_size=testset._in_size,
                        out_size=testset._out_size)
    return ds
def setenv():
    """Set some basic environment-dependent globals.

    Binds the module-level ``CIN`` to the interactive input function:
    ``input`` on Python 3, ``raw_input`` on Python 2.
    """
    global CIN
    # Compare version_info instead of the version string: lexicographic
    # comparison of version strings misorders multi-digit majors
    # (e.g. '10...' < '3').
    if sys.version_info[0] >= 3:
        CIN = input
    else:
        CIN = raw_input
def create(client, spec: str, namespace: str = "default", timeout=100):
    """Create a CronJob.

    :client: The Batch V1 API object.
    :spec: A valid CronJob YAML manifest.
    :namespace: The namespace of the CronJob.
    :timeout: Timeout in seconds to wait for object creation/modification
    :returns: The API response on creation, False if it already exists.
    """
    body = yaml.safe_load(spec)
    try:
        response = client.create_namespaced_cron_job(namespace, body)
    except ApiException as e:
        # If the object already exists, return False.
        if e.reason == "Conflict":
            return False
        raise e
    name = body["metadata"]["name"]
    # If the created object is not yet visible, block on the watch stream
    # until a matching ADDED/MODIFIED CronJob event arrives or the timeout
    # elapses.
    if get(client, name, namespace) is None:
        w = watch.Watch()
        for event in w.stream(
            client.list_cron_job_for_all_namespaces, timeout_seconds=timeout
        ):
            if (
                (event["type"] == "ADDED" or event["type"] == "MODIFIED")
                and event["object"].kind == "CronJob"
                and event["object"].metadata.name == response.metadata.name
                and event["object"].metadata.namespace == response.metadata.namespace
            ):
                break
    return response
def pick_projects(directory):
    """
    Find all subdirectories of *directory* (including *directory* itself)
    that contain a .json file, and select the largest .json file in each.

    :param directory: string containing directory of subdirectories to search
    :return: list projects found under the given directory
    """
    ext = '.json'
    projects = []
    for sub, _dirnames, _filenames in os.walk(directory):
        json_entries = [entry for entry in os.listdir(sub) if entry.endswith(ext)]
        if json_entries:
            # Keep the largest .json file found directly in this directory
            # (ties resolve to the first entry, as before).
            largest = max(json_entries,
                          key=lambda entry: os.stat(os.path.join(sub, entry)).st_size)
            projects.append(os.path.join(sub, largest))
    return projects
def get_expanded_types(types, type_hierarchy):
    """Expand a set of types with both more specific and more generic types
    (i.e., all super-types and sub-types).

    Args:
        types: Iterable of type names to expand.
        type_hierarchy: Mapping of type name -> info dict with a 'depth' key.

    Returns:
        set: All type names on the super-/sub-type paths of the input types.
    """
    expanded_types = set()
    # Renamed loop variables so the builtin `type` is no longer shadowed.
    for type_name in types:
        # Adding all supertypes.
        expanded_types.update(get_type_path(type_name, type_hierarchy))
        # Adding all subtypes (NOTE: this bit could be done more efficiently).
        for candidate in type_hierarchy:
            # Only strictly deeper entries can be subtypes.
            if type_hierarchy[candidate]['depth'] <= type_hierarchy[type_name]['depth']:
                continue
            candidate_path = get_type_path(candidate, type_hierarchy)
            if type_name in candidate_path:
                expanded_types.update(candidate_path)
    return expanded_types
def render(template, **kwargs):
    """Render a Jinja2 template.

    Parameters
    ----------
    template : str
        Name of the template file (without '.template' suffix). It must be
        located in the directory 'pywrap/template_data'.
    kwargs : dict
        Template arguments.

    Returns
    -------
    text : str
        Rendered template.

    Raises
    ------
    IOError
        If no template file with the given name exists.
    """
    template_file = resource_filename(
        "pywrap", os.path.join("template_data", template + ".template"))
    if not os.path.exists(template_file):
        raise IOError("No template for '%s' found." % template)
    # Context manager guarantees the handle is closed; the original left the
    # file object dangling (closed only by garbage collection).
    with open(template_file, "r") as f:
        template_source = f.read()
    # Avoid rebinding the `template` parameter so the name keeps one meaning.
    return jinja2.Template(template_source).render(**kwargs)
def test_get_value_repr():
    """
    Test of the get_value_repr() utility: short values are rendered as-is,
    long lists are truncated with an ellipsis.
    """
    v1 = 12345
    v1_repr = '12345'
    assert get_value_repr(v1) == v1_repr
    # A long list's representation is truncated (here after nine elements).
    v2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    v2_repr = '[1, 2, 3, 4, 5, 6, 7, 8, 9...]'
    assert get_value_repr(v2) == v2_repr
def wait_for_user_credentials():
    """
    Although the key creation via IAM immediately returns credentials, it takes a little time
    (on the order of ~10s) before the key is propagated widely enough to allow it to be used in an
    sts:AssumeRole call. Unfortunately, there isn't a good way to test for the propagation other
    than simply trying to use them, but in practice we haven't seen these become available any
    sooner than ~8s after creation.
    The get_session_credentials call is wrapped in a @retry, so even if this hardcoded timeout isn't
    quite long enough, the subsequent downstream calls will still gracefully handle propagation
    delay.
    """
    # Fixed delay; downstream retries cover the rare slower-propagation case.
    sleep(10)
def create_clean_db():
    """
    Use from a python shell to create a fresh database.

    Creates all tables, a superuser (with admin role and API key), the
    initial honeypot deploy scripts and - outside of testing - a default
    Snort rule source, then fetches the rule sources.
    """
    with hnp.test_request_context():
        db.create_all()
        # Creating superuser entry.
        superuser = user_datastore.create_user(
            email=hnp.config.get('SUPERUSER_EMAIL'),
            password=encrypt(hnp.config.get('SUPERUSER_PASSWORD')))
        adminrole = user_datastore.create_role(name='admin', description='')
        user_datastore.add_role_to_user(superuser, adminrole)
        user_datastore.create_role(name='user', description='')
        db.session.flush()
        # API key for the superuser (UUID4 with dashes stripped).
        apikey = ApiKey(user_id=superuser.id, api_key=str(uuid.uuid4()).replace("-", ""))
        db.session.add(apikey)
        db.session.flush()
        from os import path
        from hnp.api.models import DeployScript, RuleSource
        from hnp.tasks.rules import fetch_sources
        # Creating a initial deploy scripts.
        # Reading initial deploy script should be: ../../scripts/
        #|-- deploy_conpot.sh
        #|-- deploy_dionaea.sh
        #|-- deploy_snort.sh
        #|-- deploy_kippo.sh
        deployscripts = [
            ['Ubuntu - Conpot', '../scripts/deploy_conpot.sh'],
            ['Ubuntu/Raspberry Pi - Drupot', '../scripts/deploy_drupot.sh'],
            ['Ubuntu/Raspberry Pi - Magenpot', '../scripts/deploy_magenpot.sh'],
            ['Ubuntu - Wordpot', '../scripts/deploy_wordpot.sh'],
            ['Ubuntu - Shockpot', '../scripts/deploy_shockpot.sh'],
            ['Ubuntu - p0f', '../scripts/deploy_p0f.sh'],
            ['Ubuntu - Suricata', '../scripts/deploy_suricata.sh'],
            ['Ubuntu - Glastopf', '../scripts/deploy_glastopf.sh'],
            ['Ubuntu - ElasticHoney', '../scripts/deploy_elastichoney.sh'],
            ['Ubuntu - Amun', '../scripts/deploy_amun.sh'],
            ['Ubuntu - Snort', '../scripts/deploy_snort.sh'],
            ['Ubuntu - Cowrie', '../scripts/deploy_cowrie.sh'],
            ['Ubuntu/Raspberry Pi - Dionaea', '../scripts/deploy_dionaea.sh'],
            ['Ubuntu - Shockpot Sinkhole', '../scripts/deploy_shockpot_sinkhole.sh'],
        ]
        for honeypot, deploypath in reversed(deployscripts):
            with open(path.abspath(deploypath), 'r') as deployfile:
                initdeploy = DeployScript()
                initdeploy.script = deployfile.read()
                initdeploy.notes = 'Initial deploy script for {}'.format(honeypot)
                initdeploy.user = superuser
                initdeploy.name = honeypot
                db.session.add(initdeploy)
        # Creating an initial rule source.
        rules_source = hnp.config.get('SNORT_RULES_SOURCE')
        if not hnp.config.get('TESTING'):
            rulesrc = RuleSource()
            rulesrc.name = rules_source['name']
            rulesrc.uri = rules_source['uri']
            # NOTE(review): this second assignment overwrites the configured
            # name set two lines above -- confirm which value is intended.
            rulesrc.name = 'Default rules source'
            db.session.add(rulesrc)
        db.session.commit()
        fetch_sources()
def exceptor():
    """
    This function just raises an exception.

    :return: None
    """
    # Raise an exception with the raise statement. Try raising different
    # exceptions to see the effect on the try-except-else block in this file.
    # These are all built-in exceptions in Python; more information:
    # https://docs.python.org/3/library/exceptions.html
    raise ValueError
    # Alternatives to experiment with:
    # raise IndexError
    # raise TypeError
def main(files, **kwargs):
    """Generate or check SHA3-512 digests for *files*.

    Extra keyword arguments are forwarded to ``script_main`` along with a
    fresh ``hashlib.sha3_512`` hasher instance.
    """
    from hashlib import sha3_512 as H
    script_main(H(), files, **kwargs)
def inject_where(builder):
    """
    helper function to append to the query the generated where clause
    :param builder: the current builder
    :return: the builder (for chaining)
    """
    query = builder.v.query
    if callable(query):
        # Callable queries cannot be spliced textually; leave them untouched.
        return builder
    lower = query.lower()
    where = lower.find(' where ')
    # Find the first trailing clause before which the extra SQL must be
    # inserted (placeholders go before GROUP BY/ORDER BY/LIMIT/OFFSET).
    before = -1
    for before_q in [' group by', ' order by', ' limit', ' offset']:
        before = lower.find(before_q)
        if before >= 0:
            break
    if where >= 0:
        # Query already has a WHERE: AND-extend it, then splice the joins in
        # front of the WHERE keyword.
        if before < 0:
            builder.append(' {and_where}')
        else:
            builder.query('{} {{and_where}} {}'.format(query[:before], query[before:]))
        builder.replace('and_where')
        # Re-read the query: replace() above may have rewritten it.
        query = builder.v.query
        builder.query('{} {{joins}} {}'.format(query[:where], query[where:]))
        builder.replace('joins')
    else:
        # No WHERE yet: add both joins and a fresh where placeholder.
        if before < 0:
            builder.append('{joins} {where}')
        else:
            builder.query('{} {{joins}} {{where}} {}'.format(query[:before], query[before:]))
        builder.replace('where')
        builder.replace('joins')
    return builder
def loadCPT(path):
    """Load a .cpt colour-palette file and convert it into a matplotlib colormap.

    This code was adapted from the GEONETClass Tutorial written by Diego
    Souza, retrieved 18 July 2019.
    https://geonetcast.wordpress.com/2017/06/02/geonetclass-manipulating-goes-16-data-with-python-part-v/

    Parameters
    ----------
    path :
        Path to the .cpt file

    Returns
    -------
    cpt :
        A colormap that can be used for the cmap argument in matplotlib
        type plot, or None if the file could not be opened.
    """
    # `with` guarantees the handle is closed even on a parse error
    # (the original opened/closed manually and could leak the handle);
    # only OSError is caught -- the original bare `except:` also hid
    # programming errors such as a wrong argument type.
    try:
        with open(path) as f:
            lines = f.readlines()
    except OSError:
        print ("File ", path, "not found")
        return None
    x = np.array([])
    r = np.array([])
    g = np.array([])
    b = np.array([])
    colorModel = 'RGB'
    for l in lines:
        ls = l.split()
        if not ls:
            # skip blank lines (the original raised IndexError on them)
            continue
        if l[0] == '#':
            # a trailing HSV token in a comment line switches the colour model
            if ls[-1] == 'HSV':
                colorModel = 'HSV'
                continue
            else:
                continue
        if ls[0] == 'B' or ls[0] == 'F' or ls[0] == 'N':
            # background/foreground/NaN rows carry no gradient data; fall
            # through so the previous row's second anchor is appended once
            # more (preserves the original parser's behaviour)
            pass
        else:
            # each data row holds two anchor points: x1 r1 g1 b1  x2 r2 g2 b2
            x = np.append(x, float(ls[0]))
            r = np.append(r, float(ls[1]))
            g = np.append(g, float(ls[2]))
            b = np.append(b, float(ls[3]))
            xtemp = float(ls[4])
            rtemp = float(ls[5])
            gtemp = float(ls[6])
            btemp = float(ls[7])
        x = np.append(x, xtemp)
        r = np.append(r, rtemp)
        g = np.append(g, gtemp)
        b = np.append(b, btemp)
    if colorModel == 'HSV':
        # convert every anchor to RGB; .cpt stores hue in degrees
        for i in range(r.shape[0]):
            rr, gg, bb = colorsys.hsv_to_rgb(r[i] / 360., g[i], b[i])
            r[i] = rr ; g[i] = gg ; b[i] = bb
    if colorModel == 'RGB':
        r = r / 255.0
        g = g / 255.0
        b = b / 255.0
    # normalise positions to [0, 1] as LinearSegmentedColormap expects
    xNorm = (x - x[0]) / (x[-1] - x[0])
    red = []
    blue = []
    green = []
    for i in range(len(x)):
        red.append([xNorm[i], r[i], r[i]])
        green.append([xNorm[i], g[i], g[i]])
        blue.append([xNorm[i], b[i], b[i]])
    colorDict = {'red': red, 'green': green, 'blue': blue}
    # Makes a linear interpolation
    cpt = LinearSegmentedColormap('cpt', colorDict)
    return cpt
def main():
    """
    The main function to execute upon call.

    Prompts the user for an ASCII code and prints the matching character.

    Returns
    -------
    int
        returns integer 0 for safe executions.
    """
    print("Program to find the character from an input ASCII value.")
    code = int(input("Enter ASCII value to find character: "))
    message = "\nASCII {asci} in character is \"{char}\"".format(asci=code, char=chr(code))
    print(message)
    return 0
def get_random_greeting():
    """Pick and return one greeting message at random from ``GREETINGS``."""
    greeting = random.choice(GREETINGS)
    return greeting
def dyad_completion(w):
    """Return the dyadic completion of ``w``.

    ``w`` is returned unchanged when it is already dyadic (its largest
    denominator is a power of two); otherwise every weight is rescaled to a
    power-of-two denominator and a dummy weight is appended so the total
    still sums to 1.  The input is a tuple of nonnegative Fractions or
    integers which sum to 1.

    Examples
    --------
    >>> w = (Fraction(1,3), Fraction(1,3), Fraction(1, 3))
    >>> dyad_completion(w)
    (Fraction(1, 4), Fraction(1, 4), Fraction(1, 4), Fraction(1, 4))
    >>> w = (Fraction(1,3), Fraction(1,5), Fraction(7, 15))
    >>> dyad_completion(w)
    (Fraction(5, 16), Fraction(3, 16), Fraction(7, 16), Fraction(1, 16))
    >>> w = (1, 0, 0.0, Fraction(0,1))
    >>> dyad_completion(w)
    (Fraction(1, 1), Fraction(0, 1), Fraction(0, 1), Fraction(0, 1))
    """
    fracs = tuple(Fraction(v) for v in w)
    common = max(f.denominator for f in fracs)
    target = next_pow2(common)
    if target == common:
        # the tuple of fractions is already dyadic
        return fracs
    # rescale each weight and add the dummy weight (target-common)/target
    scaled = tuple(Fraction(f * common, target) for f in fracs)
    return scaled + (Fraction(target - common, target),)
def create_pv_string_points(x_coord: float,
                            y_coord: float,
                            string_width: float,
                            string_height: float
                            ) -> [Polygon, np.ndarray]:
    """
    Build the outline polygon of one PV string and the module points inside it.

    :param x_coord: x position of the string's lower-left corner
    :param y_coord: y position of the string's lower-left corner
    :param string_width: width of the string rectangle
    :param string_height: height of the string rectangle
    :return: (string outline Polygon, MultiPoint of module positions)

    NOTE(review): relies on module-level globals ``module_width`` and
    ``module_height``.  ``xs_string`` spans [module_width/2, module_width)
    with step module_width (i.e. a single x value) and -- unlike
    ``ys_string`` -- is NOT offset by ``x_coord``; confirm this asymmetry
    is intentional.  ``xxs`` from the meshgrid is unused.
    """
    # corners of the string rectangle, counter-clockwise from lower-left
    pts = ((x_coord, y_coord),
           (x_coord + string_width, y_coord),
           (x_coord + string_width, y_coord + string_height),
           (x_coord, y_coord + string_height))
    module = Polygon(pts)
    xs_string = np.arange(module_width / 2, module_width, module_width)
    ys_string = np.arange(module_height / 2 + y_coord, y_coord + string_height, module_height)
    xxs, yys = np.meshgrid(xs_string, ys_string, sparse=True)
    string_points = MultiPoint(np.transpose([np.tile(xs_string, len(ys_string)),
                                             np.repeat(yys, len(xs_string))]))
    return module, string_points
def phasor(H):
    """Compute the phasor of a histogram as a complex number.

    The real and imaginary parts are the first Fourier coefficients of the
    histogram, normalised by its total weight:

        g = sum(H * cos(f)) / sum(H)
        s = sum(H * sin(f)) / sum(H)

    with phases f spaced evenly from 0 to 2*pi over the bins.

    Parameters
    ----------
    H : list or np.array
        Histogram values.

    Returns
    -------
    complex
        The phasor z = g + i*s.
    """
    n_bins = len(H)
    phases = 2 * np.pi * np.linspace(0, n_bins - 1, n_bins) / (n_bins - 1)
    total = np.sum(H)
    real_part = np.sum(H * np.cos(phases)) / total
    imag_part = np.sum(H * np.sin(phases)) / total
    return complex(real_part, imag_part)
def if_stmt(cond, body, orelse):
    """Functional form of an if statement.

    Args:
      cond: Boolean.
      body: Callable with no arguments, and outputs of the positive (if)
        branch as return type.
      orelse: Callable with no arguments, and outputs of the negative
        (else) branch as return type.

    Returns:
      Tuple containing the statement outputs.
    """
    # Tensor conditions must be staged through TF's functional cond;
    # plain Python values take the direct path.
    impl = tf_if_stmt if tensor_util.is_tensor(cond) else _py_if_stmt
    return impl(cond, body, orelse)
def download_file(save_dir, filename, url, md5=None):
    """
    Download the file from the url to specified directory.
    Check md5 value when the file is exists, if the md5 value is the same as the existed file, just use
    the older file, if not, will download the file from the url.
    Args:
        save_dir(string): The specified directory saving the file.
        filename(string): The specified filename saving the file.
        url(string): The url downling the file.
        md5(string, optional): The md5 value that checking the version downloaded.
    Returns:
        str: full path of the (possibly freshly downloaded) file.
    """
    fullname = os.path.join(save_dir, filename)
    if os.path.exists(fullname):
        # only re-download when an md5 is supplied and the local copy differs
        if md5 and (not md5file(fullname) == md5):
            logger.info("Updating {} from {}".format(filename, url))
            logger.disable()
            get_path_from_url(url, save_dir, md5)
    else:
        logger.info("Downloading {} from {}".format(filename, url))
        logger.disable()
        get_path_from_url(url, save_dir, md5)
    # NOTE(review): enable() also runs when disable() never did (file present
    # with matching md5) -- confirm the logger tolerates unbalanced calls.
    logger.enable()
    return fullname
def load_db(DB_Filename, ValueType, ValueColumnIdx, KeyColumnIdx):
    """Load the database contained in file 'DB_Filename'.

    Builds a python dictionary mapping from a string (column
    ``KeyColumnIdx``) to a number set or a single number (column
    ``ValueColumnIdx``), depending on ``ValueType``.

    NOTE: The 'key' may be, for example, the property value, while the
    'value' is the number set it points to -- which can be confusing.
    """
    table = parse_table(DB_Filename)
    # convert the value column in place according to the requested type
    converters = {
        "NumberSet": convert_column_to_interval,
        "number": convert_column_to_number,
    }
    converter = converters.get(ValueType)
    if converter is not None:
        converter(table, ValueColumnIdx)
    return convert_table_to_associative_map(table, ValueColumnIdx, ValueType, KeyColumnIdx)
def insert_tables(cur, conn):
    """
    Description: This function triggers the transform and load process.
    Arguments:
        cur: the cursor object
        conn: object of the connection to the database
    Returns:
        None
    """
    try:
        for query in insert_table_queries:
            cur.execute(query)
            # commit after each statement so earlier loads survive a later failure
            conn.commit()
    except psycopg2.Error as e:
        # NOTE(review): the error is only printed, not re-raised, so callers
        # cannot tell that a partial load happened -- confirm this is intended.
        print(e)
def createFoodObject(dataset, row):
    """
    Create food URI and triples related to food properties.

    dataset: RDF dataset the food property triples are added to
        (via ``createFoodProp``).
    row: tabular row with columns 'Food Ontology Term', 'Food',
        'NEW Food Type', 'NEW Food Matrix', 'Unit', 'Value', 'Frequency'.
    Returns the list of food URIs that were created or reused.
    """
    # str() renders a missing cell as the literal string 'nan'
    food_onto_term = str(row['Food Ontology Term'])
    food_label = row['Food']
    food_type = row['NEW Food Type']
    food_amount = row['NEW Food Matrix']
    # first line of the matrix cell holds the source, after the 'Matrix:' tag
    food_source = food_amount.split('\n')[0].replace('Matrix:', '')
    dose_unit = row['Unit']
    dose_value = row['Value']
    dose_freq = row['Frequency']
    fooduri_list = []
    # if there is no ontology term define for this food, create one
    if food_onto_term == 'nan':
        fooduri = FOODHKG_INST[get_hash(food_label)]
        createFoodProp(dataset, fooduri, food_label, food_type, food_source,
                       dose_unit, dose_value, dose_freq)
        fooduri_list.append(fooduri)
    else:
        # the cell may hold several ';'-separated ontology URIs
        for fooduri in food_onto_term.split(';'):
            fooduri = fooduri.strip()
            if fooduri == '':
                continue
            fooduri = URIRef(fooduri)
            createFoodProp(dataset, fooduri, food_label, food_type, food_source,
                           dose_unit, dose_value, dose_freq)
            fooduri_list.append(fooduri)
    return fooduri_list
def business():
    """ RESTful CRUD controller for business records.

    Builds a resource header table showing the record's name and address,
    then delegates to the shared ``generic`` controller.
    """
    def rheader_table(r):
        # header shown above the record view; None when no record is selected
        if r.record:
            return TABLE( TR( TH("%s: %s" % (T("Name"),
                           r.record.business_name)),
                TH("%s: %s %s" % (T("Address"),
                        r.record.street1,
                        r.record.street2))))
        return None
    list_fields = ["business_name", "owner_name",
                   "address1", "address2", "city", "county"]
    return generic(resourcename, rheader_table, list_fields)
def points_from_svg(svg_file_path):
    """ Takes a SVG file as an input and returns a list of points in the complex plane from its path. """
    # Read the SVG; only the first path's curve segments are used.
    paths, attributes = svg2paths(svg_file_path)
    segments = paths[0]
    # Sample each curve segment uniformly; svgpathtools returns points as
    # complex numbers, which are kept as-is.
    num_samples = 10
    return [
        Path(segment).point(i / (float(num_samples) - 1))
        for segment in segments
        for i in range(num_samples)
    ]
def cluster_mols(
    mols: List[Chem.rdchem.Mol],
    cutoff: float = 0.2,
    feature_fn: Callable = None,
    n_jobs: Optional[int] = 1,
):
    """Cluster a set of molecules using the butina clustering algorithm and a given threshold.

    Args:
        mols: a list of molecules.
        cutoff: Cuttoff for the clustering. Default to 0.2.
        feature_fn: A feature function that takes a Chem.rdchem.Mol object
            and return molecular features. By default, the `dm.to_fp()` is used.
            Default to None.
        n_jobs: Number of jobs for parallelization. Let to 1 for no
            parallelization. Set to None to use all available cores.

    Returns:
        (cluster_indices, cluster_mols): the Butina cluster index tuples and,
        for each cluster, the list of its member molecules.
    """
    if feature_fn is None:
        feature_fn = functools.partial(dm.to_fp, as_array=False)
    features = dm.parallelized(feature_fn, mols, n_jobs=n_jobs)
    # build the condensed lower-triangular Tanimoto distance matrix
    dists = []
    n_mols = len(mols)
    for i in range(1, n_mols):
        dist = DataStructs.BulkTanimotoSimilarity(features[i], features[:i], returnDistance=True)
        dists.extend([x for x in dist])
    # now cluster the data
    cluster_indices = Butina.ClusterData(dists, n_mols, cutoff, isDistData=True)
    cluster_mols = [operator.itemgetter(*cluster)(mols) for cluster in cluster_indices]
    # Make single mol cluster a list
    cluster_mols = [[c] if isinstance(c, Chem.rdchem.Mol) else c for c in cluster_mols]
    return cluster_indices, cluster_mols
def process_data(cur, conn, filepath, func):
    """Walk ``filepath``, apply ``func`` to every JSON file, and commit.

    Arguments:
        cur -- database cursor handed through to ``func``
        conn -- database connection, committed after each file
        filepath -- root directory containing JSON data files
        func -- callable(cur, datafile) that ingests a single file
    """
    # collect the absolute paths of every *.json file below the root
    all_files = []
    for root, _dirs, _files in os.walk(filepath):
        matches = glob.glob(os.path.join(root, '*.json'))
        all_files.extend(os.path.abspath(match) for match in matches)

    num_files = len(all_files)
    print('{} files found in {}'.format(num_files, filepath))

    # ingest the files one by one, committing after each
    for i, datafile in enumerate(all_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(i, num_files))
def test_create_or_update_log_level_update():
    """Will update the log level to "WARN".

    :return: Should return: "WARN"
    """
    connection = syncope.Syncope(syncope_url="http://192.168.1.145:9080", username="admin", password="password")
    payload = '{"name": "org.apache.http", "level": "WARN"}'
    result = connection.create_or_update_log_level(payload)
    assert result['level'] == "WARN"
def match_depends(module):
    """ Check for matching dependencies.
    This inspects spell's dependencies with the desired states and returns
    'False' if a recast is needed to match them. It also adds required lines
    to the system-wide depends file for proper recast procedure.

    module.params must contain 'name' (list of spells) and 'depends'
    (comma-separated '[+|-]dependency[(PROVIDER)]' entries); dependency
    matching is only performed for a single spell.
    """
    params = module.params
    spells = params['name']
    depends = {}
    depends_ok = True
    # dependency handling is only defined for exactly one spell
    if len(spells) > 1 or not params['depends']:
        return depends_ok
    spell = spells[0]
    # in check mode, operate on a scratch copy of the depends file
    if module.check_mode:
        sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
        try:
            shutil.copy2(sorcery_depends_orig, sorcery_depends)
        except IOError:
            module.fail_json(msg="failed to copy depends.check file")
    else:
        sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
    # parse '[+|-]dependency[(PROVIDER)]' entries from the comma-separated list
    rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
    for d in params['depends'].split(','):
        match = rex.match(d)
        if not match:
            module.fail_json(msg="wrong depends line for spell '%s'" % spell)
        # normalize status
        if not match.group('status') or match.group('status') == '+':
            status = 'on'
        else:
            status = 'off'
        depends[match.group('depend')] = status
    # drop providers spec
    depends_list = [s.split('(')[0] for s in depends]
    # verify every requested dependency actually names a known spell
    cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
    rc, stdout, stderr = module.run_command(cmd_gaze)
    if rc != 0:
        module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
    # rewrite the depends file in place, keeping lines whose status already
    # matches and marking matched dependencies as handled (None)
    fi = fileinput.input(sorcery_depends, inplace=True)
    try:
        try:
            for line in fi:
                if line.startswith(spell + ':'):
                    match = None
                    for d in depends:
                        # when local status is 'off' and dependency is provider,
                        # use only provider value
                        d_offset = d.find('(')
                        if d_offset == -1:
                            d_p = ''
                        else:
                            d_p = re.escape(d[d_offset:])
                        # .escape() is needed mostly for the spells like 'libsigc++'
                        rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
                                         (re.escape(spell), re.escape(d), d_p))
                        match = rex.match(line)
                        # we matched the line "spell:dependency:on|off:optional:"
                        if match:
                            # if we also matched the local status, mark dependency
                            # as empty and put it back into depends file
                            if match.group('lstatus') == depends[d]:
                                depends[d] = None
                                sys.stdout.write(line)
                            # status is not that we need, so keep this dependency
                            # in the list for further reverse switching;
                            # stop and process the next line in both cases
                            break
                    if not match:
                        sys.stdout.write(line)
                else:
                    sys.stdout.write(line)
        except IOError:
            module.fail_json(msg="I/O error on the depends file")
    finally:
        fi.close()
    # any dependency left with a non-None status needs switching: append it
    # to the depends file and report that a recast is required
    depends_new = [v for v in depends if depends[v]]
    if depends_new:
        try:
            try:
                fl = open(sorcery_depends, 'a')
                for k in depends_new:
                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
            except IOError:
                module.fail_json(msg="I/O error on the depends file")
        finally:
            fl.close()
        depends_ok = False
    # drop the scratch copy used for check mode
    if module.check_mode:
        try:
            os.remove(sorcery_depends)
        except IOError:
            module.fail_json(msg="failed to clean up depends.backup file")
    return depends_ok
def reshape_shuffle_ctg(fhr: np.array, uc: np.array, y: np.array, time: Optional[np.array]) -> Tuple[np.array, np.array, np.array, Optional[np.array], Optional[np.array]]:
    """
    Reshape and shuffle CTG inputs and targets for the keras/tf model input.

    Args:
        fhr, uc: (np.arrays) batches of signals, shape (length, N)
        y: (np.array) targets, one per sample
        time: (np.array or None) optional per-sample time channel
    Returns:
        (N, length, 1)-shaped arrays, shuffled in unison via the
        module-level `shuffle` helper: (fhr, uc, time, y) when `time` is
        given, otherwise (fhr, uc, y).

    Note: the previous `elif shuffle:` tested the `shuffle` *function*
    itself (always truthy), so the batch was always shuffled; that
    behaviour is kept, now made explicit.
    """
    N = fhr.shape[1]  # number of samples after concat
    length = fhr.shape[0]
    fhr = np.reshape(fhr[:, :N], (N, length, 1))
    uc = np.reshape(uc[:, :N], (N, length, 1))
    y = np.reshape(y, (N, 1))
    if time is not None:
        # BUGFIX: `if time:` raised "truth value of an array is ambiguous"
        # for any multi-element array; compare identity against None instead.
        time = np.reshape(time[:, :N], (N, length, 1))
        fhr, uc, time, y = shuffle(fhr, uc, time, y)
        return fhr, uc, time, y
    fhr, uc, y = shuffle(fhr, uc, y)  # shuffles all in unison along the first axis
    return fhr, uc, y
def do_project(project: str) -> Optional[Tuple[str, str, str]]:
    """
    Query Anitya and zypper for current version.

    Returns a (project, latest upstream version, openSUSE version) tuple;
    unknown versions are rendered as empty strings.

    NOTE(review): the annotation says Optional, but every path returns a
    tuple -- consider tightening it to Tuple[str, str, str].
    """
    max_version = None
    prog_id = anitya_find_project_id(proj_name=project)
    if prog_id:
        res = requests.get('https://release-monitoring.org/project/%d/' % prog_id)
        res.raise_for_status()
        lines = res.text.splitlines()
        # scrape the highest doap:revision version off the project page HTML
        for line in lines:
            if "doap:revision" in line:
                version = Version(line[line.find('>') + 1:line.rfind('<')])
                if version and (max_version is None or version > max_version):
                    max_version = version
    opensuse_version = zypper.package_version(project)
    return (project,
            str(max_version) if max_version else '',
            str(opensuse_version) if opensuse_version else '')
def save_channel_videoid(channel_id: str, video_id: str):
    """Persist a single video ID together with its channel ID.

    Args:
        channel_id (str): channel identifier
        video_id (str): video identifier

    Returns:
        bool: True on success, False on failure
    """
    schemas = {
        "video_id": video_id,
        "channel_id": channel_id
    }
    play_list_model = ChannelPlaylistItem(**schemas)
    try:
        with app.app_context():
            db.session.add(play_list_model)
            db.session.commit()
        return True
    except SQLAlchemyError as e:
        # NOTE(review): only the exception *type* is printed -- consider
        # logging the message itself for easier diagnosis.
        print(type(e))
        return False
def mark_point(mark_point=None, **kwargs):
    """
    :param mark_point:
        Marker point specification; one of 'min', 'max' or 'average'.
    :param kwargs:
        Extra options forwarded unchanged to the underlying mark helper.
    :return:
        Result of ``_mark()`` for the given marker specification.
    """
    return _mark(mark_point, **kwargs)
def get_keys(opts):
    """Gets keys from keystore and known-hosts store.

    Returns a dict with this agent's public/secret keys and the server key
    registered for ``opts.vip_address``.
    """
    known_hosts = KnownHostsStore()
    server_key = known_hosts.serverkey(opts.vip_address)
    store = KeyStore()
    return {
        "publickey": store.public,
        "secretkey": store.secret,
        "serverkey": server_key,
    }
def _get_model_ptr_from_binary(binary_path=None, byte_string=None):
"""Returns a pointer to an mjModel from the contents of a MuJoCo model binary.
Args:
binary_path: Path to an MJB file (as produced by MjModel.save_binary).
byte_string: String of bytes (as returned by MjModel.to_bytes).
One of `binary_path` or `byte_string` must be specified.
Returns:
A `ctypes.POINTER` to a new `mjbindings.types.MJMODEL` instance.
Raises:
TypeError: If both or neither of `byte_string` and `binary_path`
are specified.
"""
if binary_path is None and byte_string is None:
raise TypeError(
"At least one of `byte_string` or `binary_path` must be specified.")
elif binary_path is not None and byte_string is not None:
raise TypeError(
"Only one of `byte_string` or `binary_path` may be specified.")
if byte_string is not None:
assets = {_FAKE_BINARY_FILENAME: byte_string}
return mujoco.MjModel.from_binary_path(_FAKE_BINARY_FILENAME, assets)
return mujoco.MjModel.from_binary_path(binary_path, {}) | 5,324,156 |
def plot_work_terms(constants_class, fig_name=None):
    """
    Calculate actual work terms to use and plot to check them.
    We need both n and b in image space.

    TODO: What exactly are these work terms??

    :param constants_class: BaseArrays class object
                            containing fundamental and derived parameters
    :param fig_name: partial name or prefix (can include path) if figure
                     is saved. If None, pylab.show() is called instead
    """
    pylab.clf()
    # log-scale plot of the facet truncation term over rescaled coordinates
    pylab.semilogy(
        coordinates(constants_class.xMxN_yP_size)
        / constants_class.yP_size
        * constants_class.xMxN_yP_size,
        constants_class.facet_m0_trunc,
    )
    # mark the +/- xM range on the plot for reference
    xM = constants_class.xM_size / 2 / constants_class.N
    mark_range("xM", -xM, xM)
    pylab.grid()
    if fig_name is None:
        pylab.show()
    else:
        pylab.savefig(f"{fig_name}_xm.png")
def emulate_decoding_routine(vw, function_index, function: int, context, max_instruction_count: int) -> List[Delta]:
    """
    Emulate a function with a given context and extract the CPU and
    memory contexts at interesting points during emulation.
    These "interesting points" include calls to other functions and
    the final state.
    Emulation terminates if the CPU executes an unexpected region of
    memory, or the function returns.
    Implementation note: currently limits emulation to 20,000 instructions.
    This prevents unexpected infinite loops.
    This number is taken from emulating the decoding of "Hello world" using RC4.
    :param vw: The vivisect workspace in which the function is defined.
    :type function_index: viv_utils.FunctionIndex
    :param function: The address of the function to emulate.
    :type context: function_argument_getter.FunctionContext
    :param context: The initial state of the CPU and memory
      prior to the function being called.
    :param max_instruction_count: The maximum number of instructions to emulate per function.
    :rtype: Sequence[decoding_manager.Delta]
    """
    # fresh emulator restored to the snapshot captured at the call site
    emu = floss.utils.make_emulator(vw)
    emu.setEmuSnap(context.emu_snap)
    logger.trace(
        "Emulating function at 0x%08X called at 0x%08X, return address: 0x%08X",
        function,
        context.decoded_at_va,
        context.return_address,
    )
    deltas = floss.decoding_manager.emulate_function(
        emu, function_index, function, context.return_address, max_instruction_count
    )
    return deltas
def probs_to_costs(costs, beta=.5):
    """Map membrane probabilities to log-odds edge costs.

    Note: despite the parameter name, ``costs`` holds *probabilities* on
    input, and -- contrary to the old docstring -- the transform is NOT
    in-place: each step below rebinds ``costs`` to a fresh array and the
    caller's array is left untouched.

    Parameters
    ----------
    costs : np.ndarray
        Probabilities in [0, 1].
    beta : float, optional
        Boundary bias; 0.5 (the default) adds no bias.

    Returns
    -------
    np.ndarray
        Cost values ``log((1 - p) / p) + log((1 - beta) / beta)``.
    """
    p_min = 0.001
    p_max = 1. - p_min
    # squash probabilities into [p_min, p_max] so the logs below stay finite
    costs = (p_max - p_min) * costs + p_min
    # probabilities to costs; second term is the boundary bias
    costs = np.log((1. - costs) / costs) + np.log((1. - beta) / beta)
    return costs
def requires_ids_or_filenames(method):
    """
    A decorator for spectrum library methods that require either a list of Ids or a list of filenames.

    Exactly one of the keyword arguments ``ids`` / ``filenames`` must be
    supplied; a scalar value for either is promoted to a one-entry tuple
    before the wrapped method is invoked.

    :param method:
        A method belonging to a sub-class of SpectrumLibrary.
    """
    from functools import wraps

    @wraps(method)  # FIX: preserve the wrapped method's name/docstring for introspection
    def wrapper(model, *args, **kwargs):
        have_ids = ("ids" in kwargs) and (kwargs["ids"] is not None)
        have_filenames = ("filenames" in kwargs) and (kwargs["filenames"] is not None)
        assert have_ids or have_filenames, "Must supply a list of Ids or a list of filenames"
        assert not (have_ids and have_filenames), "Must supply either a list of Ids or a list of filenames, not both."
        # If a single Id is supplied, rather than a list of Ids, turn it into a one-entry tuple
        if have_ids and not isinstance(kwargs["ids"], (list, tuple)):
            kwargs["ids"] = (kwargs["ids"],)
        # If a single filename is supplied, turn it into a one-entry tuple
        if have_filenames and not isinstance(kwargs["filenames"], (list, tuple)):
            kwargs["filenames"] = (kwargs["filenames"],)
        return method(model, *args, **kwargs)

    return wrapper
def _list_nsrr(
    db_slug: str,
    subfolder: str = '',
    pattern: str = '*',
    shallow: bool = False,
) -> List[Tuple[str, str]]:
    """
    Recursively list filenames and checksums for a dataset.
    Specify a subfolder and/or a filename-pattern to filter results.
    Implemented according to the NSRR API documentation:
    https://github.com/nsrr/sleepdata.org/wiki/api-v1-datasets#list-files-in-folder
    Parameters
    ----------
    db_slug : str
        Short identifier of a database, e.g. `'mesa'`.
    subfolder : str, optional
        The folder at which to start the search, by default `''` (i.e. the
        root folder).
    pattern : str, optional
        Glob-like pattern to select files (only applied to the basename,
        not the dirname), by default `'*'`.
    shallow : bool, optional
        If `True`, only search in the given subfolder (i.e. no recursion),
        by default `False`.
    Returns
    -------
    list[tuple[str, str]]
        A list of tuples `(<filename>, <checksum>)`; `<filename>` is the
        full filename (i.e. dirname and basename) and `<checksum>` the
        MD5 checksum.
    """
    api_url = f'https://sleepdata.org/api/v1/datasets/{db_slug}/files.json'
    response = requests.get(api_url, params={'path': subfolder})
    try:
        response_json = response.json()
    except JSONDecodeError:
        # the API answers with non-JSON (e.g. HTML) for unknown datasets
        raise RuntimeError(f'No API response for dataset {db_slug}.') from None
    files = []
    for item in response_json:
        # folders recurse (unless shallow); files are filtered on basename
        if not item['is_file'] and not shallow:
            files.extend(_list_nsrr(db_slug, item['full_path'], pattern))
        elif fnmatch(item['file_name'], pattern):
            files.append((item['full_path'], item['file_checksum_md5']))
    return files
def red():
    """
    Returns the red RGB tensor

    Returns
    -------
    Tensor
        the (1,3,) red tensor
    """
    rgb_uint8 = Uint8Tensor([237, 28, 36])
    return color2float(rgb_uint8)
def get_temporary_text_file(contents, filename):
    """
    Creates a temporary in-memory text file for upload handling.

    :param contents: contents of the file
    :param filename: name of the file
    :type contents: str
    :type filename: str
    """
    buffer = StringIO()
    length = buffer.write(contents)
    uploaded = InMemoryUploadedFile(buffer, None, filename, 'text', length, None)
    # Rewind so consumers read from the beginning
    uploaded.seek(0)
    return uploaded
def print_wf_integrity_stats(stats, workflow_id, dax_label, fmt):
    """
    Prints the integrity statistics of workflow.

    stats       : workflow statistics object reference
    workflow_id : UUID of workflow
    dax_label   : Name of workflow
    fmt         : Format of report ('text' or 'csv')
    """
    if fmt not in ["text", "csv"]:
        print("Output format %s not recognized!" % fmt)
        sys.exit(1)
    report = ["\n"]
    if fmt == "text":
        # In text file, we need a line with the workflow id first
        report.append("# {} ({})".format(workflow_id, dax_label or "All"))
        col_names = integrity_stats_col_name_text
    if fmt == "csv":
        col_names = integrity_stats_col_name_csv
    integrity_statistics = stats.get_integrity_metrics()
    if fmt == "text":
        # column widths: start from the header widths, then widen per row
        max_length = [len(col_names[col]) for col in range(4)]
        columns = ["" for _ in range(4)]
        for stat in integrity_statistics:
            # BUGFIX: each column is compared against its OWN current width;
            # the old code cascaded max_length[col - 1] into column col,
            # producing wrong padding widths.
            max_length[0] = max(max_length[0], len(stat.type))
            max_length[1] = max(max_length[1], len(stat.file_type))
            max_length[2] = max(max_length[2], len(str(stat.count)))
            max_length[3] = max(max_length[3], len(str(stat.duration)))
        # one extra space of padding between columns
        max_length = [width + 1 for width in max_length]
    header_printed = False
    for stat in integrity_statistics:
        # (renamed from `i`, which the old inner `for i in range(0, 4)` shadowed)
        content = [stat.type, stat.file_type, str(stat.count), str(stat.duration)]
        if fmt == "text":
            for col in range(4):
                columns[col] = col_names[col].ljust(max_length[col])
                content[col] = content[col].ljust(max_length[col])
        if fmt == "csv":
            columns = integrity_stats_col_name_csv
            content = [workflow_id, dax_label] + content
        if not header_printed:
            header_printed = True
            report.append(print_row(columns, integrity_stats_col_size, fmt))
        report.append(print_row(content, integrity_stats_col_size, fmt))
    return NEW_LINE_STR.join(report) + NEW_LINE_STR
def build_dynamic_focal_key_loss(task_cfgs):
    """Build per-task dynamic-focal losses and wrap them in a WeightModule.

    According to "Dynamic Task Prioritization for Multitask Learning"
    by Michelle Guo et al.
    """
    per_task = {
        task_cfg['name']: build_dynamic_focal_key_task(task_cfg)
        for task_cfg in task_cfgs
    }
    return WeightModule(per_task)
def settings_from_task_id(
    task_id: int,
    inj_data_path: str = "./data_raw_injections/task_files/",
) -> Tuple[str, Dict[str, Union[str, Optional[Dict[str, str]], bool, int]], int]:
    """Returns injection file (with path), waveform parameters in a dictionary, and number of injections for the given task id.

    Args:
        task_id: Slurm task ID from 1 to 2048.
        inj_data_path: Path to injection files.

    Raises:
        ValueError: If there are no matching or more than one matching
            injection files, or if the science case encoded in the
            filename is not recognised.
    """
    matches = glob.glob(inj_data_path + f"*_TASK_{task_id}.npy")
    if len(matches) != 1:
        raise ValueError(
            f"Number of matches in data_raw_injections/ path is not one: {len(matches)}"
        )
    # includes absolute path
    file = matches[0]
    # the filename encodes ..._SCI-CASE_<case>_INJS-PER-ZBIN_<n>_TASK_<id>.npy;
    # normalise every separator to _SCI-CASE_ and pick the fields out
    normalised = (
        file.replace("_INJS-PER-ZBIN_", "_SCI-CASE_")
        .replace("_TASK_", "_SCI-CASE_")
        .replace(".npy", "_SCI-CASE_")
    )
    science_case, num_injs_str = normalised.split("_SCI-CASE_")[1:3]
    num_injs_per_redshift_bin = int(num_injs_str)
    if science_case == "BNS":
        wf_dict = dict(
            wf_model_name="tf2_tidal",
            wf_other_var_dic=None,
            numerical_over_symbolic_derivs=False,
            coeff_fisco=4,
        )
        # TODO: change to a more accurate numerical waveform
        # (lal_bns / IMRPhenomD_NRTidalv2) once gwbench 0.7 is released
    elif science_case == "BBH":
        wf_dict = dict(
            wf_model_name="lal_bbh",
            wf_other_var_dic=dict(approximant="IMRPhenomHM"),
            numerical_over_symbolic_derivs=True,
            coeff_fisco=8,
        )
    else:
        raise ValueError("Science case not recognised.")
    wf_dict["science_case"] = science_case
    return file, wf_dict, num_injs_per_redshift_bin
def test_should_parse_word2vec_with_multiple_entires(load_embedding_func, tmp_path):
    """Loading a Word2Vec Embedding should pass for multiple word entries"""
    # GIVEN
    word2vec_path = create_tmp_word_embedding(
        tmp_path,
        """
    4 2
    word1 1.0 2.0
    word2 3.0 4.0
    word3 5.0 6.0
    word4 7.0 8.0
    """,
    )
    # WHEN
    embedding = load_embedding_func(word2vec_path)
    # THEN
    expected = {
        "word1": [1.0, 2.0],
        "word2": [3.0, 4.0],
        "word3": [5.0, 6.0],
        "word4": [7.0, 8.0],
    }
    assert embedding.get_words() == list(expected)
    for word, vector in expected.items():
        assert np.array_equal(embedding.get_word_vector(word), np.array(vector))
def remove_comments_and_docstrings(source):
    """
    Returns *source* minus comments and docstrings.

    :param source: Python source code as a string.
    :return: the source with comments and docstrings stripped; column
        positions of the remaining tokens are preserved via space padding.

    .. note:: Uses Python's built-in tokenize module to great effect.
    Example::
        def noop(): # This is a comment
            '''
            Does nothing.
            '''
            pass # Don't do anything
    Will become::
        def noop():
            pass
    """
    io_obj = io.StringIO(source)
    out = ""
    # seed with INDENT so a docstring at the very start of the module is
    # recognised as a docstring (see STRING handling below)
    prev_toktype = tokenize.INDENT
    last_lineno = -1
    last_col = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        ltext = tok[4]
        # re-pad with spaces so surviving tokens keep their original columns
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col:
            out += (" " * (start_col - last_col))
        # Remove comments:
        if token_type == tokenize.COMMENT:
            pass
        # This series of conditionals removes docstrings:
        elif token_type == tokenize.STRING:
            if prev_toktype != tokenize.INDENT:
                # This is likely a docstring; double-check we're not inside an operator:
                if prev_toktype != tokenize.NEWLINE:
                    # Note regarding NEWLINE vs NL: The tokenize module
                    # differentiates between newlines that start a new statement
                    # and newlines inside of operators such as parens, brackes,
                    # and curly braces. Newlines inside of operators are
                    # NEWLINE and newlines that start new code are NL.
                    # Catch whole-module docstrings:
                    if start_col > 0:
                        # Unlabelled indentation means we're inside an operator
                        out += token_string
                    # Note regarding the INDENT token: The tokenize module does
                    # not label indentation inside of an operator (parens,
                    # brackets, and curly braces) as actual indentation.
                    # For example:
                    # def foo():
                    #     "The spaces before this docstring are tokenize.INDENT"
                    #     test = [
                    #         "The spaces before this string do not get a token"
                    #     ]
        else:
            out += token_string
        prev_toktype = token_type
        last_col = end_col
        last_lineno = end_line
    return out
def get_arg_loc(callingconvention: str, bytecounter: int, size: int) -> str:
    """Return a string that denotes the location of a given function argument.

    Falls back to "?" for unrecognised calling conventions.
    """
    index = bytecounter // 4
    if index < 0:
        raise Exception(
            "Argument index cannot be smaller than zero: " + str(index))
    # dispatch on the calling convention; unknown conventions yield "?"
    if callingconvention == "arm":
        return get_arm_arg_loc(bytecounter, size)
    if callingconvention == "mips":
        return get_mips_arg_loc(bytecounter, size)
    return "?"
def alignments_pass(alignments: pathlib.Path) -> Alignments:
    """Perform a single pass over all alignments to gather word counts."""
    meta = Alignments()
    for speaker in tqdm(list(alignments.glob("*")), desc="Alignment Pass"):
        # only numeric directory names are speakers -- skips hidden files etc.
        if not str(speaker.stem).isnumeric():
            continue
        for grid in speaker.glob("*.TextGrid"):
            # first tier of the TextGrid holds the word intervals
            word_tier = textgrid.TextGrid.fromFile(grid)[0]
            for interval in word_tier:
                word = interval.mark
                if not word:
                    continue
                if word not in meta.word_counts:
                    meta.word_counts[word] = 0
                meta.word_counts[word] += 1
    return meta
def BRepBlend_HCurve2dTool_Intervals(*args):
    """
    SWIG-generated wrapper delegating to the native _BRepBlend module.

    :param C:
    :type C: Handle_Adaptor2d_HCurve2d &
    :param T:
    :type T: TColStd_Array1OfReal &
    :param S:
    :type S: GeomAbs_Shape
    :rtype: void
    """
    return _BRepBlend.BRepBlend_HCurve2dTool_Intervals(*args)
async def test_ternary(dut):
    """Try accessing the design.

    Cocotb test: drives 10 random 700-byte bit strings through the DUT and
    compares ``dut.out`` against the software reference model ``ter_model``,
    writing a pass/fail report to the module-level ``report`` file.
    """
    dut._log.info("Running test...")
    # free-running 1 ns clock
    cocotb.fork(Clock(dut.clk, 1, units="ns").start())
    fail = 0
    for i in range(10):
        # reset, load a fresh random bit string, then release reset
        dut.rst <= 1
        bit_str = prg(700)
        dut.bit_str <= int.from_bytes(bit_str, "big")
        await RisingEdge(dut.clk)
        await Timer(1, units="ns")
        dut.rst <= 0
        # wait for the design to finish processing before sampling the output
        await Timer(1500, units="ns")
        expect = ter_model(bit_str)
        try:
            if dut.out.value != expect:
                fail = 1
                report.write( "When bit_str = %X, v = %s, but i expect it = %s\n" %( dut.bit_str.value, bin( int(dut.out.value) ), bin(expect) ) )
        except:
            # NOTE(review): bare except -- presumably meant to catch an
            # unresolvable (x/z) output value; consider narrowing it.
            report.write( "When bit_str = %X, v = ...xxx, but i expect it = %s\n" %( dut.bit_str.value, bin(expect) ) )
    if fail == 0: report.write("------VERIFICATION SUCCEED------")
    else: report.write("------VERIFICATION FAIL------")
    dut._log.info("Running test...done")
    report.close()
def get_block_objects(disasm, nodes, func_addr):
    """
    Get a list of objects to be displayed in a block in disassembly view. Objects may include instructions, stack
    variables, and labels.

    :param angr.analyses.Disassembly disasm: The angr Disassembly Analysis instance.
    :param iterable nodes: A collection of CFG nodes.
    :param int func_addr: The function address of the current block.
    :return: a list of display objects: FunctionHeader, Variables, PhiVariable, Label, and Instruction entries.
    :rtype: list
    """
    block_addrs = [node.addr for node in nodes]
    block_addr = block_addrs[0]
    # All instruction addresses across the given nodes, flattened in node order.
    insn_addrs = list(itertools.chain.from_iterable(disasm.block_to_insn_addrs[addr] for addr in block_addrs))
    lst = [ ]
    variable_manager = disasm.kb.variables[func_addr]
    # function beginning: emit header + stack variables only at the entry block
    if block_addr == func_addr:
        # function header
        func = disasm.kb.functions.get_by_addr(func_addr)
        if func is not None:
            func_header = FunctionHeader(func.name, func.prototype,
                                         func.calling_convention.args if func.calling_convention is not None else None)
            lst.append(func_header)
        # stack variables
        # filter out all stack variables, sorted by stack offset
        variables = variable_manager.get_variables(sort='stack', collapse_same_ident=False)
        variables = sorted(variables, key=lambda v: v.offset)
        lst.append(Variables(variables))
    # phi variables defined at this block
    phi_variables = variable_manager.get_phi_variables(block_addr)
    if phi_variables:
        for phi, variables in phi_variables.items():
            lst.append(PhiVariable(phi, variables))
    # instructions and labels: a label line precedes any labelled instruction,
    # except at the function entry, which already got a FunctionHeader above
    for insn_addr in insn_addrs:
        if insn_addr != func_addr and insn_addr in disasm.kb.labels:
            lst.append(Label(insn_addr, get_label_text(insn_addr, disasm.kb)))
        lst.append(disasm.raw_result_map['instructions'][insn_addr])
    # initial label, if there is any
    # FIXME: all labels should be generated during CFG recovery, and this step should not be necessary.
    if lst and not isinstance(lst[0], FunctionHeader):
        # the first element should be a label
        lst.insert(0, Label(block_addrs[0], get_label_text(block_addrs[0], disasm.kb)))
    return lst
def new_lunar_system_in_time(time_JD=2457099.5|units.day):
    """
    Initial conditions of the Solar system at Julian date *time_JD* --
    particle set with the Sun, the planets, and their moons,
    at the center-of-mass reference frame.
    Defined attributes:
    name, mass, radius, x, y, z, vx, vy, vz, type
    """
    # Reference epoch; the moon ephemerides are offset by delta_JD from it.
    time_0 = 2457099.5 | units.day
    delta_JD = time_JD-time_0
    solar_system = solar_system_in_time(time_JD)
    solar_system[0].type = "star"
    solar_system[0].name = "sun"
    solar_system[1:].type = "planet"
    # NOTE(review): this loop adds particles to the set it iterates over, and
    # also asks for moons of the Sun (index 0) -- confirm that the AMUSE
    # particle-set iterator tolerates concurrent additions and that
    # get_moons_for_planet returns an empty set for the Sun.
    for pi in solar_system:
        moons = get_moons_for_planet(pi, delta_JD=delta_JD)
        solar_system.add_particles(moons)
    solar_system.move_to_center()
    ### to compare with JPL, relative positions and velocities need to be corrected for the
    # Sun's vectors with respect to the barycenter
    #r_s = (3.123390770608490E-03, -4.370830943817017E-04, -1.443425433116342E-04) | units.AU
    #v_s = (3.421633816761503E-06, 5.767414405893875E-06, -8.878039607570240E-08) | (units.AU / units.day)
    #print sun
    #print moons.position.in_(units.AU) + r_s
    #print moons.velocity.in_(units.AU/units.day) + v_s
    return solar_system
def read_annot(fname):
    """Read a FreeSurfer annotation from a .annot file.

    Note : Copied from nibabel

    Parameters
    ----------
    fname : str
        Path to annotation file

    Returns
    -------
    annot : numpy array, shape=(n_verts)
        Annotation id at each vertex
    ctab : numpy array, shape=(n_entries, 5)
        RGBA + label id colortable array
    names : list of bytes
        List of region names as stored in the annot file

    Raises
    ------
    IOError
        If the file does not exist; candidate parcellations found in the
        same directory are listed in the message.
    """
    if not op.isfile(fname):
        dir_name = op.split(fname)[0]
        if not op.isdir(dir_name):
            # Fix: the path is now formatted into the message; previously it
            # was passed as a second, ignored IOError argument.
            raise IOError('Directory for annotation does not exist: %s'
                          % fname)
        cands = os.listdir(dir_name)
        cands = [c for c in cands if '.annot' in c]
        if len(cands) == 0:
            raise IOError('No such file %s, no candidate parcellations '
                          'found in directory' % fname)
        else:
            raise IOError('No such file %s, candidate parcellations in '
                          'that directory: %s' % (fname, ', '.join(cands)))
    # All on-disk scalars are big-endian 32-bit integers ('>i4').
    with open(fname, "rb") as fid:
        n_verts = np.fromfile(fid, '>i4', 1)[0]
        # Pairs of (vertex index, annotation/color id).
        data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
        annot = data[data[:, 0], 1]
        ctab_exists = np.fromfile(fid, '>i4', 1)[0]
        if not ctab_exists:
            raise Exception('Color table not found in annotation file')
        n_entries = np.fromfile(fid, '>i4', 1)[0]
        if n_entries > 0:
            # Old-format color table.
            length = np.fromfile(fid, '>i4', 1)[0]
            orig_tab = np.fromfile(fid, '>c', length)
            orig_tab = orig_tab[:-1]
            names = list()
            # Fix: np.int was removed in NumPy 1.24; use np.int64 explicitly.
            ctab = np.zeros((n_entries, 5), np.int64)
            for i in range(n_entries):
                name_length = np.fromfile(fid, '>i4', 1)[0]
                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
                names.append(name)
                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
                # Column 4 packs R/G/B/flag into the single annotation id.
                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
                              ctab[i, 2] * (2 ** 16) +
                              ctab[i, 3] * (2 ** 24))
        else:
            # New-format color table: a non-positive count encodes -version.
            ctab_version = -n_entries
            if ctab_version != 2:
                raise Exception('Color table version not supported')
            n_entries = np.fromfile(fid, '>i4', 1)[0]
            ctab = np.zeros((n_entries, 5), np.int64)
            length = np.fromfile(fid, '>i4', 1)[0]
            np.fromfile(fid, "|S%d" % length, 1)  # Orig table path
            entries_to_read = np.fromfile(fid, '>i4', 1)[0]
            names = list()
            for i in range(entries_to_read):
                np.fromfile(fid, '>i4', 1)  # Structure
                name_length = np.fromfile(fid, '>i4', 1)[0]
                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
                names.append(name)
                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
                              ctab[i, 2] * (2 ** 16))
        # convert to more common alpha value
        ctab[:, 3] = 255 - ctab[:, 3]
    return annot, ctab, names
def overlap3(data):
    """Distribute records over twelve month buckets.

    The month of the first record (characters 5-7 of its leading date
    string, ``YYYY-MM-...``) seeds the bucket index; each subsequent record
    is assigned to the following month, wrapping around the year. The
    leading date field is stripped from every record.

    :param data: sequence of records whose first element is a date string
        followed by the values to keep.
    :return: list of 12 numpy arrays, one per month bucket.
    """
    month_bins = [[] for _ in range(12)]
    start_month = int(data[0][0][5 : 7])
    for offset, record in enumerate(data):
        # Month 12 maps to index -1, i.e. the last bucket.
        month_bins[((start_month + offset) % 12) - 1].append(record[1:])
    return [np.array(bucket) for bucket in month_bins]
def gen_qsub_script(exp, run_type):
    """Render the qsub template for *run_type* with run-specific constants
    and write it to the SGE input location.

    Returns the path of the written qsub script.
    """
    if config.is_cp_job(run_type):
        replacements = gen_cp_qsub_constants(exp, run_type)
    else:
        replacements = gen_mask_qsub_constants(exp, run_type)
    # Read the template and substitute all placeholder constants.
    with open(config.get_template_qsub(run_type), 'r') as template_file:
        rendered = helpers.multi_str_replace(template_file.read(), replacements)
    # Write the rendered script where SGE expects it.
    qsub_path = config.get_sge_input_qsub(exp, run_type)
    with open(qsub_path, 'w') as script_file:
        script_file.write(rendered)
    return qsub_path
def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Get the difference in radians between two quaternions.

    Args:
        a: first quaternion, shape (N, 4)
        b: second quaternion, shape (N, 4)
    Returns:
        Difference in radians, shape (N,)
    """
    # Relative rotation a * conj(b); equivalent to 2*acos(|w|) on its
    # scalar part, but computed via the vector part's norm instead.
    relative = quat_mul(a, quat_conjugate(b))
    vec_norm = torch.norm(relative[:, 0:3], p=2, dim=-1)
    # Clamp guards the asin domain against numeric drift just above 1.
    return 2.0 * torch.asin(torch.clamp(vec_norm, max=1.0))
def PairsMerging(xIn: list):
    """Merge a list of individually sorted sublists into one sorted list.

    Performs classic bottom-up merge-sort passes: adjacent pairs of
    sublists are merged repeatedly until a single sorted list remains.

    Parameters
    ----------
    xIn : list
        Non-empty list of sublists, each already sorted in ascending order.

    Returns
    -------
    list
        All input values merged into one ascending-sorted list.
    """
    def _merge_two(left, right):
        # Standard two-pointer merge of two sorted lists.
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    while len(xIn) > 1:
        nxt = [_merge_two(xIn[k], xIn[k + 1])
               for k in range(0, len(xIn) - 1, 2)]
        if len(xIn) % 2:
            # Odd sublist out: carry the last one into the next pass.
            nxt.append(xIn[-1])
        xIn = nxt
    return xIn[0]
def generate_dataset_db(
    connection_string: str, file_name: str, include_null: bool
) -> str:
    """
    Given a database connection string, extract all tables/fields from it
    and write out a boilerplate dataset manifest, excluding optional null
    attributes.
    """
    engine = get_db_engine(connection_string)
    schemas = get_db_schemas(engine=engine)
    # Build dataset descriptions from the reflected schemas and persist them.
    write_dataset_manifest(
        file_name=file_name,
        include_null=include_null,
        datasets=create_db_datasets(db_schemas=schemas),
    )
    return file_name
def save_path_plan(path_wp):
    """Compute per-segment distances and turn angles for a waypoint path
    and save the plan to ``waypoint.json``.

    Parameters
    ----------
    path_wp : list
        Ordered waypoint positions (pixel coordinates), as accepted by
        ``get_distance`` and ``get_angle_btw_line``.

    Side effects
    ------------
    Writes ``waypoint.json`` in the current directory with keys ``wp``
    (list of {dist_cm, dist_px, angle_deg} dicts) and ``pos`` (the raw
    waypoint positions).
    """
    path_dist_cm = []
    path_dist_px = []
    path_angle = []
    for index in range(len(path_wp)):
        # Distances need a previous waypoint, so skip the first index.
        if index > 0:
            dist_px = get_distance(path_wp[index - 1], path_wp[index])
            dist_cm = dist_px * MAP_SIZE_COEFF
            path_dist_cm.append(dist_cm)
            path_dist_px.append(dist_px)
        if index == 0:
            path_angle.append(0)
        # Angles need both neighbours, so skip the first and last index.
        if index > 0 and index < (len(path_wp) - 1):
            angle = get_angle_btw_line(path_wp[index - 1], path_wp[index], path_wp[index + 1])
            # Workaround: the path controller mis-computes angle direction
            # when the angle is not divisible by 5, so round up to the
            # nearest multiple of 5.
            angle = math.ceil(angle / 5) * 5
            path_angle.append(angle)
    # Assemble the waypoint plan.
    waypoints = []
    for index in range(len(path_dist_cm)):
        waypoints.append({
            "dist_cm": path_dist_cm[index],
            "dist_px": path_dist_px[index],
            "angle_deg": path_angle[index],
        })
    # Fix: use a context manager so the file is closed even if json.dump
    # raises; plain 'w' suffices since the old 'w+' read mode was unused.
    with open('waypoint.json', 'w') as f:
        json.dump({
            "wp": waypoints,
            "pos": path_wp
        }, f, indent=4)
def on_key_press(symbol, modifiers):
    """Keyboard handler controlling the simulation.

    BACKSPACE/SLASH reset and re-render the environment, ESCAPE closes it
    and exits the process, P resets it and prints a confirmation.
    """
    if symbol in (key.BACKSPACE, key.SLASH):
        env.reset()
        env.render()
        return
    if symbol == key.ESCAPE:
        env.close()
        sys.exit(0)
    if symbol == key.P:
        env.reset()
        print("env reset")
    # Screenshot support (requires the skimage dependency) — kept for reference:
    # elif symbol == key.RETURN:
    #     print('saving screenshot')
    #     img = env.render('rgb_array')
    #     try:
    #         from experiments.utils import save_img
    #         save_img('screenshot.png', img)
    #     except BaseException as e:
    #         print(str(e))
def salt(secret: str) -> str:
    """Derive a deterministic PBKDF salt: the SHA-256 hex digest of *secret*."""
    digest = sha256(secret.encode("utf-8"))
    return digest.hexdigest()
async def async_get_service(
    hass: HomeAssistant,
    config: ConfigType,
    discovery_info: DiscoveryInfoType | None = None,
) -> KNXNotificationService | None:
    """Build the KNX notification service from discovered platform config."""
    if not discovery_info or not discovery_info["platform_config"]:
        return None
    xknx: XKNX = hass.data[DOMAIN].xknx
    # One XknxNotification device per configured platform entry.
    devices = [
        XknxNotification(
            xknx,
            name=entry[CONF_NAME],
            group_address=entry[KNX_ADDRESS],
        )
        for entry in discovery_info["platform_config"]
    ]
    return KNXNotificationService(devices) if devices else None
def docx_to_df(file_path):
    """
    Convert docx file to dataframe

    Parameters
    ----------
    file_path : str
        A file path of documnet; the name is expected to contain an id of
        the form ``ddd-d-ddd`` (session id + transcriber id).

    Returns
    -------
    dataframe
        One row per text line with columns:

        speech | transcript_filepath | id | transcriber_id | wave_filepath
        ------------------------------------------------------------------
        00:00  | Users/Soyeon/~~~.   |119-2| 113.          | Users/~~~~

        ``id``, ``transcriber_id`` and ``wave_filepath`` are None when the
        file name does not match the expected pattern.
    """
    # Convert docx file to dataframe: one row per text line.
    text = docx2txt.process(file_path)
    text_list = text.split('\n')
    df = pd.DataFrame(text_list, columns = ["speech"])
    # Add [transcript_filepath] column
    df["transcript_filepath"] = file_path
    # Add [id], [transcriber_id] and [wave_filepath] columns.
    extract = re.search(r'(\d{3})-(\d{1})-(\d{3})', file_path)
    audio_path = base_prefix + "Audio Files & Transcripts/Audio Files/"
    if extract is not None:
        df["id"] = extract.group(1) + "-" + extract.group(2)
        df["transcriber_id"] = extract.group(3)
        df["wave_filepath"] = audio_path + df["id"] + ".wav"
    else:
        df["id"] = None
        df["transcriber_id"] = None
        # Fix: the wave path used to be built unconditionally, which raised
        # a TypeError (str + None) on files with a malformed title.
        df["wave_filepath"] = None
        warnings.warn('File {0} seems to have the wrong title format for extracting id and transcriber_id'.format(file_path))
    return df
def write_current_station(config, station):
    """Persist the label of the currently selected workstation.

    config - global configuration of the review tool; provides
             STATION_FILENAME, the path of the tracking file
    station - label of the currently selected workstation
    """
    with open(config.STATION_FILENAME, 'w') as station_file:
        station_file.write(station)
def _is_referenced_by_a_stack_frame_name(referrers, obj, name):
    """
    Is there a reference among the given referrers, that is a stack frame,
    which contains a local variable of the given name, which points to
    the object of interest?

    :param referrers: The references to scan.
    :param obj: The object of interest.
    :param name: The name the reference must have.
    :return: Boolean.
    """
    for ref in referrers:
        # Only stack-frame referrers are of interest.
        if not _is_a_(ref, 'frame'):
            continue
        if name in ref.f_locals and ref.f_locals[name] == obj:
            return True
    return False
def get_table_value(table, row_name, column, namespace='default', network=None, base_url=DEFAULT_BASE_URL):
    """Retrieve the value from a specific row and column from node, edge or network tables.

    Args:
        table (str): Name of table, e.g., node (default), edge, network
        row_name (str): Node, edge or network name, i.e., the value in the "name" column
        column (str): Name of column to retrieve values from
        namespace (str): Namespace of table. Default is "default".
        network (SUID or str or None): Name or SUID of a network. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        obj: the value of the table cell, cast to float, int, bool or str depending on column type

    Raises:
        HTTPError: if table or namespace doesn't exist in network or if cell contains a numeric type but no number
        CyError: if network name or SUID doesn't exist
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> get_table_value('node', 'YDL194W', 'COMMON')
        'SNF3'
        >>> get_table_value('edge', 'YLR197W (pp) YOR310C', 'EdgeBetweenness', network='My Network')
        2.0
        >>> get_table_value('node', 'YDL194W', 'IsSingleNode')
        False
    """
    suid = networks.get_network_suid(network, base_url=base_url)

    # column type -- used at the end to cast the raw CyREST string result
    table_col_info = get_table_column_types(table, namespace, network, base_url=base_url)
    table_col_type = table_col_info[column]

    # which row -- translate the human-readable name into the table's SUID key
    row_key = None
    # deferred imports to avoid a circular dependency with py4cytoscape_utils
    from .py4cytoscape_utils import node_name_to_node_suid
    from .py4cytoscape_utils import edge_name_to_edge_suid
    if table == 'node':
        row_key = node_name_to_node_suid(row_name, network, base_url=base_url)[0]
    elif table == 'edge':
        row_key = edge_name_to_edge_suid(row_name, network, base_url=base_url)[0]
    elif table == 'network':
        row_key = networks.get_network_suid(row_name,
                                            base_url=base_url)  # TODO: R implementation looks wrong because of == and use of row_name
    else:
        row_key = None

    # get row/column value
    res = commands.cyrest_get(f'networks/{suid}/tables/{namespace}{table}/rows/{row_key}/{column}', base_url=base_url,
                              require_json=False)
    if not res: return None
    # TODO: This "not res" can't happen for numbers because CyREST returns HTTPError if a value doesn't exist ... is this what we want?
    # TODO: For strings, a '' is returned ... do we want to return None for this?

    # cast according to the declared column type
    if table_col_type == 'Double':
        return float(res)
    elif table_col_type == 'Long':
        return int(res)
    elif table_col_type == 'Integer':
        return int(res)
    elif table_col_type == 'Boolean':
        return bool(res)
    else:
        return str(res)
def polling_wait(
    poller: Union[LROPoller, Future],
    message: str = None,
    start_time: float = None,
    is_local=False,
    timeout=LROConfigurations.POLLING_TIMEOUT,
) -> Any:
    """Print out status while polling and time of operation once completed.

    :param Union[LROPoller, concurrent.futures.Future] poller: A poller which will return status update via function done().
    :param (str, optional) message: Message to print out before starting operation write-out.
    :param (float, optional) start_time: Start time of operation.
    :param (bool, optional) is_local: If poller is for a local endpoint, so the timeout is removed.
    :param (int, optional) timeout: New value to overwrite the default timeout.
    """
    if message is not None:
        # Fix: the old f-string logged the literal text "None" when no
        # message was supplied.
        module_logger.info(message)
    if is_local:
        # No timeout on local endpoints: pulling an image or installing a
        # conda env can take a long time and the user should see progress.
        # (This rationale used to be a stray triple-quoted string, i.e. a
        # no-op expression in the middle of the branch.)
        while not poller.done():
            module_logger.info(".")
            time.sleep(LROConfigurations.SLEEP_TIME)
    else:
        poller.result(timeout=timeout)

    if poller.done():
        module_logger.info("Done ")
    else:
        module_logger.warning("Timeout waiting for long running operation")

    if start_time:
        end_time = time.time()
        duration = divmod(int(round(end_time - start_time)), 60)
        module_logger.info(f"({duration[0]}m {duration[1]}s)\n")
def _check_circular_dependency(nodes_qty: int, ind_nodes_qty: int) -> None:
    """Check for a circular dependency.

    A circular dependency prevents the formation of the topological order;
    it is detected when the total node count differs from the count of
    independent nodes.

    Parameters
    ----------
    nodes_qty : int
        Quantity of nodes.
    ind_nodes_qty : int
        Quantity of independent nodes.

    Raises
    ------
    SortingError
        Informs that a circular dependency was found in the graph.
    """
    if nodes_qty == ind_nodes_qty:
        return
    raise SortingError(
        'circular dependency found in the graph',
        [f'{nodes_qty} != {ind_nodes_qty}']
    )
def prettify_name_tuple(tup):
    """Process the intersect tuples from the Steam API: keep the part of
    each name before the first underscore, joined with ", "."""
    return ", ".join(name.split("_")[0] for name in tup)
def init_repository(path, bare=False,
                    flags=C.GIT_REPOSITORY_INIT_MKPATH,
                    mode=0,
                    workdir_path=None,
                    description=None,
                    template_path=None,
                    initial_head=None,
                    origin_url=None):
    """
    Creates a new Git repository in the given *path*.

    If *bare* is True the repository will be bare, i.e. it will not have a
    working copy.

    The *flags* may be a combination of:

    - GIT_REPOSITORY_INIT_BARE (overriden by the *bare* parameter)
    - GIT_REPOSITORY_INIT_NO_REINIT
    - GIT_REPOSITORY_INIT_NO_DOTGIT_DIR
    - GIT_REPOSITORY_INIT_MKDIR
    - GIT_REPOSITORY_INIT_MKPATH (set by default)
    - GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE

    The *mode* parameter may be any of GIT_REPOSITORY_SHARED_UMASK (default),
    GIT_REPOSITORY_SHARED_GROUP or GIT_REPOSITORY_INIT_SHARED_ALL, or a custom
    value.

    The *workdir_path*, *description*, *template_path*, *initial_head* and
    *origin_url* are all strings.

    See libgit2's documentation on git_repository_init_ext for further details.

    Returns a :class:`Repository` wrapping the newly initialized repository.
    """
    # Pre-process input parameters
    if bare:
        flags |= C.GIT_REPOSITORY_INIT_BARE

    # Options: populate the C-level git_repository_init_options struct.
    options = ffi.new('git_repository_init_options *')
    C.git_repository_init_init_options(options, C.GIT_REPOSITORY_INIT_OPTIONS_VERSION)
    options.flags = flags
    options.mode = mode
    # String options are converted to bytes for the C API (to_bytes is None-safe).
    options.workdir_path = to_bytes(workdir_path)
    options.description = to_bytes(description)
    options.template_path = to_bytes(template_path)
    options.initial_head = to_bytes(initial_head)
    options.origin_url = to_bytes(origin_url)

    # Call into libgit2; check_error raises on a non-zero return code.
    crepository = ffi.new('git_repository **')
    err = C.git_repository_init_ext(crepository, to_bytes(path), options)
    check_error(err)

    # Ok -- wrap the initialized repository in the Python-level object.
    return Repository(path)
def set_color(frame: np.ndarray, x: int, y: int, color: Tuple[int]) -> None:
    """Paint a single pixel: assign *color* to position (x, y) of *frame*."""
    pixel_value = np.asarray(color)
    frame[x, y, :] = pixel_value
def carrington_rotation_number_relative(time, lon):
    """
    A function that returns the decimal carrington rotation number for a spacecraft position
    that may not be at the same place at earth. In this case you know the carrington longitude
    of the spacecraft, and want to convert that to a decimal carrington number that is within
    +0.5 and -0.5 of the decimal rotation for the earth-based longitude.

    :param time: an astropy Time object indicating the time the position is known.
    :param lon: the carrington longitude of the spacecraft position, in degrees
        (presumably within [0, 360) -- TODO confirm with callers).
    :return: the decimal_carrington number.
    """
    # get the decimal carrington number for Earth at this time
    cr_earth = sunpy.coordinates.sun.carrington_rotation_number(time)
    # convert that to the earth longitude (this should match sunpy.coordinates.sun.L0(time))
    cr0 = np.floor(cr_earth)
    lon_earth = np.mod((1 - (cr_earth - cr0)*360), 360)
    # compute the angular difference and the modulus
    diff = lon_earth - lon
    mod = np.mod(diff, 360.)
    # compute the fractional rotation offset, which depends on where the periodic boundary is.
    # The two branches nudge the rotation count by +/-1 when the spacecraft
    # longitude lies on the far side of the 0/360 wrap from Earth.
    offset = 0.0
    if lon_earth < 180 and mod < 180 and diff < 0:
        offset = +1.0
    if lon_earth >= 180 and mod >= 180 and diff >= 0:
        offset = -1.0
    # NOTE(review): for lon in [0, 360], 1.0 - lon/360. is in [0, 1], so the
    # outer np.mod(..., 360.) appears to be a no-op -- confirm.
    cr_now = cr0 + np.mod(1.0 - lon/360., 360.) + offset
    debug = False
    if debug:
        print('{: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f}'.format(lon, diff, mod, cr_now, cr_earth,
                                                                             cr_now - cr_earth))
        print(cr_earth, cr0, lon_earth, sunpy.coordinates.sun.L0(time).value, lon, cr_now)
    return cr_now
def _impl_ufunc_remainder(x1, x2):
    """
    Returns the remainder of division for each element x1_i of the input array x1 and the
    respective element x2_i of the input array x2.

    NOTE(review): the body is an intentional stub (``pass``); presumably the
    real implementation is supplied elsewhere (e.g. registered through a
    dispatcher that only needs this signature) -- confirm before relying on it.
    """
    pass
def plot_square_lattice(width, height, numbered=False, title='', save_path=None):
    """
    Plot a figure of square lattice

    :param width: width of the square lattice
    :param height: height of the square lattice
    :param numbered: if True, annotate each lattice site with its linear index
    :param title: title of the figure
    :param save_path: if not None, directory (created if missing) where the
        figure is saved as 'square(width,height).png'

    Example:
    >>>plot_square_lattice(2, 2)
    show a figure of a 2x2 square lattice
    """
    # Local import keeps HamiltonianModule optional at module load time.
    from HamiltonianModule import positions_nearest_neighbor_square
    # Site numbering laid out row-major on a (height x width) grid.
    pos_1d = np.arange(0, width*height, dtype=int).reshape(height, width)
    index = positions_nearest_neighbor_square(width, height)
    # Draw one line segment (with dot markers) per nearest-neighbor bond.
    for n in range(0, index.shape[0]):
        pos1 = arg_find_array(pos_1d == index[n, 0])
        pos2 = arg_find_array(pos_1d == index[n, 1])
        plt.plot([pos1[0], pos2[0]], [pos1[1], pos2[1]], '-ob', markersize=8)
    plt.axis('equal')
    if numbered:
        # Annotate each site with its linear index, slightly offset from the dot.
        for w in range(0, width):
            for h in range(0, height):
                plt.text(h + 0.06, w - 0.06, str(pos_1d[h, w]), horizontalalignment='left',
                         verticalalignment='top', fontsize=15)
    plt.axis('off')
    plt.title(title)
    if save_path is not None:
        mkdir(save_path)
        plt.savefig(os.path.join(save_path, 'square(%d,%d).png' % (width, height)))
    plt.show()
def delete_volume_backup(cinder, vol_backup_id):
    """Remove the given volume backup through the cinder API.

    :param cinder: Authenticated cinderclient
    :type cinder: cinderclient.Client
    :param vol_backup_id: unique name or id for the openstack resource
    :type vol_backup_id: str
    """
    delete_resource(
        cinder.backups,
        vol_backup_id,
        msg="deleting cinder volume backup",
    )
def wrap_elasticluster(args):
    """Wrap elasticluster commands to avoid need to call separately.

    - Uses .bcbio/elasticluster as default configuration location.
    - Sets NFS client parameters for elasticluster Ansible playbook. Uses async
      clients which provide better throughput on reads/writes:
      http://nfs.sourceforge.net/nfs-howto/ar01s05.html (section 5.9 for tradeoffs)

    *args* is a full argv-style list (args[0] is the program name); it is
    installed as sys.argv before delegating to elasticluster's main().
    Returns elasticluster's exit status.
    """
    if "-s" not in args and "--storage" not in args:
        # clean up old storage directory if starting a new cluster
        # old pickle files will cause consistent errors when restarting
        storage_dir = os.path.join(os.path.dirname(DEFAULT_EC_CONFIG), "storage")
        # positional (non-flag) arguments; expected shape: [prog, subcommand, cluster, ...]
        std_args = [x for x in args if not x.startswith("-")]
        if len(std_args) >= 3 and std_args[1] == "start":
            cluster = std_args[2]
            pickle_file = os.path.join(storage_dir, "%s.pickle" % cluster)
            if os.path.exists(pickle_file):
                os.remove(pickle_file)
        args = [args[0], "--storage", storage_dir] + args[1:]
    if "-c" not in args and "--config" not in args:
        args = [args[0]] + ["--config", DEFAULT_EC_CONFIG] + args[1:]
    os.environ["nfsoptions"] = "rw,async,nfsvers=3"  # NFS tuning
    # elasticluster reads its command line from sys.argv, so install ours.
    sys.argv = args
    try:
        return elasticluster.main.main()
    except SystemExit as exc:
        # elasticluster exits via sys.exit(); convert that into a return code.
        return exc.args[0]
def test_turn_on_not_connected(device):
    """turn_on() must raise RuntimeError when the light was never connected."""
    light = Light("00:11:22")
    with pytest.raises(RuntimeError):
        light.turn_on()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.