hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acecf09b718fa6495caae82d0ed0af5a7892e1e6 | 1,072 | py | Python | weather_sp/setup.py | CillianFn/weather-tools | 2d8651d212cc998f098cbcf83a6e4536e9e455a7 | [
"Apache-2.0"
] | null | null | null | weather_sp/setup.py | CillianFn/weather-tools | 2d8651d212cc998f098cbcf83a6e4536e9e455a7 | [
"Apache-2.0"
] | null | null | null | weather_sp/setup.py | CillianFn/weather-tools | 2d8651d212cc998f098cbcf83a6e4536e9e455a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
base_requirements = [
"apache-beam[gcp]",
"pygrib",
"numpy>=1.20.3",
"netcdf4",
]
setup(
name='splitter_pipeline',
packages=find_packages(),
author='Anthromets',
author_email='anthromets-ecmwf@google.com',
version='0.1.0',
url='https://weather-tools.readthedocs.io/en/latest/weather_sp/',
description='A tool to split weather data files into per-variable files.',
install_requires=base_requirements,
)
| 31.529412 | 78 | 0.728545 |
acecf0ba4103c82970c5a11ce81cfc74bc80f05e | 5,367 | py | Python | custom_models/Meta.py | occasumlux/Promoters | 2256b56d129c540f7d737442ad4045d1a816f43f | [
"MIT"
] | null | null | null | custom_models/Meta.py | occasumlux/Promoters | 2256b56d129c540f7d737442ad4045d1a816f43f | [
"MIT"
] | null | null | null | custom_models/Meta.py | occasumlux/Promoters | 2256b56d129c540f7d737442ad4045d1a816f43f | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from enum import Enum, auto
# Since PromoterType's values are accessed sometimes, auto isn't used
class PromoterType(Enum):
NON_PROMOTER = 1
SIGMA_70 = 2
SIGMA_24 = 3
SIGMA_28 = 4
SIGMA_38 = 5
SIGMA_32 = 6
SIGMA_54 = 7
class ModelType(Enum):
SAVED_MODEL = auto()
WEIGHTS_ONLY = auto()
class CustomMetrics:
"""
Todo: Unify methods
"""
@staticmethod
def compute_TnF(y_true, y_pred):
tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
return tp, tn, fp, fn
# Training methods
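    # Matthews correlation coefficient, specificity and sensitivity below are computed
    # from the confusion-matrix counts above; K.epsilon() in each denominator guards
    # against division by zero when a class is absent from the batch.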
@staticmethod
def matthews_correlation_coefficient(y_true, y_pred):
tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
num = tp * tn - fp * fn
den = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
return num / K.sqrt(den + K.epsilon())
@staticmethod
def specificity(y_true, y_pred):
tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
return tn / (tn + fp + K.epsilon())
@staticmethod
def sensitivity(y_true, y_pred):
tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
return tp / (tp + fn + K.epsilon())
# Evaluation methods
@staticmethod
def val_matthews_correlation_coefficient(*, tp, fp, tn, fn):
num = tp * tn - fp * fn
den = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
return num / K.sqrt(den + K.epsilon())
@staticmethod
def val_specificity(*, tp, fp, tn, fn):
return tn / (tn + fp + K.epsilon())
@staticmethod
def val_sensitivity(*, tp, fp, tn, fn):
return tp / (tp + fn + K.epsilon())
@staticmethod
def val_accuracy(*, tp, fp, tn, fn):
return (tp + tn) / (tp + tn + fp + fn + K.epsilon())
class MetaModel(ABC):
@abstractmethod
def __init__(self, name: str):
self.name = name
@staticmethod
@abstractmethod
def _preprocess(data):
pass
# Maybe not
@staticmethod
@abstractmethod
def _create_model():
pass
@abstractmethod
def predict(self):
pass
@abstractmethod
def train(self):
pass
@staticmethod
def _generate_indices(predictions: np.ndarray, comp_value = 0):
# Get index for future use
bindices_zero = (predictions == comp_value) # Boolean indices
indices_zero = np.arange(len(predictions))[bindices_zero] # For next stage
indices_nonzero = np.arange(len(predictions))[~bindices_zero] # Classification
return indices_zero, indices_nonzero
def test(self, inputs: List[str], outputs: List[PromoterType]):
# Create a list holding all possible promoter types (+ non promoter)
types = list(PromoterType)
# Get predictions for later use
preds_array = self.predict(inputs)
# Create np.array of outputs for later use
outputs_array = np.array(outputs)
# Get indices for each promoter type from outputs_array, store them in a dictionary
outputs_idx = {_type : self._generate_indices(outputs_array, _type) for _type in types}
stats = {}
# Calculate metrics for each type
for _type, idx in outputs_idx.items():
tp = tf.constant(np.count_nonzero(preds_array[idx[0]] == _type), dtype=tf.float32)
fp = tf.constant(np.count_nonzero(preds_array[idx[1]] == _type), dtype=tf.float32)
fn = tf.constant(np.count_nonzero(preds_array[idx[0]] != _type), dtype=tf.float32)
tn = tf.constant(np.count_nonzero(preds_array[idx[1]] != _type), dtype=tf.float32)
#print(f"Type: {_type} --- TP: {tp.numpy()} --- FP: {fp.numpy()} --- TN: {tn.numpy()} --- FN: {fn.numpy()}")
# Metrics
sn = CustomMetrics.val_sensitivity(tp=tp, fp=fp, fn=fn, tn=tn).numpy()
sp = CustomMetrics.val_specificity(tp=tp, fp=fp, fn=fn, tn=tn).numpy()
cc = CustomMetrics.val_matthews_correlation_coefficient(tp=tp, fp=fp, fn=fn, tn=tn).numpy()
ac = CustomMetrics.val_accuracy(tp=tp, fp=fp, fn=fn, tn=tn).numpy()
#print(f"Sn: {sn} --- Sp: {sp} --- MCC: {cc} --- Ac: {ac}")
stats[_type] = {"TP": tp.numpy(), "FP": fp.numpy(),
"TN": tn.numpy(), "FN": fn.numpy(),
"Specificity": sp, "Sensitivity": sn,
"MCC": cc, "Accuracy": ac
}
total_acc = sum(1 for x, y in zip(preds_array, outputs) if x == y) / len(inputs)
return stats, total_acc | 37.013793 | 120 | 0.569406 |
acecf2519e405b05cb4b712218855024634d532c | 2,816 | py | Python | src/PyDS/Queue/Queue.py | AoWangPhilly/PyDS | d79f92d0d2e7c005ebb8fa9f631d5f01e590625e | [
"MIT"
] | null | null | null | src/PyDS/Queue/Queue.py | AoWangPhilly/PyDS | d79f92d0d2e7c005ebb8fa9f631d5f01e590625e | [
"MIT"
] | null | null | null | src/PyDS/Queue/Queue.py | AoWangPhilly/PyDS | d79f92d0d2e7c005ebb8fa9f631d5f01e590625e | [
"MIT"
] | null | null | null | from typing import Any, List
from dataclasses import dataclass, field
from PyDS.Error import Empty
@dataclass
class Queue:
"""Implementation of Queue ADT
:param __capacity: The maximum number of elements a queue can hold
:type __capacity: int
:param __list: A container that holds n-elements in queue
:type __list: list[Any]
:param __front: The index pointing at front of queue
:type __front: int
:param __size: The size of the queue
:type __size: int
"""
__capacity: int = 64
__list: List[Any] = field(default_factory=lambda: [None] * Queue.__capacity)
__front: int = 0
__size: int = 0
def enqueue(self, value: Any) -> None:
"""Insertion to the tail of the queue
:param value: The value inserting to the tail
:type value: Any
"""
if self.__size == self.__capacity:
self.__resize(capacity=2 * self.__capacity)
end = (self.__front + self.__size) % self.__capacity
self.__list[end] = value
self.__size += 1
def dequeue(self) -> Any:
"""Deletion at the front of the queue
:return: A value at the front of queue
:rtype: Any
"""
if self.is_empty():
raise Empty("Queue is empty")
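        # Shrink the backing list once the queue falls below a quarter of its capacity,
        # mirroring the doubling in enqueue() so both operations stay amortized O(1).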
if 0 < self.__size < (self.__capacity // 4):
self.__resize(capacity=self.__capacity // 2)
value = self.__list[self.__front]
self.__list[self.__front] = None
self.__front = (self.__front + 1) % self.__capacity
self.__size -= 1
return value
def front(self) -> Any:
"""Gets value at front of queue
:return: A value at the front of queue
:rtype: Any
"""
if self.is_empty():
raise Empty("Queue is empty")
return self.__list[self.__front]
def is_empty(self) -> bool:
"""Checks to see if queue is empty
:return: Whether or not the queue's empty
:rtype: bool
"""
return self.__size == 0
def __resize(self, capacity: int) -> None:
"""Resize queue with twice the capacity"""
list_ = [None] * capacity
front = self.__front
for i in range(self.__size):
list_[i] = self.__list[front]
front = (front + 1) % self.__capacity
self.__front = 0
self.__list = list_
self.__capacity = capacity
def __len__(self) -> int:
return self.__size
def __str__(self) -> str:
if self.is_empty():
return 'Queue([])'
front = self.__front
output = 'Queue(['
for _ in range(self.__size - 1):
output += f'{self.__list[front]}, '
front = (front + 1) % self.__capacity
output += f'{self.__list[front]}])'
return output
| 28.734694 | 80 | 0.577415 |
acecf33ad9377b7a39d98ca21e25febafb03a30d | 1,415 | py | Python | masterpiece/masterpiece/doctype/bulk_import/bulk_import.py | zeta17/masterpiece | 672b1e437ecd47cfcfd12aace6a93f21e1a6085e | [
"MIT"
] | null | null | null | masterpiece/masterpiece/doctype/bulk_import/bulk_import.py | zeta17/masterpiece | 672b1e437ecd47cfcfd12aace6a93f21e1a6085e | [
"MIT"
] | null | null | null | masterpiece/masterpiece/doctype/bulk_import/bulk_import.py | zeta17/masterpiece | 672b1e437ecd47cfcfd12aace6a93f21e1a6085e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, hendrik and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import msgprint, _
from frappe.utils import flt
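# Summary of the flow below: on submit, a single Production document is created from
# this Bulk Import row, and one "expenses" child row is appended per non-zero cost
# component (Potong, CMT, Wash, Finishing), each using the same qty/received_qty.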
class BulkImport(Document):
def on_submit(self):
prod = frappe.new_doc("Production")
prod.posting_date = "2020-01-01"
prod.seri_n_patrun = self.seri
prod.qty = self.qty
prod.received_qty = self.qty
prod.uom = "PCS"
prod.lebar_dada = self.ld
prod.panjang = self.pj
prod.price_list_rate = self.price_list
if flt(self.potong) > 0:
prod.append("expenses", {
"cost_component": "Potong",
"tukang": "Fung",
"rate": self.potong,
"qty": self.qty,
"received_qty": self.qty
})
if flt(self.cmt) > 0:
prod.append("expenses", {
"cost_component": "CMT",
"tukang": "Fung",
"rate": self.cmt,
"qty": self.qty,
"received_qty": self.qty
})
if flt(self.wash) > 0:
prod.append("expenses", {
"cost_component": "Wash",
"tukang": "Fung",
"rate": self.wash,
"qty": self.qty,
"received_qty": self.qty
})
if flt(self.finishing) > 0:
prod.append("expenses", {
"cost_component": "Finishing",
"tukang": "Fung",
"rate": self.finishing,
"qty": self.qty,
"received_qty": self.qty
})
prod.flags.ignore_permissions = True
prod.submit()
| 25.267857 | 49 | 0.646643 |
acecf4e6e913ab859fce69a8c54148c08db64064 | 23,235 | py | Python | spotrix/migrations/versions/bebcf3fed1fe_convert_dashboard_v1_positions.py | Spotrix/spotrix | 611aaac9f47e0451660b37d0ade06750883aba36 | [
"Apache-2.0"
] | 1 | 2021-12-12T10:23:06.000Z | 2021-12-12T10:23:06.000Z | spotrix/migrations/versions/bebcf3fed1fe_convert_dashboard_v1_positions.py | Spotrix/spotrix | 611aaac9f47e0451660b37d0ade06750883aba36 | [
"Apache-2.0"
] | null | null | null | spotrix/migrations/versions/bebcf3fed1fe_convert_dashboard_v1_positions.py | Spotrix/spotrix | 611aaac9f47e0451660b37d0ade06750883aba36 | [
"Apache-2.0"
] | 1 | 2021-12-12T10:23:19.000Z | 2021-12-12T10:23:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Migrate dashboard position_json data from V1 to V2
Revision ID: bebcf3fed1fe
Revises: fc480c87706c
Create Date: 2018-07-22 11:59:07.025119
"""
# revision identifiers, used by Alembic.
import collections
import json
import sys
import uuid
from functools import reduce
from alembic import op
from sqlalchemy import Column, ForeignKey, Integer, String, Table, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from spotrix import db
revision = "bebcf3fed1fe"
down_revision = "fc480c87706c"
Base = declarative_base()
BACKGROUND_TRANSPARENT = "BACKGROUND_TRANSPARENT"
CHART_TYPE = "DASHBOARD_CHART_TYPE"
COLUMN_TYPE = "DASHBOARD_COLUMN_TYPE"
DASHBOARD_GRID_ID = "DASHBOARD_GRID_ID"
DASHBOARD_GRID_TYPE = "DASHBOARD_GRID_TYPE"
DASHBOARD_HEADER_ID = "DASHBOARD_HEADER_ID"
DASHBOARD_HEADER_TYPE = "DASHBOARD_HEADER_TYPE"
DASHBOARD_ROOT_ID = "DASHBOARD_ROOT_ID"
DASHBOARD_ROOT_TYPE = "DASHBOARD_ROOT_TYPE"
DASHBOARD_VERSION_KEY = "DASHBOARD_VERSION_KEY"
MARKDOWN_TYPE = "DASHBOARD_MARKDOWN_TYPE"
ROW_TYPE = "DASHBOARD_ROW_TYPE"
GRID_COLUMN_COUNT = 12
GRID_MIN_COLUMN_COUNT = 1
GRID_MIN_ROW_UNITS = 5
GRID_RATIO = 4.0
NUMBER_OF_CHARTS_PER_ROW = 3
MAX_RECURSIVE_LEVEL = 6
ROW_HEIGHT = 8
TOTAL_COLUMNS = 48
DEFAULT_CHART_WIDTH = int(TOTAL_COLUMNS / NUMBER_OF_CHARTS_PER_ROW)
MAX_VALUE = sys.maxsize
class Slice(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
params = Column(Text)
viz_type = Column(String(250))
dashboard_slices = Table(
"dashboard_slices",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
)
class Dashboard(Base):
"""Declarative class to do query in upgrade"""
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
slices = relationship("Slice", secondary=dashboard_slices, backref="dashboards")
def is_v2_dash(positions):
return (
isinstance(positions, dict) and positions.get("DASHBOARD_VERSION_KEY") == "v2"
)
def get_boundary(positions):
top = MAX_VALUE
left = MAX_VALUE
bottom = 0
right = 0
for position in positions:
top = min(position["row"], top)
left = min(position["col"], left)
bottom = max(position["row"] + position["size_y"], bottom)
right = max(position["col"] + position["size_x"], right)
return {"top": top, "bottom": bottom, "left": left, "right": right}
def generate_id():
return uuid.uuid4().hex[:8]
def has_overlap(positions, xAxis=True):
sorted_positions = (
sorted(positions[:], key=lambda pos: pos["col"])
if xAxis
else sorted(positions[:], key=lambda pos: pos["row"])
)
result = False
for idx, position in enumerate(sorted_positions):
if idx < len(sorted_positions) - 1:
if xAxis:
result = (
position["col"] + position["size_x"]
> sorted_positions[idx + 1]["col"]
)
else:
result = (
position["row"] + position["size_y"]
> sorted_positions[idx + 1]["row"]
)
if result:
break
return result
def get_empty_layout():
return {
DASHBOARD_VERSION_KEY: "v2",
DASHBOARD_ROOT_ID: {
"type": DASHBOARD_ROOT_TYPE,
"id": DASHBOARD_ROOT_ID,
"children": [DASHBOARD_GRID_ID],
},
DASHBOARD_GRID_ID: {
"type": DASHBOARD_GRID_TYPE,
"id": DASHBOARD_GRID_ID,
"children": [],
},
}
def get_header_component(title):
return {
"id": DASHBOARD_HEADER_ID,
"type": DASHBOARD_HEADER_TYPE,
"meta": {"text": title},
}
def get_row_container():
return {
"type": ROW_TYPE,
"id": "DASHBOARD_ROW_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"background": BACKGROUND_TRANSPARENT},
}
def get_col_container():
return {
"type": COLUMN_TYPE,
"id": "DASHBOARD_COLUMN_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"background": BACKGROUND_TRANSPARENT},
}
def get_chart_holder(position):
size_x = position["size_x"]
size_y = position["size_y"]
slice_id = position["slice_id"]
slice_name = position.get("slice_name")
code = position.get("code")
width = max(GRID_MIN_COLUMN_COUNT, int(round(size_x / GRID_RATIO)))
height = max(
GRID_MIN_ROW_UNITS, int(round(((size_y / GRID_RATIO) * 100) / ROW_HEIGHT))
)
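    # v1 sizes are expressed on the 48-column grid (TOTAL_COLUMNS); dividing by
    # GRID_RATIO maps them onto the 12-column v2 grid, and size_y is rescaled into
    # ROW_HEIGHT-unit rows, with both values clamped to the grid minimums.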
if code is not None:
markdown_content = " " # white-space markdown
if len(code):
markdown_content = code
elif slice_name.strip():
markdown_content = "##### {}".format(slice_name)
return {
"type": MARKDOWN_TYPE,
"id": "DASHBOARD_MARKDOWN_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"width": width, "height": height, "code": markdown_content},
}
return {
"type": CHART_TYPE,
"id": "DASHBOARD_CHART_TYPE-{}".format(generate_id()),
"children": [],
"meta": {"width": width, "height": height, "chartId": int(slice_id)},
}
def get_children_max(children, attr, root):
return max([root[childId]["meta"][attr] for childId in children])
def get_children_sum(children, attr, root):
return reduce((lambda sum, childId: sum + root[childId]["meta"][attr]), children, 0)
# find column that: width > 2 and
# each row has at least 1 chart can reduce width
def get_wide_column_ids(children, root):
return list(
filter(lambda childId: can_reduce_column_width(root[childId], root), children)
)
def is_wide_leaf_component(component):
return (
component["type"] in [CHART_TYPE, MARKDOWN_TYPE]
and component["meta"]["width"] > GRID_MIN_COLUMN_COUNT
)
def can_reduce_column_width(column_component, root):
return (
column_component["type"] == COLUMN_TYPE
and column_component["meta"]["width"] > GRID_MIN_COLUMN_COUNT
and all(
[
is_wide_leaf_component(root[childId])
or (
root[childId]["type"] == ROW_TYPE
and all(
[
is_wide_leaf_component(root[id])
for id in root[childId]["children"]
]
)
)
for childId in column_component["children"]
]
)
)
def reduce_row_width(row_component, root):
wide_leaf_component_ids = list(
filter(
lambda childId: is_wide_leaf_component(root[childId]),
row_component["children"],
)
)
widest_chart_id = None
widest_width = 0
for component_id in wide_leaf_component_ids:
if root[component_id]["meta"]["width"] > widest_width:
widest_width = root[component_id]["meta"]["width"]
widest_chart_id = component_id
if widest_chart_id:
root[widest_chart_id]["meta"]["width"] -= 1
return get_children_sum(row_component["children"], "width", root)
def reduce_component_width(component):
if is_wide_leaf_component(component):
component["meta"]["width"] -= 1
return component["meta"]["width"]
def convert(positions, level, parent, root):
if len(positions) == 0:
return
if len(positions) == 1 or level >= MAX_RECURSIVE_LEVEL:
# special treatment for single chart dash:
# always wrap chart inside a row
if parent["type"] == DASHBOARD_GRID_TYPE:
row_container = get_row_container()
root[row_container["id"]] = row_container
parent["children"].append(row_container["id"])
parent = row_container
chart_holder = get_chart_holder(positions[0])
root[chart_holder["id"]] = chart_holder
parent["children"].append(chart_holder["id"])
return
current_positions = positions[:]
boundary = get_boundary(current_positions)
top = boundary["top"]
bottom = boundary["bottom"]
left = boundary["left"]
right = boundary["right"]
# find row dividers
layers = []
current_row = top + 1
while len(current_positions) and current_row <= bottom:
upper = []
lower = []
is_row_divider = True
for position in current_positions:
row = position["row"]
size_y = position["size_y"]
if row + size_y <= current_row:
lower.append(position)
continue
elif row >= current_row:
upper.append(position)
continue
is_row_divider = False
break
if is_row_divider:
current_positions = upper[:]
layers.append(lower)
current_row += 1
# Each layer is a list of positions belong to same row section
# they can be a list of charts, or arranged in columns, or mixed
for layer in layers:
if len(layer) == 0:
continue
if len(layer) == 1 and parent["type"] == COLUMN_TYPE:
chart_holder = get_chart_holder(layer[0])
root[chart_holder["id"]] = chart_holder
parent["children"].append(chart_holder["id"])
continue
# create a new row
row_container = get_row_container()
root[row_container["id"]] = row_container
parent["children"].append(row_container["id"])
current_positions = layer[:]
if not has_overlap(current_positions):
# this is a list of charts in the same row
sorted_by_col = sorted(current_positions, key=lambda pos: pos["col"])
for position in sorted_by_col:
chart_holder = get_chart_holder(position)
root[chart_holder["id"]] = chart_holder
row_container["children"].append(chart_holder["id"])
else:
# this row has columns, find col dividers
current_col = left + 1
while len(current_positions) and current_col <= right:
upper = []
lower = []
is_col_divider = True
for position in current_positions:
col = position["col"]
size_x = position["size_x"]
if col + size_x <= current_col:
lower.append(position)
continue
elif col >= current_col:
upper.append(position)
continue
is_col_divider = False
break
if is_col_divider:
# is single chart in the column:
# add to parent container without create new column container
if len(lower) == 1:
chart_holder = get_chart_holder(lower[0])
root[chart_holder["id"]] = chart_holder
row_container["children"].append(chart_holder["id"])
else:
# create new col container
col_container = get_col_container()
root[col_container["id"]] = col_container
if not has_overlap(lower, False):
sorted_by_row = sorted(lower, key=lambda pos: pos["row"])
for position in sorted_by_row:
chart_holder = get_chart_holder(position)
root[chart_holder["id"]] = chart_holder
col_container["children"].append(chart_holder["id"])
else:
convert(lower, level + 2, col_container, root)
# add col meta
if len(col_container["children"]):
row_container["children"].append(col_container["id"])
col_container["meta"]["width"] = get_children_max(
col_container["children"], "width", root
)
current_positions = upper[:]
current_col += 1
# add row meta
row_container["meta"]["width"] = get_children_sum(
row_container["children"], "width", root
)
def convert_to_layout(positions):
root = get_empty_layout()
convert(positions, 0, root[DASHBOARD_GRID_ID], root)
# remove row's width, height and col's height from its meta data
# and make sure every row's width <= GRID_COLUMN_COUNT
# Each item is a dashboard component:
# row_container, or col_container, or chart_holder
for item in root.values():
if not isinstance(item, dict):
continue
if ROW_TYPE == item["type"]:
meta = item["meta"]
if meta.get("width", 0) > GRID_COLUMN_COUNT:
current_width = meta["width"]
while current_width > GRID_COLUMN_COUNT and len(
list(
filter(
lambda childId: is_wide_leaf_component(root[childId]),
item["children"],
)
)
):
current_width = reduce_row_width(item, root)
# because we round v1 chart size to nearest v2 grids count, result
# in there might be overall row width > GRID_COLUMN_COUNT.
# So here is an extra step to check row width, and reduce chart
# or column width if needed and if possible.
if current_width > GRID_COLUMN_COUNT:
has_wide_columns = True
while has_wide_columns:
col_ids = get_wide_column_ids(item["children"], root)
idx = 0
# need 2nd loop since same column may reduce multiple times
while idx < len(col_ids) and current_width > GRID_COLUMN_COUNT:
current_column = col_ids[idx]
for childId in root[current_column]["children"]:
if root[childId]["type"] == ROW_TYPE:
root[childId]["meta"]["width"] = reduce_row_width(
root[childId], root
)
else:
root[childId]["meta"][
"width"
] = reduce_component_width(root[childId])
root[current_column]["meta"]["width"] = get_children_max(
root[current_column]["children"], "width", root
)
current_width = get_children_sum(
item["children"], "width", root
)
idx += 1
has_wide_columns = (
len(get_wide_column_ids(item["children"], root))
and current_width > GRID_COLUMN_COUNT
)
meta.pop("width", None)
return root
def merge_position(position, bottom_line, last_column_start):
col = position["col"]
size_x = position["size_x"]
size_y = position["size_y"]
end_column = len(bottom_line) if col + size_x > last_column_start else col + size_x
# finding index where index >= col and bottom_line value > bottom_line[col]
taller_indexes = [
i
for i, value in enumerate(bottom_line)
if (i >= col and value > bottom_line[col])
]
current_row_value = bottom_line[col]
# if no enough space to fit current position, will start from taller row value
if len(taller_indexes) > 0 and (taller_indexes[0] - col + 1) < size_x:
current_row_value = max(bottom_line[col : col + size_x])
# add current row value with size_y of this position
for i in range(col, end_column):
bottom_line[i] = current_row_value + size_y
# In original position data, a lot of position's row attribute are problematic,
# for example, same positions are assigned to more than 1 chart.
# The convert function depends on row id, col id to split the whole dashboard into
# nested rows and columns. Bad row id will lead to many empty spaces, or a few charts
# are overlapped in the same row.
# This function read positions by row first.
# Then based on previous col id, width and height attribute,
# re-calculate next position's row id.
def scan_dashboard_positions_data(positions):
positions_by_row_id = {}
for position in positions:
row = position["row"]
position["col"] = min(position["col"], TOTAL_COLUMNS)
if not positions_by_row_id.get(row):
positions_by_row_id[row] = []
positions_by_row_id[row].append(position)
bottom_line = [0] * (TOTAL_COLUMNS + 1)
# col index always starts from 1, set a large number for [0] as placeholder
bottom_line[0] = MAX_VALUE
last_column_start = max([position["col"] for position in positions])
# ordered_raw_positions are arrays of raw positions data sorted by row id
ordered_raw_positions = []
row_ids = sorted(positions_by_row_id.keys())
for row_id in row_ids:
ordered_raw_positions.append(positions_by_row_id[row_id])
updated_positions = []
while len(ordered_raw_positions):
next_row = ordered_raw_positions.pop(0)
next_col = 1
while len(next_row):
# special treatment for same (row, col) assigned to more than 1 chart:
# add one additional row and display wider chart first
available_columns_index = [
i
for i, e in enumerate(
list(filter(lambda x: x["col"] == next_col, next_row))
)
]
if len(available_columns_index):
idx = available_columns_index[0]
if len(available_columns_index) > 1:
idx = sorted(
available_columns_index,
key=lambda x: next_row[x]["size_x"],
reverse=True,
)[0]
next_position = next_row.pop(idx)
merge_position(next_position, bottom_line, last_column_start + 1)
next_position["row"] = (
bottom_line[next_position["col"]] - next_position["size_y"]
)
updated_positions.append(next_position)
next_col += next_position["size_x"]
else:
next_col = next_row[0]["col"]
return updated_positions
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
dashboards = session.query(Dashboard).all()
for i, dashboard in enumerate(dashboards):
print("scanning dashboard ({}/{}) >>>>".format(i + 1, len(dashboards)))
position_json = json.loads(dashboard.position_json or "[]")
if not is_v2_dash(position_json):
print("Converting dashboard... dash_id: {}".format(dashboard.id))
position_dict = {}
positions = []
slices = dashboard.slices
if position_json:
# scan and fix positions data: extra spaces, dup rows, .etc
position_json = scan_dashboard_positions_data(position_json)
position_dict = {
str(position["slice_id"]): position for position in position_json
}
last_row_id = (
max([pos["row"] + pos["size_y"] for pos in position_json])
if position_json
else 0
)
new_slice_counter = 0
for slice in slices:
position = position_dict.get(str(slice.id))
# some dashboard didn't have position_json
# place 3 charts in a row
if not position:
position = {
"col": (
new_slice_counter
% NUMBER_OF_CHARTS_PER_ROW
* DEFAULT_CHART_WIDTH
+ 1
),
"row": (
last_row_id
+ int(new_slice_counter / NUMBER_OF_CHARTS_PER_ROW)
* DEFAULT_CHART_WIDTH
),
"size_x": DEFAULT_CHART_WIDTH,
"size_y": DEFAULT_CHART_WIDTH,
"slice_id": str(slice.id),
}
new_slice_counter += 1
# attach additional parameters to position dict,
# prepare to replace markup and separator viz_type
# to dashboard UI component
form_data = json.loads(slice.params or "{}")
viz_type = slice.viz_type
if form_data and viz_type in ["markup", "separator"]:
position["code"] = form_data.get("code")
position["slice_name"] = slice.slice_name
positions.append(position)
v2_layout = convert_to_layout(positions)
v2_layout[DASHBOARD_HEADER_ID] = get_header_component(
dashboard.dashboard_title
)
sorted_by_key = collections.OrderedDict(sorted(v2_layout.items()))
dashboard.position_json = json.dumps(sorted_by_key, indent=2)
session.merge(dashboard)
session.commit()
else:
print("Skip converted dash_id: {}".format(dashboard.id))
session.close()
def downgrade():
print("downgrade is done")
| 35.151286 | 88 | 0.569055 |
acecf62e9c0caf0f774431066a788204f67a0ca6 | 2,744 | py | Python | 02_fashionAI/examples/prepare_data.py | hooloong/aliyunProblems | 077d115535fe54e47e59a0d96676b3995bbda75e | [
"MIT"
] | null | null | null | 02_fashionAI/examples/prepare_data.py | hooloong/aliyunProblems | 077d115535fe54e47e59a0d96676b3995bbda75e | [
"MIT"
] | null | null | null | 02_fashionAI/examples/prepare_data.py | hooloong/aliyunProblems | 077d115535fe54e47e59a0d96676b3995bbda75e | [
"MIT"
] | null | null | null | import mxnet
from mxnet import gluon, image
import os, shutil, random
# Read label.csv
# For each task, make folders, and copy picture to corresponding folders
label_dir = 'data/base/Annotations/label.csv'
warmup_label_dir = 'data/web/Annotations/skirt_length_labels.csv'
label_dict = {'coat_length_labels': [],
'lapel_design_labels': [],
'neckline_design_labels': [],
'skirt_length_labels': [],
'collar_design_labels': [],
'neck_design_labels': [],
'pant_length_labels': [],
'sleeve_length_labels': []}
task_list = label_dict.keys()
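# Images end up in data/train_valid/<task>/{train,val}/<label_index>/ with a 90/10
# train/validation split; label_index is the position of 'y' in the label string.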
def mkdir_if_not_exist(path):
if not os.path.exists(os.path.join(*path)):
os.makedirs(os.path.join(*path))
with open(label_dir, 'r') as f:
lines = f.readlines()
tokens = [l.rstrip().split(',') for l in lines]
for path, task, label in tokens:
label_dict[task].append((path, label))
mkdir_if_not_exist(['data/train_valid'])
mkdir_if_not_exist(['submission'])
for task, path_label in label_dict.items():
mkdir_if_not_exist(['data/train_valid', task])
train_count = 0
n = len(path_label)
m = len(list(path_label[0][1]))
for mm in range(m):
mkdir_if_not_exist(['data/train_valid', task, 'train', str(mm)])
mkdir_if_not_exist(['data/train_valid', task, 'val', str(mm)])
random.shuffle(path_label)
for path, label in path_label:
label_index = list(label).index('y')
src_path = os.path.join('data/base', path)
if train_count < n * 0.9:
shutil.copy(src_path,
os.path.join('data/train_valid', task, 'train', str(label_index)))
else:
shutil.copy(src_path,
os.path.join('data/train_valid', task, 'val', str(label_index)))
train_count += 1
# Add warmup data to skirt task
label_dict = {'skirt_length_labels': []}
with open(warmup_label_dir, 'r') as f:
lines = f.readlines()
tokens = [l.rstrip().split(',') for l in lines]
for path, task, label in tokens:
label_dict[task].append((path, label))
for task, path_label in label_dict.items():
train_count = 0
n = len(path_label)
m = len(list(path_label[0][1]))
random.shuffle(path_label)
for path, label in path_label:
label_index = list(label).index('y')
src_path = os.path.join('data/web', path)
if train_count < n * 0.9:
shutil.copy(src_path,
os.path.join('data/train_valid', task, 'train', str(label_index)))
else:
shutil.copy(src_path,
os.path.join('data/train_valid', task, 'val', str(label_index)))
train_count += 1
| 32.666667 | 90 | 0.609694 |
acecf83a7cf9f5a5241995259318567a2a37a7e5 | 8,350 | py | Python | Lib/site-packages/nose-0.10.0b1-py2.5.egg/nose/plugins/doctests.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/nose-0.10.0b1-py2.5.egg/nose/plugins/doctests.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/site-packages/nose-0.10.0b1-py2.5.egg/nose/plugins/doctests.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | """Use the Doctest plugin with --with-doctest or the NOSE_WITH_DOCTEST
environment variable to enable collection and execution of doctests. doctest_
tests are usually included in the tested package, not grouped into packages or
modules of their own. For this reason, nose will try to detect and run doctest
tests only in the non-test packages it discovers in the working
directory. Doctests may also be placed into files other than python modules,
in which case they can be collected and executed by using the
--doctest-extension switch or NOSE_DOCTEST_EXTENSION environment variable to
indicate which file extension(s) to load.
doctest tests are run like any other test, with the exception that output
capture does not work, because doctest does its own output capture in the
course of running a test.
.. _doctest: http://docs.python.org/lib/module-doctest.html
"""
from __future__ import generators
import logging
import os
from inspect import getmodule
from nose.plugins.base import Plugin
from nose.util import anyp, getpackage, test_address, resolve_name, tolist
log = logging.getLogger(__name__)
try:
import doctest
doctest.DocTestCase
# system version of doctest is acceptable, but needs a monkeypatch
except (ImportError, AttributeError):
# system version is too old
import nose.ext.dtcompat as doctest
#
# Doctest and coverage don't get along, so we need to create
# a monkeypatch that will replace the part of doctest that
# interferes with coverage reports.
#
# The monkeypatch is based on this zope patch:
# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
#
_orp = doctest._OutputRedirectingPdb
class NoseOutputRedirectingPdb(_orp):
def __init__(self, out):
self.__debugger_used = False
_orp.__init__(self, out)
def set_trace(self):
self.__debugger_used = True
_orp.set_trace(self)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
_orp.set_continue(self)
doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
class Doctest(Plugin):
"""
Activate doctest plugin to find and run doctests in non-test modules.
"""
extension = None
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
parser.add_option('--doctest-tests', action='store_true',
dest='doctest_tests',
default=env.get('NOSE_DOCTEST_TESTS'),
help="Also look for doctests in test modules "
"[NOSE_DOCTEST_TESTS]")
parser.add_option('--doctest-extension', action="append",
dest="doctestExtension",
help="Also look for doctests in files with "
"this extension [NOSE_DOCTEST_EXTENSION]")
# Set the default as a list, if given in env; otherwise
# an additional value set on the command line will cause
# an error.
env_setting = env.get('NOSE_DOCTEST_EXTENSION')
if env_setting is not None:
parser.set_defaults(doctestExtension=tolist(env_setting))
def configure(self, options, config):
Plugin.configure(self, options, config)
self.doctest_tests = options.doctest_tests
try:
self.extension = tolist(options.doctestExtension)
except AttributeError:
# 2.3, no other-file option
self.extension = None
self.finder = doctest.DocTestFinder()
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
log.debug("Doctest doesn't want module %s", module)
return
tests = self.finder.find(module)
if not tests:
return
tests.sort()
module_file = module.__file__
if module_file[-4:] in ('.pyc', '.pyo'):
module_file = module_file[:-1]
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
yield DocTestCase(test)
def loadTestsFromFile(self, filename):
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = open(filename)
try:
doc = dh.read()
finally:
dh.close()
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs={}, name=name, filename=filename, lineno=0)
if test.examples:
yield DocFileCase(test)
else:
yield False # no tests to load
def makeTest(self, obj, parent):
"""Look for doctests in the given object, which will be a
function, method or class.
"""
doctests = self.finder.find(obj, module=getmodule(parent))
if doctests:
for test in doctests:
if len(test.examples) == 0:
continue
yield DocTestCase(test, obj=obj)
def matches(self, name):
"""Doctest wants only non-test modules in general.
"""
# FIXME this seems wrong -- nothing is ever going to
# fail this test, since we're given a module NAME not FILE
if name == '__init__.py':
return False
# FIXME don't think we need include/exclude checks here?
return ((self.doctest_tests or not self.conf.testMatch.search(name)
or (self.conf.include
and filter(None,
[inc.search(name)
for inc in self.conf.include])))
and (not self.conf.exclude
or not filter(None,
[exc.search(name)
for exc in self.conf.exclude])))
def wantFile(self, file):
# always want .py files
if file.endswith('.py'):
return True
# also want files that match my extension
if (self.extension
and anyp(file.endswith, self.extension)
and (not self.conf.exclude
or not filter(None,
[exc.search(file)
for exc in self.conf.exclude]))):
return True
return None
class DocTestCase(doctest.DocTestCase):
"""Proxy for DocTestCase: provides an address() method that
returns the correct address for the doctest case. Otherwise
acts as a proxy to the test case. To provide hints for address(),
an obj may also be passed -- this will be used as the test object
for purposes of determining the test address, if it is provided.
"""
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None):
self._nose_obj = obj
super(DocTestCase, self).__init__(
test, optionflags=optionflags, setUp=None, tearDown=None,
checker=None)
def address(self):
if self._nose_obj is not None:
return test_address(self._nose_obj)
return test_address(resolve_name(self._dt_test.name))
# doctests loaded via find(obj) omit the module name
# so we need to override id, __repr__ and shortDescription
# bonus: this will squash a 2.3 vs 2.4 incompatiblity
def id(self):
name = self._dt_test.name
filename = self._dt_test.filename
if filename is not None:
pk = getpackage(filename)
if not name.startswith(pk):
name = "%s.%s" % (pk, name)
return name
def __repr__(self):
name = self.id()
name = name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return 'Doctest: %s' % self.id()
class DocFileCase(doctest.DocFileCase):
"""Overrides to provide filename
"""
def address(self):
return (self._dt_test.filename, None, None)
| 37.954545 | 89 | 0.608982 |
acecf83f9d002c9c4d5cd97d263925b37466acd5 | 2,360 | py | Python | rockstar/RockStar.py | navnm/green-hacker | 86c13c95c1554cfceb0d0b160a53f910d55e4d29 | [
"MIT"
] | 1 | 2019-06-27T11:38:58.000Z | 2019-06-27T11:38:58.000Z | rockstar/RockStar.py | ghuntley/rockstar | fc8345e14a0dd6636bd37c1d3a49876226cf807e | [
"MIT"
] | null | null | null | rockstar/RockStar.py | ghuntley/rockstar | fc8345e14a0dd6636bd37c1d3a49876226cf807e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import uuid
from datetime import time, date, datetime, timedelta
from random import randint
import click
import git
hello_world_c = """#include <iostream>
int main()
{
std::cout << "Hello World!" << std::endl;
return 0;
}
"""
default_file_name = 'main.cpp'
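# How the effect works: _get_dates_list() generates 1-10 random timestamps per day over
# the last `days` days, and _edit_and_commit() backdates each commit by exporting
# GIT_AUTHOR_DATE / GIT_COMMITTER_DATE before committing, filling the contribution graph.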
class RockStar:
def __init__(self, days=400, file_name=default_file_name,
code=hello_world_c):
self.days = days
self.file_name = file_name
self.file_path = os.path.join(os.getcwd(), file_name)
self.code = code
self.repo_path = os.getcwd()
def _make_last_commit(self):
with open(self.file_path, 'w') as f:
f.write(self.code)
os.environ['GIT_AUTHOR_DATE'] = ''
os.environ['GIT_COMMITTER_DATE'] = ''
self.repo.index.add([self.file_path])
self.repo.index.commit('Final commit :sunglasses:')
def _edit_and_commit(self, message, commit_date):
with open(self.file_path, 'w') as f:
f.write(message)
self.repo.index.add([self.file_path])
date_in_iso = commit_date.strftime("%Y-%m-%d %H:%M:%S")
os.environ['GIT_AUTHOR_DATE'] = date_in_iso
os.environ['GIT_COMMITTER_DATE'] = date_in_iso
self.repo.index.commit(message)
def _get_random_time(self):
return time(hour=randint(0, 23), minute=randint(0, 59),
second=randint(0, 59), microsecond=randint(0, 999999))
def _get_dates_list(self):
def dates():
today = date.today()
for day_delta in range(self.days):
for i in range(randint(1, 10)):
yield today - timedelta(days=day_delta)
return [datetime.combine(d, self._get_random_time())
for d in dates()]
def make_me_a_rockstar(self):
self.repo = git.Repo.init(self.repo_path)
label = 'Making you a Rockstar Programmer'
with click.progressbar(self._get_dates_list(), label=label) as bar:
for commit_date in bar:
self._edit_and_commit(str(uuid.uuid1()), commit_date)
self._make_last_commit()
print('\nYou are now a Rockstar Programmer!')
@click.command()
@click.option('--days', type=int, default=400)
def cli(days):
magic = RockStar(days=days)
magic.make_me_a_rockstar()
| 29.873418 | 75 | 0.622458 |
acecf9efd500402c72e5c6df15605484000675dd | 3,707 | py | Python | app.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | [
"MIT"
] | 8 | 2020-12-09T17:34:40.000Z | 2022-02-21T10:15:09.000Z | app.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | [
"MIT"
] | 11 | 2021-02-27T00:21:47.000Z | 2022-02-25T14:41:56.000Z | app.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | [
"MIT"
] | 2 | 2021-06-28T21:11:53.000Z | 2022-02-06T17:20:18.000Z | import logging
import platform
import sys
import webbrowser
import eel
import gevent
from rf2settings.app import expose_app_methods
from rf2settings.app.app_main import CLOSE_EVENT
from rf2settings.app_settings import AppSettings
from rf2settings.gamecontroller import controller_greenlet, controller_event_loop
from rf2settings.globals import FROZEN
from rf2settings.headlights import headlights_greenlet
from rf2settings.log import setup_logging
from rf2settings.rf2connect import RfactorConnect
from rf2settings.rf2greenlet import rfactor_greenlet, rfactor_event_loop
from rf2settings.runasadmin import run_as_admin
from rf2settings.utils import AppExceptionHook, capture_app_exceptions
# -- Make sure eel methods are exposed at start-up
expose_app_methods()
# -- Setup logging
setup_logging()
@capture_app_exceptions
def test_exception():
if AppExceptionHook.produce_exception:
AppExceptionHook.produce_exception = False
AppExceptionHook.test_exception()
def start_eel():
logging.info('\n\n\n')
logging.info('#######################################################')
logging.info('################ Starting APP ###########')
logging.info('#######################################################\n\n\n')
if FROZEN:
# Set Exception hook
sys.excepthook = AppExceptionHook.exception_hook
AppSettings.load()
AppSettings.copy_default_presets()
AppSettings.delete_current_settings_presets()
# This will ask for and re-run with admin rights
# if setting needs_admin set.
if AppSettings.needs_admin and not run_as_admin():
return
"""
THIS WILL DISABLE ctypes support! But it will make sure "Launch rFactor2"
or basically any executable that is loading DLLs will work.
"""
if sys.platform == "win32":
import ctypes
ctypes.windll.kernel32.SetDllDirectoryA(None)
"""
//
"""
page = 'index.html'
host = 'localhost'
port = 8123
eel.init('web')
    # TODO: catch the OSError raised when the port is already in use
try:
eel.start(page, host=host, port=port, block=False)
except EnvironmentError:
# If Chrome isn't found, fallback to Microsoft Edge on Win10 or greater
if sys.platform in ['win32', 'win64'] and int(platform.release()) >= 10:
eel.start(page, mode='edge', host=host, port=port, block=False)
# Fallback to opening a regular browser window
else:
eel.start(page, mode=None, app_mode=False, host=host, port=port, block=False)
# Open system default web browser
webbrowser.open_new(f'http://{host}:{port}')
# -- Game Controller Greenlet
cg = eel.spawn(controller_greenlet)
# -- Headlights Greenlet
hg = eel.spawn(headlights_greenlet)
# -- rFactor Greenlet
rg = eel.spawn(rfactor_greenlet)
# -- Run until window/tab closed
while not CLOSE_EVENT.is_set():
# Game controller event loop
controller_event_loop()
# rFactor 2 event loop
rfactor_event_loop()
# Capture exception events
AppExceptionHook.exception_event_loop()
# -- Test App Exception
test_exception()
# -- Shutdown Greenlets
logging.debug('Shutting down Greenlets.')
gevent.joinall((cg, hg, rg), timeout=15.0, raise_error=True)
# -- Shutdown logging
logging.info('\n\n\n')
logging.info('#######################################################')
logging.info('################ APP SHUTDOWN ###########')
logging.info('#######################################################\n\n\n')
logging.shutdown()
if __name__ == '__main__':
start_eel()
| 31.956897 | 89 | 0.636364 |
acecfb14dc014bdbb8f7fe6d719a93f775882ffb | 9,317 | py | Python | python/ray/workflow/tests/test_basic_workflows_2.py | mkucijan/ray | ea2bea7e309cd60457aa0e027321be5f10fa0fe5 | [
"Apache-2.0"
] | 21,382 | 2016-09-26T23:12:52.000Z | 2022-03-31T21:47:45.000Z | python/ray/workflow/tests/test_basic_workflows_2.py | mkucijan/ray | ea2bea7e309cd60457aa0e027321be5f10fa0fe5 | [
"Apache-2.0"
] | 19,689 | 2016-09-17T08:21:25.000Z | 2022-03-31T23:59:30.000Z | python/ray/workflow/tests/test_basic_workflows_2.py | cc13ny/ray | 48ecb1f88a89c3894e2a92d66d89d3965c179ecd | [
"Apache-2.0"
] | 4,114 | 2016-09-23T18:54:01.000Z | 2022-03-31T15:07:32.000Z | import os
import pytest
import ray
import re
from filelock import FileLock
from pathlib import Path
from ray._private.test_utils import run_string_as_driver, SignalActor
from ray import workflow
from ray.tests.conftest import * # noqa
from unittest.mock import patch
def test_init_twice(call_ray_start, reset_workflow, tmp_path):
workflow.init()
with pytest.raises(RuntimeError):
workflow.init(str(tmp_path))
driver_script = """
from ray import workflow
if __name__ == "__main__":
workflow.init()
"""
def test_init_twice_2(call_ray_start, reset_workflow, tmp_path):
with patch.dict(os.environ, {"RAY_ADDRESS": call_ray_start}):
run_string_as_driver(driver_script)
with pytest.raises(
RuntimeError, match=".*different from the workflow manager.*"):
workflow.init(str(tmp_path))
@pytest.mark.parametrize(
"workflow_start_regular", [{
"num_cpus": 2,
}], indirect=True)
def test_step_resources(workflow_start_regular, tmp_path):
lock_path = str(tmp_path / "lock")
# We use signal actor here because we can't guarantee the order of tasks
# sent from worker to raylet.
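    # The step below requests num_cpus=2 on a 2-CPU cluster, so while it holds the file
    # lock a 1-CPU remote task cannot be scheduled; the timeout assertion checks that.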
signal_actor = SignalActor.remote()
@workflow.step
def step_run():
ray.wait([signal_actor.send.remote()])
with FileLock(lock_path):
return None
@ray.remote(num_cpus=1)
def remote_run():
return None
lock = FileLock(lock_path)
lock.acquire()
ret = step_run.options(num_cpus=2).step().run_async()
ray.wait([signal_actor.wait.remote()])
obj = remote_run.remote()
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(obj, timeout=2)
lock.release()
assert ray.get(ret) is None
assert ray.get(obj) is None
def test_get_output_1(workflow_start_regular, tmp_path):
@workflow.step
def simple(v):
return v
assert 0 == simple.step(0).run("simple")
assert 0 == ray.get(workflow.get_output("simple"))
def test_get_output_2(workflow_start_regular, tmp_path):
lock_path = str(tmp_path / "lock")
lock = FileLock(lock_path)
@workflow.step
def simple(v):
with FileLock(lock_path):
return v
lock.acquire()
obj = simple.step(0).run_async("simple")
obj2 = workflow.get_output("simple")
lock.release()
assert ray.get([obj, obj2]) == [0, 0]
def test_get_output_3(workflow_start_regular, tmp_path):
cnt_file = tmp_path / "counter"
cnt_file.write_text("0")
error_flag = tmp_path / "error"
error_flag.touch()
@workflow.step
def incr():
v = int(cnt_file.read_text())
cnt_file.write_text(str(v + 1))
if error_flag.exists():
raise ValueError()
return 10
with pytest.raises(ray.exceptions.RaySystemError):
incr.options(max_retries=1).step().run("incr")
assert cnt_file.read_text() == "1"
with pytest.raises(ray.exceptions.RaySystemError):
ray.get(workflow.get_output("incr"))
assert cnt_file.read_text() == "1"
error_flag.unlink()
with pytest.raises(ray.exceptions.RaySystemError):
ray.get(workflow.get_output("incr"))
assert ray.get(workflow.resume("incr")) == 10
def test_get_named_step_output_finished(workflow_start_regular, tmp_path):
@workflow.step
def double(v):
return 2 * v
# Get the result from named step after workflow finished
assert 4 == double.options(name="outer").step(
double.options(name="inner").step(1)).run("double")
assert ray.get(workflow.get_output("double", name="inner")) == 2
assert ray.get(workflow.get_output("double", name="outer")) == 4
def test_get_named_step_output_running(workflow_start_regular, tmp_path):
@workflow.step
def double(v, lock=None):
if lock is not None:
with FileLock(lock_path):
return 2 * v
else:
return 2 * v
# Get the result from named step after workflow before it's finished
lock_path = str(tmp_path / "lock")
lock = FileLock(lock_path)
lock.acquire()
output = double.options(name="outer").step(
double.options(name="inner").step(1, lock_path),
lock_path).run_async("double-2")
inner = workflow.get_output("double-2", name="inner")
outer = workflow.get_output("double-2", name="outer")
@ray.remote
def wait(obj_ref):
return ray.get(obj_ref[0])
# Make sure nothing is finished.
ready, waiting = ray.wait(
[wait.remote([output]),
wait.remote([inner]),
wait.remote([outer])],
timeout=1)
assert 0 == len(ready)
assert 3 == len(waiting)
# Once job finished, we'll be able to get the result.
lock.release()
assert 4 == ray.get(output)
# Here sometimes inner will not be generated when we call
# run_async. So there is a race condition here.
try:
v = ray.get(inner)
except Exception:
v = None
if v is not None:
        assert 2 == v
assert 4 == ray.get(outer)
inner = workflow.get_output("double-2", name="inner")
outer = workflow.get_output("double-2", name="outer")
assert 2 == ray.get(inner)
assert 4 == ray.get(outer)
def test_get_named_step_output_error(workflow_start_regular, tmp_path):
@workflow.step
def double(v, error):
if error:
raise Exception()
return v + v
# Force it to fail for the outer step
with pytest.raises(Exception):
double.options(name="outer").step(
double.options(name="inner").step(1, False), True).run("double")
# For the inner step, it should have already been executed.
assert 2 == ray.get(workflow.get_output("double", name="inner"))
outer = workflow.get_output("double", name="outer")
with pytest.raises(Exception):
ray.get(outer)
def test_get_named_step_default(workflow_start_regular, tmp_path):
@workflow.step
def factorial(n, r=1):
if n == 1:
return r
return factorial.step(n - 1, r * n)
import math
assert math.factorial(5) == factorial.step(5).run("factorial")
for i in range(5):
step_name = ("test_basic_workflows_2."
"test_get_named_step_default.locals.factorial")
if i != 0:
step_name += "_" + str(i)
# All outputs will be 120
assert math.factorial(5) == ray.get(
workflow.get_output("factorial", name=step_name))
def test_get_named_step_duplicate(workflow_start_regular):
@workflow.step(name="f")
def f(n, dep):
return n
inner = f.step(10, None)
outer = f.step(20, inner)
assert 20 == outer.run("duplicate")
# The outer will be checkpointed first. So there is no suffix for the name
assert ray.get(workflow.get_output("duplicate", name="f")) == 20
# The inner will be checkpointed after the outer. And there is a duplicate
# for the name. suffix _1 is added automatically
assert ray.get(workflow.get_output("duplicate", name="f_1")) == 10
def test_no_init(shutdown_only):
@workflow.step
def f():
pass
fail_wf_init_error_msg = re.escape(
"`workflow.init()` must be called prior to using "
"the workflows API.")
with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
f.step().run()
with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
workflow.list_all()
with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
workflow.resume_all()
with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
workflow.cancel("wf")
with pytest.raises(RuntimeError, match=fail_wf_init_error_msg):
workflow.get_actor("wf")
def test_wf_run(workflow_start_regular, tmp_path):
counter = tmp_path / "counter"
counter.write_text("0")
@workflow.step
def f():
v = int(counter.read_text()) + 1
counter.write_text(str(v))
f.step().run("abc")
assert counter.read_text() == "1"
# This will not rerun the job from beginning
f.step().run("abc")
assert counter.read_text() == "1"
def test_wf_no_run():
@workflow.step
def f1():
pass
f1.step()
@workflow.step
def f2(*w):
pass
f = f2.step(*[f1.step() for _ in range(10)])
with pytest.raises(Exception):
f.run()
def test_dedupe_indirect(workflow_start_regular, tmp_path):
counter = Path(tmp_path) / "counter.txt"
lock = Path(tmp_path) / "lock.txt"
counter.write_text("0")
@workflow.step
def incr():
with FileLock(str(lock)):
c = int(counter.read_text())
c += 1
counter.write_text(f"{c}")
@workflow.step
def identity(a):
return a
@workflow.step
def join(*a):
return counter.read_text()
# Here a is passed to two steps and we need to ensure
# it's only executed once
a = incr.step()
i1 = identity.step(a)
i2 = identity.step(a)
assert "1" == join.step(i1, i2).run()
assert "2" == join.step(i1, i2).run()
# pass a multiple times
assert "3" == join.step(a, a, a, a).run()
assert "4" == join.step(a, a, a, a).run()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 28.405488 | 79 | 0.640872 |
acecfd12daf2f103ac6def054d00bbf24c31c3ea | 365 | py | Python | medium/countingbits.py | 7u/leetcode | 4460d21dbb19c67486ec9bd83c5dcb64b5791652 | [
"BSD-3-Clause"
] | null | null | null | medium/countingbits.py | 7u/leetcode | 4460d21dbb19c67486ec9bd83c5dcb64b5791652 | [
"BSD-3-Clause"
] | null | null | null | medium/countingbits.py | 7u/leetcode | 4460d21dbb19c67486ec9bd83c5dcb64b5791652 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
def countBits(num):
"""
:type num: int
:rtype: List[int]
"""
ret=[None] * (num + 1)
n=1
for i in range(0, num + 1):
if i == 0:
ret[i] = 0
elif i == n:
ret[i] = 1
n = n * 2
else:
            ret[i] = ret[n // 2] + ret[i - n // 2]
return ret
| 20.277778 | 49 | 0.353425 |
acecfdb260f735f11ab45cff2fd16dd4eeedd0b8 | 2,700 | py | Python | example_test_set/example_check_variants.py | AngieHinrichs/hgvslib | 33c1d0e70d609dee03ce0016230da6c04f4b7a53 | [
"BSD-3-Clause"
] | 15 | 2017-02-03T22:34:43.000Z | 2021-09-13T17:15:34.000Z | example_test_set/example_check_variants.py | AngieHinrichs/hgvslib | 33c1d0e70d609dee03ce0016230da6c04f4b7a53 | [
"BSD-3-Clause"
] | 2 | 2017-07-27T18:57:39.000Z | 2020-11-14T21:04:16.000Z | example_test_set/example_check_variants.py | AngieHinrichs/hgvslib | 33c1d0e70d609dee03ce0016230da6c04f4b7a53 | [
"BSD-3-Clause"
] | 5 | 2017-07-25T16:36:49.000Z | 2020-11-14T11:37:05.000Z | import argparse
__author__ = 'jyen'
import logging
from hgvslib.class_functions import compare_hgvs
from hgvslib.constants import NULL_SET, NO_MATCH
def write_contents(outfile):
try:
return open( '%s' % outfile, 'w')
except IOError:
logging.error('Error: can\'t find file %s or read data' % outfile)
def check_ref_list(ref_str, hgvs_list, transcripts):
'''
Compares hgvs given a reference hgvs, and list of hgvs string and corresponding transcripts.
:param ref_str: reference hgvs string
:param hgvs_list: list of hgvs strings e.g. [c.4324A>T, c.4324A>T, c.4324A>T]
:param transcripts: list of transcripts e.g [NM_0000342.1, NM_0000342.1, NM_0000342.1]
*order must correspond with the hgvs strings*
:return: list of comparison results in order e.g. [yes, yes, yes]
'''
r = []
for num in [0,1,2]:
query_str = hgvs_list[num]
transcript = transcripts[num]
if transcript == NO_MATCH: # only compare transcripts with exact match
result = NO_MATCH
else:
result = compare_hgvs(ref_str, query_str)
r.append(result)
return '\t'.join(r)
def main(input_fileobject, outfile):
# write header
out = write_contents(outfile)
header = input_fileobject.readline()
column_names = header.strip('\n').split('\t')
	newheader = header.rstrip() + '\t' + '\t'.join(['s_c_check','vr_c_check','vep_c_check',
	                's_p_check','vr_p_check','vep_p_check', '\n'])
out.write(newheader)
	print('Reading %s, printing to %s' % (input_fileobject.name, outfile))
for line in input_fileobject.readlines():
line = line.rstrip().replace('NULL','')
data = dict(zip(column_names,line.split('\t')))
if data['transcript'] in NULL_SET:
continue
if all(string in NULL_SET for string in [data['snpeff_transcript'],data['vr_transcript'],data['vep_transcript']]):
continue
chgvs_list = [data['snpeff_c_hgvs'],data['vr_c_hgvs'],data['vep_c_hgvs']]
phgvs_list = [data['snpeff_p_hgvs'],data['vr_p_hgvs'],data['vep_p_hgvs']]
transcript_list = [data['snpeff_transcript'],data['vr_transcript'],data['vep_transcript']]
chgvs_check_list = check_ref_list(data['ref_c_hgvs'], chgvs_list, transcript_list)
phgvs_check_list = check_ref_list(data['ref_p_hgvs'], phgvs_list, transcript_list)
items = '{}\t{}'.format(chgvs_check_list, phgvs_check_list)
		s = '{}\t{}\n'.format(line, items)
out.write(s)
out.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse compare output for hgvs checking ')
parser.add_argument('-i', '--infile', nargs='?', type=argparse.FileType('r'), required = True)
args = parser.parse_args()
outfile = str(args.infile.name)[:-3] + 'parsed.txt'
main(args.infile, outfile)
| 31.395349 | 116 | 0.706667 |
acecfe6314cb55d7c927b1bf32055fc85cbc6533 | 6,160 | py | Python | CircuitPy_OTP/main.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 3 | 2019-09-25T17:32:15.000Z | 2019-10-04T18:31:34.000Z | CircuitPy_OTP/main.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 1 | 2020-04-30T18:20:19.000Z | 2020-04-30T18:20:19.000Z | CircuitPy_OTP/main.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 1 | 2020-10-16T15:23:04.000Z | 2020-10-16T15:23:04.000Z | import time
import adafruit_ssd1306
import bitbangio as io
import board
import network
import ntptime
import ubinascii
import uhashlib
# pylint: disable=broad-except
# https://github.com/pyotp/pyotp example
totp = [("Discord ", 'JBSWY3DPEHPK3PXP'),
("Gmail ", 'abcdefghijklmnopqrstuvwxyz234567'),
("Accounts", 'asfdkwefoaiwejfa323nfjkl')]
ssid = 'my_wifi_ssid'
password = 'my_wifi_password'
TEST = False # set to True to print the self-tests for the hash helpers below
ALWAYS_ON = False # Set to true if you never want to go to sleep!
ON_SECONDS = 60 # how long to stay on if not in always_on mode
i2c = io.I2C(board.SCL, board.SDA)
oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# Gimme a welcome screen!
oled.fill(0)
oled.text('CircuitPython', 0, 0)
oled.text('PyTOTP Pal!', 0, 10)
oled.text(' <3 adafruit <3 ', 0, 20)
oled.show()
time.sleep(0.25)
EPOCH_DELTA = 946684800 # seconds between year 2000 and year 1970
SECS_DAY = 86400
SHA1 = uhashlib.sha1
if TEST:
print("===========================================")
print("SHA1 test: ", ubinascii.hexlify(SHA1(b'hello world').digest()))
# should be 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
# HMAC implementation, as hashlib/hmac wouldn't fit
# From https://en.wikipedia.org/wiki/Hash-based_message_authentication_code
def HMAC(k, m):
SHA1_BLOCK_SIZE = 64
KEY_BLOCK = k + (b'\0' * (SHA1_BLOCK_SIZE - len(k)))
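    # XOR the zero-padded key with the standard HMAC ipad (0x36) and opad (0x5C) bytes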
KEY_INNER = bytes((x ^ 0x36) for x in KEY_BLOCK)
KEY_OUTER = bytes((x ^ 0x5C) for x in KEY_BLOCK)
inner_message = KEY_INNER + m
outer_message = KEY_OUTER + SHA1(inner_message).digest()
return SHA1(outer_message)
if TEST:
KEY = b'abcd'
MESSAGE = b'efgh'
print("===========================================")
print("HMAC test: ", ubinascii.hexlify(HMAC(KEY, MESSAGE).digest()))
# should be e5dbcf9263188f9fce90df572afeb39b66b27198
# Base32 decoder, since the base64 lib wouldn't fit
def base32_decode(encoded):
missing_padding = len(encoded) % 8
if missing_padding != 0:
encoded += '=' * (8 - missing_padding)
encoded = encoded.upper()
chunks = [encoded[i:i + 8] for i in range(0, len(encoded), 8)]
out = []
for chunk in chunks:
bits = 0
bitbuff = 0
for c in chunk:
if 'A' <= c <= 'Z':
n = ord(c) - ord('A')
elif '2' <= c <= '7':
n = ord(c) - ord('2') + 26
            elif c == '=':
continue
else:
raise ValueError("Not base32")
# 5 bits per 8 chars of base32
bits += 5
# shift down and add the current value
bitbuff <<= 5
bitbuff |= n
# great! we have enough to extract a byte
if bits >= 8:
bits -= 8
byte = bitbuff >> bits # grab top 8 bits
bitbuff &= ~(0xFF << bits) # and clear them
out.append(byte) # store what we got
return out
if TEST:
print("===========================================")
print("Base32 test: ", bytes(base32_decode("IFSGCZTSOVUXIIJB")))
# should be "Adafruit!!"
# Turns an integer into a padded-with-0x0 bytestr
def int_to_bytestring(i, padding=8):
result = []
while i != 0:
result.insert(0, i & 0xFF)
i >>= 8
result = [0] * (padding - len(result)) + result
return bytes(result)
# HMAC -> OTP generator, pretty much same as
# https://github.com/pyotp/pyotp/blob/master/src/pyotp/otp.py
def generate_otp(int_input, secret_key, digits=6):
if int_input < 0:
raise ValueError('input must be positive integer')
hmac_hash = bytearray(
HMAC(bytes(base32_decode(secret_key)),
int_to_bytestring(int_input)).digest()
)
offset = hmac_hash[-1] & 0xf
code = ((hmac_hash[offset] & 0x7f) << 24 |
(hmac_hash[offset + 1] & 0xff) << 16 |
(hmac_hash[offset + 2] & 0xff) << 8 |
(hmac_hash[offset + 3] & 0xff))
str_code = str(code % 10 ** digits)
while len(str_code) < digits:
str_code = '0' + str_code
return str_code
print("===========================================")
# Set up networking
sta_if = network.WLAN(network.STA_IF)
oled.fill(0)
oled.text('Connecting to', 0, 0)
oled.text(ssid, 0, 10)
oled.show()
if not sta_if.isconnected():
print("Connecting to SSID", ssid)
sta_if.active(True)
sta_if.connect(ssid, password)
while not sta_if.isconnected():
pass
print("Connected! IP = ", sta_if.ifconfig()[0])
# Done! Let them know we made it
oled.text("IP: " + sta_if.ifconfig()[0], 0, 20)
oled.show()
time.sleep(0.25)
# Get the latest time from NTP
t = None
while not t:
try:
t = ntptime.time()
except Exception:
pass
time.sleep(0.1)
# NTP time is seconds-since-2000
print("NTP time: ", t)
# But we need Unix time, which is seconds-since-1970
t += EPOCH_DELTA
print("Unix time: ", t)
# Instead of using RTC which means converting back and forth
# we'll just keep track of seconds-elapsed-since-NTP-call
mono_time = int(time.monotonic())
print("Monotonic time", mono_time)
countdown = ON_SECONDS # how long to stay on if not in always_on mode
while ALWAYS_ON or (countdown > 0):
# Calculate current time based on NTP + monotonic
unix_time = t - mono_time + int(time.monotonic())
print("Unix time: ", unix_time)
# Clear the screen
oled.fill(0)
y = 0
# We can do up to 3 per line on the Feather OLED
for name, secret in totp:
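        # TOTP counter = number of completed 30-second intervals since the Unix epoch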
otp = generate_otp(unix_time // 30, secret)
print(name + " OTP output: ", otp) # serial debugging output
oled.text(name + ": " + str(otp), 0, y) # display name & OTP on OLED
y += 10 # Go to next line on OLED
# Display a little bar that 'counts down' how many seconds you have left
oled.framebuf.line(0, 31, 128 - (unix_time % 30) * 4, 31, True)
oled.show()
    # We'll update every 1/4 second, we can hash very fast so it's no biggie!
countdown -= 0.25
time.sleep(0.25)
# All these hashes will be lost in time(), like tears in rain. Time to die
oled.fill(0)
oled.show()
| 28.920188 | 77 | 0.605357 |
acecffd67a3cd2dc8967874a9c2deb47834b8804 | 671 | py | Python | stests/core/mq/initialiser.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 4 | 2020-03-10T15:28:17.000Z | 2021-10-02T11:41:17.000Z | stests/core/mq/initialiser.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 1 | 2020-03-25T11:31:44.000Z | 2020-03-25T11:31:44.000Z | stests/core/mq/initialiser.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 9 | 2020-02-25T18:43:42.000Z | 2021-08-10T17:08:42.000Z | import enum
import dramatiq
from stests.core.logging import log_event
from stests.core.mq.brokers import get_broker
from stests.core.mq import encoder
from stests.events import EventType
def execute():
"""Initialises MQ broker & connects dramatiq library.
"""
# JIT import to avoid circularity - TODO remove.
from stests.core.mq.middleware import get_middleware
# Configure broker.
broker = get_broker()
for mware in get_middleware():
broker.add_middleware(mware)
# Configure dramatiq.
dramatiq.set_broker(broker)
dramatiq.set_encoder(encoder)
log_event(EventType.CORE_BROKER_CONNECTION_ESTABLISHED, None)
| 23.137931 | 65 | 0.740686 |
acecfffdeded6d958d52b37ecb6bd86c139ded10 | 4,096 | py | Python | thingspeak.png.py | maarten-pennings/NarrowCast | 5f05729ce6d37b3a00537993668f78888f1f2503 | [
"MIT"
] | 1 | 2020-10-06T10:06:53.000Z | 2020-10-06T10:06:53.000Z | thingspeak.png.py | maarten-pennings/NarrowCast | 5f05729ce6d37b3a00537993668f78888f1f2503 | [
"MIT"
] | null | null | null | thingspeak.png.py | maarten-pennings/NarrowCast | 5f05729ce6d37b3a00537993668f78888f1f2503 | [
"MIT"
] | null | null | null | # sudo pip install matplotlib
# sudo apt-get install python-tk
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import requests
import os, sys, io
from datetime import datetime,timedelta
from dateutil import tz
def application(environ, start_response):
info_plot = [["249563","2","#ffd43b","600"], #ENS210.H [may be changed]
["616372","2","#e74c3c", "300"], #CCS811.eTVOC [may be changed]
["381884","1","#34495e", "300"], #iAQcore.CO2 [may be changed]
["249563","1","#ffd43b","600"], #ENS210.T [may be changed]
["616372","4","#e74c3c", "300"], #CCS811.R [may be changed]
["320672","1","#2ecc71","600"]] #ENS220.P [may be changed]
data = []
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
for idata in range(len(info_plot)):
url= "https://thingspeak.com/channels/"+info_plot[idata][0]+"/field/"+info_plot[idata][1]+".json?results="+info_plot[idata][3]
resp= requests.get(url)
text= resp.text
data.append(json.loads(text))
plt.ioff()
fig = plt.figure(figsize=[19.2*0.8,10*0.8])
for idata in range(len(info_plot)):
data_feeds = data[idata]["feeds"]
dtime = []
plotdata = []
plotname = data[idata]["channel"]["field%s" %info_plot[idata][1]]
for idx in range(len(data_feeds)):
utc = datetime.strptime(data_feeds[idx]['created_at'], '%Y-%m-%dT%H:%M:%SZ')
utc = utc.replace(tzinfo = from_zone)
central = utc.astimezone(to_zone)
dtime.append(central)
plotdata.append(float(data_feeds[idx]["field%s" %info_plot[idata][1]]))
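        # subplot "23N" selects panel N of a 2x3 grid - one panel per configured channel/field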
plt.subplot(int("23"+str(idata+1)))
plt.plot(dtime, plotdata,'.-', color = info_plot[idata][2])
plt.xlabel('Date')
plt.ylabel(plotname)
plt.title(data[idata]["channel"]["name"])
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
plt.tight_layout()
with io.BytesIO() as memfile:
plt.savefig(memfile, format="png")
bytes = memfile.getvalue()
plt.close(fig)
status = '200 OK'
response_header = [('Content-type','image/png')]
start_response(status,response_header)
return [bytes]
if __name__ == "__main__":
    application({}, lambda status, headers: None)  # dummy start_response for a local smoke run
# { u'feeds': [
# {u'created_at': u'2019-02-27T08:00:56Z', u'field1': u'23.053125', u'entry_id': 1835901},
# {u'created_at': u'2019-02-27T08:01:27Z', u'field1': u'23.037500', u'entry_id': 1835902},
# {u'created_at': u'2019-02-27T08:01:58Z', u'field1': u'23.006250', u'entry_id': 1835903},
# ...
# {u'created_at': u'2019-02-27T08:48:42Z', u'field1': u'22.975000', u'entry_id': 1835993},
# {u'created_at': u'2019-02-27T08:49:13Z', u'field1': u'22.959375', u'entry_id': 1835994},
# {u'created_at': u'2019-02-27T08:49:45Z', u'field1': u'22.959375', u'entry_id': 1835995},
# {u'created_at': u'2019-02-27T08:50:16Z', u'field1': u'23.006250', u'entry_id': 1835996},
# {u'created_at': u'2019-02-27T08:50:47Z', u'field1': u'23.006250', u'entry_id': 1835997},
# {u'created_at': u'2019-02-27T08:51:18Z', u'field1': u'22.990625', u'entry_id': 1835998},
# {u'created_at': u'2019-02-27T08:51:49Z', u'field1': u'22.975000', u'entry_id': 1835999},
# {u'created_at': u'2019-02-27T08:52:20Z', u'field1': u'23.006250', u'entry_id': 1836000}
# ]
# , u'channel': {
# u'description': u'ENS210 relative humidity and temperature sensor by ams',
# u'updated_at': u'2018-10-18T15:08:33Z',
# u'longitude': u'5.4601',
# u'last_entry_id': 1836000,
# u'id': 249563,
# u'name': u'ENS210 @ HTC',
# u'field2': u'Humidity (in %RH)',
# u'field3': u'TStatus',
# u'created_at': u'2017-03-28T18:46:17Z',
# u'field1': u'Temperature (in \xb0C)',
# u'field4': u'HStatus',
# u'latitude': u'51.4109'
# }
# }
#
| 40.156863 | 135 | 0.572998 |
aced00bff3460bc575e46df9efb6c4708bc67a1e | 268 | py | Python | spec/fixtures/smoke/bad/badIndent0.py | Askaholic/linter-mypy | 97978c5c9455d4215ea0cd0395e34b8eb118feca | [
"MIT"
] | 33 | 2016-12-08T14:53:50.000Z | 2022-02-22T20:56:49.000Z | spec/fixtures/smoke/bad/badIndent0.py | Askaholic/linter-mypy | 97978c5c9455d4215ea0cd0395e34b8eb118feca | [
"MIT"
] | 27 | 2017-03-12T01:18:05.000Z | 2021-01-27T14:59:54.000Z | spec/fixtures/smoke/bad/badIndent0.py | Askaholic/linter-mypy | 97978c5c9455d4215ea0cd0395e34b8eb118feca | [
"MIT"
] | 7 | 2017-03-12T01:56:07.000Z | 2022-03-24T18:09:00.000Z | xyz = 0
xyz = ""
def a():
xyz = 0
xyz = ""
def a():
xyz = 0
xyz = ""
def a():
xyz = 0
xyz = ""
def a():
xyz = 0
xyz = ""
class Hello(object):
def __init__(self):
class World(object):
xyz = 0
xyz = ""
def __init__(self):
pass
| 11.652174 | 22 | 0.462687 |
aced00fbe06eecc4794a2496de7afa8f074c2909 | 1,345 | py | Python | images_watcher.py | mshemanskyi/image-batch-processor | dd7e0d9bc77fd06fe0f94c4f4fd025323633d9f7 | [
"MIT"
] | null | null | null | images_watcher.py | mshemanskyi/image-batch-processor | dd7e0d9bc77fd06fe0f94c4f4fd025323633d9f7 | [
"MIT"
] | null | null | null | images_watcher.py | mshemanskyi/image-batch-processor | dd7e0d9bc77fd06fe0f94c4f4fd025323633d9f7 | [
"MIT"
] | null | null | null | import sys
import time
import os
from watchdog.observers import Observer
from events_handler import ImagesEventHandler
from image_processor import ImageProcessor
class ImagesWatcher:
def __init__(self, params):
self.params = params
self.__src_path = params['path']
self.__event_handler = ImagesEventHandler(params)
self.__event_observer = Observer()
def run(self):
        # On start, process any images that already exist in the folder
for filename in os.listdir(self.__src_path):
if filename == '.keep':
continue
filePath = os.path.join(self.__src_path, filename)
ImageProcessor.do(self, filePath, self.params)
print(self.params['watch'])
if not self.params['watch']:
sys.exit(2)
self.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.stop()
def start(self):
self.__schedule()
self.__event_observer.start()
def stop(self):
self.__event_observer.stop()
self.__event_observer.join()
def __schedule(self):
self.__event_observer.schedule(
self.__event_handler,
self.__src_path,
recursive=True
) | 26.372549 | 63 | 0.58513 |
aced0177c3b0d948bae58dc6f8f3fb42c490f291 | 4,290 | py | Python | lsbtools/lsb_release.py | renodr/LSB-Tools | 3fc412e6737b4f5acbf671b0afd8329439cce30a | [
"MIT"
] | null | null | null | lsbtools/lsb_release.py | renodr/LSB-Tools | 3fc412e6737b4f5acbf671b0afd8329439cce30a | [
"MIT"
] | 2 | 2020-05-15T05:06:39.000Z | 2020-08-14T03:12:09.000Z | lsbtools/lsb_release.py | renodr/LSB-Tools | 3fc412e6737b4f5acbf671b0afd8329439cce30a | [
"MIT"
] | 3 | 2020-08-13T01:19:15.000Z | 2021-09-14T02:14:55.000Z | # Begin /usr/bin/lsb_release
import sys
if sys.version_info < (3, 7):
sys.exit("Python %s.%s or later is required.\n" %(3, 7))
import argparse, glob, itertools, lsbtools, os, re
# Set default values
config = {
'LSB_VERSION' : 'unavailable',
'DISTRIB_ID' : 'unavailable',
'DISTRIB_DESCRIPTION' : 'unavailable',
'DISTRIB_RELEASE' : 'unavailable',
'DISTRIB_CODENAME' : 'unavailable',
}
printval = ''
# Process command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", help="Display the version of the LSB specification against which the distribution is compliant", action="store_true")
parser.add_argument("-i", "--id", help="Display the string id of the distributor", action="store_true")
parser.add_argument("-d", "--description", help="Display the single line text description of the distribution", action="store_true")
parser.add_argument("-r", "--release", help="Display the release number of the distribution", action="store_true")
parser.add_argument("-c", "--codename", help="Display the codename according to the distribution release", action="store_true")
parser.add_argument("-a", "--all", help="Display all of the above information", action="store_true")
parser.add_argument("-s", "--short", help="Display all of the above information in short output format", action="store_true")
parser.add_argument("--progver", help=argparse.SUPPRESS, action="store_true")
args = parser.parse_args()
if args.progver:
strver = lsbtools.get_prog_ver(sys.argv[0])
print(strver, "\n")
print("Copyright (C) 2020 DJ Lucas")
print("This is free software; see the source for copying conditions. There is NO")
print("warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.")
print("\nWritten by DJ Lucas.\n")
sys.exit(0)
if args.version:
lv = 1
else:
lv = 0
if args.id:
li = 1
else:
li =0
if args.description:
ld = 1
else:
ld = 0
if args.release:
lr = 1
else:
lr = 0
if args.codename:
lc = 1
else:
lc = 0
if args.all:
lv = 1
li = 1
ld = 1
lr = 1
lc = 1
if args.short:
ls = 1
else:
ls = 0
if lv == 0 and li == 0 and ld == 0 and lr == 0 and lc == 0:
lv = 1
# Read required configuration file
if not os.path.exists("/etc/lsb-release"):
print("Required configuration file '/etc/lsb-release' is not found. Exiting...", file=sys.stderr)
sys.exit(1)
conffile = open("/etc/lsb-release", 'r')
content = conffile.read()
items = content.split('\n')
for pair in items:
if pair != '':
key,val = pair.split('=')
config[key] = val.strip('\"')
conffile.close()
# As of LSB-2.0, the LSB Version string is comprised of colon separated modules
# A module can be represented directly in the LSB_VERSION value or consist
# of empty files with the name of the module in /etc/lsb-release.d/
lsbver = ''
if not os.path.isdir("/etc/lsb-release.d"):
lsbver = config['LSB_VERSION']
else:
if len(os.listdir('/etc/lsb-release.d')) == 0:
lsbver = config['LSB_VERSION']
else:
if config['LSB_VERSION'] != 'unavailable':
lsbver = config['LSB_VERSION']
# See what else is there
for lsbfile in os.listdir('/etc/lsb-release.d'):
if lsbver == '':
        lsbver = os.path.basename(lsbfile)
else:
lsbver = lsbver + ":" + lsbfile
# Set the LSB Version to our assembled string
config['LSB_VERSION'] = lsbver.strip(' ')
if lv == 1:
if ls == 1:
printval = printval + " " + config['LSB_VERSION']
else:
lprintval = "LSB Version:\t" + config['LSB_VERSION']
print(lprintval)
if li == 1:
if ls == 1:
printval = printval + " " + config['DISTRIB_ID']
else:
lprintval = "Distributor ID:\t" + config['DISTRIB_ID']
print(lprintval)
if ld == 1:
if ls == 1:
printval = printval + " " + config['DISTRIB_DESCRIPTION']
else:
lprintval = "Description:\t" + config['DISTRIB_DESCRIPTION']
print(lprintval)
if lr == 1:
if ls == 1:
printval = printval + " " + config['DISTRIB_RELEASE']
else:
lprintval = "Release:\t" + config['DISTRIB_RELEASE']
print(lprintval)
if lc == 1:
if ls == 1:
printval = printval + " " + config['DISTRIB_CODENAME']
else:
lprintval = "Codename:\t" + config['DISTRIB_CODENAME']
print(lprintval)
if ls == 1:
print(printval.strip(" "))
| 27.677419 | 156 | 0.660373 |
aced01a805efc3ee576f8f59feff3ba765323859 | 17,464 | py | Python | pyinfra/api/connectors/ssh.py | marianod92/pyinfra | 8578430329d9a582ba92545859adb8fc45854f5b | [
"MIT"
] | null | null | null | pyinfra/api/connectors/ssh.py | marianod92/pyinfra | 8578430329d9a582ba92545859adb8fc45854f5b | [
"MIT"
] | null | null | null | pyinfra/api/connectors/ssh.py | marianod92/pyinfra | 8578430329d9a582ba92545859adb8fc45854f5b | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals
from distutils.spawn import find_executable
from getpass import getpass
from os import path
from socket import (
error as socket_error,
gaierror,
)
import click
import six
from paramiko import (
AuthenticationException,
DSSKey,
ECDSAKey,
Ed25519Key,
PasswordRequiredException,
RSAKey,
SFTPClient,
SSHException,
)
import pyinfra
from pyinfra import logger
from pyinfra.api.command import QuoteString, StringCommand
from pyinfra.api.exceptions import ConnectError, PyinfraError
from pyinfra.api.util import get_file_io, memoize
from .sshuserclient import SSHClient
from .util import (
execute_command_with_sudo_retry,
make_unix_command_for_host,
read_buffers_into_queue,
run_local_process,
split_combined_output,
write_stdin,
)
EXECUTION_CONNECTOR = True
def make_names_data(hostname):
yield '@ssh/{0}'.format(hostname), {'ssh_hostname': hostname}, []
def _raise_connect_error(host, message, data):
message = '{0} ({1})'.format(message, data)
raise ConnectError(message)
def _load_private_key_file(filename, key_filename, key_password):
exception = PyinfraError('Invalid key: {0}'.format(filename))
for key_cls in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
try:
return key_cls.from_private_key_file(
filename=filename,
)
except PasswordRequiredException:
if not key_password:
# If password is not provided, but we're in CLI mode, ask for it. I'm not a
# huge fan of having CLI specific code in here, but it doesn't really fit
# anywhere else without duplicating lots of key related code into cli.py.
if pyinfra.is_cli:
key_password = getpass(
'Enter password for private key: {0}: '.format(
key_filename,
),
)
# API mode and no password? We can't continue!
else:
raise PyinfraError(
'Private key file ({0}) is encrypted, set ssh_key_password to '
'use this key'.format(key_filename),
)
try:
return key_cls.from_private_key_file(
filename=filename,
password=key_password,
)
except SSHException as e: # key does not match key_cls type
exception = e
except SSHException as e: # key does not match key_cls type
exception = e
raise exception
def _get_private_key(state, key_filename, key_password):
if key_filename in state.private_keys:
return state.private_keys[key_filename]
ssh_key_filenames = [
# Global from executed directory
path.expanduser(key_filename),
]
# Relative to the deploy
if state.deploy_dir:
ssh_key_filenames.append(
path.join(state.deploy_dir, key_filename),
)
key = False
key_file_exists = False
for filename in ssh_key_filenames:
if not path.isfile(filename):
continue
key_file_exists = True
try:
key = _load_private_key_file(filename, key_filename, key_password)
break
except SSHException:
pass
# No break, so no key found
if not key:
if not key_file_exists:
raise PyinfraError('No such private key file: {0}'.format(key_filename))
# TODO: upgrade min paramiko version to 2.7 and remove this (pyinfra v2)
extra_info = ''
from pkg_resources import get_distribution, parse_version
if get_distribution('paramiko').parsed_version < parse_version('2.7'):
extra_info = (
'\n Paramiko versions under 2.7 do not support the latest OpenSSH key formats,'
' upgrading may fix this error.'
'\n For more information, see this issue: '
'https://github.com/Fizzadar/pyinfra/issues/548'
)
raise PyinfraError('Invalid private key file: {0}{1}'.format(key_filename, extra_info))
# Load any certificate, names from OpenSSH:
# https://github.com/openssh/openssh-portable/blob/049297de975b92adcc2db77e3fb7046c0e3c695d/ssh-keygen.c#L2453 # noqa: E501
for certificate_filename in (
'{0}-cert.pub'.format(key_filename),
'{0}.pub'.format(key_filename),
):
if path.isfile(certificate_filename):
key.load_certificate(certificate_filename)
state.private_keys[key_filename] = key
return key
def _make_paramiko_kwargs(state, host):
kwargs = {
'allow_agent': False,
'look_for_keys': False,
'hostname': host.data.ssh_hostname or host.name,
# Special pyinfra specific kwarg for our custom SSHClient
'_pyinfra_force_forward_agent': host.data.ssh_forward_agent,
# Special pyinfra specific kwarg to select an alternative SSH config
'_pyinfra_ssh_config_file': host.data.ssh_config_file,
}
for key, value in (
('username', host.data.ssh_user),
('port', int(host.data.ssh_port or 0)),
('timeout', state.config.CONNECT_TIMEOUT),
):
if value:
kwargs[key] = value
# Password auth (boo!)
if host.data.ssh_password:
kwargs['password'] = host.data.ssh_password
# Key auth!
elif host.data.ssh_key:
kwargs['pkey'] = _get_private_key(
state,
key_filename=host.data.ssh_key,
key_password=host.data.ssh_key_password,
)
# No key or password, so let's have paramiko look for SSH agents and user keys
# unless disabled by the user.
else:
kwargs['allow_agent'] = host.data.get('ssh_allow_agent', True)
kwargs['look_for_keys'] = host.data.get('ssh_look_for_keys', True)
return kwargs
def connect(state, host):
'''
    Connect to a single host. Returns the SSH client if successful. Stateless by
    design so it can be run in parallel.
'''
kwargs = _make_paramiko_kwargs(state, host)
logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))
hostname = kwargs.pop('hostname')
try:
# Create new client & connect to the host
client = SSHClient()
client.connect(hostname, **kwargs)
return client
except AuthenticationException as e:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
continue
if key == 'pkey' and value:
auth_kwargs['key'] = host.data.ssh_key
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
_raise_connect_error(host, 'Authentication error ({0})'.format(auth_args), e)
except SSHException as e:
_raise_connect_error(host, 'SSH error', e)
except gaierror:
_raise_connect_error(host, 'Could not resolve hostname', hostname)
except socket_error as e:
_raise_connect_error(host, 'Could not connect', e)
except EOFError as e:
_raise_connect_error(host, 'EOF error', e)
def run_shell_command(
state, host, command,
get_pty=False,
timeout=None,
stdin=None,
success_exit_codes=None,
print_output=False,
print_input=False,
return_combined_output=False,
**command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): environment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
def execute_command():
unix_command = make_unix_command_for_host(state, host, command, **command_kwargs)
actual_command = unix_command.get_raw_value()
logger.debug('Running command on {0}: (pty={1}) {2}'.format(
host.name, get_pty, unix_command,
))
if print_input:
click.echo('{0}>>> {1}'.format(host.print_prefix, unix_command), err=True)
# Run it! Get stdout, stderr & the underlying channel
stdin_buffer, stdout_buffer, stderr_buffer = host.connection.exec_command(
actual_command,
get_pty=get_pty,
)
if stdin:
write_stdin(stdin, stdin_buffer)
combined_output = read_buffers_into_queue(
stdout_buffer,
stderr_buffer,
timeout=timeout,
print_output=print_output,
print_prefix=host.print_prefix,
)
logger.debug('Waiting for exit status...')
exit_status = stdout_buffer.channel.recv_exit_status()
logger.debug('Command exit status: {0}'.format(exit_status))
return exit_status, combined_output
return_code, combined_output = execute_command_with_sudo_retry(
host, command_kwargs, execute_command,
)
if success_exit_codes:
status = return_code in success_exit_codes
else:
status = return_code == 0
if return_combined_output:
return status, combined_output
stdout, stderr = split_combined_output(combined_output)
return status, stdout, stderr
@memoize
def _get_sftp_connection(host):
transport = host.connection.get_transport()
try:
return SFTPClient.from_transport(transport)
except SSHException as e:
six.raise_from(ConnectError((
'Unable to establish SFTP connection. Check that the SFTP subsystem '
'for the SSH service at {0} is enabled.'
).format(host)), e)
def _get_file(host, remote_filename, filename_or_io):
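    # Stream the remote file into a local path or file-like object over the memoized SFTP session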
with get_file_io(filename_or_io, 'wb') as file_io:
sftp = _get_sftp_connection(host)
sftp.getfo(remote_filename, file_io)
def get_file(
state, host, remote_filename, filename_or_io,
remote_temp_filename=None,
sudo=False, sudo_user=None, su_user=None,
print_output=False, print_input=False,
**command_kwargs
):
'''
    Download a file from the remote host using SFTP. Supports downloading files
with sudo by copying to a temporary directory with read permissions,
downloading and then removing the copy.
'''
if sudo or su_user:
# Get temp file location
temp_file = remote_temp_filename or state.get_temp_filename(remote_filename)
# Copy the file to the tempfile location and add read permissions
command = 'cp {0} {1} && chmod +r {0}'.format(remote_filename, temp_file)
copy_status, _, stderr = run_shell_command(
state, host, command,
sudo=sudo, sudo_user=sudo_user, su_user=su_user,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if copy_status is False:
logger.error('File download copy temp error: {0}'.format('\n'.join(stderr)))
return False
try:
_get_file(host, temp_file, filename_or_io)
# Ensure that, even if we encounter an error, we (attempt to) remove the
# temporary copy of the file.
finally:
remove_status, _, stderr = run_shell_command(
state, host, 'rm -f {0}'.format(temp_file),
sudo=sudo, sudo_user=sudo_user, su_user=su_user,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if remove_status is False:
logger.error('File download remove temp error: {0}'.format('\n'.join(stderr)))
return False
else:
_get_file(host, remote_filename, filename_or_io)
if print_output:
click.echo(
'{0}file downloaded: {1}'.format(host.print_prefix, remote_filename),
err=True,
)
return True
def _put_file(host, filename_or_io, remote_location):
with get_file_io(filename_or_io) as file_io:
sftp = _get_sftp_connection(host)
sftp.putfo(file_io, remote_location)
def put_file(
state, host, filename_or_io, remote_filename,
remote_temp_filename=None,
sudo=False, sudo_user=None, su_user=None,
print_output=False, print_input=False,
**command_kwargs
):
'''
    Upload files or file-like objects to the specified host using SFTP. Supports uploading files
with sudo by uploading to a temporary directory then moving & chowning.
'''
# sudo/su are a little more complicated, as you can only sftp with the SSH
# user connected, so upload to tmp and copy/chown w/sudo and/or su_user
if sudo or su_user:
# Get temp file location
temp_file = remote_temp_filename or state.get_temp_filename(remote_filename)
_put_file(host, filename_or_io, temp_file)
# Make sure our sudo/su user can access the file
if su_user:
command = StringCommand('setfacl', '-m', 'u:{0}:r'.format(su_user), temp_file)
elif sudo_user:
command = StringCommand('setfacl -m u:{0}:r'.format(sudo_user), temp_file)
if su_user or sudo_user:
status, _, stderr = run_shell_command(
state, host, command,
sudo=False,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('Error on handover to sudo/su user: {0}'.format('\n'.join(stderr)))
return False
# Execute run_shell_command w/sudo and/or su_user
command = StringCommand('cp', temp_file, QuoteString(remote_filename))
status, _, stderr = run_shell_command(
state, host, command,
sudo=sudo, sudo_user=sudo_user, su_user=su_user,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
# Delete the temporary file now that we've successfully copied it
command = StringCommand('rm', '-f', temp_file)
status, _, stderr = run_shell_command(
state, host, command,
sudo=False,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('Unable to remove temporary file: {0}'.format('\n'.join(stderr)))
return False
# No sudo and no su_user, so just upload it!
else:
_put_file(host, filename_or_io, remote_filename)
if print_output:
click.echo(
'{0}file uploaded: {1}'.format(host.print_prefix, remote_filename),
err=True,
)
return True
def check_can_rsync(host):
if host.data.ssh_key_password:
raise NotImplementedError('Rsync does not currently work with SSH keys needing passwords.')
if host.data.ssh_password:
raise NotImplementedError('Rsync does not currently work with SSH passwords.')
if not find_executable('rsync'):
raise NotImplementedError('The `rsync` binary is not available on this system.')
def rsync(
state, host, src, dest, flags,
print_output=False, print_input=False,
sudo=False,
sudo_user=None,
**ignored_kwargs
):
hostname = host.data.ssh_hostname or host.name
user = ''
if host.data.ssh_user:
user = '{0}@'.format(host.data.ssh_user)
ssh_flags = []
port = host.data.ssh_port
if port:
ssh_flags.append('-p {0}'.format(port))
ssh_key = host.data.ssh_key
if ssh_key:
ssh_flags.append('-i {0}'.format(ssh_key))
remote_rsync_command = 'rsync'
if sudo:
remote_rsync_command = 'sudo rsync'
if sudo_user:
remote_rsync_command = 'sudo -u {0} rsync'.format(sudo_user)
# To avoid asking for interactive input, specify BatchMode=yes
rsync_command = (
'rsync {rsync_flags} '
"--rsh 'ssh -o BatchMode=yes {ssh_flags}' "
"--rsync-path '{remote_rsync_command}' "
'{src} {user}{hostname}:{dest}'
).format(
rsync_flags=' '.join(flags),
ssh_flags=' '.join(ssh_flags),
remote_rsync_command=remote_rsync_command,
user=user, hostname=hostname,
src=src, dest=dest,
)
if print_input:
click.echo('{0}>>> {1}'.format(host.print_prefix, rsync_command), err=True)
return_code, combined_output = run_local_process(
rsync_command,
print_output=print_output,
print_prefix=host.print_prefix,
)
status = return_code == 0
if not status:
_, stderr = split_combined_output(combined_output)
raise IOError('\n'.join(stderr))
return True
| 31.35368 | 128 | 0.628665 |
aced01df918f868a913e0ba6d5c4b3bab8e86e74 | 2,787 | py | Python | study/models/ConvLSTM/ConvLSTMCell.py | dreaming-coder/STudy | c16b787bff5b2d54d2d1847629e0ff198819956f | [
"MIT"
] | 25 | 2021-07-02T12:39:16.000Z | 2022-03-24T08:43:02.000Z | study/models/ConvLSTM/ConvLSTMCell.py | dreaming-coder/STudy | c16b787bff5b2d54d2d1847629e0ff198819956f | [
"MIT"
] | null | null | null | study/models/ConvLSTM/ConvLSTMCell.py | dreaming-coder/STudy | c16b787bff5b2d54d2d1847629e0ff198819956f | [
"MIT"
] | 4 | 2021-07-04T00:52:02.000Z | 2021-07-26T02:46:13.000Z | from typing import Tuple
import torch
from torch import nn, Tensor
__all__ = ["ConvLSTMCell"]
class ConvLSTMCell(nn.Module):
def __init__(self, in_channels: int, hidden_channels: int, size: Tuple[int, int],
kernel_size: int = 3, forget_bias: float = 0.01):
"""
        :param in_channels: number of input channels
        :param hidden_channels: number of hidden-state channels
        :param size: spatial size of the input, i.e. (Height, Width)
        :param kernel_size: convolution kernel size
        :param forget_bias: bias added to the forget gate
"""
super(ConvLSTMCell, self).__init__()
self.hidden_channels = hidden_channels
self.forget_bias = forget_bias
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
padding = tuple([f // 2 for f in kernel_size])
self.conv_x = nn.Conv2d(in_channels=in_channels, out_channels=hidden_channels * 4,
kernel_size=kernel_size, padding=padding, stride=(1, 1))
self.conv_h = nn.Conv2d(in_channels=hidden_channels, out_channels=hidden_channels * 4,
kernel_size=kernel_size, padding=padding, stride=(1, 1))
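        # Peephole weights: elementwise connections from the cell state to the input/output gates
        # (w_cf is defined for the forget gate but is not applied in forward below).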
self.w_ci = nn.Parameter(torch.zeros(1, hidden_channels, *size), requires_grad=True)
self.w_cf = nn.Parameter(torch.zeros(1, hidden_channels, *size), requires_grad=True)
self.w_co = nn.Parameter(torch.zeros(1, hidden_channels, *size), requires_grad=True)
def forward(self, x: Tensor, h: Tensor, c: Tensor) -> Tuple[Tensor, Tensor]:
"""
        :param x: one timestep of an input batch, shape (B, in_channels, H, W)
        :param h: hidden state, shape (B, hidden_channels, H, W)
        :param c: cell state (the cell's memory), shape (B, hidden_channels, H, W)
        :return: the updated h and c
"""
if x is None and (h is None or c is None):
raise ValueError("x 和 [h, c] 不能同时为 None")
x_concat = self.conv_x(x)
h_concat = self.conv_h(h)
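        # Each convolution emits 4 * hidden_channels feature maps: one slice per gate
        # (input, forget, cell candidate, output), split along the channel dimension.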
i_x, f_x, c_x, o_x = torch.split(x_concat, self.hidden_channels, dim=1)
i_h, f_h, c_h, o_h = torch.split(h_concat, self.hidden_channels, dim=1)
i = torch.sigmoid(i_x + i_h + self.w_ci * c)
f = torch.sigmoid(f_x + f_h + self.forget_bias)
c = f * c + i * torch.tanh(c_x + c_h)
o = torch.sigmoid(o_x + o_h + self.w_co * c)
h = o * torch.tanh(c)
return h, c
# if __name__ == '__main__':
# device = "cuda"
# cell = ConvLSTMCell(in_channels=64, hidden_channels=96, size=(50, 50)).to(device)
# x = torch.ones(3, 64, 50, 50).to(device)
# h = torch.zeros(3, 96, 50, 50).to(device)
# c = torch.zeros(3, 96, 50, 50).to(device)
# hh, cc = cell(x, h, c)
# print(hh.shape)
# print(cc.shape)
# hh.sum().backward()
| 38.178082 | 94 | 0.591676 |
aced025d1e4b1aa498460cd7714c134d4a981adc | 4,050 | py | Python | src/libnrl/graph.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | src/libnrl/graph.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | src/libnrl/graph.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | """Graph utilities."""
# from time import time
import networkx as nx
import pickle as pkl
import numpy as np
import scipy.sparse as sp
__author__ = "Zhang Zhengyan"
__email__ = "zhangzhengyan14@mails.tsinghua.edu.cn"
class Graph(object):
def __init__(self):
self.G = None
self.look_up_dict = {}
self.look_back_list = []
self.node_size = 0
# Yue: stores the feature vectors for singular nodes
self.singular_node_dict = {}
def encode_node(self):
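        # Assign each node a consecutive integer id: look_up maps name -> id,
        # look_back maps id -> name.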
look_up = self.look_up_dict
look_back = self.look_back_list
for node in self.G.nodes():
look_up[node] = self.node_size
look_back.append(node)
self.node_size += 1
self.G.nodes[node]['status'] = ''
def read_adjlist(self, filename):
""" Read graph from adjacency file in which the edge must be unweighted
the format of each line: v1 n1 n2 n3 ... nk
:param filename: the filename of input file
"""
self.G = nx.read_adjlist(filename, create_using=nx.DiGraph())
for i, j in self.G.edges():
self.G[i][j]['weight'] = 1.0
self.encode_node()
def read_edgelist(self, filename, weighted=False, directed=False):
self.G = nx.DiGraph()
if directed:
def read_unweighted(l):
src, dst = l.split()
self.G.add_edge(src, dst)
self.G[src][dst]['weight'] = 1.0
def read_weighted(l):
src, dst, w = l.split()
self.G.add_edge(src, dst)
self.G[src][dst]['weight'] = float(w)
else:
def read_unweighted(l):
src, dst = l.split()
self.G.add_edge(src, dst)
self.G.add_edge(dst, src)
self.G[src][dst]['weight'] = 1.0
self.G[dst][src]['weight'] = 1.0
def read_weighted(l):
src, dst, w = l.split()
self.G.add_edge(src, dst)
self.G.add_edge(dst, src)
self.G[src][dst]['weight'] = float(w)
self.G[dst][src]['weight'] = float(w)
fin = open(filename, 'r')
func = read_unweighted
if weighted:
func = read_weighted
while 1:
l = fin.readline()
if l == '':
break
func(l)
fin.close()
# self.encode_node()
def read_node_label(self, filename):
fin = open(filename, 'r')
while 1:
l = fin.readline()
if l == '':
break
vec = l.split()
self.G.nodes[vec[0]]['label'] = vec[1:]
fin.close()
def read_node_features(self, filename):
self.sgl_node_list = []
fin = open(filename, 'r')
for l in fin.readlines():
vec = l.split()
if vec[0] in self.G.nodes:
self.G.nodes[vec[0]]['feature'] = np.array([float(x) for x in vec[1:]])
else:
# Yue: still put the feature vectors for singular nodes so that they are considered as embeddings
self.sgl_node_list.append(vec[0])
self.singular_node_dict[vec[0]] = np.array([float(x) for x in vec[1:]])
# self.G.add_node(vec[0])
# self.G.nodes[vec[0]]['feature'] = np.array([float(x) for x in vec[1:]])
fin.close()
self.encode_node()
def read_node_status(self, filename):
fin = open(filename, 'r')
while 1:
l = fin.readline()
if l == '':
break
vec = l.split()
self.G.nodes[vec[0]]['status'] = vec[1] # train test valid
fin.close()
def read_edge_label(self, filename):
fin = open(filename, 'r')
while 1:
l = fin.readline()
if l == '':
break
vec = l.split()
self.G[vec[0]][vec[1]]['label'] = vec[2:]
fin.close()
| 31.889764 | 113 | 0.502469 |
aced02866ba2a3c85bfb53afe29e437001ff8aa6 | 24,356 | py | Python | covid/covid.py | Kuro-Rui/flare-cogs | f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d | [
"MIT"
] | 38 | 2021-03-07T17:13:10.000Z | 2022-02-28T19:50:00.000Z | covid/covid.py | Kuro-Rui/flare-cogs | f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d | [
"MIT"
] | 44 | 2021-03-12T19:13:32.000Z | 2022-03-18T10:20:52.000Z | covid/covid.py | Kuro-Rui/flare-cogs | f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d | [
"MIT"
] | 33 | 2021-03-08T18:59:59.000Z | 2022-03-23T10:57:46.000Z | import datetime
import typing
import aiohttp
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import humanize_number
from .menus import ArticleFormat, CovidMenu, CovidStateMenu, GenericMenu
class Covid(commands.Cog):
"""Covid-19 (Novel Coronavirus Stats)."""
__version__ = "0.3.1"
def format_help_for_context(self, ctx):
"""Thanks Sinbad."""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot):
self.bot = bot
self.api = "https://disease.sh/v3/covid-19"
self.newsapi = "https://newsapi.org/v2/top-headlines?q=COVID&sortBy=publishedAt&pageSize=100&country={}&apiKey={}&page=1"
self.session = aiohttp.ClientSession()
self.newsapikey = None
async def red_get_data_for_user(self, *, user_id: int):
# this cog does not story any data
return {}
async def red_delete_data_for_user(self, *, requester, user_id: int) -> None:
# this cog does not story any data
pass
async def initalize(self):
token = await self.bot.get_shared_api_tokens("newsapi")
self.newsapikey = token.get("key", None)
@commands.Cog.listener()
async def on_red_api_tokens_update(self, service_name, api_tokens):
if service_name == "newsapi":
self.newsapikey = api_tokens.get("key", None)
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
async def get(self, url):
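        # Fetch JSON from the API; any failure is returned as {"failed": <message>} for callers to surface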
async with self.session.get(url) as response:
try:
data = await response.json()
except aiohttp.ContentTypeError:
return {
"failed": "Their appears to be an issue with the API. Please try again later."
}
if response.status != 200:
return {"failed": data["message"]}
try:
if isinstance(data, dict) and data.get("message") is not None:
return {"failed": data["message"]}
return data
except aiohttp.ServerTimeoutError:
return {
"failed": "Their appears to be an issue with the API. Please try again later."
}
@commands.command(hidden=True)
async def covidcountries(self, ctx):
"""Countries supported by covidnews."""
await ctx.send(
"Valid country codes are:\nae ar at au be bg br ca ch cn co cu cz de eg fr gb gr hk hu id ie il in it jp kr lt lv ma mx my ng nl no nz ph pl pt ro rs ru sa se sg si sk th tr tw ua us ve za"
)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def covidnews(self, ctx, countrycode: str):
"""Covid News from a Country - County must be 2-letter ISO 3166-1 code.
Check [p]covidcountries for a list of all possible country codes supported."""
async with ctx.typing():
data = await self.get(self.newsapi.format(countrycode, self.newsapikey))
if data.get("failed") is not None:
return await ctx.send(data.get("failed"))
if data["totalResults"] == 0:
return await ctx.send(
"No results found, ensure you're looking up the correct country code. Check {}covidcountries for a list.".format(
ctx.prefix
)
)
await GenericMenu(source=ArticleFormat(data["articles"]), ctx=ctx,).start(
ctx=ctx,
wait=False,
)
@commands.command()
@commands.is_owner()
async def covidsetup(self, ctx):
"""Instructions on how to setup covid related APIs."""
msg = "**Covid News API Setup**\n**1**. Visit https://newsapi.org and register for an API.\n**2**. Use the following command: {}set api newsapi key <api_key_here>\n**3**. Reload the cog if it doesnt work immediately.".format(
ctx.prefix
)
await ctx.maybe_send_embed(msg)
@commands.group(invoke_without_command=True)
@commands.bot_has_permissions(embed_links=True)
async def covid(self, ctx, *, country: typing.Optional[str]):
"""Stats about Covid-19 or countries if provided.
        Supports multiple countries separated by a comma.
Example: [p]covid Ireland, England
"""
if not country:
async with ctx.typing():
data = await self.get(self.api + "/all")
if isinstance(data, dict) and data.get("failed") is not None:
return await ctx.send(data.get("failed"))
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 Global Statistics",
timestamp=datetime.datetime.utcfromtimestamp(data["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data["recovered"]))
embed.add_field(name="Critical", value=humanize_number(data["critical"]))
embed.add_field(name="Active", value=humanize_number(data["active"]))
embed.add_field(
name="Affected Countries", value=humanize_number(data["affectedCountries"])
)
embed.add_field(name="Cases Today", value=humanize_number(data["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data["todayDeaths"]))
embed.add_field(name="Recovered Today", value=humanize_number(data["todayRecovered"]))
embed.add_field(name="Total Tests", value=humanize_number(data["tests"]))
await ctx.send(embed=embed)
else:
async with ctx.typing():
data = await self.get(self.api + "/countries/{}".format(country))
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
data = [data]
if not data:
return await ctx.send("No data available.")
await GenericMenu(source=CovidMenu(data), ctx=ctx, type="Today").start(
ctx=ctx,
wait=False,
)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def yesterday(self, ctx, *, country: str):
"""Show the statistics from yesterday for countries.
        Supports multiple countries separated by a comma.
Example: [p]covid yesterday Ireland, England
"""
async with ctx.typing():
data = await self.get(self.api + "/countries/{}?yesterday=1".format(country))
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
data = [data]
if not data:
return await ctx.send("No data available.")
await GenericMenu(source=CovidMenu(data), ctx=ctx, type="Yesterday").start(
ctx=ctx,
wait=False,
)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def todaycases(self, ctx):
"""Show the highest cases from countrys today."""
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=todayCases")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Highest Cases Today | {}".format(data[0]["country"]),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data[0]["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data[0]["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data[0]["recovered"]))
embed.add_field(name="Cases Today", value=humanize_number(data[0]["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data[0]["todayDeaths"]))
embed.add_field(name="Critical Condition", value=humanize_number(data[0]["critical"]))
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def todaydeaths(self, ctx):
"""Show the highest deaths from countrys today."""
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=todayDeaths")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Highest Deaths Today | {}".format(data[0]["country"]),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data[0]["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data[0]["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data[0]["recovered"]))
embed.add_field(name="Cases Today", value=humanize_number(data[0]["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data[0]["todayDeaths"]))
embed.add_field(name="Critical Condition", value=humanize_number(data[0]["critical"]))
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def highestcases(self, ctx):
"""Show the highest cases from countrys overall."""
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=cases")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Highest Cases Overall | {}".format(data[0]["country"]),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data[0]["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data[0]["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data[0]["recovered"]))
embed.add_field(name="Cases Today", value=humanize_number(data[0]["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data[0]["todayDeaths"]))
embed.add_field(name="Critical Condition", value=humanize_number(data[0]["critical"]))
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def highestdeaths(self, ctx):
"""Show the highest deaths from countrys overall."""
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=deaths")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Highest Deaths Overall | {}".format(data[0]["country"]),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data[0]["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data[0]["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data[0]["recovered"]))
embed.add_field(name="Cases Today", value=humanize_number(data[0]["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data[0]["todayDeaths"]))
embed.add_field(name="Critical Condition", value=humanize_number(data[0]["critical"]))
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def topcases(self, ctx, amount: int = 6):
"""Show X countries with top amount of cases.
Defaults to 6.
"""
if amount > 20 or amount < 0:
return await ctx.send("Invalid amount. Please choose between an amount between 1-20.")
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=cases")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Top {} Cases ".format(amount),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
for i in range(amount):
msg = f'**Cases**: {humanize_number(data[i]["cases"])}\n**Deaths**: {humanize_number(data[i]["deaths"])}\n**Recovered**: {humanize_number(data[i]["recovered"])}\n**Cases Today**: {humanize_number(data[i]["todayCases"])}\n**Deaths**: {humanize_number(data[i]["todayDeaths"])}\n**Critical**: {humanize_number(data[i]["critical"])}'
embed.add_field(name=data[i]["country"], value=msg)
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def topcasestoday(self, ctx, amount: int = 6):
"""Show X countries with top amount of cases today.
Defaults to 6.
"""
if amount > 20 or amount < 0:
return await ctx.send("Invalid amount. Please choose between an amount between 1-20.")
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=todayCases")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Top {} Cases Today ".format(amount),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
for i in range(amount):
msg = f'**Cases**: {humanize_number(data[i]["cases"])}\n**Deaths**: {humanize_number(data[i]["deaths"])}\n**Recovered**: {humanize_number(data[i]["recovered"])}\n**Cases Today**: {humanize_number(data[i]["todayCases"])}\n**Deaths**: {humanize_number(data[i]["todayDeaths"])}\n**Critical**: {humanize_number(data[i]["critical"])}'
embed.add_field(name=data[i]["country"], value=msg)
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def topdeaths(self, ctx, amount: int = 6):
"""Show X countries with top amount of deaths.
Defaults to 6.
"""
if amount > 20 or amount < 0:
return await ctx.send("Invalid amount. Please choose between an amount between 1-20.")
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=deaths")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Top {} Deaths ".format(amount),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
for i in range(amount):
msg = f'**Cases**: {humanize_number(data[i]["cases"])}\n**Deaths**: {humanize_number(data[i]["deaths"])}\n**Recovered**: {humanize_number(data[i]["recovered"])}\n**Cases Today**: {humanize_number(data[i]["todayCases"])}\n**Deaths**: {humanize_number(data[i]["todayDeaths"])}\n**Critical**: {humanize_number(data[i]["critical"])}'
embed.add_field(name=data[i]["country"], value=msg)
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def topdeathstoday(self, ctx, amount: int = 6):
"""Show X countries with top amount of deaths today.
Defaults to 6.
"""
if amount > 20 or amount < 0:
return await ctx.send("Invalid amount. Please choose between an amount between 1-20.")
async with ctx.typing():
data = await self.get(self.api + "/countries?sort=todayDeaths")
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 | Top {} Deaths Today ".format(amount),
timestamp=datetime.datetime.utcfromtimestamp(data[0]["updated"] / 1000),
)
for i in range(amount):
msg = f'**Cases**: {humanize_number(data[i]["cases"])}\n**Deaths**: {humanize_number(data[i]["deaths"])}\n**Recovered**: {humanize_number(data[i]["recovered"])}\n**Cases Today**: {humanize_number(data[i]["todayCases"])}\n**Deaths**: {humanize_number(data[i]["todayDeaths"])}\n**Critical**: {humanize_number(data[i]["critical"])}'
embed.add_field(name=data[i]["country"], value=msg)
await ctx.send(embed=embed)
@covid.group(invoke_without_command=True)
@commands.bot_has_permissions(embed_links=True)
async def state(self, ctx, *, states: str):
"""Show stats for specific states.
        Supports multiple states separated by a comma.
Example: [p]covid state New York, California
"""
if not states:
return await ctx.send_help()
async with ctx.typing():
states = ",".join(states.split(", "))
data = await self.get(self.api + "/states/{}".format(states))
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
data = [data]
if not data:
return await ctx.send("No data available.")
await GenericMenu(source=CovidStateMenu(data), ctx=ctx, type="Today").start(
ctx=ctx,
wait=False,
)
@state.command(name="yesterday")
@commands.bot_has_permissions(embed_links=True)
async def _yesterday(self, ctx, *, states: str):
"""Show stats for yesterday for specific states.
        Supports multiple states separated by a comma.
Example: [p]covid state yesterday New York, California.
"""
async with ctx.typing():
states = ",".join(states.split(", "))
data = await self.get(self.api + "/states/{}?yesterday=1".format(states))
if isinstance(data, dict):
error = data.get("failed")
if error is not None:
return await ctx.send(error)
data = [data]
if not data:
return await ctx.send("No data available.")
await GenericMenu(source=CovidStateMenu(data), ctx=ctx, type="Yesterday").start(
ctx=ctx,
wait=False,
)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def continent(self, ctx, *, continent: str):
"""Stats about Covid-19 for a particular continent.
Example: [p]covid continent europe
"""
async with ctx.typing():
data = await self.get(self.api + f"/continents/{continent}")
if isinstance(data, dict) and data.get("failed") is not None:
return await ctx.send(data.get("failed"))
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title=f"Covid-19 {continent.title()} Statistics",
timestamp=datetime.datetime.utcfromtimestamp(data["updated"] / 1000),
)
embed.add_field(name="Cases", value=humanize_number(data["cases"]))
embed.add_field(name="Deaths", value=humanize_number(data["deaths"]))
embed.add_field(name="Recovered", value=humanize_number(data["recovered"]))
embed.add_field(name="Critical", value=humanize_number(data["critical"]))
embed.add_field(name="Active", value=humanize_number(data["active"]))
embed.add_field(name="Cases Today", value=humanize_number(data["todayCases"]))
embed.add_field(name="Deaths Today", value=humanize_number(data["todayDeaths"]))
embed.add_field(name="Recovered Today", value=humanize_number(data["todayRecovered"]))
embed.add_field(name="Total Tests", value=humanize_number(data["tests"]))
await ctx.send(embed=embed)
@covid.command()
@commands.bot_has_permissions(embed_links=True)
async def vaccine(self, ctx, *, country: typing.Optional[str]):
"""Stats about Covid-19 vaccinate data globally or per country.
Example: [p]covid vaccine
[p]covid vaccine ireland
"""
if not country:
async with ctx.typing():
data = await self.get(self.api + "/vaccine/coverage")
if isinstance(data, dict) and data.get("failed") is not None:
return await ctx.send(data.get("failed"))
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title="Covid-19 Global Vaccine Statistics",
)
msg = "".join(
f"{datetime.datetime.strptime(day, '%m/%d/%y').strftime('%d-%m-%Y')}: {humanize_number(data[day])}\n"
for day in data
)
else:
async with ctx.typing():
data = await self.get(self.api + "/vaccine/coverage/countries/{}".format(country))
if isinstance(data, dict) and data.get("failed") is not None:
return await ctx.send(data.get("failed"))
if not data:
return await ctx.send("No data available.")
embed = discord.Embed(
color=await self.bot.get_embed_color(ctx.channel),
title=f"Covid-19 {data['country']} Vaccine Statistics",
)
msg = "".join(
f"{datetime.datetime.strptime(day, '%m/%d/%y').strftime('%d-%m-%Y')}: {humanize_number(data['timeline'][day])}\n"
for day in data["timeline"]
)
embed.description = f"```{msg}```"
await ctx.send(embed=embed)
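    # For context: the `self.get` helper used by the commands above is defined
    # earlier in this cog. A minimal sketch of the behavior those commands
    # assume, inferred from how they check `data.get("failed")` (hypothetical,
    # not the cog's actual implementation):
    #
    #     async def get(self, url):
    #         async with self.session.get(url) as resp:
    #             if resp.status != 200:
    #                 return {"failed": "The Covid API returned an error."}
    #             return await resp.json()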
| 48.421471 | 345 | 0.588192 |
aced03361ead515d19379d8b9c51d96844bce8ea | 103 | py | Python | hex.py | krishna-prasath/My_programs | 96bac29eb448beda2dda439ac7adea7a5343c066 | [
"bzip2-1.0.6"
] | null | null | null | hex.py | krishna-prasath/My_programs | 96bac29eb448beda2dda439ac7adea7a5343c066 | [
"bzip2-1.0.6"
] | null | null | null | hex.py | krishna-prasath/My_programs | 96bac29eb448beda2dda439ac7adea7a5343c066 | [
"bzip2-1.0.6"
] | null | null | null | a=input()
s = []
# The original loop never updated the value being halved and passed a keyword
# argument to list.append; the repeated /2 and %2 suggest a decimal-to-binary
# conversion was intended, so this sketch assumes that intent.
n = int(a)
while n > 0:
    s.append(n % 2)
    n = n // 2
print(''.join(str(bit) for bit in reversed(s)) or '0')
| 12.875 | 27 | 0.446602 |
aced04ba5dee9485a32ed76d550cc3bcafe9298f | 2,172 | py | Python | tests/test_robotws_util.py | nokia/crl-doc | fee1c26e93f9492ad7b8681c0e27d2048c968cdd | [
"BSD-3-Clause"
] | null | null | null | tests/test_robotws_util.py | nokia/crl-doc | fee1c26e93f9492ad7b8681c0e27d2048c968cdd | [
"BSD-3-Clause"
] | 5 | 2019-08-30T12:13:25.000Z | 2019-09-06T08:00:12.000Z | tests/test_robotws_util.py | nokia/crl-doc | fee1c26e93f9492ad7b8681c0e27d2048c968cdd | [
"BSD-3-Clause"
] | 2 | 2019-08-30T12:11:10.000Z | 2020-01-23T20:50:29.000Z | import os
import six
import pytest
import mock
from crl.doc.robotws_util import create_dir, add_toc_tree # pylint:disable=E0401
__copyright__ = 'Copyright (C), 2019 Nokia'
FILENAME = os.path.join("path", "to", "file", "file.txt")
@pytest.fixture
def mockopen():
patch_str = "__builtin__.open" if six.PY2 else "builtins.open"
with mock.patch(patch_str) as m:
yield m
@pytest.fixture
def template():
with mock.patch("crl.doc.robotws_util.Template") as m:
yield m
@pytest.fixture
def oswalk():
with mock.patch("crl.doc.robotws_util.os.walk") as m:
yield m
@pytest.fixture
def mockOS():
with mock.patch("crl.doc.robotws_util.os") as m:
yield m
def test_check_if_directory_will_be_created_if_it_didnt_exist_before(mockOS):
mockOS.path.exists.return_value = False
mockOS.path.dirname.return_value = os.path.dirname(FILENAME)
create_dir(FILENAME)
mockOS.makedirs.assert_called_once_with(os.path.dirname(FILENAME))
def test_check_if_directory_wont_be_created_if_it_exists_before(mockOS):
mockOS.path.exists.return_value = True
mockOS.path.dirname.return_value = os.path.dirname(FILENAME)
create_dir(FILENAME)
with pytest.raises(AssertionError):
mockOS.makedirs.assert_called_once_with(os.path.dirname(FILENAME))
def test_check_if_function_execute_correct_methods(template, mockopen, oswalk):
template_render = mock.MagicMock()
template_mock = mock.MagicMock(name="template")
template_mock.render.return_value = template_render
template.return_value = template_mock
file_mock = mock.MagicMock(name="file")
file_mock_enter = file_mock
file_mock.__enter__.return_value = file_mock_enter
mockopen.return_value = file_mock
dirpath = os.path.join("path", "to", "directory")
subdirectory_list = ('dir1', 'dir2', 'dir3')
oswalk.return_value = ((dirpath, subdirectory_list, []),)
add_toc_tree('root_dir', 'template_toc')
for subdir in subdirectory_list:
template_mock.render.assert_any_call(
e=os.path.basename(os.path.join(dirpath, subdir)))
file_mock_enter.write.assert_any_call(template_render)
| 30.591549 | 81 | 0.735727 |
aced06732528a1e1d734d5c11f3dbd31a76f01cc | 6,200 | py | Python | openslides/utils/rest_api.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | openslides/utils/rest_api.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | openslides/utils/rest_api.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from rest_framework import status # noqa
from rest_framework.decorators import detail_route, list_route # noqa
from rest_framework.metadata import SimpleMetadata # noqa
from rest_framework.mixins import ( # noqa
DestroyModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.response import Response # noqa
from rest_framework.routers import DefaultRouter
from rest_framework.serializers import ModelSerializer as _ModelSerializer
from rest_framework.serializers import ( # noqa
MANY_RELATION_KWARGS,
CharField,
DictField,
Field,
FileField,
IntegerField,
ListField,
ListSerializer,
ManyRelatedField,
PrimaryKeyRelatedField,
RelatedField,
SerializerMethodField,
ValidationError,
)
from rest_framework.viewsets import GenericViewSet as _GenericViewSet # noqa
from rest_framework.viewsets import ModelViewSet as _ModelViewSet # noqa
from rest_framework.viewsets import \
ReadOnlyModelViewSet as _ReadOnlyModelViewSet # noqa
from rest_framework.viewsets import ViewSet as _ViewSet # noqa
router = DefaultRouter()
class IdManyRelatedField(ManyRelatedField):
"""
ManyRelatedField that appends an suffix to the sub-fields.
Only works together with the IdPrimaryKeyRelatedField and our
ModelSerializer.
"""
field_name_suffix = '_id'
def bind(self, field_name, parent):
"""
Called when the field is bound to the serializer.
        See IdPrimaryKeyRelatedField for more information.
"""
self.source = field_name[:-len(self.field_name_suffix)]
super().bind(field_name, parent)
class IdPrimaryKeyRelatedField(PrimaryKeyRelatedField):
"""
    Field that renames the field name to FIELD_NAME_id.
    Only works together with our ModelSerializer.
"""
field_name_suffix = '_id'
def bind(self, field_name, parent):
"""
Called when the field is bound to the serializer.
Changes the source so that the original field name is used (removes
the _id suffix).
"""
if field_name:
# field_name is an empty string when the field is created with the
# attribute many=True. In this case the suffix is added with the
# IdManyRelatedField class.
self.source = field_name[:-len(self.field_name_suffix)]
super().bind(field_name, parent)
@classmethod
def many_init(cls, *args, **kwargs):
"""
        Method from rest_framework.relations.RelatedField that uses our
        IdManyRelatedField class instead of the
        rest_framework.relations.ManyRelatedField class.
"""
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs.keys():
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return IdManyRelatedField(**list_kwargs)
class PermissionMixin:
"""
Mixin for subclasses of APIView like GenericViewSet and ModelViewSet.
    The methods check_view_permissions and check_projector_requirements are
    evaluated. If both return False, self.permission_denied() is called.
    Django REST Framework's permission system is disabled.
    Also connects a container to handle access permissions for the model and
    viewset.
"""
access_permissions = None
def get_permissions(self):
"""
        Overridden method to check view and projector permissions. Returns an
        empty iterable so Django REST Framework won't perform any further
        permission checks with its own permission classes, and the request
        passes.
"""
if not self.check_view_permissions() and not self.check_projector_requirements():
self.permission_denied(self.request)
return ()
def check_view_permissions(self):
"""
Override this and return True if the requesting user should be able to
get access to your view.
        Use the access permissions container for retrieve requests.
"""
return False
def check_projector_requirements(self):
"""
Helper method which returns True if the current request (on this
view instance) is required for at least one active projector element.
"""
from openslides.core.models import Projector
result = False
if self.request.user.has_perm('core.can_see_projector'):
for requirement in Projector.get_all_requirements():
if requirement.is_currently_required(view_instance=self):
result = True
break
return result
def get_access_permissions(self):
"""
Returns a container to handle access permissions for this viewset and
its corresponding model.
"""
return self.access_permissions
def get_serializer_class(self):
"""
Overridden method to return the serializer class given by the
access permissions container.
"""
if self.get_access_permissions() is not None:
serializer_class = self.get_access_permissions().get_serializer_class(self.request.user)
else:
serializer_class = super().get_serializer_class()
return serializer_class
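# A minimal sketch of how a concrete viewset is expected to use PermissionMixin;
# the viewset name and permission string below are hypothetical:
#
#     class ExampleViewSet(ModelViewSet):
#         def check_view_permissions(self):
#             # Grant access based on a Django permission. Retrieve requests
#             # would instead rely on the access permissions container.
#             return self.request.user.has_perm('example.can_see')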
class ModelSerializer(_ModelSerializer):
"""
ModelSerializer that changes the field names of related fields to
FIELD_NAME_id.
"""
serializer_related_field = IdPrimaryKeyRelatedField
def get_fields(self):
"""
Returns all fields of the serializer.
"""
fields = OrderedDict()
for field_name, field in super().get_fields().items():
try:
field_name += field.field_name_suffix
except AttributeError:
pass
fields[field_name] = field
return fields
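# Illustration of the renaming performed by ModelSerializer together with
# IdPrimaryKeyRelatedField; the model and field names here are hypothetical:
#
#     class ExampleSerializer(ModelSerializer):
#         class Meta:
#             model = Example
#             fields = ('title', 'category')  # 'category' is a ForeignKey
#
#     # The serialized output then uses 'category_id' instead of 'category':
#     # {"title": "...", "category_id": 42}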
class GenericViewSet(PermissionMixin, _GenericViewSet):
pass
class ModelViewSet(PermissionMixin, _ModelViewSet):
pass
class ReadOnlyModelViewSet(PermissionMixin, _ReadOnlyModelViewSet):
pass
class ViewSet(PermissionMixin, _ViewSet):
pass
| 31.472081 | 100 | 0.685323 |
aced09aed7c3306475c3987aa9cbff495fa3b500 | 7,002 | py | Python | sharebears/url_decoder_github_test.py | mgp/sharebears | aabb2c568707cea1107498a05d7f56bd772ef4fb | [
"Apache-2.0"
] | 1 | 2015-01-17T20:02:14.000Z | 2015-01-17T20:02:14.000Z | sharebears/url_decoder_github_test.py | mgp/sharebears | aabb2c568707cea1107498a05d7f56bd772ef4fb | [
"Apache-2.0"
] | null | null | null | sharebears/url_decoder_github_test.py | mgp/sharebears | aabb2c568707cea1107498a05d7f56bd772ef4fb | [
"Apache-2.0"
] | null | null | null | import unittest
import url_decoder
from url_decoder_test import UrlDecoderTestCase
from url_decoder_github import GitHubRepositoryUrlDecoder, GitHubCommitUrlDecoder, GitHubGistUrlDecoder
class _GitHubTestClient:
def __init__(self):
self.get_repository_args = []
self.get_commit_args = []
def _get_repository_json(self):
owner_json = { "login": "mgp" }
return { "owner": owner_json, "name": "repo-name" }
def get_repository(self, *pargs):
self.get_repository_args.append(pargs)
return self._get_repository_json()
def _get_commit_json(self):
return { "sha": "a8b7818", "message": "Initial commit" }
def get_commit(self, *pargs):
self.get_commit_args.append(pargs)
return self._get_commit_json()
class GitHubRepositoryUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.test_client = _GitHubTestClient()
self.url_decoder = GitHubRepositoryUrlDecoder(self.test_client)
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.github.com/mgp/sharebears"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/wiki"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears"))
def test_decode_url(self):
url = "https://github.com/mgp/sharebears"
parsed_url = self._parse_url(url)
json = self.url_decoder.decode_url(url, parsed_url)
self.assertDictEqual(json, self.test_client._get_repository_json())
self.assertEqual(0, len(self.test_client.get_commit_args))
self.assertEqual(1, len(self.test_client.get_repository_args))
owner, repo = self.test_client.get_repository_args[0]
self.assertEqual("mgp", owner)
self.assertEqual("sharebears", repo)
def test_item_for_rendering(self):
owner_json = {"login": "login-value", "avatar_url": "avatar-url-value", "html_url": "html_url-value"}
decoded_url = {
"name": "name-value",
"description": "description-value",
"html_url": "html_url-value",
"language": "language-value",
"owner": owner_json
}
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(decoded_url["name"], item.name)
self.assertEqual(decoded_url["description"], item.description)
self.assertEqual(decoded_url["html_url"], item.html_url)
self.assertEqual(decoded_url["language"], item.language)
# Assert that the GitHubRepositoryOwnerItem instance is correct.
owner = item.owner
self.assertEqual(owner_json["login"], owner.login)
self.assertEqual(owner_json["avatar_url"], owner.avatar_url)
self.assertEqual(owner_json["html_url"], owner.html_url)
class GitHubCommitUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.test_client = _GitHubTestClient()
self.url_decoder = GitHubCommitUrlDecoder(self.test_client)
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.github.com/mgp/sharebears/commit/a8b7818"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/commit"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/commit/a8b7818"))
def test_decode_url(self):
url = "https://github.com/mgp/sharebears/commit/a8b7818"
parsed_url = self._parse_url(url)
json = self.url_decoder.decode_url(url, parsed_url)
self.assertDictEqual(json, self.test_client._get_commit_json())
self.assertEqual(0, len(self.test_client.get_repository_args))
self.assertEqual(1, len(self.test_client.get_commit_args))
owner, repo, sha = self.test_client.get_commit_args[0]
self.assertEqual("mgp", owner)
self.assertEqual("sharebears", repo)
self.assertEqual("a8b7818", sha)
def _make_user_json(self, name, email, date_string):
return { "name": name, "email": email, "date": date_string }
def _assert_user(self, user_json, user):
self.assertEqual(user_json["name"], user.name)
self.assertEqual(user_json["email"], user.email)
expected_datetime = url_decoder.to_datetime(user_json["date"])
self.assertEqual(expected_datetime, user.date)
def test_item_for_rendering(self):
author_json = self._make_user_json(
"author_name", "author_email", "2010-04-10T14:10:01-07:00")
committer_json = self._make_user_json(
"committer_name", "committer_email", "2011-05-11T15:11:02-08:00")
decoded_url = {
"sha": "sha-value",
"url": "url-value",
"message": "message-value",
"author": author_json,
"committer": committer_json
}
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(decoded_url["sha"], item.sha)
self.assertEqual(decoded_url["url"], item.url)
self.assertEqual(decoded_url["message"], item.message)
# Assert that the GitHubCommitUserItem instances are correct.
self._assert_user(author_json, item.author)
self._assert_user(committer_json, item.committer)
class GitHubGistUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.url_decoder = GitHubGistUrlDecoder()
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.gist.github.com/mgp/92b50ae3e1b1b46eadab"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://gist.github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://gist.github.com/mgp"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"))
def test_decode_url(self):
url = "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"
parsed_url = self._parse_url(url)
expected_dict = { "url": url }
self.assertDictEqual(expected_dict, self.url_decoder.decode_url(url, parsed_url))
def test_item_for_rendering(self):
url = "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"
decoded_url = { "url": url }
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(url, item.url)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GitHubRepositoryUrlDecoderTest))
suite.addTest(unittest.makeSuite(GitHubCommitUrlDecoderTest))
suite.addTest(unittest.makeSuite(GitHubGistUrlDecoderTest))
return suite
| 36.852632 | 105 | 0.721937 |
aced09b6ecceacedc7194c0de193eada37fe8713 | 373 | py | Python | molecule/resources/tests/test_docker_compose.py | Penbase/ansible-dockerswarm | 579f802ec3b97a6a57f1362fdea04006d13b4ee2 | [
"MIT"
] | 269 | 2016-07-07T05:03:17.000Z | 2022-03-14T13:36:49.000Z | molecule/resources/tests/test_docker_compose.py | Penbase/ansible-dockerswarm | 579f802ec3b97a6a57f1362fdea04006d13b4ee2 | [
"MIT"
] | 77 | 2016-09-01T17:30:14.000Z | 2022-02-21T16:31:26.000Z | molecule/resources/tests/test_docker_compose.py | Penbase/ansible-dockerswarm | 579f802ec3b97a6a57f1362fdea04006d13b4ee2 | [
"MIT"
] | 148 | 2016-09-07T15:40:13.000Z | 2022-03-19T21:49:14.000Z | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_docker_compose_installed(host):
f = host.file('/usr/local/bin/docker-compose')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
| 23.3125 | 63 | 0.726542 |
aced0aa033e2fd701379651606d030af5aa6e1f1 | 2,498 | py | Python | tests/emmet-core/vasp/test_vasp.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/emmet-core/vasp/test_vasp.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | tests/emmet-core/vasp/test_vasp.py | acrutt/emmet | e98100c9932f145a3ad3087ddb7aa9b779d9a191 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import json
import pytest
from monty.io import zopen
from emmet.core.vasp.calc_types import RunType, TaskType, run_type, task_type
from emmet.core.vasp.task_valid import TaskDocument
from emmet.core.vasp.validation import ValidationDoc
def test_task_type():
# TODO: Switch this to actual inputs?
input_types = [
("NSCF Line", {"incar": {"ICHARG": 11}, "kpoints": {"labels": ["A"]}}),
("NSCF Uniform", {"incar": {"ICHARG": 11}}),
("Dielectric", {"incar": {"LEPSILON": True}}),
("DFPT Dielectric", {"incar": {"LEPSILON": True, "IBRION": 7}}),
("DFPT Dielectric", {"incar": {"LEPSILON": True, "IBRION": 8}}),
("DFPT", {"incar": {"IBRION": 7}}),
("DFPT", {"incar": {"IBRION": 8}}),
("Static", {"incar": {"NSW": 0}}),
]
for _type, inputs in input_types:
assert task_type(inputs) == TaskType(_type)
def test_run_type():
params_sets = [
("GGA", {"GGA": "--"}),
("GGA+U", {"GGA": "--", "LDAU": True}),
("SCAN", {"METAGGA": "Scan"}),
("SCAN+U", {"METAGGA": "Scan", "LDAU": True}),
("R2SCAN", {"METAGGA": "R2SCAN"}),
("R2SCAN+U", {"METAGGA": "R2SCAN", "LDAU": True}),
]
for _type, params in params_sets:
assert run_type(params) == RunType(_type)
@pytest.fixture(scope="session")
def tasks(test_dir):
with zopen(test_dir / "test_si_tasks.json.gz") as f:
data = json.load(f)
return [TaskDocument(**d) for d in data]
def test_validator(tasks):
validation_docs = [ValidationDoc.from_task_doc(task) for task in tasks]
assert len(validation_docs) == len(tasks)
assert all(doc.valid for doc in validation_docs)
def test_computed_entry(tasks):
entries = [task.entry for task in tasks]
ids = {e.entry_id for e in entries}
assert ids == {"mp-1141021", "mp-149", "mp-1686587", "mp-1440634"}
@pytest.fixture(scope="session")
def task_ldau(test_dir):
with zopen(test_dir / "test_task.json") as f:
data = json.load(f)
return TaskDocument(**data)
def test_ldau(task_ldau):
task_ldau.input.is_hubbard = True
assert task_ldau.run_type == RunType.GGA_U
assert ValidationDoc.from_task_doc(task_ldau).valid is False
def test_ldau_validation(test_dir):
with open(test_dir / "old_aflow_ggau_task.json") as f:
data = json.load(f)
task = TaskDocument(**data)
assert task.run_type == "GGA+U"
valid = ValidationDoc.from_task_doc(task)
assert valid.valid
| 28.067416 | 79 | 0.618895 |
aced0b96e869d58b4a2d79ebb5faeb8fe5945e27 | 63,673 | py | Python | src/toil/test/src/fileStoreTest.py | ngs-mstb/toil | e14b41e7552e2aa7c161e01f93b8260299590e2d | [
"Apache-2.0"
] | null | null | null | src/toil/test/src/fileStoreTest.py | ngs-mstb/toil | e14b41e7552e2aa7c161e01f93b8260299590e2d | [
"Apache-2.0"
] | null | null | null | src/toil/test/src/fileStoreTest.py | ngs-mstb/toil | e14b41e7552e2aa7c161e01f93b8260299590e2d | [
"Apache-2.0"
] | 1 | 2021-11-13T10:31:33.000Z | 2021-11-13T10:31:33.000Z | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import filecmp
from abc import abstractmethod, ABCMeta
from struct import pack, unpack
from uuid import uuid4
from toil.job import Job
from toil.fileStore import IllegalDeletionCacheError, CachingFileStore
from toil.test import ToilTest, needs_aws, needs_azure, needs_google, slow
from toil.leader import FailedJobsException
from toil.jobStores.abstractJobStore import NoSuchFileException
from toil.fileStore import CacheUnbalancedError
import collections
import inspect
import os
import random
import signal
import time
import pytest
# Python 3 compatibility imports
from six.moves import xrange
from future.utils import with_metaclass
# Some tests take too long on the AWS and Azure job stores and are unsuitable for CI. They can
# be run during manual tests by setting this to False.
testingIsAutomatic = True
class hidden(object):
"""
    Hides the abstract test classes from the unittest loader so they can be inherited in
    different test suites for the different job stores.
"""
class AbstractFileStoreTest(with_metaclass(ABCMeta, ToilTest)):
"""
An abstract base class for testing the various general functions described in
:class:toil.fileStore.FileStore
"""
        # This is overwritten in the inheriting classes
jobStoreType = None
def _getTestJobStore(self):
if self.jobStoreType == 'file':
return self._getTestJobStorePath()
elif self.jobStoreType == 'aws':
return 'aws:%s:cache-tests-%s' % (self.awsRegion(), uuid4())
elif self.jobStoreType == 'azure':
accountName = os.getenv('TOIL_AZURE_KEYNAME')
return 'azure:%s:cache-tests-%s' % (accountName, str(uuid4()))
elif self.jobStoreType == 'google':
projectID = os.getenv('TOIL_GOOGLE_PROJECTID')
return 'google:%s:cache-tests-%s' % (projectID, str(uuid4()))
else:
raise RuntimeError('Illegal job store type.')
def setUp(self):
super(hidden.AbstractFileStoreTest, self).setUp()
testDir = self._createTempDir()
self.options = Job.Runner.getDefaultOptions(self._getTestJobStore())
self.options.logLevel = 'INFO'
self.options.workDir = testDir
self.options.clean = 'always'
self.options.logFile = os.path.join(testDir, 'logFile')
@staticmethod
def _uselessFunc(job):
"""
I do nothing. Don't judge me.
"""
return None
# Sanity test
def testToilIsNotBroken(self):
"""
            Runs a simple DAG to test whether any features other than caching are broken.
"""
A = Job.wrapJobFn(self._uselessFunc)
B = Job.wrapJobFn(self._uselessFunc)
C = Job.wrapJobFn(self._uselessFunc)
D = Job.wrapJobFn(self._uselessFunc)
A.addChild(B)
A.addChild(C)
B.addChild(D)
C.addChild(D)
Job.Runner.startToil(A, self.options)
        # Test file store operations. This is a slightly less intense version of the
        # cache-specific test `testReturnFileSizes`.
@slow
def testFileStoreOperations(self):
"""
Write a couple of files to the jobstore. Delete a couple of them. Read back written
and locally deleted files.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._testFileStoreOperations,
nonLocalDir=workdir,
numIters=30, disk='2G')
Job.Runner.startToil(F, self.options)
@staticmethod
def _testFileStoreOperations(job, nonLocalDir, numIters=100):
"""
            Aux function for testFileStoreOperations. Conducts numIters operations.
"""
work_dir = job.fileStore.getLocalTempDir()
            writtenFiles = {}  # fsID: size
localFileIDs = set()
# Add one file for the sake of having something in the job store
writeFileSize = random.randint(0, 30)
cls = hidden.AbstractNonCachingFileStoreTest
fsId, _ = cls._writeFileToJobStore(job, isLocalFile=True, nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
writtenFiles[fsId] = writeFileSize
localFileIDs.add(list(writtenFiles.keys())[0])
i = 0
while i <= numIters:
randVal = random.random()
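                # The random value picks the operation roughly uniformly:
                # ~1/3 writes, ~1/3 reads and ~1/3 deletes (deletes split
                # evenly between local and global deletion below).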
if randVal < 0.33: # Write
writeFileSize = random.randint(0, 30)
isLocalFile = True if random.random() <= 0.5 else False
fsID, _ = cls._writeFileToJobStore(job, isLocalFile=isLocalFile,
nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
writtenFiles[fsID] = writeFileSize
localFileIDs.add(fsID)
else:
if len(writtenFiles) == 0:
continue
else:
fsID, rdelFileSize = random.choice(list(writtenFiles.items()))
rdelRandVal = random.random()
if randVal < 0.66: # Read
mutable = True if random.random() <= 0.5 else False
cache = True if random.random() <= 0.5 else False
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
cache=cache, mutable=mutable)
localFileIDs.add(fsID)
else: # Delete
if rdelRandVal <= 0.5: # Local Delete
if fsID not in localFileIDs:
continue
job.fileStore.deleteLocalFile(fsID)
else: # Global Delete
job.fileStore.deleteGlobalFile(fsID)
writtenFiles.pop(fsID)
if fsID in localFileIDs:
localFileIDs.remove(fsID)
i += 1
# Tests for the various defer possibilities
def testDeferredFunctionRunsWithMethod(self):
"""
            Refer to the docstring in _testDeferredFunctionRuns.
Test with Method
"""
self._testDeferredFunctionRuns(self._writeNonLocalFilesMethod)
def testDeferredFunctionRunsWithClassMethod(self):
"""
            Refer to the docstring in _testDeferredFunctionRuns.
Test with Class Method
"""
self._testDeferredFunctionRuns(self._writeNonLocalFilesClassMethod)
def testDeferredFunctionRunsWithLambda(self):
"""
            Refer to the docstring in _testDeferredFunctionRuns.
Test with Lambda
"""
self._testDeferredFunctionRuns(self._writeNonLocalFilesLambda)
def _testDeferredFunctionRuns(self, callableFn):
"""
Create 2 files. Make a job that writes data to them. Register a deferred function that
            deletes the two files (one passed as an arg, and one as a kwarg) and later assert that
the files have been deleted.
:param function callableFn: The function to use in the test.
:return: None
"""
workdir = self._createTempDir(purpose='nonLocalDir')
nonLocalFile1 = os.path.join(workdir, str(uuid4()))
nonLocalFile2 = os.path.join(workdir, str(uuid4()))
open(nonLocalFile1, 'w').close()
open(nonLocalFile2, 'w').close()
assert os.path.exists(nonLocalFile1)
assert os.path.exists(nonLocalFile2)
A = Job.wrapJobFn(callableFn, files=(nonLocalFile1, nonLocalFile2))
Job.Runner.startToil(A, self.options)
assert not os.path.exists(nonLocalFile1)
assert not os.path.exists(nonLocalFile2)
@staticmethod
def _writeNonLocalFilesMethod(job, files):
"""
Write some data to 2 files. Pass them to a registered deferred method.
:param tuple files: the tuple of the two files to work with
:return: None
"""
for nlf in files:
with open(nlf, 'w') as nonLocalFileHandle:
nonLocalFileHandle.write(os.urandom(1 * 1024 * 1024))
job.defer(_deleteMethods._deleteFileMethod, files[0], nlf=files[1])
return None
@staticmethod
def _writeNonLocalFilesClassMethod(job, files):
"""
Write some data to 2 files. Pass them to a registered deferred class method.
:param tuple files: the tuple of the two files to work with
:return: None
"""
for nlf in files:
with open(nlf, 'w') as nonLocalFileHandle:
nonLocalFileHandle.write(os.urandom(1 * 1024 * 1024))
job.defer(_deleteMethods._deleteFileClassMethod, files[0], nlf=files[1])
return None
@staticmethod
def _writeNonLocalFilesLambda(job, files):
"""
Write some data to 2 files. Pass them to a registered deferred Lambda.
:param tuple files: the tuple of the two files to work with
:return: None
"""
lmd = lambda x, nlf: [os.remove(x), os.remove(nlf)]
for nlf in files:
with open(nlf, 'w') as nonLocalFileHandle:
nonLocalFileHandle.write(os.urandom(1 * 1024 * 1024))
job.defer(lmd, files[0], nlf=files[1])
return None
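        # For reference: job.defer registers a cleanup callable with the file
        # store. The worker runs it when the job finishes, and if the worker
        # dies first, another worker runs it during cleanup. A minimal sketch
        # of the pattern the three writers above exercise:
        #
        #     def myJobFn(job, path):
        #         job.defer(os.remove, path)  # runs even if the job fails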
@slow
def testDeferredFunctionRunsWithFailures(self):
"""
            Create 2 non-local files to use as flags. Create a job that registers a function that
            deletes one non-local file. If that file exists, the job SIGKILLs itself. If it doesn't
            exist, the job registers a second deferred function to delete the second non-local file
            and exits normally.
Initially the first file exists, so the job should SIGKILL itself and neither deferred
function will run (in fact, the second should not even be registered). On the restart,
the first deferred function should run and the first file should not exist, but the
second one should. We assert the presence of the second, then register the second
deferred function and exit normally. At the end of the test, neither file should exist.
Incidentally, this also tests for multiple registered deferred functions, and the case
where a deferred function fails (since the first file doesn't exist on the retry).
"""
self.options.retryCount = 1
workdir = self._createTempDir(purpose='nonLocalDir')
nonLocalFile1 = os.path.join(workdir, str(uuid4()))
nonLocalFile2 = os.path.join(workdir, str(uuid4()))
open(nonLocalFile1, 'w').close()
open(nonLocalFile2, 'w').close()
assert os.path.exists(nonLocalFile1)
assert os.path.exists(nonLocalFile2)
A = Job.wrapJobFn(self._deferredFunctionRunsWithFailuresFn,
files=(nonLocalFile1, nonLocalFile2))
Job.Runner.startToil(A, self.options)
assert not os.path.exists(nonLocalFile1)
assert not os.path.exists(nonLocalFile2)
@staticmethod
def _deferredFunctionRunsWithFailuresFn(job, files):
"""
            Refer to testDeferredFunctionRunsWithFailures.
:param tuple files: the tuple of the two files to work with
:return: None
"""
cls = hidden.AbstractNonCachingFileStoreTest
job.defer(cls._deleteFile, files[0])
if os.path.exists(files[0]):
os.kill(os.getpid(), signal.SIGKILL)
else:
assert os.path.exists(files[1])
job.defer(cls._deleteFile, files[1])
@staticmethod
def _deleteFile(nonLocalFile, nlf=None):
"""
Delete nonLocalFile and nlf
:param str nonLocalFile:
:param str nlf:
:return: None
"""
os.remove(nonLocalFile)
if nlf is not None:
os.remove(nlf)
@slow
def testNewJobsCanHandleOtherJobDeaths(self):
"""
Create 2 non-local files and then create 2 jobs. The first job registers a deferred job
to delete the second non-local file, deletes the first non-local file and then kills
itself. The second job waits for the first file to be deleted, then sleeps for a few
            seconds and then spawns a child. The child of the second does nothing. However, starting
it should handle the untimely demise of the first job and run the registered deferred
function that deletes the first file. We assert the absence of the two files at the
end of the run.
"""
# There can be no retries
self.options.retryCount = 0
workdir = self._createTempDir(purpose='nonLocalDir')
nonLocalFile1 = os.path.join(workdir, str(uuid4()))
nonLocalFile2 = os.path.join(workdir, str(uuid4()))
open(nonLocalFile1, 'w').close()
open(nonLocalFile2, 'w').close()
assert os.path.exists(nonLocalFile1)
assert os.path.exists(nonLocalFile2)
files = [nonLocalFile1, nonLocalFile2]
root = Job()
A = Job.wrapJobFn(self._testNewJobsCanHandleOtherJobDeaths_A, files=files)
B = Job.wrapJobFn(self._testNewJobsCanHandleOtherJobDeaths_B, files=files)
C = Job.wrapJobFn(self._testNewJobsCanHandleOtherJobDeaths_C, files=files,
expectedResult=False)
root.addChild(A)
root.addChild(B)
B.addChild(C)
try:
Job.Runner.startToil(root, self.options)
except FailedJobsException as e:
pass
@staticmethod
def _testNewJobsCanHandleOtherJobDeaths_A(job, files):
"""
Defer deletion of files[1], then wait for _testNewJobsCanHandleOtherJobDeaths_B to
start up, and finally delete files[0] before sigkilling self.
:param tuple files: the tuple of the two files to work with
:return: None
"""
# Write the pid to files[1] such that we can be sure that this process has died before
# we spawn the next job that will do the cleanup.
with open(files[1], 'w') as fileHandle:
fileHandle.write(str(os.getpid()))
job.defer(hidden.AbstractNonCachingFileStoreTest._deleteFile, files[1])
while os.stat(files[0]).st_size == 0:
time.sleep(0.5)
os.remove(files[0])
os.kill(os.getpid(), signal.SIGKILL)
@staticmethod
def _testNewJobsCanHandleOtherJobDeaths_B(job, files):
# Write something to files[0] such that we can be sure that this process has started
# before _testNewJobsCanHandleOtherJobDeaths_A kills itself.
with open(files[0], 'w') as fileHandle:
fileHandle.write(str(os.getpid()))
while os.path.exists(files[0]):
time.sleep(0.5)
# Get the pid of _testNewJobsCanHandleOtherJobDeaths_A and wait for it to truly be dead.
with open(files[1], 'r') as fileHandle:
meeseeksPID = int(fileHandle.read())
while CachingFileStore._pidExists(meeseeksPID):
time.sleep(0.5)
            # Now that we are convinced that _testNewJobsCanHandleOtherJobDeaths_A has died, we can
# spawn the next job
return None
@staticmethod
def _testNewJobsCanHandleOtherJobDeaths_C(job, files, expectedResult):
"""
Asserts whether the files exist or not.
:param Job job: Job
:param list files: list of files to test
:param bool expectedResult: Are we expecting the files to exist or not?
"""
for testFile in files:
assert os.path.exists(testFile) is expectedResult
def testBatchSystemCleanupCanHandleWorkerDeaths(self):
"""
            Create 2 non-local files. Create a job that registers a deferred function to delete
            the second file, deletes the first and then kills itself.
            Assert that both files are missing after the pipeline fails.
"""
# There can be no retries
self.options.retryCount = 0
workdir = self._createTempDir(purpose='nonLocalDir')
nonLocalFile1 = os.path.join(workdir, str(uuid4()))
nonLocalFile2 = os.path.join(workdir, str(uuid4()))
            # The first file has to be non-zero or the meeseeks job will go into an infinite sleep
file1 = open(nonLocalFile1, 'w')
file1.write('test')
file1.close()
open(nonLocalFile2, 'w').close()
assert os.path.exists(nonLocalFile1)
assert os.path.exists(nonLocalFile2)
A = Job.wrapJobFn(self._testNewJobsCanHandleOtherJobDeaths_A,
files=(nonLocalFile1, nonLocalFile2))
try:
Job.Runner.startToil(A, self.options)
except FailedJobsException:
pass
assert not os.path.exists(nonLocalFile1)
assert not os.path.exists(nonLocalFile2)
@staticmethod
def _writeFileToJobStore(job, isLocalFile, nonLocalDir=None, fileMB=1):
"""
This function creates a file and writes it to the jobstore.
:param bool isLocalFile: Is the file local(T) or Non-Local(F)?
:param str nonLocalDir: A dir to write the file to. If unspecified, a local directory
is created.
:param int fileMB: Size of the created file in MB
"""
if isLocalFile:
work_dir = job.fileStore.getLocalTempDir()
else:
assert nonLocalDir is not None
work_dir = nonLocalDir
with open(os.path.join(work_dir, str(uuid4())), 'w') as testFile:
testFile.write(os.urandom(fileMB * 1024 * 1024))
return job.fileStore.writeGlobalFile(testFile.name), testFile
class AbstractNonCachingFileStoreTest(with_metaclass(ABCMeta, AbstractFileStoreTest)):
"""
        Abstract tests for the various functions in :class:toil.fileStore.NonCachingFileStore.
These tests are general enough that they can also be used for
:class:toil.fileStore.CachingFileStore.
"""
def setUp(self):
super(hidden.AbstractNonCachingFileStoreTest, self).setUp()
self.options.disableCaching = True
class AbstractCachingFileStoreTest(with_metaclass(ABCMeta, AbstractFileStoreTest)):
"""
        Abstract tests for the various cache-related functions in
:class:toil.fileStore.CachingFileStore.
"""
def setUp(self):
super(hidden.AbstractCachingFileStoreTest, self).setUp()
self.options.disableCaching = False
@slow
def testExtremeCacheSetup(self):
"""
            Try to create the cache with a bad worker active and then have 10 child jobs try to run in
the chain. This tests whether the cache is created properly even when the job crashes
randomly.
"""
if testingIsAutomatic and self.jobStoreType != 'file':
self.skipTest("To save time")
self.options.retryCount = 20
self.options.badWorker = 0.5
self.options.badWorkerFailInterval = 0.1
for test in range(0, 20):
E = Job.wrapJobFn(self._uselessFunc)
F = Job.wrapJobFn(self._uselessFunc)
jobs = {}
for i in range(0, 10):
jobs[i] = Job.wrapJobFn(self._uselessFunc)
E.addChild(jobs[i])
jobs[i].addChild(F)
Job.Runner.startToil(E, self.options)
@slow
def testCacheLockRace(self):
"""
Make 3 jobs compete for the same cache lock file. If they have the lock at the same
time, the test will fail. This test abuses the _CacheState class and modifies values in
the lock file. DON'T TRY THIS AT HOME.
"""
A = Job.wrapJobFn(self._setUpLockFile)
B = Job.wrapJobFn(self._selfishLocker, cores=1)
C = Job.wrapJobFn(self._selfishLocker, cores=1)
D = Job.wrapJobFn(self._selfishLocker, cores=1)
E = Job.wrapJobFn(self._raceTestSuccess)
A.addChild(B)
A.addChild(C)
A.addChild(D)
B.addChild(E)
C.addChild(E)
D.addChild(E)
Job.Runner.startToil(A, self.options)
@staticmethod
def _setUpLockFile(job):
"""
Set nlink=0 for the cache test
"""
with job.fileStore.cacheLock():
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
cacheInfo.nlink = 0
cacheInfo.write(job.fileStore.cacheStateFile)
@staticmethod
def _selfishLocker(job):
"""
Try to acquire a lock on the lock file. If 2 threads have the lock concurrently, then
abort.
"""
for i in range(0, 1000):
with job.fileStore.cacheLock():
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
cacheInfo.nlink += 1
cacheInfo.cached = max(cacheInfo.nlink, cacheInfo.cached)
cacheInfo.write(job.fileStore.cacheStateFile)
time.sleep(0.001)
with job.fileStore.cacheLock():
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
cacheInfo.nlink -= 1
cacheInfo.write(job.fileStore.cacheStateFile)
@staticmethod
def _raceTestSuccess(job):
"""
Assert that the cache test passed successfully.
"""
with job.fileStore.cacheLock():
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
# Value of the nlink has to be zero for successful run
assert cacheInfo.nlink == 0
assert cacheInfo.cached > 1
@slow
def testCacheEvictionPartialEvict(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
            A third job requests 10MB of disk, requiring eviction of the 1st file. Ensure that the
behavior is as expected.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=10)
@slow
def testCacheEvictionTotalEvict(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
            A third job requests 30MB of disk, requiring eviction of both files. Ensure that the
behavior is as expected.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=30)
@slow
def testCacheEvictionFailCase(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
            A third job requests 60MB of disk, which exceeds the 50MB cache limit and must fail. Ensure that the
behavior is as expected.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=60)
def _testValidityOfCacheEvictTest(self):
# If the job store and cache are on the same file system, file sizes are accounted for
# by the job store and are not reflected in the cache hence this test is redundant.
if not self.options.jobStore.startswith(('aws', 'azure', 'google')):
workDirDev = os.stat(self.options.workDir).st_dev
jobStoreDev = os.stat(os.path.dirname(self.options.jobStore)).st_dev
if workDirDev == jobStoreDev:
self.skipTest('Job store and working directory are on the same filesystem.')
def _testCacheEviction(self, file1MB, file2MB, diskRequestMB):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
            A third job requests either 10, 30 or 60MB -- requiring eviction of one file, eviction of
            both files, or an error due to lack of space, respectively. Ensure that the behavior is
as expected.
"""
self.options.retryCount = 0
if diskRequestMB > 50:
# This can be non int as it will never reach _probeJobReqs
expectedResult = 'Fail'
else:
expectedResult = 50 - file1MB if diskRequestMB <= file1MB else 0
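            # Worked example of the eviction arithmetic, given the 50MB cache
            # holding 20MB + 30MB files: a request of at most file1MB only
            # evicts the older 20MB file (cached becomes 50 - 20 = 30MB); a
            # larger request within the limit evicts both files (cached == 0);
            # anything over 50MB exceeds the cache total and is expected to fail.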
try:
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True,
fileMB=file1MB)
# Sleep for 1 second after writing the first file so that their ctimes are
# guaranteed to be distinct for the purpose of this test.
B = Job.wrapJobFn(self._sleepy, timeToSleep=1)
C = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True,
fileMB=file2MB)
D = Job.wrapJobFn(self._forceModifyCacheLockFile, newTotalMB=50, disk='0M')
E = Job.wrapJobFn(self._uselessFunc, disk=''.join([str(diskRequestMB), 'M']))
# Set it to > 2GB such that the cleanup jobs don't die in the non-fail cases
F = Job.wrapJobFn(self._forceModifyCacheLockFile, newTotalMB=5000, disk='10M')
G = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, cached=expectedResult,
disk='100M')
A.addChild(B)
B.addChild(C)
C.addChild(D)
D.addChild(E)
E.addChild(F)
F.addChild(G)
Job.Runner.startToil(A, self.options)
except FailedJobsException as err:
self.assertEqual(err.numberOfFailedJobs, 1)
with open(self.options.logFile) as f:
logContents = f.read()
if CacheUnbalancedError.message in logContents:
self.assertEqual(expectedResult, 'Fail')
else:
self.fail('Toil did not raise the expected AssertionError')
@staticmethod
def _writeFileToJobStoreWithAsserts(job, isLocalFile, nonLocalDir=None, fileMB=1):
"""
This function creates a file and writes it to the jobstore.
:param bool isLocalFile: Is the file local(T) or Non-Local(F)?
:param str nonLocalDir: A dir to write the file to. If unspecified, a local directory
is created.
:param int fileMB: Size of the created file in MB
"""
cls = hidden.AbstractNonCachingFileStoreTest
fsID, testFile = cls._writeFileToJobStore(job, isLocalFile, nonLocalDir, fileMB)
actual = os.stat(testFile.name).st_nlink
if isLocalFile:
# Since the file has been hard linked it should have nlink_count = threshold + 1
# (local, cached, and possibly job store).
expected = job.fileStore.nlinkThreshold + 1
assert actual == expected, 'Should have %i nlinks. Got %i' % (expected, actual)
else:
# Since the file hasn't been hard linked it should have nlink_count = 1
assert actual == 1, 'Should have one nlink. Got %i.' % actual
return fsID
@staticmethod
def _sleepy(job, timeToSleep):
"""
I'm waiting for prince charming... but only for timeToSleep seconds.
:param int timeToSleep: Time in seconds
"""
time.sleep(timeToSleep)
@staticmethod
def _forceModifyCacheLockFile(job, newTotalMB):
"""
This function opens and modifies the cache lock file to reflect a new "total"
            value = newTotalMB, thereby fooling the cache logic into believing only newTotalMB is
allowed for the run.
:param int newTotalMB: New value for "total" in the cacheLockFile
"""
with job.fileStore.cacheLock() as _:
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
cacheInfo.total = float(newTotalMB * 1024 * 1024)
cacheInfo.write(job.fileStore.cacheStateFile)
@staticmethod
def _probeJobReqs(job, total=None, cached=None, sigmaJob=None):
"""
            Probes the cacheLockFile to ensure the values for total, cached and sigmaJob are as expected.
Can also specify combinations of the requirements if desired.
:param int total: Expected Total Space available for caching in MB.
:param int cached: Expected Total size of files in the cache in MB.
:param int sigmaJob: Expected sum of job requirements in MB.
"""
valueDict = locals()
assert (total or cached or sigmaJob)
with job.fileStore.cacheLock() as x:
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
for value in ('total', 'cached', 'sigmaJob'):
# If the value wasn't provided, it is None and should be ignored
if valueDict[value] is None:
continue
expectedMB = valueDict[value] * 1024 * 1024
cacheInfoMB = getattr(cacheInfo, value)
assert cacheInfoMB == expectedMB, 'Testing %s: Expected ' % value + \
'%s but got %s.' % (expectedMB, cacheInfoMB)
@slow
def testAsyncWriteWithCaching(self):
"""
            Ensure the async writing of files happens as expected. The first job forcefully
            modifies the cache lock file to 1GB. The second asks for 900MB of disk and writes an 850MB
file into cache then rewrites it to the job store triggering an async write since the
two unique jobstore IDs point to the same local file. Also, the second write is not
cached since the first was written to cache, and there "isn't enough space" to cache the
            second. Immediately assert that the second write isn't cached, and is being
asynchronously written to the job store (through the presence of a harbinger file).
Attempting to get the file from the jobstore should not fail.
"""
self.options.retryCount = 0
self.options.logLevel = 'DEBUG'
A = Job.wrapJobFn(self._forceModifyCacheLockFile, newTotalMB=1024, disk='1G')
B = Job.wrapJobFn(self._doubleWriteFileToJobStore, fileMB=850, disk='900M')
# Set it to > 2GB such that the cleanup jobs don't die.
C = Job.wrapJobFn(self._readFromJobStoreWithoutAssertions, fsID=B.rv(), disk='1G')
D = Job.wrapJobFn(self._forceModifyCacheLockFile, newTotalMB=5000, disk='1G')
A.addChild(B)
B.addChild(C)
C.addChild(D)
Job.Runner.startToil(A, self.options)
@staticmethod
def _doubleWriteFileToJobStore(job, fileMB):
"""
Write a local file to job store, then write it again. The second should trigger an
async write.
:param job: job
:param fileMB: File Size
:return: Job store file ID for second written file
"""
# Make this take longer so we can test asynchronous writes across jobs/workers.
oldHarbingerFileRead = job.fileStore.HarbingerFile.read
def newHarbingerFileRead(self):
time.sleep(5)
return oldHarbingerFileRead(self)
job.fileStore.logToMaster('Double writing a file into job store')
work_dir = job.fileStore.getLocalTempDir()
with open(os.path.join(work_dir, str(uuid4())), 'w') as testFile:
testFile.write(os.urandom(fileMB * 1024 * 1024))
job.fileStore.writeGlobalFile(testFile.name)
fsID = job.fileStore.writeGlobalFile(testFile.name)
hidden.AbstractCachingFileStoreTest._readFromJobStoreWithoutAssertions(job, fsID)
# Make this take longer so we can test asynchronous writes across jobs/workers.
job.fileStore.HarbingerFile.read = newHarbingerFileRead
return job.fileStore.writeGlobalFile(testFile.name)
@staticmethod
def _readFromJobStoreWithoutAssertions(job, fsID):
"""
Reads a file from the job store. That will be all, thank you.
:param job: job
:param fsID: Job store file ID for the read file
:return: None
"""
job.fileStore.logToMaster('Reading the written file')
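            # A harbinger file marks a cached file whose upload to the job
            # store is still in flight; asserting its presence here confirms
            # that the second write really is happening asynchronously.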
assert not job.fileStore._fileIsCached(fsID)
assert job.fileStore.HarbingerFile(job.fileStore, fileStoreID=fsID).exists()
job.fileStore.readGlobalFile(fsID)
# writeGlobalFile tests
def testWriteNonLocalFileToJobStore(self):
"""
Write a file not in localTempDir to the job store. Such a file should not be cached.
Ensure the file is not cached.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=False,
nonLocalDir=workdir)
Job.Runner.startToil(A, self.options)
def testWriteLocalFileToJobStore(self):
"""
Write a file from the localTempDir to the job store. Such a file will be cached by
default. Ensure the file is cached.
"""
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True)
Job.Runner.startToil(A, self.options)
# readGlobalFile tests
def testReadCacheMissFileFromJobStoreWithoutCachingReadFile(self):
"""
Read a file from the file store that does not have a corresponding cached copy. Do not
            cache the read file. Ensure the number of links on the file is appropriate.
"""
self._testCacheMissFunction(cacheReadFile=False)
def testReadCacheMissFileFromJobStoreWithCachingReadFile(self):
"""
Read a file from the file store that does not have a corresponding cached copy. Cache
            the read file. Ensure the number of links on the file is appropriate.
"""
self._testCacheMissFunction(cacheReadFile=True)
def _testCacheMissFunction(self, cacheReadFile):
"""
This is the function that actually does what the 2 cache miss functions want.
:param cacheReadFile: Does the read file need to be cached(T) or not(F)
"""
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=False,
nonLocalDir=workdir)
B = Job.wrapJobFn(self._readFromJobStore, isCachedFile=False,
cacheReadFile=cacheReadFile, fsID=A.rv())
A.addChild(B)
Job.Runner.startToil(A, self.options)
@staticmethod
def _readFromJobStore(job, isCachedFile, cacheReadFile, fsID, isTest=True):
"""
Read a file from the filestore. If the file was cached, ensure it was hard linked
correctly. If it wasn't, ensure it was put into cache.
:param bool isCachedFile: Flag. Was the read file read from cache(T)? This defines the
nlink count to be asserted.
            :param bool cacheReadFile: Should the file that is read be cached(T)?
:param str fsID: job store file ID
:param bool isTest: Is this being run as a test(T) or an accessory to another test(F)?
"""
work_dir = job.fileStore.getLocalTempDir()
x = job.fileStore.nlinkThreshold
if isCachedFile:
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
mutable=False)
expected = x + 1
else:
if cacheReadFile:
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
cache=True, mutable=False)
expected = x + 1
else:
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
cache=False, mutable=False)
expected = x
if isTest:
actual = os.stat(outfile).st_nlink
assert actual == expected, 'Should have %i nlinks. Got %i.' % (expected, actual)
return None
else:
return outfile
        def testReadCacheHitFileFromJobStore(self):
"""
Read a file from the file store that has a corresponding cached copy. Ensure the number
            of links on the file is appropriate.
"""
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True)
B = Job.wrapJobFn(self._readFromJobStore, isCachedFile=True, cacheReadFile=None,
fsID=A.rv())
A.addChild(B)
Job.Runner.startToil(A, self.options)
@slow
def testMultipleJobsReadSameCacheHitGlobalFile(self):
"""
Write a local file to the job store (hence adding a copy to cache), then have 10 jobs
read it. Assert cached file size in the cache lock file never goes up, assert sigma job
reqs is always
            (a multiple of job reqs) - (number of files linked to the cached file * file size).
At the end, assert the cache lock file shows sigma job = 0.
"""
self._testMultipleJobsReadGlobalFileFunction(cacheHit=True)
@slow
def testMultipleJobsReadSameCacheMissGlobalFile(self):
"""
            Write a non-local file to the job store (hence no cached copy), then have 10 jobs read
            it. Assert cached file size in the cache lock file never goes up, assert sigma job reqs
            is always
            (a multiple of job reqs) - (number of files linked to the cached file * file size).
At the end, assert the cache lock file shows sigma job = 0.
"""
self._testMultipleJobsReadGlobalFileFunction(cacheHit=False)
def _testMultipleJobsReadGlobalFileFunction(self, cacheHit):
"""
This function does what the two Multiple File reading tests want to do
:param bool cacheHit: Is the test for the CacheHit case(T) or cacheMiss case(F)
"""
dirPurpose = 'tempWriteDir' if cacheHit else 'nonLocalDir'
workdir = self._createTempDir(purpose=dirPurpose)
with open(os.path.join(workdir, 'test'), 'w') as x:
x.write(str(0))
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=cacheHit,
nonLocalDir=workdir,
fileMB=256)
B = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100M')
jobs = {}
for i in range(0, 10):
jobs[i] = Job.wrapJobFn(self._multipleFileReader, diskMB=1024, fsID=A.rv(),
maxWriteFile=os.path.abspath(x.name), disk='1G',
memory='10M', cores=1)
A.addChild(jobs[i])
jobs[i].addChild(B)
Job.Runner.startToil(A, self.options)
with open(x.name, 'r') as y:
assert int(y.read()) > 2
@staticmethod
def _multipleFileReader(job, diskMB, fsID, maxWriteFile):
"""
            Read a file from the job store immutably and explicitly ask to have it in the cache.
            If we are using the File Job Store, assert the sum of cached file sizes in the cache
            lock file is zero, else assert it is equal to the size of the read file.
            Also assert the sum of job reqs + (number of files linked to the cached file * file
            size) is an integer multiple of the disk requirements provided to this job.
:param int diskMB: disk requirements provided to the job
:param str fsID: job store file ID
:param str maxWriteFile: path to file where the max number of concurrent readers of
cache lock file will be written
"""
work_dir = job.fileStore.getLocalTempDir()
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']), cache=True,
mutable=False)
diskMB = diskMB * 1024 * 1024
with job.fileStore.cacheLock():
fileStats = os.stat(outfile)
fileSize = fileStats.st_size
fileNlinks = fileStats.st_nlink
with open(maxWriteFile, 'r+') as x:
prev_max = int(x.read())
x.seek(0)
x.truncate()
x.write(str(max(prev_max, fileNlinks)))
cacheInfo = job.fileStore._CacheState._load(job.fileStore.cacheStateFile)
if cacheInfo.nlink == 2:
assert cacheInfo.cached == 0.0 # Since fileJobstore on same filesystem
else:
assert cacheInfo.cached == fileSize
assert ((cacheInfo.sigmaJob + (fileNlinks - cacheInfo.nlink) * fileSize) %
diskMB) == 0.0
# Sleep so there's no race conditions where a job ends before another can get a hold of
# the file
time.sleep(3)
@staticmethod
def _writeExportGlobalFile(job):
fileName = os.path.join(job.fileStore.getLocalTempDir(), 'testfile')
with open(fileName, 'wb') as f:
f.write(os.urandom(1024 * 30000)) # 30 Mb
outputFile = os.path.join(job.fileStore.getLocalTempDir(), 'exportedFile')
job.fileStore.exportFile(job.fileStore.writeGlobalFile(fileName), 'File://' + outputFile)
assert filecmp.cmp(fileName, outputFile)
@slow
def testFileStoreExportFile(self):
# Tests that files written to job store can be immediately exported
# motivated by https://github.com/BD2KGenomics/toil/issues/1469
root = Job.wrapJobFn(self._writeExportGlobalFile)
Job.Runner.startToil(root, self.options)
# Testing for the return of file sizes to the sigma job pool.
@slow
def testReturnFileSizes(self):
"""
Write a couple of files to the jobstore. Delete a couple of them. Read back written
and locally deleted files. Ensure that after every step that the cache state file is
describing the correct values.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._returnFileTestFn,
jobDisk=2 * 1024 * 1024 * 1024,
initialCachedSize=0,
nonLocalDir=workdir,
disk='2G')
Job.Runner.startToil(F, self.options)
@slow
def testReturnFileSizesWithBadWorker(self):
"""
Write a couple of files to the jobstore. Delete a couple of them. Read back written
and locally deleted files. Ensure that after every step that the cache state file is
describing the correct values.
"""
self.options.retryCount = 20
self.options.badWorker = 0.5
self.options.badWorkerFailInterval = 0.1
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._returnFileTestFn,
jobDisk=2 * 1024 * 1024 * 1024,
initialCachedSize=0,
nonLocalDir=workdir,
numIters=30, disk='2G')
Job.Runner.startToil(F, self.options)
@staticmethod
def _returnFileTestFn(job, jobDisk, initialCachedSize, nonLocalDir, numIters=100):
"""
        Aux function for jobCacheTest.testReturnFileSizes. Conduct numIters operations and ensure
        the cache state file is tracked appropriately.
        Track the cache calculations even though they won't be used in the file job store.
:param float jobDisk: The value of disk passed to this job.
"""
cached = initialCachedSize
work_dir = job.fileStore.getLocalTempDir()
writtenFiles = {} # fsID: (size, isLocal)
localFileIDs = collections.defaultdict(list) # fsid: local/non-local/mutable/immutable
# Add one file for the sake of having something in the job store
writeFileSize = random.randint(0, 30)
jobDisk -= writeFileSize * 1024 * 1024
cls = hidden.AbstractCachingFileStoreTest
fsId = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=True, fileMB=writeFileSize)
writtenFiles[fsId] = writeFileSize
if job.fileStore._fileIsCached(list(writtenFiles.keys())[0]):
cached += writeFileSize * 1024 * 1024
localFileIDs[list(writtenFiles.keys())[0]].append('local')
cls._requirementsConcur(job, jobDisk, cached)
i = 0
while i <= numIters:
randVal = random.random()
if randVal < 0.33: # Write
writeFileSize = random.randint(0, 30)
if random.random() <= 0.5: # Write a local file
fsID = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=True,
fileMB=writeFileSize)
writtenFiles[fsID] = writeFileSize
localFileIDs[fsID].append('local')
jobDisk -= writeFileSize * 1024 * 1024
if job.fileStore._fileIsCached(fsID):
cached += writeFileSize * 1024 * 1024
else: # Write a non-local file
fsID = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=False,
nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
writtenFiles[fsID] = writeFileSize
localFileIDs[fsID].append('non-local')
# No change to the job since there was no caching
cls._requirementsConcur(job, jobDisk, cached)
else:
if len(writtenFiles) == 0:
continue
else:
fsID, rdelFileSize = random.choice(list(writtenFiles.items()))
rdelRandVal = random.random()
fileWasCached = job.fileStore._fileIsCached(fsID)
if randVal < 0.66: # Read
if rdelRandVal <= 0.5: # Read as mutable
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
mutable=True)
localFileIDs[fsID].append('mutable')
# No change because the file wasn't cached
else: # Read as immutable
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
mutable=False)
localFileIDs[fsID].append('immutable')
jobDisk -= rdelFileSize * 1024 * 1024
if not fileWasCached:
if job.fileStore._fileIsCached(fsID):
cached += rdelFileSize * 1024 * 1024
cls._requirementsConcur(job, jobDisk, cached)
else: # Delete
if rdelRandVal <= 0.5: # Local Delete
if fsID not in list(localFileIDs.keys()):
continue
job.fileStore.deleteLocalFile(fsID)
else: # Global Delete
job.fileStore.deleteGlobalFile(fsID)
assert not os.path.exists(job.fileStore.encodedFileID(fsID))
writtenFiles.pop(fsID)
if fsID in list(localFileIDs.keys()):
for lFID in localFileIDs[fsID]:
if lFID not in ('non-local', 'mutable'):
jobDisk += rdelFileSize * 1024 * 1024
localFileIDs.pop(fsID)
if fileWasCached:
if not job.fileStore._fileIsCached(fsID):
cached -= rdelFileSize * 1024 * 1024
cls._requirementsConcur(job, jobDisk, cached)
i += 1
return jobDisk, cached
@staticmethod
def _requirementsConcur(job, jobDisk, cached):
"""
Assert the values for job disk and total cached file sizes tracked in the job's cache
state file is equal to the values we expect.
"""
with job.fileStore._CacheState.open(job.fileStore) as cacheInfo:
jobState = cacheInfo.jobState[job.fileStore.jobID]
# cached should have a value only if the job store is on a different file system
# than the cache
if cacheInfo.nlink != 2:
assert cacheInfo.cached == cached
else:
assert cacheInfo.cached == 0
assert jobState['jobReqs'] == jobDisk
# Testing the resumability of a failed worker
@slow
def testControlledFailedWorkerRetry(self):
"""
Conduct a couple of job store operations. Then die. Ensure that the restarted job is
tracking values in the cache state file appropriately.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
self.options.retryCount = 1
F = Job.wrapJobFn(self._controlledFailTestFn, jobDisk=2 * 1024 * 1024 * 1024,
testDir=workdir,
disk='2G')
G = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100M')
F.addChild(G)
Job.Runner.startToil(F, self.options)
@staticmethod
def _controlledFailTestFn(job, jobDisk, testDir):
"""
This is the aux function for the controlled failed worker test. It does a couple of
cache operations, fails, then checks whether the new worker starts with the expected
value, and whether it exits with zero for sigmaJob.
:param float jobDisk: Disk space supplied for this job
        :param str testDir: Testing directory
"""
cls = hidden.AbstractCachingFileStoreTest
if os.path.exists(os.path.join(testDir, 'testfile.test')):
with open(os.path.join(testDir, 'testfile.test'), 'r') as fH:
cached = unpack('d', fH.read())[0]
cls._requirementsConcur(job, jobDisk, cached)
cls._returnFileTestFn(job, jobDisk, cached, testDir, 20)
else:
modifiedJobReqs, cached = cls._returnFileTestFn(job, jobDisk, 0, testDir, 20)
with open(os.path.join(testDir, 'testfile.test'), 'w') as fH:
fH.write(pack('d', cached))
os.kill(os.getpid(), signal.SIGKILL)
@slow
def testRemoveLocalMutablyReadFile(self):
"""
If a mutably read file is deleted by the user, it is ok.
"""
self._deleteLocallyReadFilesFn(readAsMutable=True)
@slow
def testRemoveLocalImmutablyReadFile(self):
"""
If an immutably read file is deleted by the user, it is not ok.
"""
self._deleteLocallyReadFilesFn(readAsMutable=False)
def _deleteLocallyReadFilesFn(self, readAsMutable):
self.options.retryCount = 0
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True, memory='10M')
B = Job.wrapJobFn(self._removeReadFileFn, A.rv(), readAsMutable=readAsMutable,
memory='20M')
A.addChild(B)
Job.Runner.startToil(A, self.options)
@staticmethod
def _removeReadFileFn(job, fileToDelete, readAsMutable):
"""
        Accept a file. Remove it from its local location (the test uses a rename), then attempt
        to delete it through the file store. This will raise an error for files read immutably.
        Then write a new file to the job store and try to do the same. This should always raise
        an error.
:param fileToDelete: File written to the job store that is tracked by the cache
"""
work_dir = job.fileStore.getLocalTempDir()
# Are we processing the read file or the written file?
        processingReadFile = True
# Read in the file
outfile = job.fileStore.readGlobalFile(fileToDelete, os.path.join(work_dir, 'temp'),
mutable=readAsMutable)
tempfile = os.path.join(work_dir, 'tmp.tmp')
        # The first time we run this loop, processingReadFile is True and fileToDelete is the
        # file read from the job store. The second time, processingReadFile is False and
        # fileToDelete is one that was just written into the job store. Ensure the correct
        # behaviour is seen in both conditions.
while True:
os.rename(outfile, tempfile)
try:
job.fileStore.deleteLocalFile(fileToDelete)
except IllegalDeletionCacheError:
job.fileStore.logToMaster('Detected a deleted file %s.' % fileToDelete)
os.rename(tempfile, outfile)
else:
# If we are processing the write test, or if we are testing the immutably read
# file, we should not reach here.
                assert processingReadFile and readAsMutable
            if processingReadFile:
                processingReadFile = False
# Write a file
                with open(os.path.join(work_dir, str(uuid4())), 'wb') as testFile:
                    # os.urandom returns bytes, so the file must be opened in binary mode
                    testFile.write(os.urandom(1 * 1024 * 1024))
fileToDelete = job.fileStore.writeGlobalFile(testFile.name)
outfile = testFile.name
else:
break
def testDeleteLocalFile(self):
"""
Test the deletion capabilities of deleteLocalFile
"""
self.options.retryCount = 0
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._deleteLocalFileFn, nonLocalDir=workdir)
Job.Runner.startToil(A, self.options)
@staticmethod
def _deleteLocalFileFn(job, nonLocalDir):
"""
Test deleteLocalFile on a local write, non-local write, read, mutable read, and bogus
jobstore IDs.
"""
work_dir = job.fileStore.getLocalTempDir()
# Write local file
        with open(os.path.join(work_dir, str(uuid4())), 'wb') as localFile:
            # os.urandom returns bytes, so write in binary mode
            localFile.write(os.urandom(1 * 1024 * 1024))
localFsID = job.fileStore.writeGlobalFile(localFile.name)
# write Non-Local File
        with open(os.path.join(nonLocalDir, str(uuid4())), 'wb') as nonLocalFile:
            nonLocalFile.write(os.urandom(1 * 1024 * 1024))
nonLocalFsID = job.fileStore.writeGlobalFile(nonLocalFile.name)
# Delete fsid of local file. The file should be deleted
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(localFile.name)
# Delete fsid of non-local file. The file should persist
job.fileStore.deleteLocalFile(nonLocalFsID)
assert os.path.exists(nonLocalFile.name)
# Read back one file and then delete it
readBackFile1 = job.fileStore.readGlobalFile(localFsID)
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(readBackFile1)
# Read back one file with 2 different names and then delete it. Assert both get deleted
readBackFile1 = job.fileStore.readGlobalFile(localFsID)
readBackFile2 = job.fileStore.readGlobalFile(localFsID)
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(readBackFile1)
assert not os.path.exists(readBackFile2)
# Try to get a bogus FSID
try:
job.fileStore.readGlobalFile('bogus')
except NoSuchFileException:
pass
class _deleteMethods(object):
@staticmethod
def _deleteFileMethod(nonLocalFile, nlf=None):
"""
Delete nonLocalFile and nlf
:return: None
"""
os.remove(nonLocalFile)
if nlf is not None:
os.remove(nlf)
@classmethod
def _deleteFileClassMethod(cls, nonLocalFile, nlf=None):
"""
Delete nonLocalFile and nlf
:return: None
"""
os.remove(nonLocalFile)
if nlf is not None:
os.remove(nlf)
class NonCachingFileStoreTestWithFileJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'file'
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithFileJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'file'
@needs_aws
class NonCachingFileStoreTestWithAwsJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'aws'
@slow
@needs_aws
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithAwsJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'aws'
@needs_azure
class NonCachingFileStoreTestWithAzureJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'azure'
@slow
@needs_azure
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithAzureJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'azure'
@needs_google
class NonCachingFileStoreTestWithGoogleJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'google'
@slow
@needs_google
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithGoogleJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'google'
def _exportStaticMethodAsGlobalFunctions(cls):
"""
Define utility functions because Toil can't pickle static methods. Note that this relies on
the convention that the first argument of a job function is named 'job'.
"""
for name, kind, clazz, value in inspect.classify_class_attrs(cls):
if kind == 'static method':
method = value.__func__
args = inspect.getargspec(method).args
if args and args[0] == 'job':
globals()[name] = method
_exportStaticMethodAsGlobalFunctions(hidden.AbstractFileStoreTest)
_exportStaticMethodAsGlobalFunctions(hidden.AbstractCachingFileStoreTest)
_exportStaticMethodAsGlobalFunctions(hidden.AbstractNonCachingFileStoreTest)
| 47.130274 | 101 | 0.582445 |
aced0c0f7ed3e7ea14d51ec93c97b7c7db4f8067 | 15,877 | py | Python | env/lib/python3.4/site-packages/pip/cmdoptions.py | musicrighter/P2-Iteration-0 | dcf2f2f2a5a8849e57ff3c2e9f2ab4cdfca9b86d | ["Artistic-2.0"] | 11 | 2017-12-25T23:22:13.000Z | 2021-09-28T00:23:37.000Z | env/lib/python3.4/site-packages/pip/cmdoptions.py | musicrighter/P2-Iteration-0 | dcf2f2f2a5a8849e57ff3c2e9f2ab4cdfca9b86d | ["Artistic-2.0"] | 2 | 2021-04-30T20:34:10.000Z | 2021-06-01T21:47:23.000Z | env/lib/python3.4/site-packages/pip/cmdoptions.py | musicrighter/P2-Iteration-0 | dcf2f2f2a5a8849e57ff3c2e9f2ab4cdfca9b86d | ["Artistic-2.0"] | 4 | 2017-01-11T15:40:52.000Z | 2018-07-25T16:03:52.000Z |
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
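# A minimal usage sketch (not from pip itself; the option values are made up):
#
#     from optparse import OptionParser
#     parser = OptionParser()
#     parser.add_option_group(make_option_group(general_group, parser))
#     opts, _ = parser.parse_args(['--timeout', '30'])
#     assert opts.timeout == 30.0
#
# `general_group` is the group dict defined at the bottom of this module.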
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help='Base URL of Python Package Index (default %default).')
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.'
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory,"
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
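# Illustrative result (hypothetical digests): parsing
# --hash=sha256:aaaa... --hash=sha256:bbbb... accumulates
# parser.values.hashes == {'sha256': ['aaaa...', 'bbbb...']}.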
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
| 25.649435 | 79 | 0.622221 |
aced0d24844d18c2dd0ab404a2ca1cb4e79870ef | 1,239 | py | Python | src/analysis/crystFEL_tutorial/3-xtal-to-list.py | ejcjason/crystalProject | 890c6645a2e132f8b81e1dd485b61d66dd2521c3 | ["MIT"] | 1 | 2020-04-30T13:40:28.000Z | 2020-04-30T13:40:28.000Z | src/analysis/crystFEL_tutorial/3-xtal-to-list.py | JunCEEE/crystalProject | 890c6645a2e132f8b81e1dd485b61d66dd2521c3 | ["MIT"] | 4 | 2020-05-19T13:38:27.000Z | 2020-05-29T16:47:27.000Z | src/analysis/crystFEL_tutorial/3-xtal-to-list.py | JunCEEE/crystalProject | 890c6645a2e132f8b81e1dd485b61d66dd2521c3 | ["MIT"] | 2 | 2020-05-05T12:46:02.000Z | 2020-06-05T20:40:35.000Z |
#!/gpfs/exfel/sw/software/xfel_anaconda3/1.1/bin/python
import os
LYSO = [(7.5, 8.4), (7.5, 8.4), (3.4, 4.0), (88, 92), (88, 92), (88, 92)]
#LYSO = [(3.4, 4.0), (7.5, 8.4), (7.5, 8.4), (88, 92), (88, 92), (88, 92)]
def reasonable_cell(parms):
for i in range(6):
if parms[i] < LYSO[i][0]: return False
if parms[i] > LYSO[i][1]: return False
return True
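# Illustrative check (not part of the tutorial script; assumes the stream
# reports axis lengths in nm and angles in degrees, matching the windows above):
#
#     assert reasonable_cell([7.9, 7.9, 3.8, 90.0, 90.0, 90.0])      # fits LYSO
#     assert not reasonable_cell([7.9, 7.9, 9.9, 90.0, 90.0, 90.0])  # c too long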
print('This tool will write out frames containing reasonable crystals')
valid = False
while not valid:
fn = input('CrystFEL stream file to parse > ')
if not os.path.exists(fn):
print('File with provided name cannot be opened')
else:
valid = True
hit_list = []
with open(fn) as f:
for ln in f:
if 'Event:' in ln:
event = ln.split()[-1] # includes '//'
if 'Cell parameters' in ln:
cell_parms = [float(x) for x in (ln.split()[2:5] + ln.split()[6:9])]
if reasonable_cell(cell_parms):
hit_list.append('xmpl_2_vds.cxi {}'.format(event))
print(len(hit_list), 'reasonable crystals found')
out_file = input('output file name to which hit-frames shall be written > ')
with open(out_file, 'w') as f:
for ln in hit_list:
f.write('{}\n'.format(ln))
| 30.219512 | 80 | 0.582728 |
aced0d35ff47d4b99304a2baeca4cf9fb2a5b7ec | 357 | py | Python | anton/views/index.py | sunnstix/dancyPi-audio-reactive-led | f067c274bfb3a7251ba4995479950c82633a2fb0 | ["MIT"] | null | null | null | anton/views/index.py | sunnstix/dancyPi-audio-reactive-led | f067c274bfb3a7251ba4995479950c82633a2fb0 | ["MIT"] | 1 | 2021-08-22T05:03:11.000Z | 2021-08-22T05:03:11.000Z | anton/views/index.py | sunnstix/dancyPi-audio-reactive-led | f067c274bfb3a7251ba4995479950c82633a2fb0 | ["MIT"] | null | null | null |
import anton
from anton.lights.modes import Lights
import flask
# Main Index Page
# ====================================================
@anton.app.route('/')
def show_index():
context = {
'light_modes': Lights.MODES.keys(),
'audio_modes': ['scroll','spectrum','energy']
}
return flask.render_template("index.html", **context) | 23.8 | 57 | 0.557423 |
aced0d75260a1957a46134d5bc0a23aa6986ffd5 | 2,857 | py | Python | djangosaml2/cache.py | daggaz/djangosaml2 | 95e8f3550974d1e4cb40e921873f350519adc57d | ["Apache-2.0"] | null | null | null | djangosaml2/cache.py | daggaz/djangosaml2 | 95e8f3550974d1e4cb40e921873f350519adc57d | ["Apache-2.0"] | null | null | null | djangosaml2/cache.py | daggaz/djangosaml2 | 95e8f3550974d1e4cb40e921873f350519adc57d | ["Apache-2.0"] | null | null | null |
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saml2.cache import Cache
class DjangoSessionCacheAdapter(dict):
"""A cache of things that are stored in the Django Session"""
key_prefix = '_saml2'
def __init__(self, django_session, key_suffix):
self.session = django_session
self.key = self.key_prefix + key_suffix
super().__init__(self._get_objects())
def _get_objects(self):
return self.session.get(self.key, {})
def _set_objects(self, objects):
self.session[self.key] = objects
def sync(self):
# Changes in inner objects do not cause session invalidation
# https://docs.djangoproject.com/en/1.9/topics/http/sessions/#when-sessions-are-saved
# add objects to session
self._set_objects(dict(self))
# invalidate session
self.session.modified = True
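# A minimal usage sketch (assumed, not part of djangosaml2's public API;
# '_example' is a hypothetical key suffix used only for illustration):
#
#     adapter = DjangoSessionCacheAdapter(request.session, '_example')
#     adapter['key'] = 'value'   # mutates only the in-memory dict
#     adapter.sync()             # writes back and marks the session modified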
class OutstandingQueriesCache(object):
"""Handles the queries that have been sent to the IdP and have not
been replied yet.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session, '_outstanding_queries')
def outstanding_queries(self):
return self._db._get_objects()
def set(self, saml2_session_id, came_from):
self._db[saml2_session_id] = came_from
self._db.sync()
def delete(self, saml2_session_id):
if saml2_session_id in self._db:
del self._db[saml2_session_id]
self._db.sync()
class IdentityCache(Cache):
"""Handles information about the users that have been succesfully
logged in.
This information is useful because when the user logs out we must
know where does he come from in order to notify such IdP/AA.
The current implementation stores this information in the Django session.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session, '_identities')
self._sync = True
class StateCache(DjangoSessionCacheAdapter):
"""Store state information that is needed to associate a logout
request with its response.
"""
def __init__(self, django_session):
super().__init__(django_session, '_state')
| 32.101124 | 93 | 0.702135 |
aced0dad0d85f26aec6a8d4e560fab63fca71b66 | 18,493 | py | Python | lldb/test/API/python_api/sbdata/TestSBData.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | ["Apache-2.0"] | null | null | null | lldb/test/API/python_api/sbdata/TestSBData.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | ["Apache-2.0"] | null | null | null | lldb/test/API/python_api/sbdata/TestSBData.py | LaudateCorpus1/llvm-project | ff2e0f0c1112558b3f30d8afec7c9882c33c79e3 | ["Apache-2.0"] | null | null | null |
"""Test the SBData APIs."""
from math import fabs
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SBDataAPICase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break on inside main.cpp.
self.line = line_number('main.cpp', '// set breakpoint here')
def test_byte_order_and_address_byte_size(self):
"""Test the SBData::SetData() to ensure the byte order and address
byte size are obeyed"""
addr_data = b'\x11\x22\x33\x44\x55\x66\x77\x88'
error = lldb.SBError()
data = lldb.SBData()
        data.SetData(error, addr_data, lldb.eByteOrderBig, 4)
        addr = data.GetAddress(error, 0)
        self.assertEqual(addr, 0x11223344)
        data.SetData(error, addr_data, lldb.eByteOrderBig, 8)
        addr = data.GetAddress(error, 0)
        self.assertEqual(addr, 0x1122334455667788)
        data.SetData(error, addr_data, lldb.eByteOrderLittle, 4)
        addr = data.GetAddress(error, 0)
        self.assertEqual(addr, 0x44332211)
        data.SetData(error, addr_data, lldb.eByteOrderLittle, 8)
        addr = data.GetAddress(error, 0)
        self.assertEqual(addr, 0x8877665544332211)
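        # Worked example of the assertions above: the first four bytes of
        # addr_data are 0x11 0x22 0x33 0x44. Big-endian treats the first byte
        # as most significant (0x11223344); little-endian treats it as least
        # significant, so the same bytes decode to 0x44332211.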
def test_byte_order_and_address_byte_size_with_ownership(self):
"""Test the SBData::SetDataWithOwnership() to ensure the byte order
and address byte size are obeyed even when source date is released"""
addr_data = b'\x11\x22\x33\x44\x55\x66\x77\x88'
error = lldb.SBError()
data = lldb.SBData()
data.SetDataWithOwnership(error, addr_data, lldb.eByteOrderBig, 8)
del addr_data
addr = data.GetAddress(error, 0)
        self.assertEqual(addr, 0x1122334455667788)
def test_with_run_command(self):
"""Test the SBData APIs."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
frame = thread.GetSelectedFrame()
foobar = frame.FindVariable('foobar')
self.assertTrue(foobar.IsValid())
data = foobar.GetPointeeData(0, 2)
offset = 0
error = lldb.SBError()
self.assert_data(data.GetUnsignedInt32, offset, 1)
offset += 4
low = data.GetSignedInt16(error, offset)
self.assertSuccess(error)
offset += 2
high = data.GetSignedInt16(error, offset)
self.assertSuccess(error)
offset += 2
self.assertTrue(
(low == 9 and high == 0) or (
low == 0 and high == 9),
'foo[0].b == 9')
self.assertTrue(
fabs(
data.GetFloat(
error,
offset) -
3.14) < 1,
'foo[0].c == 3.14')
self.assertSuccess(error)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 8)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 5)
offset += 4
self.runCmd("n")
offset = 16
self.assert_data(data.GetUnsignedInt32, offset, 5)
data = foobar.GetPointeeData(1, 1)
offset = 0
self.assert_data(data.GetSignedInt32, offset, 8)
offset += 4
self.assert_data(data.GetSignedInt32, offset, 7)
offset += 8
self.assertTrue(
data.GetUnsignedInt32(
error,
offset) == 0,
'do not read beyond end')
self.assertTrue(not error.Success())
error.Clear() # clear the error for the next test
star_foobar = foobar.Dereference()
self.assertTrue(star_foobar.IsValid())
data = star_foobar.GetData()
offset = 0
self.assert_data(data.GetUnsignedInt32, offset, 1)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 9)
foobar_addr = star_foobar.GetLoadAddress()
foobar_addr += 12
# http://llvm.org/bugs/show_bug.cgi?id=11579
# lldb::SBValue::CreateValueFromAddress does not verify SBType::GetPointerType succeeds
# This should not crash LLDB.
nothing = foobar.CreateValueFromAddress(
"nothing", foobar_addr, star_foobar.GetType().GetBasicType(
lldb.eBasicTypeInvalid))
new_foobar = foobar.CreateValueFromAddress(
"f00", foobar_addr, star_foobar.GetType())
self.assertTrue(new_foobar.IsValid())
data = new_foobar.GetData()
self.assertEqual(data.uint32[0], 8, 'then foo[1].a == 8')
self.assertEqual(data.uint32[1], 7, 'then foo[1].b == 7')
# exploiting that sizeof(uint32) == sizeof(float)
self.assertTrue(fabs(data.float[2] - 3.14) < 1, 'foo[1].c == 3.14')
self.runCmd("n")
offset = 0
self.assert_data(data.GetUnsignedInt32, offset, 8)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 7)
offset += 4
self.assertTrue(
fabs(
data.GetFloat(
error,
offset) -
3.14) < 1,
'foo[1].c == 3.14')
self.assertSuccess(error)
data = new_foobar.GetData()
offset = 0
self.assert_data(data.GetUnsignedInt32, offset, 8)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 7)
offset += 4
self.assertTrue(
fabs(
data.GetFloat(
error,
offset) -
6.28) < 1,
'foo[1].c == 6.28')
self.assertSuccess(error)
self.runCmd("n")
barfoo = frame.FindVariable('barfoo')
data = barfoo.GetData()
offset = 0
self.assert_data(data.GetUnsignedInt32, offset, 1)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 2)
offset += 4
self.assertTrue(
fabs(
data.GetFloat(
error,
offset) -
3) < 1,
'barfoo[0].c == 3')
self.assertSuccess(error)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 4)
offset += 4
self.assert_data(data.GetUnsignedInt32, offset, 5)
offset += 4
self.assertTrue(
fabs(
data.GetFloat(
error,
offset) -
6) < 1,
'barfoo[1].c == 6')
self.assertSuccess(error)
new_object = barfoo.CreateValueFromData(
"new_object", data, barfoo.GetType().GetBasicType(
lldb.eBasicTypeInt))
self.assertEqual(new_object.GetValue(), "1", 'new_object == 1')
if data.GetByteOrder() == lldb.eByteOrderBig:
data.SetData(
error,
'\0\0\0A',
data.GetByteOrder(),
data.GetAddressByteSize())
else:
data.SetData(
error,
'A\0\0\0',
data.GetByteOrder(),
data.GetAddressByteSize())
self.assertSuccess(error)
data2 = lldb.SBData()
data2.SetData(
error,
'BCD',
data.GetByteOrder(),
data.GetAddressByteSize())
self.assertSuccess(error)
data.Append(data2)
# this breaks on EBCDIC
offset = 0
self.assert_data(data.GetUnsignedInt32, offset, 65)
offset += 4
self.assert_data(data.GetUnsignedInt8, offset, 66)
offset += 1
self.assert_data(data.GetUnsignedInt8, offset, 67)
offset += 1
self.assert_data(data.GetUnsignedInt8, offset, 68)
offset += 1
        # Check the new API calls introduced per LLVM enhancement request
        # llvm.org/pr11619 (allow creating SBData values from arrays or
        # primitives in Python)
hello_str = "hello!"
data2 = lldb.SBData.CreateDataFromCString(
process.GetByteOrder(), process.GetAddressByteSize(), hello_str)
self.assertEqual(len(data2.uint8), len(hello_str))
self.assertEqual(data2.uint8[0], 104, 'h == 104')
self.assertEqual(data2.uint8[1], 101, 'e == 101')
self.assertEqual(data2.uint8[2], 108, 'l == 108')
self.assert_data(data2.GetUnsignedInt8, 3, 108) # l
self.assertEqual(data2.uint8[4], 111, 'o == 111')
self.assert_data(data2.GetUnsignedInt8, 5, 33) # !
uint_lists = [[1, 2, 3, 4, 5], [int(i) for i in [1, 2, 3, 4, 5]]]
int_lists = [[2, -2], [int(i) for i in [2, -2]]]
for l in uint_lists:
data2 = lldb.SBData.CreateDataFromUInt64Array(
process.GetByteOrder(), process.GetAddressByteSize(), l)
self.assert_data(data2.GetUnsignedInt64, 0, 1)
self.assert_data(data2.GetUnsignedInt64, 8, 2)
self.assert_data(data2.GetUnsignedInt64, 16, 3)
self.assert_data(data2.GetUnsignedInt64, 24, 4)
self.assert_data(data2.GetUnsignedInt64, 32, 5)
self.assertTrue(
data2.uint64s == [
1,
2,
3,
4,
5],
'read_data_helper failure: data2 == [1,2,3,4,5]')
for l in int_lists:
data2 = lldb.SBData.CreateDataFromSInt32Array(
process.GetByteOrder(), process.GetAddressByteSize(), l)
self.assertTrue(
data2.sint32[
0:2] == [
2, -2], 'signed32 data2 = [2,-2]')
data2.Append(
lldb.SBData.CreateDataFromSInt64Array(
process.GetByteOrder(),
process.GetAddressByteSize(),
int_lists[0]))
self.assert_data(data2.GetSignedInt32, 0, 2)
self.assert_data(data2.GetSignedInt32, 4, -2)
self.assertTrue(
data2.sint64[
1:3] == [
2, -2], 'signed64 data2 = [2,-2]')
for l in int_lists:
data2 = lldb.SBData.CreateDataFromSInt64Array(
process.GetByteOrder(), process.GetAddressByteSize(), l)
self.assert_data(data2.GetSignedInt64, 0, 2)
self.assert_data(data2.GetSignedInt64, 8, -2)
self.assertTrue(
data2.sint64[
0:2] == [
2, -2], 'signed64 data2 = [2,-2]')
for l in uint_lists:
data2 = lldb.SBData.CreateDataFromUInt32Array(
process.GetByteOrder(), process.GetAddressByteSize(), l)
self.assert_data(data2.GetUnsignedInt32, 0, 1)
self.assert_data(data2.GetUnsignedInt32, 4, 2)
self.assert_data(data2.GetUnsignedInt32, 8, 3)
self.assert_data(data2.GetUnsignedInt32, 12, 4)
self.assert_data(data2.GetUnsignedInt32, 16, 5)
bool_list = [True, True, False, False, True, False]
data2 = lldb.SBData.CreateDataFromSInt32Array(
process.GetByteOrder(), process.GetAddressByteSize(), bool_list)
self.assertTrue(
data2.sint32[
0:6] == [
1,
1,
0,
0,
1,
0],
'signed32 data2 = [1, 1, 0, 0, 1, 0]')
data2 = lldb.SBData.CreateDataFromUInt32Array(
process.GetByteOrder(), process.GetAddressByteSize(), bool_list)
self.assertTrue(
data2.uint32[
0:6] == [
1,
1,
0,
0,
1,
0],
'unsigned32 data2 = [1, 1, 0, 0, 1, 0]')
data2 = lldb.SBData.CreateDataFromSInt64Array(
process.GetByteOrder(), process.GetAddressByteSize(), bool_list)
self.assertTrue(
data2.sint64[
0:6] == [
1,
1,
0,
0,
1,
0],
'signed64 data2 = [1, 1, 0, 0, 1, 0]')
data2 = lldb.SBData.CreateDataFromUInt64Array(
process.GetByteOrder(), process.GetAddressByteSize(), bool_list)
self.assertTrue(
data2.uint64[
0:6] == [
1,
1,
0,
0,
1,
0],
'signed64 data2 = [1, 1, 0, 0, 1, 0]')
data2 = lldb.SBData.CreateDataFromDoubleArray(
process.GetByteOrder(), process.GetAddressByteSize(), [
3.14, 6.28, 2.71])
self.assertTrue(
fabs(
data2.GetDouble(
error,
0) -
3.14) < 0.5,
'double data2[0] = 3.14')
self.assertSuccess(error)
self.assertTrue(
fabs(
data2.GetDouble(
error,
8) -
6.28) < 0.5,
'double data2[1] = 6.28')
self.assertSuccess(error)
self.assertTrue(
fabs(
data2.GetDouble(
error,
16) -
2.71) < 0.5,
'double data2[2] = 2.71')
self.assertSuccess(error)
data2 = lldb.SBData()
data2.SetDataFromCString(hello_str)
self.assertEqual(len(data2.uint8), len(hello_str))
self.assert_data(data2.GetUnsignedInt8, 0, 104)
self.assert_data(data2.GetUnsignedInt8, 1, 101)
self.assert_data(data2.GetUnsignedInt8, 2, 108)
self.assert_data(data2.GetUnsignedInt8, 3, 108)
self.assert_data(data2.GetUnsignedInt8, 4, 111)
self.assert_data(data2.GetUnsignedInt8, 5, 33)
data2.SetDataFromUInt64Array([1, 2, 3, 4, 5])
self.assert_data(data2.GetUnsignedInt64, 0, 1)
self.assert_data(data2.GetUnsignedInt64, 8, 2)
self.assert_data(data2.GetUnsignedInt64, 16, 3)
self.assert_data(data2.GetUnsignedInt64, 24, 4)
self.assert_data(data2.GetUnsignedInt64, 32, 5)
self.assertEqual(
data2.uint64[0], 1,
'read_data_helper failure: set data2[0] = 1')
self.assertEqual(
data2.uint64[1], 2,
'read_data_helper failure: set data2[1] = 2')
self.assertEqual(
data2.uint64[2], 3,
'read_data_helper failure: set data2[2] = 3')
self.assertEqual(
data2.uint64[3], 4,
'read_data_helper failure: set data2[3] = 4')
self.assertEqual(
data2.uint64[4], 5,
'read_data_helper failure: set data2[4] = 5')
self.assertTrue(
data2.uint64[
0:2] == [
1,
2],
'read_data_helper failure: set data2[0:2] = [1,2]')
data2.SetDataFromSInt32Array([2, -2])
self.assert_data(data2.GetSignedInt32, 0, 2)
self.assert_data(data2.GetSignedInt32, 4, -2)
data2.SetDataFromSInt64Array([2, -2])
self.assert_data(data2.GetSignedInt64, 0, 2)
self.assert_data(data2.GetSignedInt64, 8, -2)
data2.SetDataFromUInt32Array([1, 2, 3, 4, 5])
self.assert_data(data2.GetUnsignedInt32, 0, 1)
self.assert_data(data2.GetUnsignedInt32, 4, 2)
self.assert_data(data2.GetUnsignedInt32, 8, 3)
self.assert_data(data2.GetUnsignedInt32, 12, 4)
self.assert_data(data2.GetUnsignedInt32, 16, 5)
self.assertEqual(
data2.uint32[0], 1,
'read_data_helper failure: set 32-bit data2[0] = 1')
self.assertEqual(
data2.uint32[1], 2,
'read_data_helper failure: set 32-bit data2[1] = 2')
self.assertEqual(
data2.uint32[2], 3,
'read_data_helper failure: set 32-bit data2[2] = 3')
self.assertEqual(
data2.uint32[3], 4,
'read_data_helper failure: set 32-bit data2[3] = 4')
self.assertEqual(
data2.uint32[4], 5,
'read_data_helper failure: set 32-bit data2[4] = 5')
data2.SetDataFromDoubleArray([3.14, 6.28, 2.71])
self.assertTrue(fabs(data2.GetDouble(error, 0) - 3.14)
< 0.5, 'set double data2[0] = 3.14')
self.assertTrue(fabs(data2.GetDouble(error, 8) - 6.28)
< 0.5, 'set double data2[1] = 6.28')
self.assertTrue(fabs(data2.GetDouble(error, 16) - 2.71)
< 0.5, 'set double data2[2] = 2.71')
self.assertTrue(
fabs(
data2.double[0] -
3.14) < 0.5,
'read_data_helper failure: set double data2[0] = 3.14')
self.assertTrue(
fabs(
data2.double[1] -
6.28) < 0.5,
'read_data_helper failure: set double data2[1] = 6.28')
self.assertTrue(
fabs(
data2.double[2] -
2.71) < 0.5,
'read_data_helper failure: set double data2[2] = 2.71')
def assert_data(self, func, arg, expected):
""" Asserts func(SBError error, arg) == expected. """
error = lldb.SBError()
result = func(error, arg)
if not error.Success():
stream = lldb.SBStream()
error.GetDescription(stream)
self.assertTrue(
error.Success(), "%s(error, %s) did not succeed: %s" %
(func.__name__, arg, stream.GetData()))
self.assertTrue(
expected == result, "%s(error, %s) == %s != %s" %
(func.__name__, arg, result, expected))
| 34.826742 | 95 | 0.53815 |
aced0dba9226a3b6d56083c258ff76a0769cdf20 | 800 | py | Python | vue_backend/middleware/usermiddleware.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | ["MIT"] | 1 | 2021-03-18T17:04:52.000Z | 2021-03-18T17:04:52.000Z | vue_backend/middleware/usermiddleware.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | ["MIT"] | 11 | 2020-04-03T04:16:24.000Z | 2022-03-26T10:36:49.000Z | vue_backend/middleware/usermiddleware.py | hanson190505/coteam | 8bd01f4edc2a0b2a65dc18d68e36efb11cbdf576 | ["MIT"] | null | null | null |
from time import time
from django.core.cache import cache
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
class UserMiddleware(MiddlewareMixin):
def process_request(self, request):
# print(request.method)
# print(request.path)
# print(request.META.get('CONTENT_TYPE'))
# print(request.POST.get('username'))
# print(request.body)
# meta = request.META
# for k,v in meta.items():
# print(k)
# print('----------')
# print(v)
        # Rate-limit requests per client IP
        ip = request.META.get('REMOTE_ADDR')
        if cache.get(ip):
            # A cache entry exists, so this IP was seen within the 10-second TTL
            if cache.get(ip) >= time() - 10:
                return HttpResponse('Logging in too frequently, please try again later')
        else:
            # First request in the window: remember the timestamp for 10 seconds
            cache.set(ip, time(), 10)
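        # A race-free variant for reference (a sketch, not the original code):
        # Django's cache.add() stores only when the key is absent, so a single
        # call both tests and opens the 10-second window:
        #
        #     if not cache.add(ip, time(), 10):
        #         return HttpResponse('Logging in too frequently, please try again later')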
| 27.586207 | 52 | 0.5725 |
aced0fb50190b3a222b5088ddea0886f38e7b1a3 | 374 | py | Python | hermes/corpus/processors.py | dbracewell/pyHermes | 09964eb566b74d1d3ae2b99849b06c4d07242e5b | ["Apache-2.0"] | null | null | null | hermes/corpus/processors.py | dbracewell/pyHermes | 09964eb566b74d1d3ae2b99849b06c4d07242e5b | ["Apache-2.0"] | null | null | null | hermes/corpus/processors.py | dbracewell/pyHermes | 09964eb566b74d1d3ae2b99849b06c4d07242e5b | ["Apache-2.0"] | null | null | null |
import typing
from collections import Counter
from .corpus import Corpus
class BaseExtractor:
def __init__(self, binary=False, to_string=None, annotation_type='token'):
self._binary = binary
self._to_string = to_string
self._annotation_type = annotation_type
def process(self, corpus: Corpus) -> Counter:
raise NotImplementedError
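# A minimal concrete extractor (a sketch, not part of pyHermes; it assumes
# Corpus iterates over documents and each document exposes an
# annotations(annotation_type) iterable -- both assumptions):
#
#     class TokenCountExtractor(BaseExtractor):
#         def process(self, corpus: Corpus) -> Counter:
#             counts = Counter()
#             for document in corpus:
#                 for annotation in document.annotations(self._annotation_type):
#                     key = self._to_string(annotation) if self._to_string else str(annotation)
#                     counts[key] = 1 if self._binary else counts[key] + 1
#             return counts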
| 26.714286 | 78 | 0.724599 |
aced0fc3aad4f539551b314b23eba0ca5fa407e0 | 4,039 | py | Python | mustacheyou/stacher.py | bpftomc/mustacheyou | 71bd50d0f332400c077be1c07ad7c36f033a623b | ["MIT"] | null | null | null | mustacheyou/stacher.py | bpftomc/mustacheyou | 71bd50d0f332400c077be1c07ad7c36f033a623b | ["MIT"] | null | null | null | mustacheyou/stacher.py | bpftomc/mustacheyou | 71bd50d0f332400c077be1c07ad7c36f033a623b | ["MIT"] | null | null | null |
import argparse
import chevron
from copy import deepcopy
import logging
from os import getcwd
from os.path import isfile, join, splitdrive
import re
import yaml
from mustacheyou.base import MustacheYouBase
logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.DEBUG, filename="stacher.log", encoding='utf-8')
# if __name__ == "__main__":
def make():
parser = argparse.ArgumentParser(description='Process MustacheYou config.')
parser.add_argument('--infile', '-i', required=True,
help='Input YAML file')
parser.add_argument('--outdir', '-o',
help='Output folder for MustacheYou results')
parser.add_argument('--mustache', '--templates', '-t', '-m',
help='Template file in Mustache syntax or a folder of such files')
parser.add_argument('--yaml_path', nargs='*', type=str,
help='To use only a subset of the YAML input file based on a path of mappings')
args = parser.parse_args()
infile = args.infile
drive = splitdrive(infile)
# logging.info(f"Infile: {args.infile}")
if not re.match('^[/]', infile) and drive[0] == '':
infile = join(getcwd(), infile)
# logging.info(f"Updated infile with CWD: {infile}")
if not isfile(infile):
raise Exception(f"No such file as infile {infile}")
maker = MustacheYou(infile, args.outdir, args.mustache, args.yaml_path)
maker.make()
class MustacheYou(MustacheYouBase):
extra_template_dirs = []
def __init__(self, yaml_config, dest_dir=None, extra_template_dirs=None, yaml_path=None):
if extra_template_dirs:
self.extra_template_dirs.extend([x.strip() for x in extra_template_dirs.split(',')])
# self.dest_dir = dest_dir
config = None
if isinstance(yaml_config, list):
config = {}
for better_be_a_dict in yaml_config:
data = deepcopy(config.get('data', {}))
for key, value in better_be_a_dict.items():
config[key] = value
for key, value in better_be_a_dict.get('data', {}).items():
data[key] = value
config['data'] = data
# TODO MAYBE SOMEDAY: Let list be a list of strings - and combine more than one YAML file the same way we combine dicts.
elif isinstance(yaml_config, dict):
config = yaml_config
else:
self.yaml_file = yaml_config
with open(self.yaml_file, 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
logging.error(f"Failed to parse YAML file {self.yaml_file}: {exc}")
raise exc
if not extra_template_dirs:
self.extra_template_dirs = config.get('mustache', ['.'])
# logging.info(f"extra_template_dirs {self.extra_template_dirs}")
if not isinstance(self.extra_template_dirs, list):
self.extra_template_dirs = [self.extra_template_dirs]
# logging.info(f"extra_template_dirs {self.extra_template_dirs}")
outdir = dest_dir
if not outdir:
outdir = config.get('outdir', 'mustached')
config['outdir'] = outdir
config['mustache_templates_dir'] = self.extra_template_dirs[0]
# logging.info(f"mustache_templates_dir {config['mustache_templates_dir']}")
if extra_template_dirs and len(extra_template_dirs) > 1:
config['extra_template_dirs'] = self.extra_template_dirs[1:]
if yaml_path:
config['yaml_path'] = yaml_path
logging.info(f"Top config: outdir {outdir}, mustache_templates_dir {config['mustache_templates_dir']}, extra_template_dirs {extra_template_dirs}, yaml_path {yaml_path}")
logging.info(f"Config: {config}")
super().__init__(config)
def make(self):
result = True
if not super().make():
result = False
return result
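    # Illustrative sketch of the list-merge behaviour in __init__ (values are
    # hypothetical): later entries override scalar keys while their 'data'
    # mappings are merged key by key:
    #
    #     maker = MustacheYou([
    #         {'outdir': 'a', 'data': {'x': 1}},
    #         {'outdir': 'b', 'data': {'y': 2}},
    #     ])
    #     # effective config: outdir == 'b', data == {'x': 1, 'y': 2}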
| 44.384615 | 177 | 0.626145 |
aced10869c098295825904ad2a81cb647b8016d8 | 4,278 | py | Python | mythril/support/truffle.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | ["MIT"] | 1 | 2018-09-07T10:17:35.000Z | 2018-09-07T10:17:35.000Z | mythril/support/truffle.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | ["MIT"] | null | null | null | mythril/support/truffle.py | yrashk/mythril | 0cea8f562726da468ab4761ff1ff3746ab0d747a | ["MIT"] | 1 | 2018-06-14T08:36:03.000Z | 2018-06-14T08:36:03.000Z |
import os
from pathlib import PurePath
import re
import sys
import json
import logging
from mythril.ether.ethcontract import ETHContract
from mythril.ether.soliditycontract import SourceMapping
from mythril.exceptions import CriticalError
from mythril.analysis.security import fire_lasers
from mythril.analysis.symbolic import SymExecWrapper
from mythril.analysis.report import Report
from mythril.ether import util
from mythril.laser.ethereum.util import get_instruction_index
def analyze_truffle_project(sigs, args):
project_root = os.getcwd()
build_dir = os.path.join(project_root, "build", "contracts")
files = os.listdir(build_dir)
for filename in files:
if re.match(r'.*\.json$', filename) and filename != "Migrations.json":
with open(os.path.join(build_dir, filename)) as cf:
contractdata = json.load(cf)
try:
name = contractdata['contractName']
bytecode = contractdata['deployedBytecode']
filename = PurePath(contractdata['sourcePath']).name
except KeyError:
print("Unable to parse contract data. Please use Truffle 4 to compile your project.")
sys.exit()
if len(bytecode) < 4:
continue
sigs.import_from_solidity_source(contractdata['sourcePath'])
sigs.write()
ethcontract = ETHContract(bytecode, name=name)
address = util.get_indexed_address(0)
sym = SymExecWrapper(ethcontract, address, args.strategy, max_depth=args.max_depth,
create_timeout=args.create_timeout, execution_timeout=args.execution_timeout)
issues = fire_lasers(sym)
if not len(issues):
if args.outform == 'text' or args.outform == 'markdown':
print("# Analysis result for " + name + "\n\nNo issues found.")
else:
result = {'contract': name, 'result': {'success': True, 'error': None, 'issues': []}}
print(json.dumps(result))
else:
report = Report()
# augment with source code
disassembly = ethcontract.disassembly
source = contractdata['source']
deployed_source_map = contractdata['deployedSourceMap'].split(";")
mappings = []
for item in deployed_source_map:
mapping = item.split(":")
if len(mapping) > 0 and len(mapping[0]) > 0:
offset = int(mapping[0])
if len(mapping) > 1 and len(mapping[1]) > 0:
length = int(mapping[1])
if len(mapping) > 2 and len(mapping[2]) > 0:
idx = int(mapping[2])
lineno = source.encode('utf-8')[0:offset].count('\n'.encode('utf-8')) + 1
mappings.append(SourceMapping(idx, offset, length, lineno))
for issue in issues:
                    index = get_instruction_index(disassembly.instruction_list, issue.address)
                    # index 0 is a valid position, so compare against None explicitly
                    if index is not None:
try:
offset = mappings[index].offset
length = mappings[index].length
issue.filename = filename
issue.code = source.encode('utf-8')[offset:offset + length].decode('utf-8')
issue.lineno = mappings[index].lineno
except IndexError:
logging.debug("No code mapping at index %d", index)
report.append_issue(issue)
if args.outform == 'json':
result = {'contract': name, 'result': {'success': True, 'error': None, 'issues': list(map(lambda x: x.as_dict, issues))}}
print(json.dumps(result))
else:
if args.outform == 'text':
print("# Analysis result for " + name + ":\n\n" + report.as_text())
elif args.outform == 'markdown':
print(report.as_markdown())
| 37.526316 | 141 | 0.539972 |
aced12a872d5d6a379db460b7a85e1ddb1394977 | 44,583 | py | Python | statsmodels/tsa/tests/results/arima112_results.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | ["BSD-3-Clause"] | 15 | 2015-03-03T09:47:42.000Z | 2022-01-05T18:28:31.000Z | statsmodels/tsa/tests/results/arima112_results.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | ["BSD-3-Clause"] | 7 | 2015-11-20T08:33:04.000Z | 2020-07-24T19:34:39.000Z | statsmodels/tsa/tests/results/arima112_results.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | ["BSD-3-Clause"] | 14 | 2015-01-06T22:08:34.000Z | 2021-01-01T16:33:23.000Z | import numpy as np
llf = np.array([-245.40783909604])
nobs = np.array([ 202])
k = np.array([ 5])
k_exog = np.array([ 1])
sigma = np.array([ .8100467417583])
chi2 = np.array([ 2153.20304012])
df_model = np.array([ 3])
k_ar = np.array([ 1])
k_ma = np.array([ 2])
params = np.array([ .92817025087557,
-.89593490671979,
1.3025011610587,
.30250063082791,
.8100467417583])
cov_params = np.array([ .00638581549851,
.0001858475428,
2.8222806545671,
.8538806860364,
-1.1429127085819,
.0001858475428,
.00132037832566,
-.14420925344502,
-.04447007102804,
.0576156187095,
2.8222806545671,
-.14420925344502,
40397.568324803,
12222.977216556,
-16359.547340433,
.8538806860364,
-.04447007102804,
12222.977216556,
3698.2722243412,
-4949.8609964351,
-1.1429127085819,
.0576156187095,
-16359.547340433,
-4949.8609964351,
6625.0231409853]).reshape(5,5)
xb = np.array([ .92817026376724,
.92817026376724,
.69511789083481,
.77192437648773,
.66135895252228,
.77525061368942,
.64687132835388,
.79659670591354,
.65842008590698,
.71215486526489,
.69971066713333,
.72092038393021,
.68201982975006,
.76510280370712,
.64253836870193,
.78239262104034,
.64609551429749,
.74087703227997,
.71774411201477,
.7119727730751,
.73067259788513,
.67785596847534,
.70898467302322,
.71334755420685,
.72984194755554,
.7017787694931,
.75292426347733,
.67507487535477,
.78219056129456,
.78040039539337,
.71250075101852,
.82028061151505,
.63505899906158,
.79452306032181,
.72773635387421,
.79555094242096,
.76685506105423,
.77427339553833,
.82101213932037,
.77917188405991,
.78917801380157,
.86641925573349,
.78457218408585,
.83697980642319,
.83281791210175,
.85224026441574,
.75030690431595,
.8551008105278,
.78025943040848,
.72790426015854,
.84552866220474,
.72061747312546,
.78669738769531,
.73868823051453,
.78071022033691,
.78002023696899,
.83737623691559,
.98988044261932,
.72882527112961,
1.2245427370071,
.85331875085831,
1.1637357473373,
.86477434635162,
1.3248475790024,
.81245219707489,
.98008638620377,
.85591268539429,
1.0162551403046,
.8165408372879,
.78947591781616,
.94166398048401,
.93266606330872,
.85924750566483,
1.1245046854019,
.75576168298721,
1.0030617713928,
.91267073154449,
1.0848042964935,
1.0778224468231,
1.1551086902618,
.97817331552505,
1.4012540578842,
1.2360861301422,
1.3335381746292,
1.4352362155914,
1.4941285848618,
.9415163397789,
1.437669634819,
1.2404690980911,
1.2285294532776,
1.3219480514526,
1.1560415029526,
.83524394035339,
.87116771936417,
1.5561962127686,
.47358739376068,
.78093349933624,
.90549737215042,
1.0217791795731,
.86397403478622,
1.1526786088943,
.87662625312805,
.95803648233414,
.89513635635376,
.85281348228455,
1.0852742195129,
.76808404922485,
.96872144937515,
1.0732915401459,
.02145584858954,
1.3687089681625,
.50049883127213,
1.3895837068558,
.6889950633049,
1.2795144319534,
.7050421833992,
1.2218985557556,
.74481928348541,
1.3074514865875,
.7919961810112,
1.2807723283768,
1.0120536088943,
1.1938916444778,
.68923074007034,
1.6174983978271,
.64740318059921,
1.4949930906296,
1.2678960561752,
1.0586776733398,
.55762887001038,
1.2790743112564,
.66515874862671,
1.2538269758224,
.70554333925247,
1.2391568422318,
.75241559743881,
1.2129040956497,
.69235223531723,
1.0785228013992,
.8043577671051,
1.0037930011749,
.78750842809677,
1.1880930662155,
.74399447441101,
1.1791603565216,
.85870295763016,
1.0032330751419,
.8019300699234,
1.1696527004242,
.92376220226288,
.99186056852341,
.94733852148056,
1.0748032331467,
.64247089624405,
.95419937372208,
.92043441534042,
.8104555606842,
.66252142190933,
1.1178470849991,
.69223344326019,
1.0570795536041,
.90239083766937,
.95320242643356,
1.0541093349457,
1.0082466602325,
1.1376332044601,
1.1841852664948,
.90440809726715,
1.2733660936356,
.66835701465607,
1.1515763998032,
.44600257277489,
.93500959873199,
1.0847823619843,
.83353632688522,
1.0442448854446,
1.077241897583,
.71010553836823,
.89557945728302,
1.0163468122482,
1.094814658165,
.89641278982162,
1.2808450460434,
1.0223702192307,
.96094745397568,
1.309353351593,
.73499941825867,
2.4902238845825,
-.2579345703125,
1.9272556304932,
.53125941753387,
.7708500623703,
1.0312130451202,
1.6360099315643,
.6022145152092,
1.6338716745377,
1.3494771718979,
1.1322995424271,
2.1901025772095,
-.72639065980911,
-.37026473879814,
1.2391144037247,
1.1353877782822])
y = np.array([np.nan,
29.908170700073,
29.84511756897,
30.121925354004,
30.031360626221,
30.315252304077,
30.196870803833,
30.5465965271,
30.498420715332,
30.52215385437,
30.619710922241,
30.70092010498,
30.722021102905,
30.975101470947,
30.862537384033,
31.162391662598,
31.086095809937,
31.220876693726,
31.407745361328,
31.461973190308,
31.670673370361,
31.627857208252,
31.728984832764,
31.833349227905,
32.009841918945,
32.08177947998,
32.33292388916,
32.325073242188,
32.662189483643,
33.060398101807,
33.162502288818,
33.670280456543,
33.535060882568,
33.894519805908,
34.127738952637,
34.495552062988,
34.866851806641,
35.17427444458,
35.721012115479,
36.079170227051,
36.489177703857,
37.16641998291,
37.584571838379,
38.136978149414,
38.732818603516,
39.352241516113,
39.65030670166,
40.255104064941,
40.68025970459,
40.827903747559,
41.445526123047,
41.620620727539,
41.986698150635,
42.238689422607,
42.580707550049,
42.98002243042,
43.537376403809,
44.689880371094,
44.928825378418,
46.824542999268,
47.653316497803,
49.263732910156,
50.164772033691,
52.324848175049,
53.112449645996,
53.980087280273,
54.855911254883,
55.916255950928,
56.616539001465,
56.889472961426,
57.941665649414,
58.832668304443,
59.55924987793,
61.124504089355,
61.555759429932,
62.603061676025,
63.612670898438,
64.984802246094,
66.577819824219,
68.255104064941,
69.478172302246,
72.001251220703,
74.236083984375,
76.533538818359,
79.435234069824,
82.39412689209,
83.541511535645,
86.137664794922,
88.440467834473,
90.32852935791,
92.82194519043,
94.556045532227,
95.235244750977,
95.871170043945,
99.056198120117,
98.573585510254,
98.680938720703,
99.705497741699,
100.82178497314,
101.66397857666,
103.25267791748,
104.17662811279,
105.0580368042,
105.99513244629,
106.55281066895,
108.08527374268,
108.46807861328,
109.46871948242,
110.97328948975,
108.72145080566,
110.86870574951,
110.70049285889,
112.78958892822,
113.38899230957,
115.0795211792,
115.70503997803,
117.22190093994,
117.94481658936,
119.80744934082,
120.69200134277,
122.48076629639,
124.11205291748,
125.69389343262,
126.08923339844,
129.11749267578,
129.54739379883,
131.99499511719,
134.66789245605,
135.75866699219,
135.6576385498,
137.47906494141,
137.86515808105,
139.55383300781,
140.10552978516,
141.73915100098,
142.45240783691,
144.01290893555,
144.49235534668,
145.57852172852,
146.40435791016,
147.30380249023,
147.98750305176,
149.58808898926,
150.14398193359,
151.67915344238,
152.65870666504,
153.6032409668,
154.30192565918,
155.86964416504,
157.02377319336,
157.99186706543,
159.14733886719,
160.47479248047,
160.54246520996,
161.35418701172,
162.42044067383,
162.81045532227,
162.86251831055,
164.31784057617,
164.59222412109,
165.75708007813,
166.80238342285,
167.65319824219,
169.15411376953,
170.30824279785,
172.03762817383,
173.88418579102,
174.80439758301,
176.87336730957,
177.06834411621,
178.55157470703,
178.04600524902,
178.63500976563,
180.38478088379,
180.83354187012,
182.24424743652,
183.67724609375,
183.91009521484,
184.59558105469,
185.91633605957,
187.39482116699,
188.29640197754,
190.38084411621,
191.82237243652,
192.76095581055,
195.10935974121,
195.43499755859,
201.69021606445,
199.14205932617,
202.62725830078,
203.23126220703,
202.67083740234,
204.60522460938,
207.55601501465,
207.94021606445,
210.76686096191,
213.84446716309,
215.12928771973,
220.80010986328,
216.16261291504,
211.80372619629,
213.91012573242,
215.60438537598])
resid = np.array([np.nan,
-.7581701874733,
-.49511715769768,
-.75192391872406,
-.49135887622833,
-.76525229215622,
-.44687059521675,
-.70659655332565,
-.68842077255249,
-.60215425491333,
-.63971120119095,
-.66091901063919,
-.51202166080475,
-.75510257482529,
-.48253855109215,
-.72239124774933,
-.60609650611877,
-.53087604045868,
-.65774464607239,
-.52197223901749,
-.7206723690033,
-.60785627365112,
-.6089842915535,
-.55334770679474,
-.62984347343445,
-.50177800655365,
-.68292456865311,
-.44507533311844,
-.38219094276428,
-.61039841175079,
-.31250306963921,
-.77027755975723,
-.4350620508194,
-.494520008564,
-.42773708701134,
-.39555323123932,
-.46685197949409,
-.27427339553833,
-.42101442813873,
-.37917038798332,
-.18917952477932,
-.36641922593117,
-.28457221388817,
-.23697751760483,
-.23281940817833,
-.45223876833916,
-.25030693411827,
-.35510078072548,
-.58026248216629,
-.22790426015854,
-.54552561044693,
-.42061823606491,
-.48669815063477,
-.43868899345398,
-.38070866465569,
-.28002023696899,
.16262374818325,
-.48988044261932,
.67117244005203,
-.02454199641943,
.44668045639992,
.0362650193274,
.83522641658783,
-.02484837733209,
-.11245145648718,
.01991361007094,
.0440888479352,
-.1162573993206,
-.51654160022736,
.11052562296391,
-.04166246205568,
-.13266679644585,
.4407517015934,
-.32450538873672,
.04423752427101,
.0969405695796,
.28733000159264,
.51519411802292,
.52217602729797,
.24489280581474,
1.1218250989914,
.99874752759933,
.96391087770462,
1.4664648771286,
1.4647653102875,
.20586840808392,
1.1584821939468,
1.062330365181,
.65953236818314,
1.1714720726013,
.57805341482162,
-.15604154765606,
-.23524549603462,
1.6288322210312,
-.95619779825211,
-.67358434200287,
.1190680116415,
.09450265020132,
-.02177914790809,
.43602138757706,
.04732597246766,
-.07663082331419,
.0419635027647,
-.29513788223267,
.44718953967094,
-.38527730107307,
.0319189876318,
.43128004670143,
-2.2732961177826,
.77854722738266,
-.66871201992035,
.69950574636459,
-.08958829939365,
.41101104021072,
-.07951752096415,
.2949578166008,
-.02190163731575,
.5551837682724,
.0925500690937,
.50799924135208,
.61922925710678,
.38794788718224,
-.29389011859894,
1.4107677936554,
-.21750450134277,
.95260292291641,
1.4050008058548,
.03210696578026,
-.65866851806641,
.54236197471619,
-.27907428145409,
.43484738469124,
-.15383619070053,
.39446276426315,
-.03915995359421,
.34759050607681,
-.21290412545204,
.00764474179596,
.02148328535259,
-.10436081886292,
-.10379911959171,
.41248852014542,
-.18809306621552,
.35601159930229,
.12084264308214,
-.05869990959764,
-.10323911905289,
.39806687831879,
.2303563952446,
-.02376830019057,
.20813637971878,
.25265842676163,
-.57480323314667,
-.14247089624405,
.14580672979355,
-.4204343855381,
-.61045861244202,
.33747857809067,
-.41785016655922,
.10776958614588,
.14291742444038,
-.1023878082633,
.44680669903755,
.14588765799999,
.59174418449402,
.66236984729767,
.01581169478595,
.7956041097641,
-.47337827086449,
.33164295554161,
-.95156413316727,
-.34601172804832,
.66499650478363,
-.38478538393974,
.36646059155464,
.35576421022415,
-.47725108265877,
-.21010553836823,
.30441749095917,
.38366231322289,
.00517613813281,
.80359941720963,
.41915187239647,
-.02237024717033,
1.039052605629,
-.409359395504,
3.7650005817413,
-2.2902269363403,
1.5579376220703,
.072744384408,
-1.3312624692917,
.90316116809845,
1.3147799968719,
-.21801064908504,
1.1927837133408,
1.7281278371811,
.15252174437046,
3.4807071685791,
-3.9110956192017,
-3.9886209964752,
.86727404594421,
.55887448787689,
.78061258792877])
yr = np.array([np.nan,
-.7581701874733,
-.49511715769768,
-.75192391872406,
-.49135887622833,
-.76525229215622,
-.44687059521675,
-.70659655332565,
-.68842077255249,
-.60215425491333,
-.63971120119095,
-.66091901063919,
-.51202166080475,
-.75510257482529,
-.48253855109215,
-.72239124774933,
-.60609650611877,
-.53087604045868,
-.65774464607239,
-.52197223901749,
-.7206723690033,
-.60785627365112,
-.6089842915535,
-.55334770679474,
-.62984347343445,
-.50177800655365,
-.68292456865311,
-.44507533311844,
-.38219094276428,
-.61039841175079,
-.31250306963921,
-.77027755975723,
-.4350620508194,
-.494520008564,
-.42773708701134,
-.39555323123932,
-.46685197949409,
-.27427339553833,
-.42101442813873,
-.37917038798332,
-.18917952477932,
-.36641922593117,
-.28457221388817,
-.23697751760483,
-.23281940817833,
-.45223876833916,
-.25030693411827,
-.35510078072548,
-.58026248216629,
-.22790426015854,
-.54552561044693,
-.42061823606491,
-.48669815063477,
-.43868899345398,
-.38070866465569,
-.28002023696899,
.16262374818325,
-.48988044261932,
.67117244005203,
-.02454199641943,
.44668045639992,
.0362650193274,
.83522641658783,
-.02484837733209,
-.11245145648718,
.01991361007094,
.0440888479352,
-.1162573993206,
-.51654160022736,
.11052562296391,
-.04166246205568,
-.13266679644585,
.4407517015934,
-.32450538873672,
.04423752427101,
.0969405695796,
.28733000159264,
.51519411802292,
.52217602729797,
.24489280581474,
1.1218250989914,
.99874752759933,
.96391087770462,
1.4664648771286,
1.4647653102875,
.20586840808392,
1.1584821939468,
1.062330365181,
.65953236818314,
1.1714720726013,
.57805341482162,
-.15604154765606,
-.23524549603462,
1.6288322210312,
-.95619779825211,
-.67358434200287,
.1190680116415,
.09450265020132,
-.02177914790809,
.43602138757706,
.04732597246766,
-.07663082331419,
.0419635027647,
-.29513788223267,
.44718953967094,
-.38527730107307,
.0319189876318,
.43128004670143,
-2.2732961177826,
.77854722738266,
-.66871201992035,
.69950574636459,
-.08958829939365,
.41101104021072,
-.07951752096415,
.2949578166008,
-.02190163731575,
.5551837682724,
.0925500690937,
.50799924135208,
.61922925710678,
.38794788718224,
-.29389011859894,
1.4107677936554,
-.21750450134277,
.95260292291641,
1.4050008058548,
.03210696578026,
-.65866851806641,
.54236197471619,
-.27907428145409,
.43484738469124,
-.15383619070053,
.39446276426315,
-.03915995359421,
.34759050607681,
-.21290412545204,
.00764474179596,
.02148328535259,
-.10436081886292,
-.10379911959171,
.41248852014542,
-.18809306621552,
.35601159930229,
.12084264308214,
-.05869990959764,
-.10323911905289,
.39806687831879,
.2303563952446,
-.02376830019057,
.20813637971878,
.25265842676163,
-.57480323314667,
-.14247089624405,
.14580672979355,
-.4204343855381,
-.61045861244202,
.33747857809067,
-.41785016655922,
.10776958614588,
.14291742444038,
-.1023878082633,
.44680669903755,
.14588765799999,
.59174418449402,
.66236984729767,
.01581169478595,
.7956041097641,
-.47337827086449,
.33164295554161,
-.95156413316727,
-.34601172804832,
.66499650478363,
-.38478538393974,
.36646059155464,
.35576421022415,
-.47725108265877,
-.21010553836823,
.30441749095917,
.38366231322289,
.00517613813281,
.80359941720963,
.41915187239647,
-.02237024717033,
1.039052605629,
-.409359395504,
3.7650005817413,
-2.2902269363403,
1.5579376220703,
.072744384408,
-1.3312624692917,
.90316116809845,
1.3147799968719,
-.21801064908504,
1.1927837133408,
1.7281278371811,
.15252174437046,
3.4807071685791,
-3.9110956192017,
-3.9886209964752,
.86727404594421,
.55887448787689,
.78061258792877])
mse = np.array([ .77732294797897,
.77732294797897,
.70387578010559,
.69261533021927,
.68906670808792,
.68708789348602,
.68558460474014,
.68429106473923,
.6831266283989,
.68206071853638,
.68107759952545,
.68016695976257,
.67932069301605,
.67853212356567,
.67779558897018,
.67710596323013,
.67645901441574,
.6758508682251,
.67527812719345,
.67473775148392,
.67422717809677,
.67374390363693,
.67328584194183,
.6728510260582,
.67243778705597,
.67204451560974,
.67166984081268,
.67131245136261,
.67097115516663,
.67064493894577,
.67033278942108,
.67003381252289,
.66974723339081,
.66947221755981,
.66920816898346,
.66895437240601,
.66871029138565,
.66847538948059,
.66824907064438,
.66803097724915,
.66782057285309,
.66761755943298,
.66742146015167,
.66723203659058,
.66704881191254,
.66687160730362,
.66670006513596,
.66653394699097,
.66637301445007,
.66621696949005,
.66606563329697,
.66591882705688,
.66577625274658,
.66563785076141,
.66550332307816,
.66537261009216,
.6652455329895,
.66512185335159,
.66500157117844,
.66488444805145,
.66477036476135,
.66465926170349,
.66455101966858,
.66444545984268,
.6643425822258,
.66424214839935,
.66414421796799,
.66404861211777,
.66395533084869,
.66386413574219,
.66377514600754,
.66368812322617,
.66360312700272,
.66351997852325,
.66343873739243,
.6633592247963,
.66328144073486,
.66320532560349,
.66313081979752,
.66305786371231,
.66298645734787,
.6629164814949,
.66284799575806,
.66278082132339,
.66271501779556,
.66265046596527,
.66258722543716,
.6625252366066,
.66246438026428,
.66240465641022,
.66234612464905,
.66228866577148,
.66223222017288,
.66217684745789,
.66212248802185,
.66206908226013,
.66201663017273,
.661965072155,
.66191446781158,
.6618646979332,
.66181582212448,
.66176778078079,
.66172051429749,
.66167408227921,
.66162836551666,
.66158348321915,
.66153925657272,
.66149580478668,
.66145300865173,
.66141092777252,
.6613695025444,
.66132873296738,
.6612885594368,
.66124904155731,
.66121011972427,
.66117179393768,
.66113406419754,
.6610968708992,
.66106027364731,
.66102415323257,
.66098862886429,
.66095358133316,
.66091907024384,
.66088503599167,
.66085147857666,
.66081839799881,
.66078579425812,
.66075360774994,
.66072189807892,
.66069066524506,
.66065979003906,
.66062939167023,
.66059935092926,
.66056972742081,
.66054052114487,
.6605116724968,
.66048324108124,
.6604551076889,
.66042739152908,
.66040003299713,
.66037303209305,
.66034632921219,
.66032004356384,
.66029399633408,
.66026836633682,
.66024297475815,
.66021794080734,
.66019320487976,
.6601687669754,
.66014462709427,
.66012072563171,
.66009718179703,
.66007387638092,
.66005086898804,
.66002810001373,
.66000562906265,
.65998339653015,
.65996146202087,
.65993976593018,
.65991830825806,
.65989708900452,
.65987610816956,
.65985536575317,
.65983480215073,
.6598145365715,
.65979450941086,
.65977466106415,
.65975499153137,
.65973562002182,
.6597164273262,
.65969741344452,
.65967857837677,
.6596599817276,
.65964162349701,
.65962338447571,
.65960538387299,
.6595875620842,
.65956991910934,
.65955245494843,
.65953516960144,
.65951806306839,
.65950113534927,
.65948438644409,
.6594677567482,
.65945136547089,
.65943509340286,
.65941900014877,
.65940302610397,
.65938723087311,
.65937161445618,
.65935611724854,
.65934079885483,
.65932559967041,
.65931057929993,
.65929567813873,
.65928089618683,
.65926629304886,
.65925180912018,
.65923744440079,
.65922319889069,
.65920913219452,
.65919518470764,
.65918135643005])
stdp = np.array([ .92817026376724,
.92817026376724,
.69511789083481,
.77192437648773,
.66135895252228,
.77525061368942,
.64687132835388,
.79659670591354,
.65842008590698,
.71215486526489,
.69971066713333,
.72092038393021,
.68201982975006,
.76510280370712,
.64253836870193,
.78239262104034,
.64609551429749,
.74087703227997,
.71774411201477,
.7119727730751,
.73067259788513,
.67785596847534,
.70898467302322,
.71334755420685,
.72984194755554,
.7017787694931,
.75292426347733,
.67507487535477,
.78219056129456,
.78040039539337,
.71250075101852,
.82028061151505,
.63505899906158,
.79452306032181,
.72773635387421,
.79555094242096,
.76685506105423,
.77427339553833,
.82101213932037,
.77917188405991,
.78917801380157,
.86641925573349,
.78457218408585,
.83697980642319,
.83281791210175,
.85224026441574,
.75030690431595,
.8551008105278,
.78025943040848,
.72790426015854,
.84552866220474,
.72061747312546,
.78669738769531,
.73868823051453,
.78071022033691,
.78002023696899,
.83737623691559,
.98988044261932,
.72882527112961,
1.2245427370071,
.85331875085831,
1.1637357473373,
.86477434635162,
1.3248475790024,
.81245219707489,
.98008638620377,
.85591268539429,
1.0162551403046,
.8165408372879,
.78947591781616,
.94166398048401,
.93266606330872,
.85924750566483,
1.1245046854019,
.75576168298721,
1.0030617713928,
.91267073154449,
1.0848042964935,
1.0778224468231,
1.1551086902618,
.97817331552505,
1.4012540578842,
1.2360861301422,
1.3335381746292,
1.4352362155914,
1.4941285848618,
.9415163397789,
1.437669634819,
1.2404690980911,
1.2285294532776,
1.3219480514526,
1.1560415029526,
.83524394035339,
.87116771936417,
1.5561962127686,
.47358739376068,
.78093349933624,
.90549737215042,
1.0217791795731,
.86397403478622,
1.1526786088943,
.87662625312805,
.95803648233414,
.89513635635376,
.85281348228455,
1.0852742195129,
.76808404922485,
.96872144937515,
1.0732915401459,
.02145584858954,
1.3687089681625,
.50049883127213,
1.3895837068558,
.6889950633049,
1.2795144319534,
.7050421833992,
1.2218985557556,
.74481928348541,
1.3074514865875,
.7919961810112,
1.2807723283768,
1.0120536088943,
1.1938916444778,
.68923074007034,
1.6174983978271,
.64740318059921,
1.4949930906296,
1.2678960561752,
1.0586776733398,
.55762887001038,
1.2790743112564,
.66515874862671,
1.2538269758224,
.70554333925247,
1.2391568422318,
.75241559743881,
1.2129040956497,
.69235223531723,
1.0785228013992,
.8043577671051,
1.0037930011749,
.78750842809677,
1.1880930662155,
.74399447441101,
1.1791603565216,
.85870295763016,
1.0032330751419,
.8019300699234,
1.1696527004242,
.92376220226288,
.99186056852341,
.94733852148056,
1.0748032331467,
.64247089624405,
.95419937372208,
.92043441534042,
.8104555606842,
.66252142190933,
1.1178470849991,
.69223344326019,
1.0570795536041,
.90239083766937,
.95320242643356,
1.0541093349457,
1.0082466602325,
1.1376332044601,
1.1841852664948,
.90440809726715,
1.2733660936356,
.66835701465607,
1.1515763998032,
.44600257277489,
.93500959873199,
1.0847823619843,
.83353632688522,
1.0442448854446,
1.077241897583,
.71010553836823,
.89557945728302,
1.0163468122482,
1.094814658165,
.89641278982162,
1.2808450460434,
1.0223702192307,
.96094745397568,
1.309353351593,
.73499941825867,
2.4902238845825,
-.2579345703125,
1.9272556304932,
.53125941753387,
.7708500623703,
1.0312130451202,
1.6360099315643,
.6022145152092,
1.6338716745377,
1.3494771718979,
1.1322995424271,
2.1901025772095,
-.72639065980911,
-.37026473879814,
1.2391144037247,
1.1353877782822])
icstats = np.array([ 202,
np.nan,
-245.40783909604,
5,
500.81567819208,
517.35701667909])
class Bunch(dict):
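    # A dict whose keys are also readable as attributes (results.llf == results['llf']).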
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| 34.506966 | 229 | 0.400781 |
aced12afbe0f38c6e0cc8db43d24541085dbd378 | 1,175 | py | Python | tests/test_integrations/test_account_view.py | jamesbrobb/dj-stripe | a16e2f48a6e5fefbc41ff92b1919a09f4ca6a32d | ["BSD-3-Clause"] | 1 | 2016-03-02T00:06:39.000Z | 2016-03-02T00:06:39.000Z | tests/test_integrations/test_account_view.py | jamesbrobb/dj-stripe | a16e2f48a6e5fefbc41ff92b1919a09f4ca6a32d | ["BSD-3-Clause"] | null | null | null | tests/test_integrations/test_account_view.py | jamesbrobb/dj-stripe | a16e2f48a6e5fefbc41ff92b1919a09f4ca6a32d | ["BSD-3-Clause"] | null | null | null | from django.conf import settings
# Only run tests if the local environment includes these items
if settings.STRIPE_PUBLIC_KEY and settings.STRIPE_SECRET_KEY:
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY
from djstripe.models import Customer
User = get_user_model()
class AccountEmailViewTests(TestCase):
def setUp(self):
self.url = reverse("djstripe:account")
self.user = User.objects.create_user(
username="testuser",
email="test@example.com",
password="123")
def test_autocreate_customer(self):
# raise Exception(settings.TEMPLATE_DIRS)
self.assertEqual(Customer.objects.count(), 0)
# simply visiting the page should generate a new customer record.
self.assertTrue(self.client.login(username=self.user.username, password="123"))
r = self.client.get(self.url)
print(r.content)
self.assertEqual(Customer.objects.count(), 1)
| 32.638889 | 91 | 0.665532 |
aced12d4260d0b3e6e52e287a240f7c1a73460ae | 7,294 | py | Python | v1/input_data.py | tai271828/musegan | 4000c74f134b51ea324ce7c250fae5fe91d5df0a | ["MIT"] | 1,119 | 2017-11-06T02:29:10.000Z | 2022-03-29T21:15:50.000Z | v1/input_data.py | tai271828/musegan | 4000c74f134b51ea324ce7c250fae5fe91d5df0a | ["MIT"] | 126 | 2018-01-15T14:54:43.000Z | 2022-03-12T07:09:07.000Z | v1/input_data.py | tai271828/musegan | 4000c74f134b51ea324ce7c250fae5fe91d5df0a | ["MIT"] | 289 | 2017-12-12T05:50:26.000Z | 2022-03-25T07:19:01.000Z | from __future__ import print_function
import numpy as np
import os
import SharedArray as sa
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class InputData:
def __init__(self, model, batch_size=64):
self.model = model # to get endpoint
self.batch_size = batch_size
self.z = dict()
self.x = dict()
def add_data(self, path_new, key='train'):
self.x[key] = np.load(path_new)
print('data size:', self.x[key].shape)
def add_data_sa(self, path_new, key='train'):
self.x[key] = sa.attach(path_new)
print('data size:', self.x[key].shape)
def add_data_np(self, data, key='train'):
self.x[key] = data
print('data size:', self.x[key].shape)
def get_batch_num(self, key='train'):
return len(self.x[key]) // self.batch_size
def get_batch(self, idx=0, data_size=None, key='train'):
data_size = self.batch_size if data_size is None else data_size
st = self.batch_size*idx
return self.x[key][st:st+data_size] * 2. - 1.
def get_rand_smaples(self, sample_size=64, key='train'):
random_idx = np.random.choice(len(self.x[key]), sample_size, replace=False)
return self.x[key][random_idx]*2. - 1.
def gen_feed_dict(self, idx=0, data_size=None, key='train', z=None):
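        # Build the noise feed dict and, when a data key is given, also feed the
        # next batch of real samples (get_batch maps each x to 2x - 1).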
batch_size = self.batch_size if data_size is None else data_size
feed_dict = self.gen_z_dict(data_size=data_size, z=z)
if key is not None:
x = self.get_batch(idx, data_size, key)
feed_dict[self.model.x] = x
return feed_dict
#######################################################################################################################
# Image
#######################################################################################################################
class InputDataMNIST(InputData):
dataset_dir = 'dataset/mnist/original'
def __init__(self, model, batch_size=64):
self.model = model # to get endpoint
self.batch_size = batch_size
self.x = dict()
mnist = input_data.read_data_sets(self.dataset_dir, one_hot = True)
self.add_data_np(mnist.train.images.reshape((-1,28,28,1)), 'train')
self.add_data_np(mnist.test.images.reshape((-1,28,28,1)), 'test')
def gen_feed_dict(self, idx=0, data_size=None, key='train'):
batch_size = self.batch_size if data_size is None else data_size
        z = np.random.uniform(-1., 1., size=(batch_size, self.model.z_dim)).astype(np.float32)
x = self.get_batch(idx, data_size, key)
feed_dict = {self.model.z: z, self.model.x: x}
return feed_dict
#######################################################################################################################
# Music
#######################################################################################################################
# Nowbar
class InputDataNowBarHybrid(InputData):
def gen_z_dict(self, data_size=None, z=None):
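        # Hybrid model: one shared "inter" noise vector per sample plus a
        # separate "intra" noise vector for each track.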
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['inter']= np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
self.z['intra'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
z_dict = {self.model.z_intra: self.z['intra'], self.model.z_inter:self.z['inter']}
return z_dict
class InputDataNowBarJamming(InputData):
def gen_z_dict(self, data_size=None, z=None):
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['intra'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
z_dict = {self.model.z_intra: self.z['intra']}
return z_dict
class InputDataNowBarComposer(InputData):
def gen_z_dict(self, data_size=None, z=None):
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['inter'] = np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
z_dict = {self.model.z_inter: self.z['inter']}
return z_dict
# temporal
class InputDataTemporalHybrid(InputData):
def gen_z_dict(self, data_size=None, z=None):
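        # Temporal model: draws inter/intra noise for both streams (suffixes
        # _v and _i), matching the model's four placeholders.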
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['z_intra_v'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
self.z['z_intra_i'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
self.z['z_inter_v'] = np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
self.z['z_inter_i'] = np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
feed_dict = {self.model.z_intra_v: self.z['z_intra_v'], self.model.z_intra_i: self.z['z_intra_i'],
self.model.z_inter_v: self.z['z_inter_v'], self.model.z_inter_i: self.z['z_inter_i']}
return feed_dict
class InputDataTemporalJamming(InputData):
def gen_z_dict(self, data_size=None, z=None):
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['z_intra_v'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
self.z['z_intra_i'] = np.random.normal(0, 0.1, [batch_size, self.model.z_intra_dim, self.model.track_dim]).astype(np.float32)
feed_dict = {self.model.z_intra_v: self.z['z_intra_v'], self.model.z_intra_i: self.z['z_intra_i']}
return feed_dict
class InputDataTemporalComposer(InputData):
def gen_z_dict(self, idx=0, data_size=None, z=None):
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['z_inter_v'] = np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
self.z['z_inter_i'] = np.random.normal(0, 0.1, [batch_size, self.model.z_inter_dim]).astype(np.float32)
feed_dict = {self.model.z_inter_v: self.z['z_inter_v'], self.model.z_inter_i: self.z['z_inter_i']}
return feed_dict
class InputDataRNNComposer(InputData):
def gen_feed_dict(self, idx=0, data_size=None, z=None):
batch_size = self.batch_size if data_size is None else data_size
if z is not None:
self.z = z
else:
self.z = dict()
self.z['z_inter'] = np.random.normal(0, 0.1, [batch_size, self.model.output_bar, self.model.z_inter_dim]).astype(np.float32)
feed_dict = {self.model.z_inter: self.z['z_inter']}
return feed_dict
| 42.654971 | 137 | 0.59734 |
aced13f30f75cd46f442dc850184d73a35187243 | 37,430 | py | Python | cat/code.py | cleiver/codeandtalk.com | 132cadddcfc97924969af68130baea2c3abd18cb | ["Apache-2.0"] | 60 | 2016-11-24T13:38:03.000Z | 2022-03-23T06:57:13.000Z | cat/code.py | cleiver/codeandtalk.com | 132cadddcfc97924969af68130baea2c3abd18cb | ["Apache-2.0"] | 384 | 2016-11-24T13:50:58.000Z | 2022-03-30T14:57:35.000Z | cat/code.py | cleiver/codeandtalk.com | 132cadddcfc97924969af68130baea2c3abd18cb | ["Apache-2.0"] | 167 | 2016-11-24T10:20:25.000Z | 2022-03-25T10:23:38.000Z | import csv
import copy
from datetime import datetime
import glob
import json
import os
import re
import urllib
import sys
#import string
from jinja2 import Environment, PackageLoader
import logging
from cat import tools
class CATerror(Exception):
pass
def read_chars():
tr = {}
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(root, 'cat', 'chars.csv'), encoding="utf-8") as fh:
rd = csv.reader(fh, delimiter=',')
for row in rd:
tr[row[0]] = row[1]
return tr
tr = read_chars()
def topic2path(tag):
t = tag.lower()
if t == 'c++':
return t
#t = t.translate(string.maketrans("abc", "def"))
for k in tr.keys():
t = re.sub(k, tr[k], t)
t = re.sub(r'[.+ ()&/:]', '-', t)
if re.search(r'[^a-z0-9-]', t):
raise CATerror("Characters of '{}' need to be mapped in 'cat/chars.csv'".format(t))
t = re.sub(r'[^a-z0-9]+', '-', t)
return t
def html2txt(html):
#text = re.sub(r'<a\s+href="[^"]+">([^<]+)</a>', '$1', html)
text = re.sub(r'</?[^>]+>', '', html)
return text
class GenerateSite(object):
def __init__(self):
logging.basicConfig(filename='generate.log', level=logging.DEBUG)
self.errors = []
self.root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.now = datetime.now().strftime('%Y-%m-%d')
self.people = {}
self.redirects = []
self.people_search = {}
self.tags = {}
self.blasters = []
self.html = os.path.join(self.root, 'html')
self.data = os.path.join(self.root, 'data')
if 'CAT_TEST' in os.environ:
self.data = os.path.join(self.root, os.environ['CAT_TEST'])
self.featured_by_blaster = {}
self.featured_by_date = {}
self.events = {}
self.stats = {
'has_coc' : 0,
'has_coc_future' : 0,
'has_a11y' : 0,
'has_a11y_future' : 0,
'has_diversity_tickets' : 0,
'has_diversity_tickets_future' : 0,
'cities' : {},
'countries' : {},
#'tags' : {},
}
with open(os.path.join(self.data, 'locations.json'), encoding="utf-8") as fh:
self.locations = json.load(fh)
def read_all(self):
self.read_sources()
self.read_tags()
self.read_blasters()
self.read_events()
self.read_series()
self.read_people()
self.read_videos()
self.read_podcast_episodes()
def process_videos(self):
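        # Derive a plain-text short_description (capped at 128 chars) from each
        # video's HTML description.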
for video in self.videos:
short_description = html2txt(video.get('description', ''))
short_description = re.sub(r'"', '', short_description)
short_description = re.sub(r'\s+', ' ', short_description)
video['short_description'] = short_description
limit = 128
if len(short_description) > 128:
video['short_description'] = short_description[0:limit]
def generate_site(self):
self.read_all()
self.stats['podcasts'] = len(self.sources)
self.stats['people'] = len(self.people)
self.stats['episodes'] = sum(len(x['episodes']) for x in self.sources)
self.check_people()
self.check_videos()
self.process_videos()
cat = {
'people' : copy.deepcopy(self.people),
'videos' : copy.deepcopy(self.videos),
'blasters' : copy.deepcopy(self.blasters),
}
self.preprocess_events()
cat['events'] = copy.deepcopy(self.events)
cat['tags'] = copy.deepcopy(self.tags)
cat['stats'] = copy.deepcopy(self.stats)
cat['series'] = copy.deepcopy(self.series)
cat['podcasts'] = copy.deepcopy(self.sources)
self.save_all(cat)
def save_all(self, cat):
with open(os.path.join(self.html, 'cat.json'), 'w', encoding="utf-8") as fh:
json.dump(cat, fh)
if len(sys.argv) > 1:
for e in cat.keys():
with open(os.path.join(self.html, e + '.json'), 'w', encoding="utf-8") as fh:
json.dump(cat[e], fh)
def read_sources(self):
self.sources = []
sources_file = os.path.join(self.data, 'sources.json')
if os.path.exists(sources_file):
with open(sources_file, encoding="utf-8") as fh:
self.sources = json.load(fh)
else:
logging.info('sources file {} is missing'.format(sources_file))
def read_tags(self):
with open(os.path.join(self.data, 'tags.json'), encoding="utf-8") as fh:
self.tags = json.load(fh)
for tag in self.tags:
self.tags[ tag ]['episodes'] = []
for f in ['videos', 'total', 'future']:
self.tags[ tag ][f] = 0
return
def read_blasters(self):
blasters_file = os.path.join(self.data, 'blasters.csv')
if os.path.exists(blasters_file):
with open(blasters_file, encoding="utf-8") as fh:
rd = csv.DictReader(fh, delimiter=';')
for row in rd:
self.blasters.append(row)
else:
logging.info('blasters file {} is missing'.format(blasters_file))
return
def read_events(self):
for filename in glob.glob(os.path.join(self.data, 'events', '*')):
logging.info('processing {}'.format(filename))
if filename[-5:] != '.json':
self.errors.append('ERROR 7: filename is not .json file. "{}"'.format(filename))
continue
if filename[len(self.data):] != filename[len(self.data):].lower():
self.errors.append('ERROR 6: filename is not all lower case. "{}"'.format(filename))
if not re.search('^[a-z0-9-]+\.json$', os.path.basename(filename)):
self.errors.append('ERROR 8: filename has characters that are not accepted (a-z0-9-). "{}"'.format(os.path.basename(filename)))
#print("Reading {}".format(filename))
conf = {}
try:
with open(filename, encoding="utf-8") as fh:
this = json.load(fh)
nickname = os.path.basename(filename)
nickname = nickname[0:-5]
#print(nickname)
this['nickname'] = nickname
this['file_date'] = datetime.fromtimestamp( os.path.getctime(filename) ).strftime('%Y-%m-%d')
self.check_dates(this, filename)
# verify year in filename. Won't be needed after the merge of series
event_year = this['event_start'][0:4]
try:
nickname.index(event_year)
except ValueError:
self.errors.append('ERROR 9: Invalid file name. Should contain the year "{}". In file "{}".'.format(event_year, filename))
self.check_fields(this, filename)
self.check_name(this, filename)
self.check_website(this, filename)
self.check_diversity(this, filename)
self.check_social(this, filename)
self.check_location(this, filename)
self.check_tags(this, filename)
self.check_comments(this, filename)
self.events[ this['nickname'] ] = this
except json.decoder.JSONDecodeError as e:
self.errors.append("ERROR 54: JSON Parsing error in {}. Details: {}".format(filename, e))
except CATerror:
raise
# except Exception as e:
# raise CATerror("ERROR 1: Unhandled error: {} in file {}".format(e, filename))
if self.errors != []:
raise CATerror('\n'.join(self.errors))
return
def check_fields(self, this, filename):
valid_fields = set([
'accessibility',
'cfp_end',
'cfp_class', # ???
'code_of_conduct',
'comment',
'description',
'diversitytickets',
'diversitytickets_text',
'diversitytickets_url',
'event_end',
'event_start',
'facebook',
'file_date', # added
'github',
'hashtag',
'languages',
'location',
'name',
'nickname',
'private_comments',
'tags',
'twitter',
'youtube',
'videos_url',
'vimeo',
'website',
])
current_fields = set(this.keys())
if not current_fields.issubset(valid_fields):
self.errors.append('ERROR 52: Invalid fields {}. {}'.format(current_fields - valid_fields, filename))
def check_comments(self, this, filename):
if 'private_comments' in this:
if this['private_comments'].__class__.__name__ != 'str':
self.errors.append('ERROR 51: The "private_comments" field must be a simple string. {}'.format(filename))
def check_name(self, this, filename):
if 'name' not in this or this['name'] == '':
self.errors.append('ERROR 15: Missing or empty "name" field in {}'.format(filename))
return
if re.search(r'\d\d\d\d\s*$', this['name']):
self.errors.append('ERROR 16: The conference "name" should not include the year. Seen in {}'.format(filename))
def check_website(self, this, filename):
if 'website' not in this or not re.search(r'^https?://.{8}', this['website']):
self.errors.append('ERROR 17: Missing or invalid "website" field in {}'.format(filename))
def check_dates(self, this, filename):
date_format = r'^\d\d\d\d-\d\d-\d\d$'
for f in ['event_start', 'event_end', 'cfp_end']:
if f in this and this[f] and not re.search(date_format, this[f]):
self.errors.append('ERROR 22: Invalid {} {} in {}'.format(f, this[f], filename))
start_date = datetime.strptime(this['event_start'], '%Y-%m-%d')
end_date = datetime.strptime(this['event_end'], '%Y-%m-%d')
if end_date < start_date :
self.errors.append('ERROR 23: Invalid event dates (Start after End) in {}'.format(filename))
if 'cfp_end' in this and this['cfp_end']:
cfp_date = datetime.strptime(this['cfp_end'], '%Y-%m-%d')
if cfp_date > start_date:
self.errors.append('ERROR 24: Invalid CFP date (CFP after Start) in {}'.format(filename))
this['cfp_class'] = 'cfp_none'
cfp = this.get('cfp_end', '')
if cfp != '':
if cfp < self.now:
this['cfp_class'] = 'cfp_past'
else:
this['cfp_class'] = 'cfp_future'
def check_diversity(self, this, filename):
diversity = this.get('diversitytickets')
if diversity:
if not re.search(r'^\d+$', diversity):
self.errors.append('ERROR 25: diversitytickets must be a number. Use diversitytickets_url and diversitytickets_text for alternatives {} in {}'.format(this, filename))
code_of_conduct = this.get('code_of_conduct')
if code_of_conduct:
if this['code_of_conduct'].__class__.__name__ != 'str':
self.errors.append('ERROR 631: "code_of_conduct" needs to be a single string and not {} in {}'.format(this['code_of_conduct'], filename))
return
def check_social(self, this, filename):
if 'twitter' in this and this['twitter'] != '':
if not re.search(r'^[a-zA-Z0-9_]+$', this['twitter']):
self.errors.append('ERROR 26: Invalid twitter handle "{}" in {}'.format(this['twitter'], filename))
if 'youtube' in this and this['youtube'] != '' and this['youtube'] != '-':
#if not re.search(r'^P[a-zA-Z0-9_-]+$', this['youtube']):
if re.search(r'https?://', this['youtube']):
self.errors.append('ERROR 27: Invalid youtube playlist "{}" in {}'.format(this['youtube'], filename))
if 'facebook' in this and this['facebook'] != '':
if not re.search(r'^https?://www.facebook.com/', this['facebook']):
self.errors.append('ERROR 28: Invalid facebook entry "{}" in {}. Include entire Facebook URL.'.format(this['facebook'], filename))
if 'hashtag' in this and this['hashtag'] != '':
if this['hashtag'].__class__.__name__ != 'str':
self.errors.append('ERROR 531: Hashtag needs to be a single string and not {} in {}'.format(this['hashtag'], filename))
return
if not re.search(r'^[אפa-zA-Z0-9_]+$', this['hashtag']):
self.errors.append('ERROR 53: Invalid hashtag handle "{}" in {}'.format(this['hashtag'], filename))
return
def check_location(self, this, filename):
if 'location' not in this or not this['location']:
self.errors.append('ERROR 21: The "location" field is missing. See docs/EVENTS.md. In file {}.'.format(filename))
return
location = this['location']
if 'city' not in location or not location['city']:
self.errors.append('ERROR 18: The "city" field is missing. See docs/EVENTS.md. In file {}.'.format(filename))
if not 'country' in location or not location['country']:
self.errors.append('ERROR 20: The "country" field is missing. See docs/EVENTS.md. In file {}.'.format(filename))
return
if location['country'] not in self.locations:
self.errors.append('ERROR 13: The value of country "{}" is not in our list. If this was not a typo, add it to data/locations.json. Found in {}'.format(location['country'], filename))
return
city_name = '{}, {}'.format(location['city'], location['country'])
city_page = topic2path('{} {}'.format(location['city'], location['country']))
# In some countries we require a state:
# verify that the country/state/city exists as required and they are from the expected values
if location['country'] in ['Australia', 'Brasil', 'Canada', 'India', 'USA', 'UK']:
if 'state' not in location or not location['state']:
self.errors.append('ERROR 19: The "state" field is missing. See docs/EVENTS.md. In file {}.'.format(filename))
else:
if location['state'] not in self.locations[ location['country'] ]:
self.errors.append('ERROR 12: The value of state "{}" is not in our list. If this was not a typo, add it to data/locations.json. Found in {}'.format(location['state'], filename))
else:
if location['city'] not in self.locations[ location['country'] ][ location['state'] ]:
self.errors.append('ERROR 10: The value of city "{}" is not in our list. If this was not a typo, add it to data/locations.json. Found in {}'.format(location['city'], filename))
city_name = '{}, {}, {}'.format(location['city'], location['state'], location['country'])
city_page = topic2path('{} {} {}'.format(location['city'], location['state'], location['country']))
else:
#if 'state' in location and location['state']:
# self.errors.append('State {} should not be in {}'.format(location['state'], this))
if location['city'] not in self.locations[ location['country'] ]:
self.errors.append('ERROR 11: The value of city "{}" is not in our list. If this was not a typo, add it to data/locations.json. Found in {}'.format(location['city'], filename))
this['city_name'] = city_name
this['city_page'] = city_page
if city_page not in self.stats['cities']:
self.stats['cities'][city_page] = {
'name' : city_name,
'total' : 0,
'future' : 0,
}
self.stats['cities'][city_page]['total'] += 1
if this['event_start'] >= self.now:
self.stats['cities'][city_page]['future'] += 1
country_name = location['country']
country_page = re.sub(r'\s+', '-', country_name.lower())
this['country_page'] = country_page
if country_page not in self.stats['countries']:
self.stats['countries'][country_page] = {
'name' : country_name,
'total' : 0,
'future' : 0,
}
self.stats['countries'][country_page]['total'] += 1
if this['event_start'] >= self.now:
self.stats['countries'][country_page]['future'] += 1
def check_tags(self, this, filename):
my_topics = []
#print(this)
if 'tags' not in this:
            self.errors.append('ERROR 29: tags missing from {}'.format(filename))

return
for t in this['tags']:
if t not in self.tags:
self.errors.append('ERROR 14: Tag "{}" is not in the list of tags found in data/tags.json. Check for typo. Add new tags if missing from our list. in file {}'.format(t, filename))
my_topics.append({
'name' : t,
'path' : t,
})
this['topics'] = my_topics
for tag in this['topics']:
p = tag['path']
if p not in self.tags:
self.errors.append('ERROR 30: Missing tag "{}"'.format(p))
continue
#self.tags[p]['events'].append(this)
self.tags[p]['total'] += 1
if this['event_start'] >= self.now:
self.tags[p]['future'] += 1
#self.stats['tags'][p]['total'] += 1
#if this['event_start'] >= self.now:
# self.stats['tags'][p]['future'] += 1
def read_people(self):
path = os.path.join(self.data, 'people')
for filename in glob.glob(os.path.join(path, '*.json')):
if filename[len(self.root):] != filename[len(self.root):].lower():
self.errors.append('ERROR 31: filename is not all lower case. "{}"'.format(filename))
try:
this = {}
nickname = os.path.basename(filename)
nickname = nickname[0:-5]
with open(filename, encoding="utf-8") as fh:
this = json.load(fh)
for f in this:
if f == 'description':
continue
if f == 'topics':
for t in this[f]:
if re.search(r'\s\Z', t):
self.errors.append('ERROR 32: Trailing space in "{}" {}'.format(f, filename))
if re.search(r'\A\s', t):
self.errors.append('ERROR 33: Leading space in "{}" {}'.format(f, filename))
continue
if re.search(r'\s\Z', this[f]):
self.errors.append('ERROR 34: Trailing space in "{}" {}'.format(f, filename))
if re.search(r'\A\s', this[f]):
self.errors.append('ERROR 35: Leading space in "{} {}'.format(f, filename))
if 'redirect' in this:
self.redirects.append({
'from' : nickname,
'to' : this['redirect'],
})
continue
for field in ['twitter', 'github', 'home']:
if field not in this:
#print("WARN: {} missing for {}".format(field, nickname))
pass
elif this[field] == '-':
this[field] = None
self.people[nickname] = {
'info': this,
#'episodes' : [],
#'hosting' : [],
#'videos' : [],
#'file_date' : datetime.fromtimestamp( os.path.getctime(filename) ).strftime('%Y-%m-%d'),
}
person = {
'name' : this['name'],
}
if 'country' in this:
person['location'] = this['country']
self.people_search[nickname] = person
except CATerror:
raise
except Exception as e:
self.errors.append("ERROR 2: Unhanded error: {} in file {}".format(e, filename))
return
def read_series(self):
with open(os.path.join(self.data, 'series.json'), encoding="utf-8") as fh:
self.series = json.load(fh)
for s in self.series:
if s == '':
raise CATerror('ERROR 36: Empty key in series {}'.format(self.series[s]))
def read_videos(self):
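        # Load every data/videos/<event>/*.json talk record, folding in the
        # optional sibling .html file as the description.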
self.videos = []
path = os.path.join(self.data, 'videos')
if not os.path.exists(path):
return;
events = os.listdir(path)
for event in events:
dir_path = os.path.join(path, event)
for video_file_path in glob.glob(os.path.join(dir_path, '*.json')):
video_file = os.path.basename(video_file_path)
html_file_path = video_file_path[0:-4] + 'html'
with open(video_file_path, encoding="utf-8") as fh:
try:
video = json.load(fh)
video['filename'] = video_file[0:-5]
video['event'] = event
video['file_date'] = datetime.fromtimestamp( os.path.getctime(video_file_path) ).strftime('%Y-%m-%d')
if os.path.exists(html_file_path):
with open(html_file_path, encoding="utf-8") as hfh:
video['description'] = hfh.read()
self.videos.append(video)
except Exception as e:
raise CATerror('ERROR 37: There was an exception reading {}\n{}'.format(video_file_path, e))
# Make sure we have a length field
if 'length' not in video:
raise CATerror('ERROR 38: Video {}/{}.json was featured but has no length'.format(self.events[ video['event'] ]['nickname'], video['filename']))
if video['length'] == '':
raise CATerror('ERROR 39: Video {}/{}.json was featured but had empty length'.format(self.events[ video['event'] ]['nickname'], video['filename']))
video['l'] = tools.in_sec(video['length'])
if 'tags' in video:
tags = []
for t in video['tags']:
p = topic2path(t)
tags.append({
'text': t,
'link': p,
})
video['tags'] = tags
self.stats['videos'] = len(self.videos)
return
def read_podcast_episodes(self):
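        # Load each podcast's episode list and normalize episode tags into
        # {text, link} pairs, indexing episodes under self.tags.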
self.episodes = []
for src in self.sources:
#print("Processing source {}".format(src['name']))
file = os.path.join(self.data, 'podcasts', src['name'] + '.json')
src['episodes'] = []
if os.path.exists(file):
with open(file, encoding="utf-8") as fh:
try:
new_episodes = json.load(fh)
for episode in new_episodes:
episode['source'] = src['name']
if 'ep' not in episode:
#print("WARN ep missing from {} episode {}".format(src['name'], episode['permalink']))
pass
self.episodes.extend(new_episodes)
src['episodes'] = new_episodes
except json.decoder.JSONDecodeError as e:
raise CATerror("ERROR 3: Could not read in {} {}".format(file, e))
for e in self.episodes:
#print(e)
if 'tags' in e:
tags = []
for tag in e['tags']:
path = topic2path(tag)
if path not in tags:
tags.append({
'text' : tag,
'link' : path,
})
if path not in self.tags:
raise CATerror('ERROR 40: Missing tag "{}"'.format(path))
self.tags[path]['episodes'].append(e)
e['tags'] = tags
def _add_events_to_series(self):
'''
Go over all the events and based on the longest matching prefix of their filenames,
put them in one of the entries in the series.
To each event add the name of the series it is in.
TODO: In the future we might add an exception if an event is not in any of the series.
'''
for s in self.series.keys():
self.series[s]['events'] = []
other = []
for nickname in self.events.keys():
e = self.events[nickname]
event = {
'nickname' : e['nickname'],
'name' : e['name'],
'event_start' : e['event_start'],
}
for s in sorted(self.series.keys(), key=lambda s: len(s), reverse=True):
l = len(s)
if event['nickname'][0:l] == s:
self.series[s]['events'].append(event)
event['series'] = s
e['series'] = s
break
else:
#TODO: create series for every event and then turn on the exception?
#print("Event without series: {}".format(event['nickname']))
#self.errors.append('ERROR 41: Event without series: {}'.format(event['nickname']))
other.append(event)
for s in self.series.keys():
self.series[s]['events'].sort(key=lambda x: x['event_start'])
if self.series[s]['events'] == []:
self.errors.append('ERROR 91: Series without event: {}'.format(s))
if self.errors != []:
raise CATerror('\n'.join(self.errors))
#self.event_in_series = {}
#for e in self.series[s]['events']:
# self.event_in_series[ e['nickname'] ] = s
def _process_videos(self):
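        # Attach slim event records to each video, bucket featured videos by
        # date and by blaster, and cross-link videos to speakers and tags.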
for b in self.blasters:
self.featured_by_blaster[ b['file'] ] = []
for video in self.videos:
video['event'] = {
'name' : self.events[ video['event'] ]['name'],
'nickname' : self.events[ video['event'] ]['nickname'],
'website' : self.events[ video['event'] ]['website'],
'twitter' : self.events[ video['event'] ]['twitter'],
}
# collect featured videos
featured = video.get('featured')
blasters = video.get('blasters', [])
if featured:
class_name = ''
if video['featured'] == self.now:
class_name = 'today_feature'
elif video['featured'] > self.now:
class_name = 'future_feature'
this_video = {
'class_name' : class_name,
'blasters' : video['blasters'],
'featured' : video['featured'],
'recorded' : video['recorded'],
'filename' : video['filename'],
'length' : video['length'],
'title' : video['title'],
'event' : {
'nickname' : video['event']['nickname'],
'name' : video['event']['name'],
},
}
if featured not in self.featured_by_date:
self.featured_by_date[featured] = []
self.featured_by_date[featured].append(this_video)
if len(blasters) == 0:
raise CATerror('ERROR 42: featured without blaster data/videos/{}/{}.json'.format(video['event']['nickname'], video['filename']))
for b in blasters:
if b not in self.featured_by_blaster:
self.featured_by_blaster[ b ] = []
#TODO mark these:
#print("Blaster {} is used but not in the blaster list".format(b))
self.featured_by_blaster[b].append(this_video)
speakers = {}
for s in video['speakers']:
if s in self.people:
speakers[s] = self.people[s]
if 'videos' not in self.people[s]:
self.people[s]['videos'] = []
self.people[s]['videos'].append({
'recorded' : video['recorded'],
'title' : video['title'],
# short_description
'event' : video['event'],
'filename' : video['filename'],
'thumbnail_url' : video['thumbnail_url'],
})
if not 'tags' in self.people_search[s]:
self.people_search[s]['tags'] = set()
if 'tags' in video:
for t in video['tags']:
self.people_search[s]['tags'].add(t['link'])
#else:
# TODO: shall we requre tags for each video?
else:
raise CATerror('ERROR 43: Missing people file for "{}" in {}/videos/{}/{}.json'.format(s, self.data, video['event']['nickname'], video['filename']))
video['speakers'] = speakers
if 'tags' in video:
for t in video['tags']:
p = t['link']
if p not in self.tags:
raise CATerror('ERROR 44: Missing tag "{}"'.format(p))
self.tags[p]['videos'] += 1
#print(self.featured_by_blaster)
def _process_podcasts(self):
for e in self.episodes:
if 'guests' in e:
for g in e['guests']:
if g not in self.people:
raise CATerror("ERROR 4: '{}' is not in the list of people".format(g))
if 'episodes' not in self.people[g]:
self.people[g]['episodes'] = []
self.people[g]['episodes'].append(e)
if 'hosts' in e:
for h in e['hosts']:
if h not in self.people:
raise CATerror("ERROR 5: '{}' is not in the list of people".format(h))
if 'hosting' not in self.people[h]:
self.people[h]['hosting'] = []
self.people[h]['hosting'].append(e)
def _process_events(self):
self.event_videos = {}
for v in self.videos:
if v['event']['nickname'] not in self.event_videos:
self.event_videos[ v['event']['nickname'] ] = []
self.event_videos[ v['event']['nickname'] ].append(v)
for nickname in self.events.keys():
event = self.events[nickname]
if event['nickname'] in self.event_videos:
event['videos'] = self.event_videos[ event['nickname'] ]
if event.get('diversitytickets'):
self.stats['has_diversity_tickets'] += 1
if event['event_start'] >= self.now:
self.stats['has_diversity_tickets_future'] += 1
if event.get('code_of_conduct'):
self.stats['has_coc'] += 1
if event['event_start'] >= self.now:
self.stats['has_coc_future'] += 1
if event.get('accessibility'):
                self.stats['has_a11y'] += 1
if event['event_start'] >= self.now:
self.stats['has_a11y_future'] += 1
if 'cfp_end' in event and event['cfp_end'] >= self.now:
tweet_cfp = 'The CfP of {} ends on {} see {} via https://codeandtalk.com/'.format(event['name'], event['cfp_end'], event['website'])
if event['twitter']:
tweet_cfp += ' @' + event['twitter']
for t in event['topics']:
tweet_cfp += ' #' + t['name']
event['tweet_cfp'] = urllib.parse.quote(tweet_cfp)
tweet_me = event['name']
tweet_me += ' on ' + event['event_start']
tweet_me += ' in ' + event['location']['city']
if 'state' in event:
tweet_me += ', ' + event['location']['state']
tweet_me += ' ' + event['location']['country']
if event['twitter']:
tweet_me += ' @' + event['twitter']
tweet_me += " " + event['website']
for t in event['topics']:
tweet_me += ' #' + t['name']
#tweet_me += ' via @codeandtalk'
tweet_me += ' via https://codeandtalk.com/'
event['tweet_me'] = urllib.parse.quote(tweet_me)
def preprocess_events(self):
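        # Compute aggregate counts, then cross-link series, videos, podcasts
        # and events before deriving the percentage stats.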
self.stats['total'] = len(self.events)
self.stats['future'] = len(list(filter(lambda x: x['event_start'] >= self.now, self.events.values())))
self.stats['cfp'] = len(list(filter(lambda x: x.get('cfp_end', '') >= self.now, self.events.values())))
self._add_events_to_series()
self._process_videos()
self._process_podcasts()
self._process_events()
if self.stats['future'] == 0:
self.stats['coc_future_perc'] = 0
self.stats['diversity_tickets_future_perc'] = 0
self.stats['a11y_future_perc'] = 0
else:
self.stats['coc_future_perc'] = int(100 * self.stats['has_coc_future'] / self.stats['future'])
self.stats['diversity_tickets_future_perc'] = int(100 * self.stats['has_diversity_tickets_future'] / self.stats['future'])
self.stats['a11y_future_perc'] = int(100 * self.stats['has_a11y_future'] / self.stats['future'])
return
def check_videos(self):
"""
Go over all the JSON files representing videos and check validity:
- Check if they have a "recorded" field with a YYYY-MM-DD timestamp - report if not
TODO: Check if they have values for "speakers" - report if not
TODO: Check if they have embedded HTML in the description field (they should be moved out to a separate file)
"""
valid_fields = ['title', 'thumbnail_url', 'tags', 'recorded', 'description', 'videos', 'speakers', 'abstract', 'slides', 'language', 'featured', 'length', 'blasters',
'views', 'likes', 'favorite', 'skipped', 'l']
valid_fields.extend(['filename', 'event', 'file_date']) # generated fields
required_fields = ['title', 'recorded']
valid_languages = ["Hebrew", "Dutch", "Spanish", "Portuguese", "German", "French"]
for video in self.videos:
for f in video.keys():
if f not in valid_fields:
raise CATerror('ERROR 45: Invalid field "{}" in {}'.format(f, video))
for f in required_fields:
if f not in video:
                    raise CATerror('ERROR 46: Missing required field: "{}" in {}'.format(f, video))
if not re.search(r'^\d\d\d\d-\d\d-\d\d$', video['recorded']):
raise CATerror('ERROR 47: Invalid "recorded" field: {:20} in {}'.format(video['recorded'], video))
if 'language' in video:
if video['language'] not in valid_languages:
raise CATerror('ERROR 47: Invalid language "{}" in video data/videos/{}/{}.json'.format(video['language'], video['event'], video['filename']))
video['title'] += ' (' + video['language'] + ')'
if 'length' in video and video['length'] != "":
if not re.search(r'(\d?\d:)\d\d$', video['length']):
raise CATerror('ERROR 48: Invalid format in length field "{}" in data/videos/{}/{}.json'.format(video['length'], video['event'], video['filename']))
def check_people(self):
"""
Go over all the files in the data/people directory and check if all the fields are in the list of valid_fields
"""
valid_fields = ['name', 'github', 'twitter', 'home', 'country', 'gplus', 'nickname', 'city', 'state', 'slides', 'comment', 'topics', 'description', 'linkedin']
for nickname in self.people.keys():
if 'name' not in self.people[nickname]['info']:
raise CATerror('ERROR 49: file {} does not have a "name" field'.format(nickname))
for f in self.people[nickname]['info']:
if f not in valid_fields:
                    raise CATerror('ERROR 50: Invalid field "{}" in person {}'.format(f, nickname))
# vim: expandtab
| 44.087161 | 200 | 0.511862 |
aced147577f2bc8253b0775a1a7dd56e0cf18081 | 1,325 | py | Python | SM_openSMILE/openSMILE_preprocessing/mp3_to_wav.py | ChildMindInstitute/SM_EEG | 6f4b3329d7c3b93f64c740cf540ac19a107f1b2e | [
"Apache-2.0"
] | null | null | null | SM_openSMILE/openSMILE_preprocessing/mp3_to_wav.py | ChildMindInstitute/SM_EEG | 6f4b3329d7c3b93f64c740cf540ac19a107f1b2e | [
"Apache-2.0"
] | 5 | 2018-02-13T15:35:13.000Z | 2018-02-15T23:05:39.000Z | SM_openSMILE/openSMILE_preprocessing/mp3_to_wav.py | ChildMindInstitute/selective-mutism-eeg | 6f4b3329d7c3b93f64c740cf540ac19a107f1b2e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mp3_to_wav.py
Script to quickly convert an mp3 file to a waveform file.
Author:
– Jon Clucas, 2016 (jon.clucas@childmind.org)
© 2016, Child Mind Institute, Apache v2.0 License
Created on Fri Dec 23 12:43:40 2016
@author: jon.clucas
"""
import argparse
from os import path
from pydub import AudioSegment
def mp3_to_wav(in_file):
# get the mp3
to_convert = AudioSegment.from_mp3(in_file)
# make an output filename
    # strip() removes a set of characters, not a suffix, so use splitext instead
    out_base = path.splitext(path.basename(in_file))[0]
out_i = 0
out_file = path.join(path.dirname(in_file), ''.join([out_base, '.wav']))
while path.exists(out_file):
out_file = path.join(path.dirname(in_file), ''.join([out_base, '_',
str(out_i), '.wav']))
out_i = out_i + 1
# do the conversion verbosely
print(''.join(["Converting ", in_file, " to ", out_file]))
to_convert.export(out_file, format="wav")
def main():
# script can be run from the command line
parser = argparse.ArgumentParser(description='get mp3')
parser.add_argument('in_file', metavar='in_file', type=str)
arg = parser.parse_args()
mp3_to_wav(arg.in_file)
# ============================================================================
if __name__ == '__main__':
main()
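    # Example invocation (file name is illustrative):
    #     python mp3_to_wav.py recording.mp3
    # writes recording.wav next to the input (or recording_0.wav, ... if taken)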
| 28.191489 | 78 | 0.619623 |
aced15853a704411589158260e0ae047d8422d9a | 3,602 | py | Python | ja_model/ja_model/utils/data.py | james-alvey-42/ProgramTools | bb34a1ffc33bb70c2c431d9a310a92deb3f0adf9 | [
"MIT"
] | null | null | null | ja_model/ja_model/utils/data.py | james-alvey-42/ProgramTools | bb34a1ffc33bb70c2c431d9a310a92deb3f0adf9 | [
"MIT"
] | 3 | 2019-11-04T12:30:59.000Z | 2019-11-04T12:48:40.000Z | ja_model/ja_model/utils/data.py | james-alvey-42/ProgramTools | bb34a1ffc33bb70c2c431d9a310a92deb3f0adf9 | [
"MIT"
] | null | null | null | from math import sqrt
import numpy as np
import random
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from itertools import chain, combinations
from sklearn.metrics import mean_squared_error
def nn_out_to_list(nn_out):
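    # flatten the first column of a 2-D prediction array into a plain list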
Y_pred = []
for i in range(nn_out.shape[0]):
Y_pred.append(nn_out[i][0])
return Y_pred
def random_date_range(start_date, max_date, length=365):
"""
random_date_range(start_date, max_date, length=365)
Returns a random tuple of datetime strings with a date difference of (length) days from a given start date and a maximum end date.
Args:
start_date : datetime
max_date : datetime - maximum end date
length : int - length of datetime interval, default is one year
Returns:
st_date, en_date : string, string
"""
day_difference_dt = max_date - start_date
    day_difference = day_difference_dt.days - length
    random_days = random.randint(0, day_difference)
    start_date_dt = start_date + timedelta(days=random_days)
    end_date_dt = start_date_dt + timedelta(days=length)
st_date = start_date_dt.strftime("%d-%b-%Y")
en_date = end_date_dt.strftime("%d-%b-%Y")
return st_date, en_date
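# Usage sketch (dates are illustrative):
#     st, en = random_date_range(datetime(2015, 1, 1), datetime(2020, 1, 1))
#     -> e.g. ('17-Mar-2017', '17-Mar-2018')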
def powerset(iterable):
"""
powerset([1,2,3]) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]
Args:
iterable : iterable (e.g. list, tuple,...) - set to generate possible subsets of
Returns:
list - list of possible subsets
"""
xs = list(iterable)
# note we return an iterator rather than a list
tuples = chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1))
return list(tuples)
def _rmse(observations, forecast):
"""
Args:
observations : np.array - observed values in the test dataset
forecast : np.array - predictions fronm the trained model
Returns:
        (rmse, percent) : tuple - the root mean squared error together with the mean absolute percentage error of the forecast
"""
rmse = sqrt(mean_squared_error(observations, forecast))
percent = 0
for idx in range(len(observations)):
percent += abs(observations[idx] - forecast[idx])/observations[idx]
percent = 100*percent/len(observations)
return (rmse, percent)
def _test_metric(observations, forecast, metric_function="rmse"):
"""
Args:
observations : np.array - observed values in the test dataset
forecast : np.array - predictions fronm the trained model
        metric_function : string - choice of error metric function, available choices are ('rmse')
Returns:
        (error, percent) : tuple - the metric value together with the mean absolute percentage error of the forecast
"""
options = {'rmse': _rmse}
options_string = ''
for key in options.keys():
options_string = options_string + '"' + key + '"' + ', '
options_string = options_string[:-2]
try:
if (len(observations) != len(forecast)):
raise IndexError('ERROR: Observations and Forecast arrays are of different lengths.')
else:
temp_function = options[metric_function.lower()]
return temp_function(observations, forecast)
except IndexError as ie:
print(ie.args[0])
except KeyError as ke:
print('ERROR: Available metric functions are ({})'.format(options_string))
if __name__ == "__main__":
print('Inside data.py module, check path.')
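    # minimal sanity check with synthetic numbers (not project data):
    obs = np.array([1.0, 2.0, 3.0])
    pred = np.array([1.1, 1.9, 3.2])
    print(_test_metric(obs, pred, metric_function='rmse'))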
| 37.520833 | 145 | 0.650194 |
aced15d593af886ad3a2f6827987a212e922f91e | 23 | py | Python | AutotestPlatform/other/__init__.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | null | null | null | AutotestPlatform/other/__init__.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | 2 | 2020-06-06T00:51:32.000Z | 2021-06-10T22:40:50.000Z | AutotestPlatform/other/__init__.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | 1 | 2020-05-31T03:49:24.000Z | 2020-05-31T03:49:24.000Z | __author__ = 'laifuyu'
| 11.5 | 22 | 0.73913 |
aced172fe515dce5da44ee0e03e681483320f1de | 1,726 | py | Python | civil/library/dashboard/savedsearch.py | christopinka/django-civil | d134624da9d36c4ba0bea2df8a21698df196bdf6 | [
"Apache-2.0"
] | 3 | 2020-06-15T21:01:06.000Z | 2022-02-17T17:41:57.000Z | civil/library/dashboard/savedsearch.py | christopinka/django-civil | d134624da9d36c4ba0bea2df8a21698df196bdf6 | [
"Apache-2.0"
] | null | null | null | civil/library/dashboard/savedsearch.py | christopinka/django-civil | d134624da9d36c4ba0bea2df8a21698df196bdf6 | [
"Apache-2.0"
] | 1 | 2021-11-06T18:33:29.000Z | 2021-11-06T18:33:29.000Z | # -*- coding: utf-8 -*-
from django.utils.text import capfirst
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.utils.itercompat import is_iterable
from grappelli.dashboard.modules import DashboardModule
from civil.apps.search.models import SavedSearch, SavedSearchItem
#==============================================================================
class SavedSearches(DashboardModule):
"""
A module that displays a list of saved searches.
"""
title = _('Saved Search')
template = 'grappelli/dashboard/modules/link_list.html'
class_name = 'link-list'
limit = 10
def init_with_context(self, context):
if self._initialized:
return
request = context['request']
new_children = []
        if self.limit <= 0:
            # fall back to the class default when a non-positive limit is set
            self.limit = 10
        searches = SavedSearch.objects.filter(user=request.user).order_by('-when')[:self.limit]
        for s in searches:
            link_dict = { 'title': s.name, 'url': s.path, 'external': False }
            items = SavedSearchItem.objects.filter(search=s).count()
            if items == 0: link_dict['description'] = "No selected objects"
            elif items == 1: link_dict['description'] = "1 selected object"
            elif items > 1: link_dict['description'] = "%d selected objects" % items
            new_children.append(link_dict)
        if not searches:
            # placeholder entry when the user has no saved searches
            link_dict = { 'title': 'No searches defined', 'url': '.', 'external': True }
            new_children.append(link_dict)
self.children = new_children
self._initialized = True | 36.723404 | 95 | 0.614716 |
aced17702f54caf9d5747f2c1681d4b7122bc0cc | 3,444 | py | Python | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/cxx.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 5,421 | 2018-09-24T08:04:06.000Z | 2022-03-31T20:02:37.000Z | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/cxx.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 1,348 | 2018-09-22T13:41:00.000Z | 2022-03-31T22:33:40.000Z | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/cxx.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 396 | 2018-09-28T15:37:03.000Z | 2022-03-29T10:52:09.000Z | """SCons.Tool.c++
Tool-specific initialization for generic Posix C++ compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cxx.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import os.path
import SCons.Tool
import SCons.Defaults
import SCons.Util
compilers = ['CC', 'c++']
CXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++', '.mm']
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
CXXSuffixes.append('.C')
def iscplusplus(source):
if not source:
# Source might be None for unusual cases like SConf.
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext in CXXSuffixes:
return 1
return 0
def generate(env):
"""
Add Builders and construction variables for Visual Age C++ compilers
to an Environment.
"""
import SCons.Tool
import SCons.Tool.cc
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
SCons.Tool.cc.add_common_cc_variables(env)
if 'CXX' not in env:
env['CXX'] = env.Detect(compilers) or compilers[0]
env['CXXFLAGS'] = SCons.Util.CLVar('')
env['CXXCOM'] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['OBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CXXFILESUFFIX'] = '.cc'
def exists(env):
return env.Detect(env.get('CXX', compilers))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.09901 | 114 | 0.695412 |
aced17de2aea0878ee01264a255a515e2e08cc71 | 1,540 | py | Python | indexpy/utils/importer.py | abersheeran/index.py | 12d1e39a9490c663206f113e8d0b10fb29c5320b | [
"Apache-2.0"
] | 242 | 2019-06-15T07:14:21.000Z | 2021-06-22T00:46:03.000Z | indexpy/utils/importer.py | webclinic017/index.py | 5d7cb5c2de20ef633473bdca58d06306e62a7fed | [
"Apache-2.0"
] | 26 | 2020-07-07T06:43:42.000Z | 2021-06-17T02:01:52.000Z | indexpy/utils/importer.py | webclinic017/index.py | 5d7cb5c2de20ef633473bdca58d06306e62a7fed | [
"Apache-2.0"
] | 25 | 2019-10-26T06:48:02.000Z | 2021-06-16T19:58:25.000Z | import importlib
import os
from types import ModuleType
from typing import Any, Optional
class ImportFromStringError(Exception):
pass
def import_from_string(import_str: str) -> Any:
module_str, _, attrs_str = import_str.partition(":")
if not module_str or not attrs_str:
message = (
'Import string "{import_str}" must be in format "<module>:<attribute>".'
)
raise ImportFromStringError(message.format(import_str=import_str))
try:
module = importlib.import_module(module_str)
except ImportError as exc:
if exc.name != module_str:
raise exc from None
message = 'Could not import module "{module_str}".'
raise ImportFromStringError(message.format(module_str=module_str))
instance = module
try:
for attr_str in attrs_str.split("."):
instance = getattr(instance, attr_str)
except AttributeError:
message = 'Attribute "{attrs_str}" not found in module "{module_str}".'
raise ImportFromStringError(
message.format(attrs_str=attrs_str, module_str=module_str)
)
return instance
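# Usage sketch (the module path is illustrative):
#     app = import_from_string("myproject.asgi:application")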
def import_module(name: str) -> Optional[ModuleType]:
"""
try importlib.import_module, nothing to do when module not be found.
"""
if os.path.exists(os.path.join(os.getcwd(), name + ".py")) or os.path.exists(
os.path.join(os.getcwd(), name, "__init__.py")
):
return importlib.import_module(name)
return None # nothing to do when module not be found
| 31.428571 | 84 | 0.662987 |
aced17ee7cc2a017c3d0ca427c2daf19ae341cb1 | 1,266 | py | Python | todo/api/views.py | NazmusShakib/django-p1 | 2b25d7dbbaf8c42aa2e7d66949e2879a94516b0b | [
"MIT"
] | null | null | null | todo/api/views.py | NazmusShakib/django-p1 | 2b25d7dbbaf8c42aa2e7d66949e2879a94516b0b | [
"MIT"
] | 9 | 2020-02-12T00:18:04.000Z | 2022-02-10T10:38:45.000Z | todo/api/views.py | NazmusShakib/django-p1 | 2b25d7dbbaf8c42aa2e7d66949e2879a94516b0b | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, permissions
from todo.models import Todo
from .serializers import TodoSerializer
class TodoListApiView(APIView):
# add permission to check if user is authenticated
# permission_classes = [permissions.IsAuthenticated]
# 1. List all
def get(self, request, *args, **kwargs):
'''
        List all the todo items for the requesting user
        '''
        todos = Todo.objects.filter(created_by=request.user.id)
        serializer = TodoSerializer(todos, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
# 2. Create
def post(self, request, *args, **kwargs):
'''
Create the Todo with given todo data
'''
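        # expected request body (illustrative): {"task": "buy milk", "completed": false}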
data = {
'task' : request.data.get('task'),
'completed' : request.data.get('completed'),
'created_by' : request.user.id
}
serializer = TodoSerializer(data=data)
if serializer.is_valid():
serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 35.166667 | 79 | 0.652449 |
aced180c4cc997b80e619c2a4ab1c3432de5d30c | 4,958 | py | Python | hw1/run.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | null | null | null | hw1/run.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | null | null | null | hw1/run.py | kspathak/cse547 | 2379c6435c871720aa7da53d3c8066a628e81830 | [
"MIT"
] | 1 | 2021-02-18T01:39:20.000Z | 2021-02-18T01:39:20.000Z | #!/usr/bin/env python
import logging
import time
from absl import app
from absl import flags
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from cse547.data import CocoSingleLabelFeaturesDataset, TensorTransform
from cse547.evaluation import evaluate_binary_classifier
from cse547.loss import BinaryCrossEntropy, MeanSquaredError
from cse547.models import LinearClassifier, MultiLayerPerceptron
from cse547.train import train, TrainingEvaluator, TrainingSummarizer
# Mode
flags.DEFINE_enum('mode', 'train', ['train', 'evaluation', 'predict'],
'Specifies the task.')
# Data flags
flags.DEFINE_string('data_dir', 'data', "Data directory.")
flags.DEFINE_enum('dataset', 'train',
['train', 'test', 'validation'],
'Specifies the dataset.')
flags.DEFINE_enum('size', 'tiny',
['tiny', 'small'],
'Specifies the size of the dataset to use.')
# Model flags
flags.DEFINE_enum('model', 'linear', ['linear', 'multilayer_perceptron'],
'The model type to use.')
flags.DEFINE_multi_integer('model_multilayer_perceptron_hidden_units', [256],
'The number of hidden units for the multi-layer perceptron.')
# Training flags, ignored by evaluation jobs
flags.DEFINE_integer('train_batch_size', 8, 'Batch sizes during training.')
flags.DEFINE_float('train_l2_regularization', 4e-4,
'L2 regularization in the loss function.')
flags.DEFINE_integer('train_epochs', 32,
'The number of times to iterate over the data in training.')
flags.DEFINE_float('train_optimizer_learning_rate', 1e-5,
'The learning rate for stochastic gradient descent.')
flags.DEFINE_float('train_optimizer_momentum', 0.7,
'Nesterov\'s momentum for acceleration.')
flags.DEFINE_integer('train_summary_steps', 250,
'How often to summarize the model.')
flags.DEFINE_integer('train_evaluation_steps', 1000,
'How often to evaluate the model.')
flags.DEFINE_enum('train_loss_function', 'cross_entropy', ['cross_entropy', 'mean_squared_error'],
'Which loss function to use when training.')
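# Example invocation (flag values are illustrative):
#     python run.py --mode=train --model=multilayer_perceptron \
#         --train_epochs=32 --train_batch_size=8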
logger = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def main(argv):
dataset = CocoSingleLabelFeaturesDataset(FLAGS.data_dir, FLAGS.dataset, FLAGS.size,
transform=TensorTransform())
# TODO(phillypham): Move this to a different job, so it doesn't block training.
# test_dataset = CocoSingleLabelFeaturesDataset(FLAGS.data_dir, 'test', FLAGS.size,
# transform=TensorTransform())
validation_dataset = CocoSingleLabelFeaturesDataset(FLAGS.data_dir, 'validation', FLAGS.size,
transform=TensorTransform())
data_loader = DataLoader(dataset, batch_size=FLAGS.train_batch_size,
shuffle=True, num_workers=2)
n_features = dataset[0]['features'].size()[0]
hidden_units = FLAGS.model_multilayer_perceptron_hidden_units
model = (LinearClassifier(n_features, 1)
if FLAGS.model == 'linear' else
MultiLayerPerceptron(n_features, 1, hidden_units))
loss_fn = (BinaryCrossEntropy() if FLAGS.train_loss_function == 'cross_entropy'
else MeanSquaredError())
optimizer = optim.SGD(
model.parameters(),
lr=FLAGS.train_optimizer_learning_rate,
momentum=FLAGS.train_optimizer_momentum,
weight_decay=FLAGS.train_l2_regularization)
# Define training hooks
training_evaluator = TrainingEvaluator(
FLAGS.train_evaluation_steps,
model=model,
loss_fn=loss_fn,
evaluation_fn=evaluate_binary_classifier,
datasets = {
'training': dataset,
# 'test': test_dataset,
'validation': validation_dataset,
})
hooks = [
TrainingSummarizer(FLAGS.train_summary_steps),
training_evaluator,
]
training_start_time = time.time()
logger.info("Training is starting.")
train(
model=model,
data_loader=data_loader,
optimizer=optimizer,
loss_fn=loss_fn,
epochs=FLAGS.train_epochs,
hooks = hooks)
logger.info("Training completed after %d seconds.", time.time() - training_start_time)
if FLAGS.model == 'multilayer_perceptron':
logger.info("Model hyperparameters: {'l2_penalty': %f, 'hidden_units': %s}",
FLAGS.train_l2_regularization,
FLAGS.model_multilayer_perceptron_hidden_units)
else:
logger.info("Model hyperparameters: {'l2_penalty': %f}",
FLAGS.train_l2_regularization)
logger.info("Training evaluation log: %s", training_evaluator.log)
if __name__ == '__main__':
app.run(main)
| 39.664 | 98 | 0.658935 |
aced18fcc65b55ce8c16299ca056f2e280abf51f | 462 | py | Python | wards/migrations/0005_auto_20191020_2136.py | Naomi-sigu/awwwards | 075ea018f6ae44c48bac4b9fedd5d8883a6fce73 | [
"MIT"
] | null | null | null | wards/migrations/0005_auto_20191020_2136.py | Naomi-sigu/awwwards | 075ea018f6ae44c48bac4b9fedd5d8883a6fce73 | [
"MIT"
] | 6 | 2020-06-05T23:55:45.000Z | 2022-03-12T00:02:52.000Z | wards/migrations/0005_auto_20191020_2136.py | Naomi-sigu/awwwards | 075ea018f6ae44c48bac4b9fedd5d8883a6fce73 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-20 18:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wards', '0004_auto_20191020_2122'),
]
operations = [
migrations.AlterField(
model_name='projects',
name='project_image',
field=models.ImageField(upload_to='media/'),
),
]
| 22 | 56 | 0.621212 |
aced19ba80b09bde44e991cb1cf2aac5a0fd068b | 2,468 | py | Python | setup.py | akamai/distimate | d6ff3144b56bd35f61f81d93157aac30b8d36015 | [
"Apache-2.0"
] | 2 | 2020-06-15T12:05:55.000Z | 2020-11-03T08:39:49.000Z | setup.py | akamai/distimate | d6ff3144b56bd35f61f81d93157aac30b8d36015 | [
"Apache-2.0"
] | null | null | null | setup.py | akamai/distimate | d6ff3144b56bd35f61f81d93157aac30b8d36015 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
with open("README.md", "r") as fp:
long_description = fp.read()
setup(
name="distimate",
version="0.2.dev",
author="Miloslav Pojman",
author_email="mpojman@akamai.com",
description="Distributions visualized",
keywords=[
"statistics", "plotting", "histogram", "distribution",
"CDF", "PDF", "quantile", "percentile", "Pandas", "Matplotlib",
],
license="Apache License 2.0",
url="https://github.com/akamai/distimate",
project_urls={
"Documentation": "https://distimate.readthedocs.io/",
"Source Code": "https://github.com/akamai/distimate",
},
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages("src"),
package_dir={"": "src"},
python_requires=">=3.6",
install_requires=[
"numpy",
],
extras_require={
"dev": ["flake8", "pytest"],
"pandas": ["pandas>=1.0.0"],
},
classifiers=[
"Framework :: IPython",
"Framework :: Jupyter",
"Framework :: Matplotlib",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
zip_safe=False,
include_package_data=True,
)
| 34.760563 | 74 | 0.646272 |
aced19c2b1df0ced9f83a41dfba343b5ac7f1ac1 | 18 | py | Python | Day_004/my_module.py | masedos/100DaysOfCodePython | 5e52ca07635aaf29b4a83d865d2e063364a11a66 | [
"MIT"
] | null | null | null | Day_004/my_module.py | masedos/100DaysOfCodePython | 5e52ca07635aaf29b4a83d865d2e063364a11a66 | [
"MIT"
] | null | null | null | Day_004/my_module.py | masedos/100DaysOfCodePython | 5e52ca07635aaf29b4a83d865d2e063364a11a66 | [
"MIT"
] | null | null | null | pi = 3.14159265359 | 18 | 18 | 0.777778 |
aced1a644e27ebb080e5aab9b76609a8f4ff4730 | 5,988 | py | Python | website/users/views.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | 1 | 2020-05-10T21:10:43.000Z | 2020-05-10T21:10:43.000Z | website/users/views.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | 158 | 2020-05-04T12:37:41.000Z | 2022-03-31T20:15:07.000Z | website/users/views.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | null | null | null | from django.contrib.auth import login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views.generic import TemplateView
from tosti.filter import Filter
from .forms import LoginForm, AccountForm
from .services import get_openid_verifier, update_staff_status
class LoginView(TemplateView):
"""Login view."""
template_name = "users/login.html"
remember_cookie = "_remembered_username"
def get(self, request, **kwargs):
"""
GET request for the login view.
:param request: the request
:param kwargs: keyword arguments
:return: a render of the login view
"""
if request.user.is_authenticated:
if request.GET.get("next"):
return redirect(request.GET.get("next"))
else:
return redirect("users:account")
form = LoginForm()
remembered_username = request.COOKIES.get(self.remember_cookie, None)
if remembered_username is not None:
form.fields["username"].initial = remembered_username
return render(request, self.template_name, {"form": form})
def post(self, request, **kwargs):
"""
POST request for the login view.
Redirects a user to the OpenID verification server.
:param request: the request
:param kwargs: keyword arguments
:return: a redirect to the OpenID verification server if the LoginForm is valid, a render of the login page
otherwise
"""
form = LoginForm(request.POST)
if request.user.is_authenticated:
if request.GET.get("next"):
return redirect(request.GET.get("next"))
else:
return redirect("users:account")
if form.is_valid():
openid_verifier = get_openid_verifier(request)
verify_url = openid_verifier.get_request_url(form.cleaned_data.get("username"))
response = redirect(verify_url)
if form.cleaned_data.get("remember"):
response.set_cookie(self.remember_cookie, form.cleaned_data.get("username"))
return response
return render(request, self.template_name, {"form": form})
class VerifyView(TemplateView):
"""Verify view."""
template_name = "users/verify.html"
def get(self, request, **kwargs):
"""
GET request for verify view.
This view will verify if the given signature is valid against the OpenID server endpoint.
:param request: the request
:param kwargs: keyword arguments
:return: a redirect to the index page with the user logged in if the signature is valid, a render of an error
page otherwise
"""
openid_verifier = get_openid_verifier(request)
user, created = openid_verifier.extract_user()
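        # extract_user() yields a (user, created) pair; user is falsy when the
        # OpenID response cannot be verified, in which case the error template
        # below is rendered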
if user:
update_staff_status(user)
login(request, user, backend="django.contrib.auth.backends.ModelBackend")
next_page = request.GET.get("next")
if next_page:
return redirect(next_page)
elif created:
return redirect("welcome")
else:
return redirect("index")
return render(request, self.template_name)
class LogoutView(TemplateView):
"""Logout view."""
template_name = "users/logout.html"
def get(self, request, **kwargs):
"""
GET request for the logout view.
This view logs out a user
:param request: the request
:param kwargs: keyword arguments
:return: a redirect to the home page if a user is not logged in, redirects to the next GET parameter if it is
set, returns a render of the logout page otherwise
"""
next_page = request.GET.get("next")
if request.user.is_authenticated:
logout(request)
if next_page:
return redirect(next_page)
return render(request, self.template_name)
else:
if next_page:
return redirect(next_page)
return redirect("/")
class AccountView(LoginRequiredMixin, TemplateView):
"""Account view."""
template_name = "users/account.html"
user_data_tabs = Filter()
def get(self, request, **kwargs):
"""
GET request for the account view.
:param request: the request
:param kwargs: keyword arguments
:return: a render of the account view
"""
form = AccountForm(
initial={
"first_name": request.user.first_name,
"last_name": request.user.last_name,
"username": request.user.username,
"email": request.user.email,
"association": request.user.profile.association,
}
)
active = request.GET.get("active", "users")
tabs = self.user_data_tabs.do_filter([])
rendered_tab = None
for tab in tabs:
if active == tab["slug"]:
rendered_tab = tab["renderer"](request, tab, reverse("users:account"))
return render(
request, self.template_name, {"form": form, "active": active, "tabs": tabs, "rendered_tab": rendered_tab}
)
def post(self, request, **kwargs):
"""
POST request for the account view.
:param request: the request
:param kwargs: keyword arguments
:return: a render of the account view
"""
form = AccountForm(request.POST)
tabs = self.user_data_tabs.do_filter([])
if form.is_valid():
request.user.profile.association = form.cleaned_data.get("association")
request.user.profile.save()
return render(
request, self.template_name, {"form": form, "active": "users", "tabs": tabs, "rendered_tab": None}
)
| 33.640449 | 117 | 0.611222 |
aced1a653aebb4ae539aef10f40b50d3ee93da31 | 957 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/vpn_connection_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/vpn_connection_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/vpn_connection_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class VpnConnectionPaged(Paged):
"""
A paging container for iterating over a list of :class:`VpnConnection <azure.mgmt.network.v2018_07_01.models.VpnConnection>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VpnConnection]'}
}
def __init__(self, *args, **kwargs):
super(VpnConnectionPaged, self).__init__(*args, **kwargs)
| 34.178571 | 135 | 0.583072 |
aced1b7ac85efa8564db757c4f134c910de79fc9 | 564 | py | Python | aula02092019/03.py | mariosiqueira/aedpython | 27276aba5555a02b5a162b3a3d2807669ade8a8e | [
"CNRI-Python"
] | null | null | null | aula02092019/03.py | mariosiqueira/aedpython | 27276aba5555a02b5a162b3a3d2807669ade8a8e | [
"CNRI-Python"
] | null | null | null | aula02092019/03.py | mariosiqueira/aedpython | 27276aba5555a02b5a162b3a3d2807669ade8a8e | [
"CNRI-Python"
] | null | null | null | import random
def gnomeSort(vetor):
pivot = 0
while (pivot < len(vetor) - 1):
if(vetor[pivot] > vetor[pivot + 1]):
menor = vetor[pivot + 1]
vetor[pivot + 1] = vetor[pivot]
vetor[pivot] = menor
if(pivot > 0):#Verifica se tem elemento antes do pivot
pivot -= 1 #fez troca e tem anterior, decrementa.
else:#o pivot e seu proximo estao em ordem.
pivot += 1 #incrementa o pivot
return vetor
vetor = list(range(0, 21))
random.shuffle(vetor)
print(gnomeSort(vetor)) | 31.333333 | 66 | 0.578014 |
aced1b964622222405dc250d2e1a1c0314debee7 | 11,329 | py | Python | mmdeploy/codebase/mmdet/deploy/object_detection.py | hanrui1sensetime/mmdeploy | f2594c624b67910e55e24418832bd96685425b2f | [
"Apache-2.0"
] | 1 | 2021-12-30T06:29:46.000Z | 2021-12-30T06:29:46.000Z | mmdeploy/codebase/mmdet/deploy/object_detection.py | wwjwy/mmdeploy | c6fccd0121618c8c4dc07f49823c377003475040 | [
"Apache-2.0"
] | null | null | null | mmdeploy/codebase/mmdet/deploy/object_detection.py | wwjwy/mmdeploy | c6fccd0121618c8c4dc07f49823c377003475040 | [
"Apache-2.0"
] | 1 | 2022-02-10T04:31:10.000Z | 2022-02-10T04:31:10.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer
from torch.utils.data import Dataset
from mmdeploy.utils import Task
from mmdeploy.utils.config_utils import get_input_shape
from ...base import BaseTask
from .mmdetection import MMDET_TASK
def process_model_config(model_cfg: mmcv.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmcv.Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data type are List[str], List[np.ndarray].
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Default: None.
Returns:
mmcv.Config: the model config after processing.
"""
from mmdet.datasets import replace_ImageToTensor
cfg = model_cfg.copy()
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
# for static exporting
if input_shape is not None:
cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape)
transforms = cfg.data.test.pipeline[1]['transforms']
for trans in transforms:
trans_type = trans['type']
if trans_type == 'Resize':
trans['keep_ratio'] = False
elif trans_type == 'Pad':
trans['size_divisor'] = 1
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
return cfg
@MMDET_TASK.register_module(Task.OBJECT_DETECTION.value)
class ObjectDetection(BaseTask):
def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
device: str) -> None:
super().__init__(model_cfg, deploy_cfg, device)
def init_backend_model(self,
model_files: Optional[str] = None,
**kwargs) -> torch.nn.Module:
"""Initialize backend model.
Args:
model_files (Sequence[str]): Input model files.
Returns:
nn.Module: An initialized backend model.
"""
from .object_detection_model import build_object_detection_model
model = build_object_detection_model(
model_files, self.model_cfg, self.deploy_cfg, device=self.device)
return model.eval()
def init_pytorch_model(self,
model_checkpoint: Optional[str] = None,
cfg_options: Optional[Dict] = None,
**kwargs) -> torch.nn.Module:
"""Initialize torch model.
Args:
model_checkpoint (str): The checkpoint file of torch model,
defaults to `None`.
cfg_options (dict): Optional config key-pair parameters.
Returns:
nn.Module: An initialized torch model generated by other OpenMMLab
codebases.
"""
from mmdet.apis import init_detector
model = init_detector(self.model_cfg, model_checkpoint, self.device,
cfg_options)
return model.eval()
def create_input(self,
imgs: Union[str, np.ndarray],
input_shape: Sequence[int] = None) \
-> Tuple[Dict, torch.Tensor]:
"""Create input for detector.
Args:
task (Task): Specifying task type.
imgs (Any): Input image(s), accpeted data type are `str`,
`np.ndarray`, `torch.Tensor`.
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Defaults to `None`.
Returns:
tuple: (data, img), meta information for the input image and input.
"""
from mmdet.datasets.pipelines import Compose
from mmcv.parallel import collate, scatter
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = process_model_config(self.model_cfg, imgs, input_shape)
test_pipeline = Compose(cfg.data.test.pipeline)
data_list = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
data_list.append(data)
data = collate(data_list, samples_per_gpu=len(imgs))
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
data['img'] = [img.data[0] for img in data['img']]
if self.device != 'cpu':
data = scatter(data, [self.device])[0]
return data, data['img']
def visualize(self,
model: torch.nn.Module,
image: Union[str, np.ndarray],
result: list,
output_file: str,
window_name: str,
show_result: bool = False,
score_thr: float = 0.3):
"""Visualize predictions of a model.
Args:
model (nn.Module): Input model.
image (str | np.ndarray): Input image to draw predictions on.
result (list): A list of predictions.
output_file (str): Output file to save drawn image.
window_name (str): The name of visualization window. Defaults to
an empty string.
show_result (bool): Whether to show result in windows, defaults
to `False`.
score_thr (float): The score threshold to display the bbox.
Defaults to 0.3.
"""
show_img = mmcv.imread(image) if isinstance(image, str) else image
output_file = None if show_result else output_file
model.show_result(
show_img,
result=result,
win_name=window_name,
show=show_result,
out_file=output_file,
score_thr=score_thr)
@staticmethod
def run_inference(model: torch.nn.Module,
model_inputs: Dict[str, torch.Tensor]) -> list:
"""Run inference once for a object detection model of mmdet.
Args:
model (nn.Module): Input model.
model_inputs (dict): A dict containing model inputs tensor and
meta info.
Returns:
list: The predictions of model inference.
"""
return model(**model_inputs, return_loss=False, rescale=True)
@staticmethod
def get_partition_cfg(partition_type: str) -> Dict:
"""Get a certain partition config for mmdet.
Args:
partition_type (str): A string specifying partition type.
Returns:
dict: A dictionary of partition config.
"""
from .model_partition_cfg import MMDET_PARTITION_CFG
assert (partition_type in MMDET_PARTITION_CFG), \
f'Unknown partition_type {partition_type}'
return MMDET_PARTITION_CFG[partition_type]
@staticmethod
def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
"""Get input tensor from input data.
Args:
input_data (dict): Input data containing meta info and image
tensor.
Returns:
torch.Tensor: An image in `Tensor`.
"""
img_tensor = input_data['img'][0]
if isinstance(img_tensor, DataContainer):
img_tensor = img_tensor.data[0]
return img_tensor
@staticmethod
def evaluate_outputs(model_cfg: mmcv.Config,
outputs: Sequence,
dataset: Dataset,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False):
"""Perform post-processing to predictions of model.
Args:
model_cfg (mmcv.Config): Model config.
outputs (list): A list of predictions of model inference.
dataset (Dataset): Input dataset to run test.
metrics (str): Evaluation metrics, which depends on
the codebase and the dataset, e.g., "bbox", "segm", "proposal"
for COCO, and "mAP", "recall" for PASCAL VOC in mmdet.
out (str): Output result file in pickle format, defaults to `None`.
metric_options (dict): Custom options for evaluation, will be
kwargs for dataset.evaluate() function. Defaults to `None`.
format_only (bool): Format the output results without perform
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
"""
if out:
logging.info(f'\nwriting results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
dataset.format_results(outputs, **kwargs)
if metrics:
eval_kwargs = model_cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule', 'dynamic_intervals'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
Return:
dict: Composed of the preprocess information.
"""
input_shape = get_input_shape(self.deploy_cfg)
model_cfg = process_model_config(self.model_cfg, [''], input_shape)
preprocess = model_cfg.data.test.pipeline
return preprocess
def get_postprocess(self) -> Dict:
"""Get the postprocess information for SDK.
Return:
dict: Composed of the postprocess information.
"""
postprocess = self.model_cfg.model.test_cfg
if 'rpn' in postprocess:
postprocess['min_bbox_size'] = postprocess['rpn']['min_bbox_size']
if 'rcnn' in postprocess:
postprocess['score_thr'] = postprocess['rcnn']['score_thr']
if 'mask_thr_binary' in postprocess['rcnn']:
postprocess['mask_thr_binary'] = postprocess['rcnn'][
'mask_thr_binary']
return postprocess
def get_model_name(self) -> str:
"""Get the model name.
Return:
str: the name of the model.
"""
assert 'type' in self.model_cfg.model, 'model config contains no type'
name = self.model_cfg.model.type.lower()
return name
| 37.889632 | 79 | 0.583017 |
aced1c1f0e3ba8f5f19f0a316bdcfc240f6498c9 | 4,327 | py | Python | scraper.py | rchopinw/scrape_from_hk_exchange | 557afac95357582337493476b483e5907a86c1f1 | [
"MIT"
] | null | null | null | scraper.py | rchopinw/scrape_from_hk_exchange | 557afac95357582337493476b483e5907a86c1f1 | [
"MIT"
] | null | null | null | scraper.py | rchopinw/scrape_from_hk_exchange | 557afac95357582337493476b483e5907a86c1f1 | [
"MIT"
] | null | null | null | from os.path import expanduser
import random
import cfscrape
import numpy as np
import pandas as pd
import urllib
from collections import defaultdict, Counter
import json
class Scraper:
def __init__(self):
self.status = {
'yss': 'LT',
'sx': 'LP',
'ch': 'W',
'bjj': 'RJ',
'clz': 'A'
}
self.status_name = {
'LT': '已上市',
'LP': '失效',
'W': '撤回',
'RJ': '被拒绝',
'A': '处理中'
}
self.header = 'https://www1.hkexnews.hk/app/'
def scrape(self, ):
web = cfscrape.create_scraper()
print('Please enter a year in 4 digits.')
year = str(input())
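        # the `_` query parameter looks like a cache-busting millisecond
        # timestamp; jitter a fixed value to mimic the site's own requests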
json_id = 1638657787440 + (-1)**random.randint(0, 1) * random.randint(0, 1000)
response = web.get(
'https://www1.hkexnews.hk/ncms/json/eds/app_{}_sehk_c.json?_={}'.format(
year, json_id
)
)
info = json.loads(
response.text
)
update_date, content = info['uDate'], info['app']
print('Last update date from website: {}.'.format(update_date))
print('Please enter a starting date in 4 digits, e.g., 0423 indicates 23rd, April.')
start_date = str(input())
print('Please enter an ending date in 4 digits, e.g., 1201 indicates 1st, December.')
end_date = str(input())
start_date = year + start_date
end_date = year + end_date
# getting files between two dates
formatted_content = [
{'id': x['id'],
'date': self.__rank_reference(x['d']),
'company_name': x['a'],
'status_code': x['s'],
'status_name': self.status_name[x['s']],
'full_file': x['ls'][0]['u1'],
'partial_file': 0
}
for x in content
if start_date <= self.__rank_reference(x['d']) <= end_date and x['ls']
]
count = Counter(
x['status_name'] for x in formatted_content
)
print(
'Between {} and {}, '.format(start_date, end_date),
' '.join('{}: {} |'.format(x, y) for x, y in count.items())
)
# getting status
print(
'Please enter one or more status from the following: ',
''.join(
'{}: {} | '.format(x, y)
for x, y in zip(self.status.keys(), self.status_name.values())
),
'\n',
'For multiple input, please use comma to separate, e.g.: yss,sx'
)
status = input()
status = {
self.status[x.strip()]
for x in status.split(',')
}
filtered_content = [
x
for x in formatted_content
if x['status_code'] in status
]
print('{} related file found.'.format(len(filtered_content)))
# downloading
print('Please input a download path in the form of /Users/bangxixiao/Desktop',
', or enter d to download directly to Desktop.')
path = input()
if path == 'd':
path = expanduser('~/Desktop')
for i, file in enumerate(filtered_content):
print(
'Downloading {} | {}/{}'.format(
file['company_name'], i, len(filtered_content)
)
)
if file['full_file'] == '#':
print(
'Unable to find corresponding pdf w.r.t. {}'.format(
file['company_name']
),
', it has the status of {}.'.format(
file['status_name']
)
)
continue
d_path = path + '/{}_{}_{}.pdf'.format(
file['date'], file['company_name'], file['status_name']
)
try:
urllib.request.urlretrieve(
self.header + file['full_file'], d_path
)
            except Exception:
print('Error encountered at: ', file)
continue
print('Done, file saved at {}.'.format(path))
def __rank_reference(self, s):
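        # convert a 'dd/mm/yyyy' date string into sortable 'yyyymmdd' form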
sp = s.split('/')
return sp[2] + sp[1] + sp[0]
| 32.291045 | 93 | 0.470765 |
aced1c5d5c8e391750a5fd91158d2f0aef73850c | 45,655 | py | Python | mars/deploy/local/tests/test_cluster.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | null | null | null | mars/deploy/local/tests/test_cluster.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | null | null | null | mars/deploy/local/tests/test_cluster.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
import pickle
import sys
import time
import tempfile
import traceback
import unittest
import uuid
import cloudpickle
import numpy as np
import pandas as pd
try:
import h5py
except ImportError:
h5py = None
from mars import tensor as mt
from mars import dataframe as md
from mars import remote as mr
from mars.config import options, option_context
from mars.deploy.local.core import new_cluster, LocalDistributedCluster, gen_endpoint
from mars.errors import ExecutionFailed
from mars.serialize import BytesField, Int64Field
from mars.tensor.operands import TensorOperand
from mars.tensor.arithmetic.core import TensorElementWise
from mars.tensor.arithmetic.abs import TensorAbs
from mars.session import new_session, Session, ClusterSession
from mars.scheduler import SessionManagerActor
from mars.scheduler.utils import SchedulerClusterInfoActor
from mars.worker.dispatcher import DispatchActor
from mars.web.session import Session as WebSession
from mars.context import get_context, RunningMode
from mars.utils import serialize_function
from mars.tests.core import mock, require_cudf
logger = logging.getLogger(__name__)
_exec_timeout = 120 if 'CI' in os.environ else -1
def _on_deserialize_fail(x):
raise TypeError('intend to throw error on' + str(x))
class SerializeMustFailOperand(TensorOperand, TensorElementWise):
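    # an operand whose field deserialization always raises, used by the tests
    # to exercise serialization error handling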
_op_type_ = 356789
_f = Int64Field('f', on_deserialize=_on_deserialize_fail)
def __init__(self, f=None, **kw):
super().__init__(_f=f, **kw)
class TileFailOperand(TensorAbs):
_op_type_ = 198732951
_exc_serial = BytesField('exc_serial')
@classmethod
def tile(cls, op):
if op._exc_serial is not None:
raise pickle.loads(op._exc_serial)
return super().tile(op)
class ExecFailOperand(TensorAbs):
_op_type_ = 196432154
_exc_serial = BytesField('exc_serial')
@classmethod
def tile(cls, op):
tileables = super().tile(op)
        # mark the output shape as unknown so it must be resolved at execution time
        tileables[0]._shape = (np.nan, np.nan)
return tileables
@classmethod
def execute(cls, ctx, op):
if op._exc_serial is not None:
raise pickle.loads(op._exc_serial)
return super().execute(ctx, op)
class TileWithContextOperand(TensorAbs):
_op_type_ = 9870102948
_multiplier = Int64Field('multiplier')
@classmethod
def tile(cls, op):
context = get_context()
if context.running_mode != RunningMode.local_cluster:
raise AssertionError
inp_chunk = op.inputs[0].chunks[0]
inp_size = context.get_chunk_metas([inp_chunk.key])[0].chunk_size
chunk_op = op.copy().reset_key()
chunk_op._multiplier = inp_size
chunk = chunk_op.new_chunk([inp_chunk], shape=inp_chunk.shape)
new_op = op.copy()
return new_op.new_tensors(op.inputs, shape=op.outputs[0].shape,
order=op.outputs[0].order, nsplits=op.inputs[0].nsplits,
chunks=[chunk])
@classmethod
def execute(cls, ctx, op):
ctx[op.outputs[0].key] = ctx[op.inputs[0].key] * op._multiplier
@unittest.skipIf(sys.platform == 'win32', 'does not run in windows')
@mock.patch('webbrowser.open_new_tab', new=lambda *_, **__: True)
class Test(unittest.TestCase):
def setUp(self):
super().setUp()
self._old_default_cpu_usage = options.scheduler.default_cpu_usage
options.scheduler.default_cpu_usage = 0
def tearDown(self):
super().tearDown()
options.scheduler.default_cpu_usage = self._old_default_cpu_usage
def testLocalCluster(self, *_):
endpoint = gen_endpoint('0.0.0.0')
with LocalDistributedCluster(endpoint, scheduler_n_process=2, worker_n_process=3,
shared_memory='20M') as cluster:
pool = cluster.pool
self.assertTrue(pool.has_actor(pool.actor_ref(
SchedulerClusterInfoActor.default_uid())))
self.assertTrue(pool.has_actor(pool.actor_ref(SessionManagerActor.default_uid())))
self.assertTrue(pool.has_actor(pool.actor_ref(DispatchActor.default_uid())))
with new_session(endpoint) as session:
api = session._api
t = mt.ones((3, 3), chunk_size=2)
result = session.run(t, timeout=_exec_timeout)
np.testing.assert_array_equal(result, np.ones((3, 3)))
self.assertNotIn(session._session_id, api.session_manager.get_sessions())
def testLocalClusterWithWeb(self, *_):
import psutil
with new_cluster(scheduler_n_process=2, worker_n_process=3,
shared_memory='20M', web=True) as cluster:
cluster_proc = psutil.Process(cluster._cluster_process.pid)
web_proc = psutil.Process(cluster._web_process.pid)
processes = list(cluster_proc.children(recursive=True)) + \
list(web_proc.children(recursive=True))
with cluster.session as session:
t = mt.ones((3, 3), chunk_size=2)
result = session.run(t, timeout=_exec_timeout)
np.testing.assert_array_equal(result, np.ones((3, 3)))
with new_session('http://' + cluster._web_endpoint) as session:
t = mt.ones((3, 3), chunk_size=2)
result = session.run(t, timeout=_exec_timeout)
np.testing.assert_array_equal(result, np.ones((3, 3)))
check_time = time.time()
while any(p.is_running() for p in processes):
time.sleep(0.1)
if check_time + 10 < time.time():
logger.error('Processes still running: %r',
[' '.join(p.cmdline()) for p in processes if p.is_running()])
self.assertFalse(any(p.is_running() for p in processes))
def testLocalClusterError(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=3,
shared_memory='20M', web=True, options={'scheduler.retry_num': 1}) as cluster:
# Note that it is nested exception and we want to check the message
# of the inner exeception, thus assertRaises won't work.
with cluster.session as session:
t = mt.array(["1", "2", "3", "4"])
try:
session.run(t + 1)
except: # noqa: E722
etype, exp, tb = sys.exc_info()
self.assertEqual(etype, ExecutionFailed)
self.assertIsInstance(exp, ExecutionFailed)
formatted_tb = '\n'.join(traceback.format_exception(etype, exp, tb))
self.assertIn('TypeError', formatted_tb)
self.assertIn('ufunc', formatted_tb)
self.assertIn('add', formatted_tb)
self.assertIn('signature matching types', formatted_tb)
with new_session('http://' + cluster._web_endpoint) as session:
t = mt.array(["1", "2", "3", "4"])
try:
session.run(t + 1)
except: # noqa: E722
etype, exp, tb = sys.exc_info()
self.assertEqual(etype, ExecutionFailed)
self.assertIsInstance(exp, ExecutionFailed)
formatted_tb = '\n'.join(traceback.format_exception(etype, exp, tb))
self.assertIn('TypeError', formatted_tb)
self.assertIn('ufunc', formatted_tb)
self.assertIn('add', formatted_tb)
self.assertIn('signature matching types', formatted_tb)
def testNSchedulersNWorkers(self, *_):
calc_cpu_cnt = functools.partial(lambda: 4)
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
None, None, None, calc_cpu_count=calc_cpu_cnt), (2, 4))
# scheduler and worker needs at least 2 processes
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
1, None, None, calc_cpu_count=calc_cpu_cnt), (2, 2))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
3, None, None, calc_cpu_count=calc_cpu_cnt), (2, 2))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
5, None, None, calc_cpu_count=calc_cpu_cnt), (2, 3))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
None, 1, None, calc_cpu_count=calc_cpu_cnt), (1, 4))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
None, 3, None, calc_cpu_count=calc_cpu_cnt), (3, 4))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
None, None, 3, calc_cpu_count=calc_cpu_cnt), (2, 3))
self.assertEqual(LocalDistributedCluster._calc_scheduler_worker_n_process(
5, 3, 2, calc_cpu_count=calc_cpu_cnt), (3, 2))
def testSingleOutputTensorExecute(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
self.assertIs(cluster.session, Session.default_or_local())
t = mt.random.rand(10)
r = t.sum()
res = r.to_numpy()
self.assertTrue(np.isscalar(res))
self.assertLess(res, 10)
t = mt.random.rand(10)
r = t.sum() * 4 - 1
res = r.to_numpy()
self.assertLess(res, 39)
def testMultipleOutputTensorExecute(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
t = mt.random.rand(20, 5, chunk_size=5)
r = mt.linalg.svd(t)
res = session.run((t,) + r, timeout=_exec_timeout)
U, s, V = res[1:]
np.testing.assert_allclose(res[0], U.dot(np.diag(s).dot(V)))
raw = np.random.rand(20, 5)
# to test fusion, the graph should be fused
t = mt.array(raw)
U, s, V = mt.linalg.svd(t)
r = U.dot(mt.diag(s).dot(V))
res = r.to_numpy()
np.testing.assert_allclose(raw, res)
# test submit part of svd outputs
t = mt.array(raw)
U, s, V = mt.linalg.svd(t)
with new_session(cluster.endpoint) as session2:
U_result, s_result = session2.run(U, s, timeout=_exec_timeout)
U_expected, s_expected, _ = np.linalg.svd(raw, full_matrices=False)
np.testing.assert_allclose(U_result, U_expected)
np.testing.assert_allclose(s_result, s_expected)
with new_session(cluster.endpoint) as session2:
U_result, s_result = session2.run(U + 1, s + 1, timeout=_exec_timeout)
U_expected, s_expected, _ = np.linalg.svd(raw, full_matrices=False)
np.testing.assert_allclose(U_result, U_expected + 1)
np.testing.assert_allclose(s_result, s_expected + 1)
with new_session(cluster.endpoint) as session2:
t = mt.array(raw)
_, s, _ = mt.linalg.svd(t)
del _
s_result = session2.run(s, timeout=_exec_timeout)
s_expected = np.linalg.svd(raw, full_matrices=False)[1]
np.testing.assert_allclose(s_result, s_expected)
def testIndexTensorExecute(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
a = mt.random.rand(10, 5)
idx = slice(0, 5), slice(0, 5)
a[idx] = 2
a_splits = mt.split(a, 2)
r1, r2 = session.run(a_splits[0], a[idx], timeout=_exec_timeout)
np.testing.assert_array_equal(r1, r2)
np.testing.assert_array_equal(r1, np.ones((5, 5)) * 2)
with new_session(cluster.endpoint) as session2:
a = mt.random.rand(10, 5)
idx = slice(0, 5), slice(0, 5)
a[idx] = mt.ones((5, 5)) * 2
r = session2.run(a[idx], timeout=_exec_timeout)
np.testing.assert_array_equal(r, np.ones((5, 5)) * 2)
with new_session(cluster.endpoint) as session3:
a = mt.random.rand(100, 5)
slice1 = a[:10]
slice2 = a[10:20]
r1, r2, expected = session3.run(slice1, slice2, a, timeout=_exec_timeout)
np.testing.assert_array_equal(r1, expected[:10])
np.testing.assert_array_equal(r2, expected[10:20])
with new_session(cluster.endpoint) as session4:
a = mt.random.rand(100, 5)
a[:10] = mt.ones((10, 5))
a[10:20] = 2
r = session4.run(a, timeout=_exec_timeout)
np.testing.assert_array_equal(r[:10], np.ones((10, 5)))
np.testing.assert_array_equal(r[10:20], np.ones((10, 5)) * 2)
with new_session(cluster.endpoint) as session5:
raw = np.random.rand(10, 10)
a = mt.tensor(raw, chunk_size=(5, 4))
b = a[a.argmin(axis=1), mt.tensor(np.arange(10))]
r = session5.run(b, timeout=_exec_timeout, compose=False)
np.testing.assert_array_equal(r, raw[raw.argmin(axis=1), np.arange(10)])
def testBoolIndexingExecute(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
a = mt.random.rand(8, 8, chunk_size=4)
a[2:6, 2:6] = mt.ones((4, 4)) * 2
b = a[a > 1]
self.assertEqual(b.shape, (np.nan,))
cluster.session.run(b, fetch=False, timeout=_exec_timeout)
self.assertEqual(b.shape, (16,))
c = b.reshape((4, 4))
self.assertEqual(c.shape, (4, 4))
with new_session('http://' + cluster._web_endpoint) as session2:
a = mt.random.rand(8, 8, chunk_size=4)
a[2:6, 2:6] = mt.ones((4, 4)) * 2
b = a[a > 1]
self.assertEqual(b.shape, (np.nan,))
session2.run(b, fetch=False, timeout=_exec_timeout)
self.assertEqual(b.shape, (16,))
c = b.reshape((4, 4))
self.assertEqual(c.shape, (4, 4))
# test unknown-shape fusion
with new_session('http://' + cluster._web_endpoint) as session2:
a = mt.random.rand(6, 6, chunk_size=3)
a[2:5, 2:5] = mt.ones((3, 3)) * 2
b = (a[a > 1] - 1) * 2
r = session2.run(b, timeout=_exec_timeout)
np.testing.assert_array_equal(r, np.ones((9,)) * 2)
def testExecutableTuple(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
with new_session('http://' + cluster._web_endpoint).as_default():
a = mt.ones((20, 10), chunk_size=10)
u, s, v = (mt.linalg.svd(a)).execute().fetch()
np.testing.assert_allclose(u.dot(np.diag(s).dot(v)), np.ones((20, 10)))
def testRerunTensor(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
a = mt.ones((10, 10)) + 1
result1 = session.run(a, timeout=_exec_timeout)
np.testing.assert_array_equal(result1, np.ones((10, 10)) + 1)
result2 = session.run(a, timeout=_exec_timeout)
np.testing.assert_array_equal(result1, result2)
with new_session(cluster.endpoint) as session2:
a = mt.random.rand(10, 10)
a_result1 = session2.run(a, timeout=_exec_timeout)
b = mt.ones((10, 10))
a_result2, b_result = session2.run(a, b, timeout=_exec_timeout)
np.testing.assert_array_equal(a_result1, a_result2)
np.testing.assert_array_equal(b_result, np.ones((10, 10)))
def testRunWithoutFetch(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
a = mt.ones((10, 20)) + 1
self.assertIsNone(session.run(a, fetch=False, timeout=_exec_timeout))
np.testing.assert_array_equal(a.to_numpy(session=session), np.ones((10, 20)) + 1)
def testGraphFail(self, *_):
op = SerializeMustFailOperand(f=3)
tensor = op.new_tensor(None, (3, 3))
try:
raise ValueError
except: # noqa: E722
exc = sys.exc_info()[1]
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', modules=[__name__],
options={'scheduler.retry_num': 1}) as cluster:
with self.assertRaises(ExecutionFailed):
try:
cluster.session.run(tensor, timeout=_exec_timeout)
except ExecutionFailed as ex:
self.assertIsInstance(ex.__cause__, TypeError)
raise
data = mt.tensor(np.random.rand(10, 20))
data2 = TileFailOperand(_exc_serial=pickle.dumps(exc)).new_tensor([data], shape=data.shape)
with self.assertRaises(ExecutionFailed):
try:
cluster.session.run(data2)
except ExecutionFailed as ex:
self.assertIsInstance(ex.__cause__, ValueError)
raise
data = mt.tensor(np.random.rand(20, 10))
data2 = ExecFailOperand(_exc_serial=pickle.dumps(exc)).new_tensor([data], shape=data.shape)
with self.assertRaises(ExecutionFailed):
try:
cluster.session.run(data2)
except ExecutionFailed as ex:
self.assertIsInstance(ex.__cause__, ValueError)
raise
def testFetchTensor(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
a1 = mt.ones((10, 20), chunk_size=8) + 1
r1 = session.run(a1, timeout=_exec_timeout)
r2 = session.fetch(a1)
np.testing.assert_array_equal(r1, r2)
r3 = session.run(a1 * 2, timeout=_exec_timeout)
np.testing.assert_array_equal(r3, r1 * 2)
a2 = mt.ones((10, 20), chunk_size=8) + 1
r4 = session.run(a2, timeout=_exec_timeout)
np.testing.assert_array_equal(r4, r1)
del a1
r4 = session.run(a2, timeout=_exec_timeout)
np.testing.assert_array_equal(r4, r1)
with new_session('http://' + cluster._web_endpoint) as session:
a3 = mt.ones((5, 10), chunk_size=3) + 1
r1 = session.run(a3, timeout=_exec_timeout)
r2 = session.fetch(a3)
np.testing.assert_array_equal(r1, r2)
r3 = session.run(a3 * 2, timeout=_exec_timeout)
np.testing.assert_array_equal(r3, r1 * 2)
a4 = mt.ones((5, 10), chunk_size=3) + 1
r4 = session.run(a4, timeout=_exec_timeout)
np.testing.assert_array_equal(r4, r1)
del a3
r4 = session.run(a4, timeout=_exec_timeout)
np.testing.assert_array_equal(r4, r1)
def testFetchDataFrame(self, *_):
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
from mars.dataframe.arithmetic import add
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
data1 = pd.DataFrame(np.random.rand(10, 10))
df1 = from_pandas_df(data1, chunk_size=5)
data2 = pd.DataFrame(np.random.rand(10, 10))
df2 = from_pandas_df(data2, chunk_size=6)
df3 = add(df1, df2)
r1 = session.run(df3, compose=False, timeout=_exec_timeout)
r2 = session.fetch(df3)
pd.testing.assert_frame_equal(r1, r2)
data4 = pd.DataFrame(np.random.rand(10, 10))
df4 = from_pandas_df(data4, chunk_size=6)
df5 = add(df3, df4)
r1 = session.run(df5, compose=False, timeout=_exec_timeout)
r2 = session.fetch(df5)
pd.testing.assert_frame_equal(r1, r2)
df6 = df5.sum()
r1 = session.run(df6, timeout=_exec_timeout)
r2 = session.fetch(df6)
pd.testing.assert_series_equal(r1, r2)
def testMultiSessionDecref(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
a = mt.ones((10, 20), chunk_size=8)
b = mt.ones((10, 20), chunk_size=8)
self.assertEqual(a.key, b.key)
r1 = session.run(a, timeout=_exec_timeout)
r1_fetch = session.fetch(a)
np.testing.assert_array_equal(r1, r1_fetch)
web_session = new_session('http://' + cluster._web_endpoint)
r2 = web_session.run(a, timeout=_exec_timeout)
r2_fetch = web_session.fetch(a)
np.testing.assert_array_equal(r1, r2)
np.testing.assert_array_equal(r2, r2_fetch)
local_session = new_session()
r3 = local_session.run(a)
r3_fetch = local_session.fetch(a)
np.testing.assert_array_equal(r1, r3)
np.testing.assert_array_equal(r3, r3_fetch)
del a
self.assertEqual(len(local_session._sess._executor.chunk_result), 0)
with self.assertRaises(ValueError):
session.fetch(b)
with self.assertRaises(ValueError):
web_session.fetch(b)
def testEagerMode(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
self.assertIsInstance(Session.default_or_local()._sess, ClusterSession)
with option_context({'eager_mode': True}):
a_data = np.random.rand(10, 10)
a = mt.tensor(a_data, chunk_size=3)
np.testing.assert_array_equal(a, a_data)
r1 = a + 1
expected1 = a_data + 1
np.testing.assert_array_equal(r1, expected1)
r2 = r1.dot(r1)
expected2 = expected1.dot(expected1)
np.testing.assert_array_almost_equal(r2, expected2)
a = mt.ones((10, 10), chunk_size=3)
with self.assertRaises(ValueError):
a.fetch()
r = a.dot(a)
np.testing.assert_array_equal(r.to_numpy(), np.ones((10, 10)) * 10)
with new_session('http://' + cluster._web_endpoint).as_default():
self.assertIsInstance(Session.default_or_local()._sess, WebSession)
with option_context({'eager_mode': True}):
a_data = np.random.rand(10, 10)
a = mt.tensor(a_data, chunk_size=3)
np.testing.assert_array_equal(a, a_data)
r1 = a + 1
expected1 = a_data + 1
np.testing.assert_array_equal(r1, expected1)
r2 = r1.dot(r1)
expected2 = expected1.dot(expected1)
np.testing.assert_array_almost_equal(r2, expected2)
web_session = Session.default_or_local()._sess
self.assertEqual(web_session.get_task_count(), 3)
a = mt.ones((10, 10), chunk_size=3)
with self.assertRaises(ValueError):
a.fetch()
r = a.dot(a)
np.testing.assert_array_equal(r.to_numpy(), np.ones((10, 10)) * 10)
with new_session('http://' + cluster._web_endpoint).as_default():
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.arithmetic import add
self.assertIsInstance(Session.default_or_local()._sess, WebSession)
with option_context({'eager_mode': True}):
data1 = pd.DataFrame(np.random.rand(10, 10), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],
columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])
df1 = from_pandas_df(data1, chunk_size=5)
pd.testing.assert_frame_equal(df1.fetch(), data1)
data2 = pd.DataFrame(np.random.rand(10, 10), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3],
columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])
df2 = from_pandas_df(data2, chunk_size=6)
pd.testing.assert_frame_equal(df2.fetch(), data2)
df3 = add(df1, df2)
pd.testing.assert_frame_equal(df3.fetch(), data1 + data2)
s1 = pd.Series(np.random.rand(10), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3])
series1 = from_pandas_series(s1)
pd.testing.assert_series_equal(series1.fetch(), s1)
web_session = Session.default_or_local()._sess
self.assertEqual(web_session.get_task_count(), 4)
def testSparse(self, *_):
import scipy.sparse as sps
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=False) as cluster:
session = cluster.session
# calculate sparse with no element in matrix
a = sps.csr_matrix((10000, 10000))
b = sps.csr_matrix((10000, 1))
t1 = mt.tensor(a)
t2 = mt.tensor(b)
session.run(t1 * t2, timeout=_exec_timeout)
def testRunWithoutCompose(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=False) as cluster:
session = cluster.session
arr1 = (mt.ones((10, 10), chunk_size=3) + 1) * 2
r1 = session.run(arr1, timeout=_exec_timeout)
arr2 = (mt.ones((10, 10), chunk_size=4) + 1) * 2
r2 = session.run(arr2, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r1, r2)
def testExistingOperand(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
a = mt.ones((3, 3), chunk_size=2)
r1 = session.run(a, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r1, np.ones((3, 3)))
b = mt.ones((4, 4), chunk_size=2) + 1
r2 = session.run(b, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r2, np.ones((4, 4)) + 1)
del a
b = mt.ones((3, 3), chunk_size=2)
r2 = session.run(b, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r2, np.ones((3, 3)))
del b
c = mt.ones((4, 4), chunk_size=2) + 1
c = c.dot(c)
r3 = session.run(c, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r3, np.ones((4, 4)) * 16)
d = mt.ones((5, 5), chunk_size=2)
d = d.dot(d)
r4 = session.run(d, compose=False, timeout=_exec_timeout)
np.testing.assert_array_equal(r4, np.ones((5, 5)) * 5)
def testTiledTensor(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M') as cluster:
session = cluster.session
a = mt.ones((10, 10), chunk_size=3)
b = a.dot(a)
b = b.tiles()
r = session.run(b, timeout=_exec_timeout)
np.testing.assert_array_equal(r, np.ones((10, 10)) * 10)
a = a.tiles()
b = a + 1
r = session.run(b, timeout=_exec_timeout)
np.testing.assert_array_equal(r, np.ones((10, 10)) + 1)
def testFetchSlices(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
a = mt.random.rand(10, 10, 10, chunk_size=3)
r = session.run(a)
r_slice1 = session.fetch(a[:2])
np.testing.assert_array_equal(r[:2], r_slice1)
r_slice2 = session.fetch(a[2:8, 2:8])
np.testing.assert_array_equal(r[2:8, 2:8], r_slice2)
r_slice3 = session.fetch(a[:, 2:])
np.testing.assert_array_equal(r[:, 2:], r_slice3)
r_slice4 = session.fetch(a[:, 2:, -5:])
np.testing.assert_array_equal(r[:, 2:, -5:], r_slice4)
r_slice5 = session.fetch(a[0])
np.testing.assert_array_equal(r[0], r_slice5)
# test repr
with np.printoptions(threshold=100):
raw = np.random.randint(1000, size=(3, 4, 6))
b = mt.tensor(raw, chunk_size=3)
self.assertEqual(repr(b.execute(session=session)),
repr(raw))
web_session = new_session('http://' + cluster._web_endpoint)
r = web_session.run(a)
r_slice1 = web_session.fetch(a[:2])
np.testing.assert_array_equal(r[:2], r_slice1)
r_slice2 = web_session.fetch(a[2:8, 2:8])
np.testing.assert_array_equal(r[2:8, 2:8], r_slice2)
r_slice3 = web_session.fetch(a[:, 2:])
np.testing.assert_array_equal(r[:, 2:], r_slice3)
r_slice4 = web_session.fetch(a[:, 2:, -5:])
np.testing.assert_array_equal(r[:, 2:, -5:], r_slice4)
r_slice5 = web_session.fetch(a[4])
np.testing.assert_array_equal(r[4], r_slice5)
def testFetchDataFrameSlices(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
a = mt.random.rand(10, 10, chunk_size=3)
df = md.DataFrame(a)
r = session.run(df)
r_slice1 = session.fetch(df.iloc[:2])
pd.testing.assert_frame_equal(r.iloc[:2], r_slice1)
r_slice2 = session.fetch(df.iloc[2:8, 2:8])
pd.testing.assert_frame_equal(r.iloc[2:8, 2:8], r_slice2)
r_slice3 = session.fetch(df.iloc[:, 2:])
pd.testing.assert_frame_equal(r.iloc[:, 2:], r_slice3)
r_slice4 = session.fetch(df.iloc[:, -5:])
pd.testing.assert_frame_equal(r.iloc[:, -5:], r_slice4)
r_slice5 = session.fetch(df.iloc[4])
pd.testing.assert_series_equal(r.iloc[4], r_slice5)
r_slice6 = session.fetch(df.iloc[6:9])
pd.testing.assert_frame_equal(r.iloc[6:9], r_slice6)
# test repr
pdf = pd.DataFrame(np.random.randint(1000, size=(80, 10)))
df2 = md.DataFrame(pdf, chunk_size=41)
self.assertEqual(repr(df2.execute(session=session)), repr(pdf))
ps = pdf[0]
s = md.Series(ps, chunk_size=41)
self.assertEqual(repr(s.execute(session=session)), repr(ps))
web_session = new_session('http://' + cluster._web_endpoint)
r = web_session.run(df)
r_slice1 = web_session.fetch(df.iloc[:2])
pd.testing.assert_frame_equal(r.iloc[:2], r_slice1)
r_slice2 = web_session.fetch(df.iloc[2:8, 2:8])
pd.testing.assert_frame_equal(r.iloc[2:8, 2:8], r_slice2)
r_slice3 = web_session.fetch(df.iloc[:, 2:])
pd.testing.assert_frame_equal(r.iloc[:, 2:], r_slice3)
r_slice4 = web_session.fetch(df.iloc[:, -5:])
pd.testing.assert_frame_equal(r.iloc[:, -5:], r_slice4)
r_slice5 = web_session.fetch(df.iloc[4])
pd.testing.assert_series_equal(r.iloc[4], r_slice5)
r_slice6 = web_session.fetch(df.iloc[6:9])
pd.testing.assert_frame_equal(r.iloc[6:9], r_slice6)
def testClusterSession(self):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
sess1 = cluster.session
sess2 = new_session(cluster.endpoint, session_id=sess1.session_id)
self.assertNotEqual(sess1, sess2)
self.assertEqual(sess1.session_id, sess2.session_id)
session_id = str(uuid.uuid4())
with self.assertRaises(ValueError) as cm:
new_session(cluster.endpoint, session_id=session_id)
expected_msg = "The session with id = %s doesn't exist" % session_id
self.assertEqual(cm.exception.args[0], expected_msg)
sess1.close()
with self.assertRaises(ValueError) as cm:
new_session(cluster.endpoint, session_id=sess1.session_id)
expected_msg = "The session with id = %s doesn't exist" % sess1.session_id
self.assertEqual(cm.exception.args[0], expected_msg)
web_sess1 = new_session('http://' + cluster._web_endpoint)
web_sess2 = new_session('http://' + cluster._web_endpoint, session_id=web_sess1.session_id)
self.assertNotEqual(web_sess1, web_sess2)
self.assertEqual(web_sess1.session_id, web_sess2.session_id)
session_id = str(uuid.uuid4())
with self.assertRaises(ValueError) as cm:
new_session('http://' + cluster._web_endpoint, session_id=session_id)
expected_msg = "The session with id = %s doesn't exist" % session_id
self.assertEqual(cm.exception.args[0], expected_msg)
web_sess1.close()
with self.assertRaises(ValueError) as cm:
new_session('http://' + cluster._web_endpoint, session_id=web_sess1.session_id)
expected_msg = "The session with id = %s doesn't exist" % web_sess1.session_id
self.assertEqual(cm.exception.args[0], expected_msg)
def testTensorOrder(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
data = np.asfortranarray(np.random.rand(10, 7))
a = mt.asfortranarray(data, chunk_size=3)
b = (a + 1) * 2
res = session.run(b, timeout=_exec_timeout)
expected = (data + 1) * 2
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
c = b.reshape(7, 10, order='F')
res = session.run(c, timeout=_exec_timeout)
expected = ((data + 1) * 2).reshape((7, 10), order='F')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
def testIterativeDependency(self, *_):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True):
with tempfile.TemporaryDirectory() as d:
file_path = os.path.join(d, 'test.csv')
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])
df.to_csv(file_path)
mdf1 = md.read_csv(file_path, index_col=0, chunk_bytes=10)
r1 = mdf1.iloc[:3].to_pandas()
pd.testing.assert_frame_equal(df[:3], r1)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10)
r2 = mdf2.iloc[:3].to_pandas()
pd.testing.assert_frame_equal(df[:3], r2)
f = mdf1[mdf1.a > mdf2.a]
r3 = f.iloc[:3].to_pandas()
pd.testing.assert_frame_equal(r3, df[df.a > df.a])
def testDataFrameShuffle(self, *_):
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
from mars.dataframe.merge.merge import merge
from mars.dataframe.utils import sort_dataframe_inplace
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
data1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
data2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df1 = from_pandas_df(data1, chunk_size=2)
df2 = from_pandas_df(data2, chunk_size=2)
r1 = data1.merge(data2)
r2 = session.run(merge(df1, df2), timeout=_exec_timeout)
pd.testing.assert_frame_equal(sort_dataframe_inplace(r1, 0), sort_dataframe_inplace(r2, 0))
r1 = data1.merge(data2, how='inner', on=['a', 'b'])
r2 = session.run(merge(df1, df2, how='inner', on=['a', 'b']), timeout=_exec_timeout)
pd.testing.assert_frame_equal(sort_dataframe_inplace(r1, 0), sort_dataframe_inplace(r2, 0))
web_session = new_session('http://' + cluster._web_endpoint)
r1 = data1.merge(data2)
r2 = web_session.run(merge(df1, df2), timeout=_exec_timeout)
pd.testing.assert_frame_equal(sort_dataframe_inplace(r1, 0), sort_dataframe_inplace(r2, 0))
r1 = data1.merge(data2, how='inner', on=['a', 'b'])
r2 = web_session.run(merge(df1, df2, how='inner', on=['a', 'b']), timeout=_exec_timeout)
pd.testing.assert_frame_equal(sort_dataframe_inplace(r1, 0), sort_dataframe_inplace(r2, 0))
@require_cudf
def testCudaCluster(self, *_):
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = df.to_gpu()
result = session.run(cdf)
pd.testing.assert_frame_equal(pdf, result)
def testTileContextInLocalCluster(self):
with new_cluster(scheduler_n_process=2, worker_n_process=2,
shared_memory='20M', modules=[__name__], web=True) as cluster:
session = cluster.session
raw = np.random.rand(10, 20)
data = mt.tensor(raw)
session.run(data)
data2 = TileWithContextOperand().new_tensor([data], shape=data.shape)
result = session.run(data2)
np.testing.assert_array_equal(raw * raw.nbytes, result)
@unittest.skipIf(h5py is None, 'h5py not installed')
def testStoreHDF5ForLocalCluster(self):
with new_cluster(worker_n_process=2,
shared_memory='20M', web=True) as cluster:
session = cluster.session
raw = np.random.RandomState(0).rand(10, 20)
t = mt.tensor(raw, chunk_size=11)
dataset = 'test_dataset'
with tempfile.TemporaryDirectory() as d:
filename = os.path.join(d, 'test_read_{}.hdf5'.format(int(time.time())))
r = mt.tohdf5(filename, t, dataset=dataset)
session.run(r, timeout=_exec_timeout)
with h5py.File(filename, 'r') as f:
result = np.asarray(f[dataset])
np.testing.assert_array_equal(result, raw)
def testRemoteFunctionInLocalCluster(self):
with new_cluster(scheduler_n_process=2, worker_n_process=3,
shared_memory='20M', modules=[__name__], web=True) as cluster:
session = cluster.session
def f(x):
return x + 1
def g(x, y):
return x * y
a = mr.spawn(f, 3)
b = mr.spawn(f, 4)
c = mr.spawn(g, (a, b))
r = session.run(c, timeout=_exec_timeout)
self.assertEqual(r, 20)
e = mr.spawn(f, mr.spawn(f, 2))
r = session.run(e, timeout=_exec_timeout)
self.assertEqual(r, 4)
session2 = new_session(cluster.endpoint)
expect_session_id = session2.session_id
def f2():
session = Session.default
assert isinstance(session._sess, ClusterSession)
assert session._sess.session_id == expect_session_id
t = mt.ones((3, 2))
return t.sum().to_numpy()
self.assertEqual(cloudpickle.loads(cloudpickle.dumps(Session.default)).session_id,
session.session_id)
self.assertIsInstance(serialize_function(f2), bytes)
d = mr.spawn(f2, retry_when_fail=False)
r = session2.run(d, timeout=_exec_timeout)
self.assertEqual(r, 6)
# test input tileable
def f(t, x):
return (t * x).sum().to_numpy()
rs = np.random.RandomState(0)
raw = rs.rand(5, 4)
t1 = mt.tensor(raw, chunk_size=3)
t2 = t1.sum(axis=0)
s = mr.spawn(f, args=(t2, 3), retry_when_fail=False)
r = session.run(s, timeout=_exec_timeout)
expected = (raw.sum(axis=0) * 3).sum()
self.assertAlmostEqual(r, expected)
# test named tileable
session3 = new_session(cluster.endpoint)
t = mt.ones((10, 10), chunk_size=3)
session3.run(t, name='t_name')
def f3():
import mars.tensor as mt
s = mt.named_tensor(name='t_name')
return (s + 1).to_numpy()
d = mr.spawn(f3, retry_when_fail=False)
r = session3.run(d, timeout=_exec_timeout)
np.testing.assert_array_equal(r, np.ones((10, 10)) + 1)
def testLearnInLocalCluster(self, *_):
from mars.learn.neighbors import NearestNeighbors
from sklearn.neighbors import NearestNeighbors as SkNearestNeighbors
from mars.learn.metrics import roc_curve, auc
from sklearn.metrics import roc_curve as sklearn_roc_curve, auc as sklearn_auc
with new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M') as cluster:
rs = np.random.RandomState(0)
raw_X = rs.rand(10, 5)
raw_Y = rs.rand(8, 5)
X = mt.tensor(raw_X, chunk_size=7)
Y = mt.tensor(raw_Y, chunk_size=(5, 3))
nn = NearestNeighbors(n_neighbors=3)
nn.fit(X)
ret = nn.kneighbors(Y, session=cluster.session)
snn = SkNearestNeighbors(n_neighbors=3)
snn.fit(raw_X)
expected = snn.kneighbors(raw_Y)
result = [r.fetch() for r in ret]
np.testing.assert_almost_equal(result[0], expected[0])
np.testing.assert_almost_equal(result[1], expected[1])
rs = np.random.RandomState(0)
raw = pd.DataFrame({'a': rs.randint(0, 10, (10,)),
'b': rs.rand(10)})
df = md.DataFrame(raw)
y = df['a'].to_tensor().astype('int')
pred = df['b'].to_tensor().astype('float')
fpr, tpr, thresholds = roc_curve(y, pred, pos_label=2)
m = auc(fpr, tpr)
sk_fpr, sk_tpr, sk_thresholds = sklearn_roc_curve(raw['a'].to_numpy().astype('int'),
raw['b'].to_numpy().astype('float'),
pos_label=2)
expect_m = sklearn_auc(sk_fpr, sk_tpr)
self.assertAlmostEqual(m.fetch(), expect_m)
| 40.763393 | 104 | 0.57906 |
aced1ceae0a0921bcb5a9449f8e0cd26c7d2ee60 | 684 | py | Python | SDKs/Aspose.Slides-Cloud-SDK-for-Python/asposeslidescloud/models/TextItem.py | mudassirfayyaz/Aspose.Slides-for-Cloud | a024892c2f89c5bc51b9de349cc5f4cfbbb2ab94 | [
"MIT"
] | null | null | null | SDKs/Aspose.Slides-Cloud-SDK-for-Python/asposeslidescloud/models/TextItem.py | mudassirfayyaz/Aspose.Slides-for-Cloud | a024892c2f89c5bc51b9de349cc5f4cfbbb2ab94 | [
"MIT"
] | null | null | null | SDKs/Aspose.Slides-Cloud-SDK-for-Python/asposeslidescloud/models/TextItem.py | mudassirfayyaz/Aspose.Slides-for-Cloud | a024892c2f89c5bc51b9de349cc5f4cfbbb2ab94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
class TextItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Uri': 'ResourceUri',
'Text': 'str'
}
self.attributeMap = {
'Uri': 'Uri', 'Text': 'Text'}
self.Uri = None # ResourceUri
self.Text = None # str
| 26.307692 | 97 | 0.555556 |
aced1e531cb332e1f1f2e6bb4aeed6bcf11b6c89 | 132 | py | Python | mechroutines/ktp/__init__.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 1 | 2022-03-22T20:47:04.000Z | 2022-03-22T20:47:04.000Z | mechroutines/ktp/__init__.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 1 | 2021-02-12T21:11:16.000Z | 2021-12-07T21:32:14.000Z | mechroutines/ktp/__init__.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 8 | 2019-12-18T20:09:46.000Z | 2020-11-14T16:37:28.000Z | """ rate stuff
"""
from mechroutines.ktp import rates
from mechroutines.ktp import label
__all__ = [
'rates',
'label',
]
| 11 | 34 | 0.651515 |
aced1ec2b7ccf34b498eca7f769f1cba5114e837 | 155 | py | Python | src/while_loop.py | piyushc0/Python | c5bbb2311b1d12fc834f35fd3bdf14dc4adf0e89 | [
"MIT"
] | 1 | 2020-10-21T02:46:52.000Z | 2020-10-21T02:46:52.000Z | src/while_loop.py | Phunt05/Python | c5bbb2311b1d12fc834f35fd3bdf14dc4adf0e89 | [
"MIT"
] | null | null | null | src/while_loop.py | Phunt05/Python | c5bbb2311b1d12fc834f35fd3bdf14dc4adf0e89 | [
"MIT"
] | null | null | null | i = 1
while i <= 5:
print(i)
i = i + 1
print("Done")
# Triangles
j = 1
while j <= 10:
print('*' * j)
j = j + 1
print("It is a triangle")
| 11.071429 | 25 | 0.470968 |
aced2093eeaa625952aba426417bb2dcd899aa84 | 11,029 | py | Python | src/maze_solver/maze_solving_click_mode/maze_solver.py | Kihoon0716/self_driving-loading- | 084874ca1558ee92883bb32a74aa72726ac31744 | [
"Apache-2.0"
] | 10 | 2018-05-06T06:31:04.000Z | 2021-01-05T03:15:04.000Z | src/maze_solver/maze_solving_click_mode/maze_solver.py | Kihoon0716/self_driving-loading- | 084874ca1558ee92883bb32a74aa72726ac31744 | [
"Apache-2.0"
] | 1 | 2018-02-02T04:05:57.000Z | 2018-02-02T04:05:57.000Z | src/maze_solver/maze_solving_click_mode/maze_solver.py | Kihoon0716/self_driving-loading- | 084874ca1558ee92883bb32a74aa72726ac31744 | [
"Apache-2.0"
] | 12 | 2018-02-04T05:34:37.000Z | 2020-06-11T08:24:55.000Z | #!/usr/bin/env python
import rospy
from nav_msgs.msg import OccupancyGrid
from std_msgs.msg import Int16
from geometry_msgs.msg import Twist
from compressed_image_transport import *
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from tf2_msgs.msg import TFMessage
import tf
import math
import cv2
import numpy as np
from maze_solving_algorithm import Solver
import threading
def existence(arr, num):
for i in range(0, len(arr)):
if arr[i] == num:
return True
return False
def configure(arr):
arr_ = []
for i in range(0, len(arr)):
if not existence(arr_, arr[i]):
arr_.append(arr[i])
return arr_
def distance_dot2line(a, b, c, x0, y0):
distance = abs(x0*a + y0*b + c)/math.sqrt(a*a + b*b)
return distance
def distance_dot2dot(x1, y1, x2, y2):
distance = math.sqrt((x2 - x1)*(x2 - x1) + (y2 - y1)*(y2-y1))
return distance
def collision_test(start, goal, map, difference_low, difference_col):
start = [start[0] - difference_low, start[1] - difference_col]
goal = [goal[0] - difference_low, goal[1] - difference_col]
if goal[0] != start[0]:
a = (goal[1] - start[1]) / (goal[0] - start[0])
b = -a*start[0] + start[1]
for i in range(min(start[0], goal[0]), max(start[0], goal[0])):
if map[i][int(a*i + b)] == True:
return 'danger'
else:
for i in range(min(start[1], goal[1]), max(start[1], goal[1])):
if map[start[0]][i] == True:
return 'danger'
return 'safe'
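# Note (hedged sketch): collision_test samples the straight segment between
# start and goal one grid row (or one column, for vertical segments) at a
# time and reports 'danger' as soon as an occupied (True) map cell is hit;
# a coarse stand-in for a full Bresenham line trace.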
def euler_from_quaternion(quaternion):
theta = tf.transformations.euler_from_quaternion(quaternion)[2] - 3.141592 / 2
if theta < 0:
theta = theta + 3.141592 * 2
return theta
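# Example (hedged): the identity quaternion [0, 0, 0, 1] has yaw 0, so this
# returns 0 - pi/2 wrapped into [0, 2*pi), i.e. roughly 4.712 (3*pi/2).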
class Maze_pathfinder():
def __init__(self):
self._sub = rospy.Subscriber('/map', OccupancyGrid, self.callback, queue_size=1)
self._sub = rospy.Subscriber('/odom', Odometry, self.callback2, queue_size=1)
self._sub = rospy.Subscriber('/scan', LaserScan, self.callback3, queue_size=1)
# self._sub = rospy.Subscriber('/tf', TFMessage, self.callback4, queue_size=1)
self._pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self.img = np.zeros((384, 384, 3), np.uint8)
self.low_position = 0
self.col_position = 0
self.destination_low = 0
self.destination_col = 0
self.theta = 0
self.state = 'stop' # path_finding, stop, going, direction_setting
self.shortest_path = [[0,0]]
self.path = [0,0]
def define_destination(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.destination_low = y
self.destination_col = x
self.state = 'path_finding'
def callback(self, map):
thread1 = threading.Thread(target=self.path_finding, args=(map,))
thread1.setDaemon(True)
thread1.start()
def path_finding(self, map):
self.img = np.zeros((384, 384, 3), np.uint8)
for i in range(0, 384):
for j in range(0, 384):
if map.data[384*j + i] == -1:
self.img[i][j][0] = 255
if map.data[384*j + i] == 0:
self.img[i][j][1] = 255
if map.data[384*j + i] == 100:
self.img[i][j][0] = 0
self.img[i][j][1] = 0
self.img[i][j][2] = 0
# Draw direction
self.img = cv2.line(self.img, (self.col_position, self.low_position), (self.col_position + int(10*math.cos(self.theta)), self.low_position - int(10*math.sin(self.theta))), (0, 255, 255), 1)
if self.state == 'path_finding':
print('path finding....')
solver = Solver([self.low_position, self.col_position], [self.destination_low, self.destination_col], map.data)
solver.solve_distance()
solver.find_shortest_path()
self.shortest_path = solver.shortest_path
self.state = 'direction_setting'
print('path finding end!')
while(1):
if len(self.shortest_path) > 2:
if solver.collision_test([self.low_position, self.col_position], self.shortest_path[len(self.shortest_path)-3]) == 'safe':
_ = self.shortest_path.pop()
print('popped out')
else:
print('collision')
break
else:
break
self.path = self.shortest_path[len(self.shortest_path)-2]
if self.path == [0,0]:
print "something wrong!"
if self.state != 'stop' and self.state != 'path_finding':
self.img = cv2.line(self.img, (self.path[1], self.path[0]), (self.path[1], self.path[0]), (0, 170, 255), 2)
for i in range(len(self.shortest_path)):
self.img = cv2.line(self.img, (self.shortest_path[i][1], self.shortest_path[i][0]), (self.shortest_path[i][1], self.shortest_path[i][0]), (255, 0, 255), 2)
if i != 0:
self.img = cv2.line(self.img, (self.shortest_path[i][1], self.shortest_path[i][0]), (self.shortest_path[i - 1][1], self.shortest_path[i - 1][0]), (0, 255, 255), 1)
else:
self.img = cv2.line(self.img, (self.col_position, self.low_position), (self.shortest_path[i - 1][1], self.shortest_path[i - 1][0]), (0, 255, 255), 1)
def callback2(self, odometry):
#print 'map_to_odom', self.tf_map_to_odom[0], self.tf_map_to_odom[1]
#print 'odom_to_base', self.tf_odom_to_base[0], self.tf_odom_to_base[1]
#print 'odom', odometry.pose.pose.position.x, odometry.pose.pose.position.y
#print self.tf_map_to_odom[0] + self.tf_odom_to_base[0], self.tf_map_to_odom[1] + self.tf_odom_to_base[1]
#quaternion = (odometry.pose.pose.orientation.x, odometry.pose.pose.orientation.y, odometry.pose.pose.orientation.z, odometry.pose.pose.orientation.w)
#self.theta = euler_from_quaternion(quaternion)
direction_desired = math.atan2(self.low_position - self.path[0], self.path[1] - self.col_position)
if direction_desired < 0:
direction_desired = direction_desired + 3.141592*2
if self.state == 'direction_setting':
# calculate degree and direction
if direction_desired > self.theta:
if direction_desired - self.theta < 3.141592:
turn_direction = 'left'
else:
turn_direction = 'right'
else:
if self.theta - direction_desired < 3.141592:
turn_direction = 'right'
else:
turn_direction = 'left'
# publish topic
difference = abs(direction_desired - self.theta)
if difference > 3.141592:
difference = 3.141592*2 - difference
if difference > 0.3:
turn_speed = 0.6
elif difference > 0.2:
turn_speed = 0.3
elif difference > 0.1:
turn_speed = 0.1
elif difference > 0.01:
turn_speed = 0.05
else:
turn_speed = 0
self.state = 'going'
vel = Twist()
if turn_direction =='left':
vel.angular.z = turn_speed
else:
vel.angular.z = - turn_speed
vel.angular.x = 0
vel.angular.y = 0
vel.linear.x = 0
vel.linear.y = 0
vel.linear.z = 0
self._pub.publish(vel)
if self.state == 'going':
a = math.tan(self.theta + 3.141592/2)
b = -1
c = -a*self.low_position + self.col_position
distance_expected = distance_dot2line(a, b, c, self.path[0], self.path[1])
distance_now = distance_dot2dot(self.low_position, self.col_position, self.path[0], self.path[1])
distance_from_destination = distance_dot2dot(self.low_position, self.col_position, self.destination_low, self.destination_col)
# print 'expected : ', distance_expected, 'now : ', distance_now
if distance_expected > 1:
self.state = 'direction_setting'
if distance_from_destination == 0:
self.state = 'stop'
elif distance_now == 0:
self.state = 'path_finding'
vel = Twist()
vel.angular.x = 0
vel.angular.y = 0
vel.angular.z = 0
vel.linear.x = 0.06
vel.linear.y = 0
vel.linear.z = 0
self._pub.publish(vel)
if self.state == 'stop':
vel = Twist()
vel.angular.x = 0
vel.angular.y = 0
vel.angular.z = 0
vel.linear.x = 0
vel.linear.y = 0
vel.linear.z = 0
self._pub.publish(vel)
def callback3(self, scan):
cv2.namedWindow('SLAM')
cv2.setMouseCallback('SLAM', self.define_destination)
img_copy = np.zeros((384, 384, 3), np.uint8)
np.copyto(img_copy, self.img)
for i in range(360):
low_scan = int(scan.ranges[i] * math.sin(i*3.141592/180 + self.theta) * 20)
col_scan = int(scan.ranges[i] * math.cos(i*3.141592/180 + self.theta) * 20)
img_copy[self.low_position - low_scan][self.col_position + col_scan][0] = 0
img_copy[self.low_position - low_scan][self.col_position + col_scan][1] = 0
img_copy[self.low_position - low_scan][self.col_position + col_scan][2] = 255
img_copy = cv2.line(img_copy, (self.col_position, self.low_position), (self.col_position, self.low_position), (0, 0, 255), 2)
img_large = cv2.resize(img_copy,(1000,1000))
cv2.imshow("SLAM", img_copy), cv2.waitKey(1)
cv2.imshow("SLAM_large", img_large), cv2.waitKey(1)
def tf_listener_map_to_base(self):
listener = tf.TransformListener()
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
try:
(trans, rot) = listener.lookupTransform('/map', '/base_footprint', rospy.Time(0))
self.low_position = 192 + int((trans[0]) * 20) + 7
self.col_position = 192 + int((trans[1]) * 20) + 8
self.theta = euler_from_quaternion(rot)
print('basefootprint', trans, rot)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rate.sleep()
def main(self):
thread_tf_listener_map_to_base = threading.Thread(target = self.tf_listener_map_to_base)
thread_tf_listener_map_to_base.start()
rospy.spin()
if __name__ == '__main__':
rospy.init_node('maze_pathfinder')
mazesolver = Maze_pathfinder()
mazesolver.main()
| 39.960145 | 197 | 0.568955 |
aced2125877d1ef54871d1a8907e7ecb932661eb | 4,845 | py | Python | tools/auralink/current.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 8 | 2016-08-03T19:35:03.000Z | 2019-12-15T06:25:05.000Z | tools/auralink/current.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 4 | 2018-09-27T15:48:56.000Z | 2018-11-05T12:38:10.000Z | tools/auralink/current.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5 | 2017-06-28T19:15:36.000Z | 2020-02-19T19:31:24.000Z | import math
from PropertyTree import PropertyNode
airdata_node = PropertyNode('/sensors/airdata')
filter_node = PropertyNode('/filters/filter/0')
pilot_node = PropertyNode('/sensors/pilot_input')
status_node = PropertyNode('/status')
pos_node = PropertyNode("/position")
vel_node = PropertyNode("/velocity")
targets_node = PropertyNode("/autopilot/targets")
tecs_node = PropertyNode("/autopilot/tecs")
power_node = PropertyNode("/sensors/power")
tecs_config_node = PropertyNode("/config/autopilot/TECS")
r2d = 180.0 / math.pi
mps2kt = 1.9438444924406046432
kt2mps = 0.5144444444444444444
ft2m = 0.3048
g = 9.81
last_time = 0.0
# crude battery % interpolation model
# 100 - 4.2
# 83% - 3.8
# 27% - 3.65
# 0% - 3.5
batv = [ 3.3, 3.50, 3.65, 3.80, 4.20 ]
batp = [ 0.0, 0.05, 0.27, 0.83, 1.00 ]
from scipy.interpolate import interp1d
batf = interp1d(batv, batp)
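# Example (hedged): batf(3.8) -> ~0.83 and batf(4.2) -> 1.0. Inputs outside
# [3.3, 4.2] raise ValueError, which is why cell_volts is clamped before the
# lookup in compute_derived_data() below.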
filt_perc = None  # initialized from the first battery sample in compute_derived_data()
def compute_tecs():
if filter_node.getDouble('timestamp') < 0.01:
# do nothing if filter not inited
return
mass_kg = tecs_config_node.getDouble("mass_kg")
if mass_kg < 0.01:
mass_kg = 3.0
if tecs_config_node.hasChild("weight_bal"):
wb = tecs_config_node.getDouble("weight_bal")
else:
wb = 1.0
# FIXME: weight_bal is forced off for now
wb = 0.0
alt_m = filter_node.getDouble("altitude_m")
vel_mps = vel_node.getDouble("airspeed_smoothed_kt") * kt2mps
target_alt_m = targets_node.getDouble("altitude_msl_ft") * ft2m
target_vel_mps = targets_node.getDouble("airspeed_kt") * kt2mps
energy_pot = mass_kg * g * alt_m
energy_kin = 0.5 * mass_kg * vel_mps * vel_mps
target_pot = mass_kg * g * target_alt_m
target_kin = 0.5 * mass_kg * target_vel_mps * target_vel_mps
error_pot = target_pot - energy_pot
error_kin = target_kin - energy_kin
# print(filter_node.getDouble('timestamp'), 'target_alt:', target_alt_m, 'tgt_pot:', target_pot, 'E_pot:', energy_pot, 'Err_kin:', error_kin, 'Err_pot:', error_pot)
error_total = error_pot + error_kin
error_bal = (2.0 - wb) * error_kin - wb * error_pot
tecs_node.setDouble("energy_total", energy_pot + energy_kin )
tecs_node.setDouble("target_total", target_pot + target_kin )
tecs_node.setDouble("error_total", error_total)
tecs_node.setDouble("error_diff", error_bal)
def compute_derived_data():
global last_time
# compute ground track heading/speed
vn = filter_node.getDouble("vn_ms")
ve = filter_node.getDouble("ve_ms")
vd = filter_node.getDouble("vd_ms")
hdg = (math.pi * 0.5 - math.atan2(vn, ve)) * r2d
vel_ms = math.sqrt( vn*vn + ve*ve + vd*vd )
filter_node.setDouble("groundtrack_deg", hdg)
filter_node.setDouble("groundspeed_ms", vel_ms)
filter_node.setDouble("groundspeed_kt", vel_ms * mps2kt)
# compute frame dt
current_time = filter_node.getDouble('timestamp')
dt = current_time - last_time
last_time = current_time
# local 'airborne' helper (not official)
if vel_node.getDouble('airspeed_smoothed_kt') >= 15:
in_flight = True
else:
in_flight = False
status_node.setBool("in_flight", in_flight)
# local autopilot timer
ap_enabled = False
if pilot_node.getDouble("channel", 0) > 0:
ap_enabled = True
if in_flight and ap_enabled:
timer = status_node.getDouble('local_autopilot_timer')
timer += dt
status_node.setDouble('local_autopilot_timer', timer)
# estimate distance traveled from filter velocity and dt
if in_flight:
if not status_node.getBool('onboard_flight_time'):
ft = status_node.getDouble('flight_timer')
ft += dt
status_node.setDouble('flight_timer', ft)
od = status_node.getDouble('flight_odometer')
od += vel_ms * dt
status_node.setDouble('flight_odometer', od)
throttle_timer = status_node.getDouble("throttle_timer")
if pilot_node.getDouble("channel", 2) > 0.1:
throttle_timer += dt
status_node.setDouble("throttle_timer", throttle_timer)
# autopilot error metrics
roll_error = targets_node.getDouble('roll_deg') - filter_node.getDouble('roll_deg')
#print 'error %.4f,%.1f' % (filter_node.getDouble('timestamp'), roll_error)
volts = power_node.getDouble("main_vcc")
amps = power_node.getDouble("main_amps")
watts = volts * amps
power_node.setDouble("main_watts", watts)
cell_volts = power_node.getDouble("cell_vcc")
if cell_volts < 3.3: cell_volts = 3.3
if cell_volts > 4.2: cell_volts = 4.2
batt_perc = batf(cell_volts)
global filt_perc
if filt_perc is None:
filt_perc = batt_perc
else:
filt_perc = 0.9995 * filt_perc + 0.0005 * batt_perc
power_node.setDouble("battery_perc", filt_perc)
# TECS
compute_tecs()
| 33.881119 | 168 | 0.677193 |
aced218c29784e7f0462073b875d8102cf45be9c | 1,474 | py | Python | test/build.py | Enhex/arithmetic_class | 0e8c930edc3385f809a03c00422a944e4a00ccc7 | [
"Apache-2.0"
] | null | null | null | test/build.py | Enhex/arithmetic_class | 0e8c930edc3385f809a03c00422a944e4a00ccc7 | [
"Apache-2.0"
] | null | null | null | test/build.py | Enhex/arithmetic_class | 0e8c930edc3385f809a03c00422a944e4a00ccc7 | [
"Apache-2.0"
] | null | null | null | import os
from sys import platform
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--release", help="Generate release build (debug by default).", action="store_true")
args = parser.parse_args()
build_type = 'Release' if args.release else 'Debug'
source = os.path.dirname(os.path.realpath(__file__))
def create_symlink(src, dst):
# create all parent directories of the symlink one
parent_dir = os.path.dirname(dst)
os.makedirs(parent_dir, exist_ok=True)
try:
os.symlink(src, dst)
except OSError:
# symlink already exists (or the platform forbids it); safe to continue
pass
def build(source, build_type, symlinks=(), symlink_pairs=()):
build_dir = '../../build/' + build_type + '/'
# create build directory
os.makedirs(build_dir, exist_ok=True)
os.chdir(build_dir)
# symlinks
for path in symlinks:
create_symlink(source + '/' + path, './' + path)
for src_path, dst_path in symlink_pairs:
create_symlink(source + '/' + src_path, './' + dst_path)
# conan
#os.system('conan install "' + source + '/" --build=outdated -s arch=x86_64 -s build_type=' + build_type)
# choose premake generator based on OS
os.chdir(source)
def premake_generate(generator):
os.system('premake5 ' + generator + ' --location="' + build_dir + '"')
if platform == 'win32':
premake_generate('vs2019')
else:
premake_generate('gmake2')
premake_generate('vscode')
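# Usage sketch (hedged): "python build.py" generates Debug project files
# under ../../build/Debug/, and "python build.py -r" targets Release instead;
# assumes premake5 is available on PATH.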
build(source, build_type) | 27.811321 | 110 | 0.648575 |
aced219fb7fd93392cc73c80266adaf5601ec3bd | 133 | py | Python | cafe/__init__.py | m-star18/cafeteria-simulation | ae2ee6d749ad438e6e830a9488b272b31995f60a | [
"Apache-2.0"
] | 1 | 2021-06-11T17:33:37.000Z | 2021-06-11T17:33:37.000Z | cafe/__init__.py | m-star18/cafeteria-simulation | ae2ee6d749ad438e6e830a9488b272b31995f60a | [
"Apache-2.0"
] | 10 | 2020-09-05T15:25:12.000Z | 2021-05-12T23:07:58.000Z | cafe/__init__.py | m-star18/cafeteria-simulation | ae2ee6d749ad438e6e830a9488b272b31995f60a | [
"Apache-2.0"
] | null | null | null | from cafe import core
from cafe import example
Cafeteria = core.Cafeteria
TOYOTA = example.Toyota()
basic = example.basic_algorithm
| 19 | 31 | 0.804511 |
aced21f7f783e8eb2bdca32336c7984af76cd022 | 38,164 | py | Python | xml_report_generator.py | jamasoftware-ps/custom_report_generator | 483c01ba84e0f9682065ab9008e33129fc24ae01 | [
"MIT"
] | null | null | null | xml_report_generator.py | jamasoftware-ps/custom_report_generator | 483c01ba84e0f9682065ab9008e33129fc24ae01 | [
"MIT"
] | null | null | null | xml_report_generator.py | jamasoftware-ps/custom_report_generator | 483c01ba84e0f9682065ab9008e33129fc24ae01 | [
"MIT"
] | null | null | null | import logging
import os
import shutil
import sys
import xml.etree.ElementTree as ET
import xml.dom.minidom
from py_jama_rest_client.client import JamaClient
from py_jama_rest_client.client import APIException
import app.py_jama_script_runner as pjsr
import html2text
# Config Constants: TEST DEV ENV
DI_REQUIREMENT_ITEM_TYPE_ID = 87
SSR_OR_FUNCTIONS_ITEM_TYPE_ID = 87
SUB_SYSTEM_REQUIREMENT_ITEM_TYPE_ID = 87
PROCESS_SPEC_ITEM_TYPE_ID = 138
DESIGN_SPEC_ITEM_TYPE_ID = 135
# Pick-list option API ID's for test class selection
TC_MODULE_OQC_PCS = 624
TC_MODULE_IQC_MAT_MCS = 627
TC_IMFG_MCS = 630
TC_FIELD_INSTALL = 633
TC_SUB_MODULE_OQC_PCS = 636
TC_SUB_MODULE_IQC = 639
TC_ADD_TO_ALL_TC = 0
TC_UNASSIGNED = 0
# Field Names
# DS_UFN Prefix stands for DesignSpec_UniqueFieldName
DS_UFN_PARAMETER_NAME = 'name'
DS_UFN_SW_VAR_NAME = 'sw_variable_name$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
DS_UFN_LSL = 'lsl$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
DS_UFN_USL = 'usl$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
DS_UFN_UNITS = 'units$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
DS_UFN_TCS = 'test_class_selection$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
DS_UFN_FIO = 'for__information_only$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# PS_UFN prefix stands for ProcessSpec_UniqueFieldName
PS_UFN_NAME = 'name'
PS_UFN_PARAMETER_TYPE = 'parameter_type_pl$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
PS_UFN_NOMINAL_TARGET = 'nominal_target$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
PS_UFN_TCS = 'test_class_selection$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
PS_UFN_SW_VAR_NAME = 'name'
PS_UFN_UNITS = 'units$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
# # Config Constants: PROD ENV
# DI_REQUIREMENT_ITEM_TYPE_ID = 89057
# SSR_OR_FUNCTIONS_ITEM_TYPE_ID = 89147
# SUB_SYSTEM_REQUIREMENT_ITEM_TYPE_ID = 89098
# PROCESS_SPEC_ITEM_TYPE_ID = 89141
# DESIGN_SPEC_ITEM_TYPE_ID = 89042
#
# # Pick-list option API ID's for test class selection
# TC_MODULE_OQC_PCS = 157409
# TC_MODULE_IQC_MAT_MCS = 157410
# TC_IMFG_MCS = 157411
# TC_FIELD_INSTALL = 157412
# TC_SUB_MODULE_OQC_PCS = 157838
# TC_SUB_MODULE_IQC = 157836
# # Non QC Input Sheet Spec
# TC_ADD_TO_ALL_TC = 157755
# TC_UNASSIGNED = 157405
#
# # Field Names
# # DS_UFN Prefix stands for DesignSpec_UniqueFieldName
# DS_UFN_PARAMETER_NAME = 'name'
# DS_UFN_SW_VAR_NAME = 'SWV$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# DS_UFN_LSL = 'lsl$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# DS_UFN_USL = 'usl$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# DS_UFN_UNITS = 'units$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# DS_UFN_TCS = 'test_class_selection$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
# DS_UFN_FIO = 'fio__information_only$' + str(DESIGN_SPEC_ITEM_TYPE_ID)
#
# # PS_UFN prefix stands for ProcessSpec_UniqueFieldName
# PS_UFN_NAME = 'name'
# PS_UFN_PARAMETER_TYPE = 'parameter_type$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
# PS_UFN_NOMINAL_TARGET = 'nominal_target$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
# PS_UFN_TCS = 'test_class_selection_$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
# PS_UFN_SW_VAR_NAME = 'sw_variable_name$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
# PS_UFN_UNITS = 'units$' + str(PROCESS_SPEC_ITEM_TYPE_ID)
TC_TO_FILENAME_LOOKUP = {
TC_MODULE_OQC_PCS: 'Module OQC(PCS)',
TC_MODULE_IQC_MAT_MCS: 'Module IQC_MAT(MCS)',
TC_IMFG_MCS: 'iMFG(MCS)',
TC_FIELD_INSTALL: 'FieldInstall',
TC_SUB_MODULE_OQC_PCS: 'Sub-Module OQC(PCS)',
TC_SUB_MODULE_IQC: 'Sub-Module IQC',
}
# The list of custom widgets we want for our customized script runner app
custom_widgets = {
"baseline_id": {
"type": pjsr.STRING_FIELD_WIDGET,
"label": "Baseline ID:"
},
"test_class_selections": {
"type": pjsr.PICKLIST_OPTION_CHOOSER_FIELD_WIDGET,
"label": "Test class selection",
"item_type": PROCESS_SPEC_ITEM_TYPE_ID,
"picklist_field": PS_UFN_TCS
},
"mapping_version": {
"type": pjsr.RADIO_BUTTON_FIELD_WIDGET,
"label": "Mapping Version:",
"options": ["A", "B", "C"]
},
"output_location": {
"type": pjsr.DIRECTORY_CHOOSER_WIDGET,
"label": "Output Directory:"
}
}
# Parameter type: Picklist Option ID -> Picklist Option Name value lookup
parameter_type_option_id_to_name_lookup = {}
# Get a logger for error reporting
logger = logging.getLogger('xml_report_generator')
def is_number(s):
"""
Helper method to determine if this string can be cast to a float
:param s: String to attempt to turn into a float
:return: True or False
"""
try:
float(s)
return True
except ValueError:
return False
def get_baseline_number_from_name(baseline):
"""
This helper function will retrieve parse the default baseline name structure for the # in it.
The default structure is B# <DATE> for example a baseline name:, 'B12 032120' would return '12'
:param baseline: The baseline that needs its name parsed
:return: The Baseline "revision" number
"""
baseline_name = baseline.get('name')
# Split on the space, get the first part; drop the 'B'
return str(baseline_name).split(' ')[0][1:]
def get_document_number(baseline_items):
try:
for item in baseline_items:
if item.get('fields').get('name') == 'Input Sheet Package Document Number':
html_parser = html2text.HTML2Text()
html_parser.ignore_links = True
return html_parser.handle(item.get('fields').get('description')).strip()
except Exception:
pass
return ""
class XMLReportGenerator:
def __init__(self):
self.jama_client: JamaClient = None
self.temp_dir = './'
self.app = pjsr.PyJamaScriptRunner(custom_widgets, self.run)
# Next line launches gui; it will not return until the program exits.
self.app.mainloop()
# ------ No more code below this line except to do any required cleanup post app exit. -----
def run(self, **kwargs):
"""
The Entry point for this applications business logic.
:param kwargs: A dict that must contain all needed parameters, i.e. client
:return: None
"""
# Collect our parameters from the GUI
self.jama_client: JamaClient = kwargs.get('client')
baseline_id = kwargs.get('baseline_id')
mapping_version = int(kwargs.get('mapping_version'))
work_dir = kwargs.get('output_location')
test_class_selections = kwargs.get('test_class_selections')
# Create temp folder
self.temp_dir = os.path.join(work_dir, 'jama_temp')
if os.path.exists(self.temp_dir):
# Cleanup work directory
shutil.rmtree(self.temp_dir, ignore_errors=True, onerror=None)
# Setup output directory
for test_class in test_class_selections:
try:
test_class_dir = os.path.join(self.temp_dir, test_class.get('name'))
os.makedirs(test_class_dir)
except OSError as ex:
logger.warning("directory already exists. " + ex)
# Fetch the Baseline and the Baseline items.
self.app.emit_message("Fetching Baseline items")
try:
logger.info('Fetching Baseline: ' + baseline_id)
baseline = self.jama_client.get_baseline(baseline_id)
except Exception as ex:
self.app.emit_message("Unable to fetch baseline")
logger.error(ex)
self.__finished()
return
try:
self.baseline_items = self.jama_client.get_baselines_versioneditems(baseline_id)
logger.info('Items in baseline: ' + str(len(self.baseline_items)))
except APIException as ex:
self.app.emit_message("Unable to fetch baseline items")
logger.error(ex)
self.__finished()
return
# Filter out appropriate Upstream Item Type
if mapping_version == 0:
# Filter out the Sub-System Requirements
upstream_items = [item for item in self.baseline_items if
item.get('itemType') == DI_REQUIREMENT_ITEM_TYPE_ID]
else:
# Filter out the 'sub system requirement or function' type Requirements
upstream_items = [item for item in self.baseline_items if
item.get('itemType') == SSR_OR_FUNCTIONS_ITEM_TYPE_ID]
# Process Sub-System Requirements
ssr_count = len(upstream_items)
if ssr_count == 0:
self.app.emit_message("No items found in baseline.")
for i, sub_system_requirement in enumerate(upstream_items):
# Update UI with status
self.app.update_progress(i / ssr_count * 100)
self.app.set_status_message("Processing item: %i/%i" % (i, ssr_count))
self.app.emit_message("Processing: %s" % (sub_system_requirement.get('fields').get('name')))
# Get the Report Name.
xml_report_name = sub_system_requirement.get('fields').get('name')
if '(' not in xml_report_name or ')' not in xml_report_name:
self.app.emit_message("\tSkipping this item because it does not contain () in the name field.")
continue
xml_report_name = xml_report_name[xml_report_name.find('(') + 1: xml_report_name.find(')')] + '.xml'
# Remove any slash from the filename
if '/' in xml_report_name:
xml_report_name = ''.join(xml_report_name.split('/'))
if '\\' in xml_report_name:
xml_report_name = ''.join(xml_report_name.split('\\'))
# Fetch downstream relationships for this item.
downstream_related_items = self.jama_client.get_items_downstream_related(sub_system_requirement.get('id'))
# Filter out the Process Spec's and the design specs
downstream_process_specs = []
downstream_design_specs = []
if downstream_related_items is not None:
for downstream_item in downstream_related_items:
downstream_item_type = downstream_item.get('itemType')
if downstream_item_type == PROCESS_SPEC_ITEM_TYPE_ID:
downstream_process_specs.append(downstream_item)
elif downstream_item_type == DESIGN_SPEC_ITEM_TYPE_ID:
downstream_design_specs.append(downstream_item)
# Build an XML report for the current item for each selected test class and populate it with data from downstream items
for test_class in test_class_selections:
try:
current_tc_ds_process_specs = []
current_tc_ds_design_specs = []
if downstream_process_specs is not None:
# Filter down to only the Process Specs specific to this Test Class
current_tc_ds_process_specs = [ps for ps in downstream_process_specs
if PS_UFN_TCS not in ps.get('fields')
or test_class['id'] in ps.get('fields').get(PS_UFN_TCS)
or (len(ps.get('fields').get(PS_UFN_TCS)) == 1
and TC_UNASSIGNED in ps.get('fields').get(PS_UFN_TCS))]
if downstream_design_specs is not None:
# Filter down to only the Design Specs specific to this Test Class
current_tc_ds_design_specs = [ds for ds in downstream_design_specs
if DS_UFN_TCS in ds.get('fields')
# TODO: add in or logic: or mapping version A and test class = some specific string
and (test_class['id'] in ds.get('fields').get(DS_UFN_TCS)
or TC_ADD_TO_ALL_TC in ds.get('fields').get(DS_UFN_TCS))]
# Do not output the file if there are no specs to output
if (len(current_tc_ds_design_specs) + len(current_tc_ds_process_specs)) == 0:
continue
# Check for duplicate downstream items
self.check_duplicates(current_tc_ds_design_specs, DS_UFN_SW_VAR_NAME, "design spec")
self.check_duplicates(current_tc_ds_process_specs, PS_UFN_NAME, "process spec")
# Create a new XML DOM
xml_report: ET.ElementTree
xml_report = self.build_report(baseline,
current_tc_ds_process_specs,
current_tc_ds_design_specs,
mapping_version,
test_class)
except Exception as e:
self.app.emit_message("Failed to build report: {}".format(str(e)))
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
continue
# Save this XML report to the appropriate output directory
xml_location = os.path.join(self.temp_dir, test_class.get('name'), xml_report_name)
self.app.emit_message(
"\tAdding report " + xml_report_name + " to " + test_class.get('name'))
try:
xml_report.write(xml_location, encoding="utf-8", xml_declaration=True, short_empty_elements=False,
method="xml")
                    # Pretty print: it would be more efficient to do one file operation, but this is fast enough.
mini_dom_xml = xml.dom.minidom.parse(xml_location)
with open(xml_location, 'w') as xml_fd:
encoding = None
if mapping_version == 2:
encoding = "utf-8"
mini_dom_xml.writexml(xml_fd, indent='\t', addindent='\t', newl='\n', encoding=encoding)
except Exception as e:
self.app.emit_message("Unable to write report: " + xml_location)
logger.error(str(e))
# Zip all output.
# ZIP FILE NAME: InputSheetPackage_V<Baseline #>
try:
zip_file_name = os.path.join(work_dir, 'InputSheetPackage_V' + get_baseline_number_from_name(baseline))
shutil.make_archive(zip_file_name, 'zip', self.temp_dir)
except Exception as e:
self.app.emit_message("Unable to package as zip please see logs for more information")
logger.error(str(e))
# Cleanup work directory
shutil.rmtree(self.temp_dir, ignore_errors=True, onerror=None)
# All done.
self.__finished()
def __finished(self):
self.app.update_progress(100)
self.app.emit_message("Done")
self.app.set_status_message("Ready")
def fetch_parameter_type_option_name(self, parameter_type_option_id):
"""
This method will fetch the Picklist Option and set the name value into the global lookup table.
:param parameter_type_option_id: The integer ID of the picklist option to fetch.
:return: None
"""
try:
picklist_option = self.jama_client.get_pick_list_option(parameter_type_option_id)
parameter_type_option_id_to_name_lookup[parameter_type_option_id] = picklist_option.get('name')
except (APIException, KeyError) as ex:
logger.error("Unable to fetch pick list option ID: {}\n{}".format(str(parameter_type_option_id), ex))
def build_parameter_element(self, parameters_element, process_spec):
"""
Constructs a parameter element from a process spec item
:param parameters_element: The parent element
:param process_spec: dictionary with process spec item data
:return: None
"""
try:
# We must dissect the string of the form "TypeParameter_Type.Unit"
try:
parameter_type_option_id = process_spec.get('fields').get(PS_UFN_PARAMETER_TYPE)
if parameter_type_option_id not in parameter_type_option_id_to_name_lookup:
self.fetch_parameter_type_option_name(parameter_type_option_id)
parameter_type_string = parameter_type_option_id_to_name_lookup.get(parameter_type_option_id)
param_type_string_parts = parameter_type_string.split('_')
type_param_tag = param_type_string_parts[0]
type_tag = param_type_string_parts[1].split('.')[0]
if len(param_type_string_parts[1].split('.')) > 1:
unit_text = param_type_string_parts[1].split('.')[1]
else:
unit_text = ''
except Exception as e:
self.app.emit_message("Unable to parse parameter type string for process spec: "
+ str(process_spec.get('id')))
logger.warning(
"Unable to parse parameter type string for process spec: " + str(process_spec.get('id')))
logger.error(e)
return
# Handle the special use case of point list parameters.
if type_param_tag == "Point2DListParameter" or type_param_tag == "SimDataExtremeParameter":
try:
param_element = ET.SubElement(parameters_element, type_param_tag,
{'Name': process_spec.get('fields').get(PS_UFN_NAME)})
points_element = ET.SubElement(param_element, 'Points')
for point in process_spec.get('fields').get(PS_UFN_NOMINAL_TARGET).split('/'):
point_element = ET.SubElement(points_element, 'Point')
point_element.text = point
unit_element = ET.SubElement(param_element, 'Unit')
unit_element.text = unit_text
except Exception as e:
logger.error("Unable to process Point2DList Parameter")
logger.error(e)
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
# Handle the special use case of Length list parameters.
elif type_param_tag == "LengthListParameter":
try:
param_element = ET.SubElement(parameters_element, type_param_tag,
{'Name': process_spec.get('fields').get(PS_UFN_NAME)})
points_element = ET.SubElement(param_element, 'Lengths')
for point in process_spec.get('fields').get(PS_UFN_NOMINAL_TARGET).split('/'):
point_element = ET.SubElement(points_element, 'Length')
point_element.text = point
unit_element = ET.SubElement(param_element, 'Unit')
unit_element.text = unit_text
except Exception as e:
logger.error("Unable to process Length List Parameter")
logger.error(e)
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
# Handle the special use case of Angle list parameters.
elif type_param_tag == "AngleListParameter":
try:
param_element = ET.SubElement(parameters_element, type_param_tag,
{'Name': process_spec.get('fields').get(PS_UFN_NAME)})
points_element = ET.SubElement(param_element, 'Angles')
for point in process_spec.get('fields').get(PS_UFN_NOMINAL_TARGET).split('/'):
point_element = ET.SubElement(points_element, 'Angle')
point_element.text = point
unit_element = ET.SubElement(param_element, 'Unit')
unit_element.text = unit_text
except Exception as e:
logger.error("Unable to process Angle List Parameter")
logger.error(e)
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
# Otherwise build the standard type param structure
else:
param_element = ET.SubElement(parameters_element, type_param_tag,
{'Name': process_spec.get('fields').get(PS_UFN_NAME)})
type_element = ET.SubElement(param_element, type_tag)
type_element.text = process_spec.get('fields').get(PS_UFN_NOMINAL_TARGET)
unit_element = ET.SubElement(param_element, 'Unit')
unit_element.text = unit_text
except Exception as e:
logger.error("Unable to process Process Specification: " + process_spec.get('fields').get('name'))
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
logger.error(e)
def build_parameter_element_b(self, parameters_element, process_spec):
"""
Constructs a parameter element from a process spec item
mapping version B
:param parameters_element: The parent element
:param process_spec: dictionary with process spec item data
:return: None
"""
try:
name = ''
valueType = ''
            if PS_UFN_SW_VAR_NAME in process_spec.get('fields'):
                name = process_spec.get('fields').get(PS_UFN_SW_VAR_NAME)
            if PS_UFN_UNITS in process_spec.get('fields'):
                valueType = process_spec.get('fields').get(PS_UFN_UNITS)
# Set Parameters Element
param_element = ET.SubElement(parameters_element, "Parameter", {'Name': name, 'ValueType': valueType})
value_content = process_spec.get('fields').get(PS_UFN_NOMINAL_TARGET)
if '/' in value_content:
# Build value list
param_element.tag = "ParameterList"
values_element = ET.SubElement(param_element, "Values")
for value in value_content.split('/'):
value_element = ET.SubElement(values_element, 'Value')
value_element.text = str(value)
else:
# Build single value element
value_element = ET.SubElement(param_element, "Value")
value_element.text = str(value_content)
# Build metadatas
metas_element = ET.SubElement(param_element, "Metadatas")
meta_name_element = ET.SubElement(metas_element, "Metadata", {"Name": "StandardName"})
meta_name_element.text = process_spec.get('fields').get('name')
except Exception as e:
logger.error("Unable to process Process Specification: " + process_spec.get('fields').get('name'))
logger.error("ERROR at line {}".format(sys.exc_info()[-1].tb_lineno))
logger.error(e)
def build_spec_element(self, specifications_element, design_spec, mapping_version):
"""
        Constructs a Spec element from a design spec item.
        :param specifications_element: The parent element
        :param design_spec: dictionary with design spec item data
        :param mapping_version: integer; 0 for version A, 1 for version B (which
            effectively swaps the parameter name and sw variable name), or 2 for version C
:return: None
"""
try:
# Mapping version A
name = design_spec.get('fields').get(DS_UFN_SW_VAR_NAME)
display_name = design_spec.get('fields').get(DS_UFN_PARAMETER_NAME)
# Attribute values must not be None
if name is None:
name = ""
if display_name is None:
display_name = ""
spec_ele_attributes = {
'Name': name.strip(),
'DisplayName': display_name.strip()
}
tag_name = 'Spec'
if mapping_version == 2:
tag_name = 'Specification'
spec_element = ET.Element(tag_name, spec_ele_attributes)
# Construct Min / Max values
try:
ds_fields = design_spec.get('fields')
lsl_decimal_places = None
usl_decimal_places = None
# is there a value in the field?
if DS_UFN_LSL in ds_fields:
lsl = design_spec.get('fields').get(DS_UFN_LSL)
if is_number(lsl):
min_element = ET.SubElement(spec_element, 'Min')
min_element.text = lsl
if '.' in lsl:
lsl_decimal_places = len(lsl.split('.')[1])
else:
lsl_decimal_places = 0
if DS_UFN_USL in ds_fields:
usl = design_spec.get('fields').get(DS_UFN_USL)
if is_number(usl):
max_element = ET.SubElement(spec_element, 'Max')
max_element.text = usl
if '.' in usl:
usl_decimal_places = len(usl.split('.')[1])
else:
usl_decimal_places = 0
if lsl_decimal_places is not None or usl_decimal_places is not None:
# add decimal places element iff not Mapping Version C
if mapping_version != 2:
decimal_places_element = ET.SubElement(spec_element, 'DecimalPlaces')
if lsl_decimal_places is not None and usl_decimal_places is not None:
if lsl_decimal_places >= usl_decimal_places:
decimal_places_element.text = str(lsl_decimal_places)
else:
decimal_places_element.text = str(usl_decimal_places)
elif lsl_decimal_places is None and usl_decimal_places is not None:
decimal_places_element.text = str(usl_decimal_places)
elif lsl_decimal_places is not None and usl_decimal_places is None:
decimal_places_element.text = str(lsl_decimal_places)
# Add unit of measure element
if mapping_version == 2:
units_parameter = design_spec.get('fields').get(DS_UFN_UNITS)
                # TODO: confirm this implementation with Daniel
if units_parameter is not None:
spec_element.set('Unit', design_spec.get('fields').get(DS_UFN_UNITS))
else:
unit_of_measure_element = ET.SubElement(spec_element, 'UnitOfMeasure')
unit_of_measure_element.text = design_spec.get('fields').get(DS_UFN_UNITS)
# Add FIO element
try:
fio = design_spec.get('fields').get(DS_UFN_FIO)
if fio is None:
fio = False
except KeyError:
fio = False
fio_element = ET.Element('ForInformationOnly')
if fio:
                # Mapping version C: do not output any spec with FIO = true
if mapping_version == 2:
return
fio_element.text = 'true'
else:
fio_element.text = 'false'
# Only output FIO element if not mapping C
if mapping_version != 2:
spec_element.append(fio_element)
specifications_element.append(spec_element)
return
except KeyError as e:
logger.error("Unable to process Design Spec: " + str(e))
except Exception as e:
logger.error(
"Unable to process Design Spec: {}[{}] {}".format(design_spec.get('fields').get('name'),
str(design_spec.get('id')),
str(e)))
self.app.emit_message("Unable to process Design Spec: {}[{}]".format(design_spec.get('fields').get('name'),
str(design_spec.get('id'))))
def build_spec_element_b(self, specifications_element, design_spec):
"""
        Constructs a Spec element from a design spec item using mapping version B.
        :param specifications_element: The parent element
        :param design_spec: dictionary with design spec item data
"""
try:
# Mapping version B
name = design_spec.get('fields').get(DS_UFN_SW_VAR_NAME)
display_name = design_spec.get('fields').get(DS_UFN_PARAMETER_NAME)
# Attribute values must not be None
if name is None:
name = ""
if display_name is None:
display_name = ""
# build out the attributes object for this spec element.
spec_ele_attributes = {
'Name': name.strip(),
'DisplayName': display_name.strip(),
                'DecimalPlaces': self.calculate_decimal_places(design_spec.get('fields'), design_spec),
'ForInformationOnly': str.lower(str(design_spec.get('fields').get(DS_UFN_FIO))),
}
uom = design_spec.get('fields').get(DS_UFN_UNITS)
if uom is not None:
spec_ele_attributes['UnitOfMeasure'] = uom
else:
self.app.emit_message("Missing unit of measure field for item: " + str(design_spec.get('id')))
tag_name = 'Specification'
spec_element = ET.Element(tag_name, spec_ele_attributes)
# Construct Min / Max values
try:
ds_fields = design_spec.get('fields')
# is there a value in the field?
if DS_UFN_LSL in ds_fields:
lsl = design_spec.get('fields').get(DS_UFN_LSL)
if is_number(lsl):
min_element = ET.SubElement(spec_element, 'Min')
min_element.text = lsl
if DS_UFN_USL in ds_fields:
usl = design_spec.get('fields').get(DS_UFN_USL)
if is_number(usl):
max_element = ET.SubElement(spec_element, 'Max')
max_element.text = usl
                # DecimalPlaces and UnitOfMeasure were already included in
                # spec_ele_attributes when the element was created above.
                # Add FIO as an attribute directly on the element; mutating
                # spec_ele_attributes at this point would have no effect.
                fio = design_spec.get('fields').get(DS_UFN_FIO)
                if fio is None:
                    fio = False
                spec_element.set('ForInformationOnly', 'true' if fio else 'false')
specifications_element.append(spec_element)
return
except KeyError as e:
logger.error("Unable to process Design Spec: " + str(e))
except Exception as e:
logger.error(
"Unable to process Design Spec: {}[{}] {}".format(design_spec.get('fields').get('name'),
str(design_spec.get('id')),
str(e)))
self.app.emit_message("Unable to process Design Spec: {}[{}]".format(design_spec.get('fields').get('name'),
str(design_spec.get('id'))))
@staticmethod
def calculate_decimal_places(ds_fields, design_spec):
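        """
        Return the larger number of decimal places found in the LSL and USL
        fields as a string (e.g. an LSL of "1.25" and a USL of "3.1" yield
        "2"), or None if neither field is present.
        """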
lsl_decimal_places = None
usl_decimal_places = None
# is there a value in the field?
if DS_UFN_LSL in ds_fields:
lsl = design_spec.get('fields').get(DS_UFN_LSL)
if '.' in lsl:
lsl_decimal_places = len(lsl.split('.')[1])
else:
lsl_decimal_places = 0
if DS_UFN_USL in ds_fields:
usl = design_spec.get('fields').get(DS_UFN_USL)
if '.' in usl:
usl_decimal_places = len(usl.split('.')[1])
else:
usl_decimal_places = 0
if lsl_decimal_places is not None or usl_decimal_places is not None:
if lsl_decimal_places is not None and usl_decimal_places is not None:
if lsl_decimal_places >= usl_decimal_places:
return str(lsl_decimal_places)
else:
return str(usl_decimal_places)
elif lsl_decimal_places is None and usl_decimal_places is not None:
return str(usl_decimal_places)
elif lsl_decimal_places is not None and usl_decimal_places is None:
return str(lsl_decimal_places)
def build_report(self, baseline, downstream_process_specs, downstream_design_specs, mapping_version, test_class):
"""
This method will build the XML report document and return it as a string.
:param baseline: A dictionary representing the baseline, used for name generation
        :param downstream_process_specs: A List representing the List of downstream process specs
        :param downstream_design_specs: A List representing the List of downstream design specs
        :param mapping_version: This will be 0 for mapping version A, 1 for mapping version B, or 2 for mapping version C
        :param test_class: dictionary describing the test class this report is generated for
:return: the constructed ElementTree XML Report for this subsystem requirement and all of its specs
"""
# Create the root element for the document
root_element_name = 'InputSheet'
if mapping_version == 2:
root_element_name = "TestSpecifications"
root_input_sheet_element = ET.Element(root_element_name)
root_input_sheet_element.set("xmlns:xsd", "http://www.w3.org/2001/XMLSchema")
root_input_sheet_element.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
# --- mapping C ---
if mapping_version == 2:
root_input_sheet_element.set('SpecType', test_class.get('name'))
root_input_sheet_element.set('Version', get_baseline_number_from_name(baseline))
root_input_sheet_element.set('DocumentReleasePartNumber', get_document_number(self.baseline_items))
# --- mapping B ---
elif mapping_version == 1:
metadata_element: ET.Element = ET.SubElement(root_input_sheet_element, 'Metadatas')
meta_version_element: ET.Element = ET.SubElement(metadata_element, 'Metadata', {"Name": "Version"})
meta_version_element.text = get_baseline_number_from_name(baseline)
meta_name_element: ET.Element = ET.SubElement(metadata_element, 'Metadata', {"Name": "Type"})
meta_name_element.text = test_class['name']
meta_doc_num_element: ET.Element = ET.SubElement(metadata_element, 'Metadata', {"Name": "DocumentNumber"})
meta_doc_num_element.text = get_document_number(self.baseline_items)
# --- mapping A ---
else:
# Add a type element
type_element: ET.Element = ET.SubElement(root_input_sheet_element, 'Type')
type_element.text = test_class['name']
# Add Revision element
revision_element: ET.Element = ET.SubElement(root_input_sheet_element, 'Revision')
revision_element.text = get_baseline_number_from_name(baseline)
# Add Parameters from process specs
parameters_element: ET.Element = ET.SubElement(root_input_sheet_element, 'Parameters')
for process_spec in downstream_process_specs:
if mapping_version == 1:
self.build_parameter_element_b(parameters_element, process_spec)
else:
self.build_parameter_element(parameters_element, process_spec)
# Add Specifications from Design Specs
specifications_element: ET.Element = ET.SubElement(root_input_sheet_element, 'Specifications')
for design_spec in downstream_design_specs:
if mapping_version == 1:
self.build_spec_element_b(specifications_element, design_spec)
else:
self.build_spec_element(specifications_element, design_spec, mapping_version)
return ET.ElementTree(element=root_input_sheet_element)
def check_duplicates(self, downstream_items, duplicate_field_name, item_type_display_name):
dupe_map = {}
for item in downstream_items:
field_data = item.get('fields').get(duplicate_field_name)
            # TODO: field_data sometimes resolves to None here and was being pushed into the dupe map
if field_data in dupe_map:
dupe_msg = "Detected duplicate {}.\n\t{}:{}\n\tConflicting Item ID's: {} - {} ".format(
item_type_display_name,
duplicate_field_name,
field_data,
item.get('id'),
dupe_map.get(field_data))
self.app.emit_message(dupe_msg)
logger.warning(dupe_msg)
else:
                # don't add None values to the dupe map.
if field_data is not None:
dupe_map[field_data] = item.get('id')
if __name__ == "__main__":
app = XMLReportGenerator()
| 48.126103 | 137 | 0.594382 |
aced22116bfa25e88d882f80cff225b2c4b85603 | 113 | py | Python | test/mixpack.py | eaybek/mixpack | eba6bbf47ca4432abdf256cf6818a2d9bd3c0d1c | [
"MIT"
] | null | null | null | test/mixpack.py | eaybek/mixpack | eba6bbf47ca4432abdf256cf6818a2d9bd3c0d1c | [
"MIT"
] | null | null | null | test/mixpack.py | eaybek/mixpack | eba6bbf47ca4432abdf256cf6818a2d9bd3c0d1c | [
"MIT"
] | null | null | null | import unittest
class MixpackTest(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
| 12.555556 | 37 | 0.707965 |
aced225690c01a3ab6802c5bba4dd0f02560837c | 3,553 | py | Python | project_plantware/project_plantware/settings.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | project_plantware/project_plantware/settings.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | project_plantware/project_plantware/settings.py | naiem2525/plantware | 5d72989780ff39b59949dde649052d9d01729c86 | [
"bzip2-1.0.6"
] | null | null | null | """
Django settings for project_plantware project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1+40#+9psvf36h&ah+j2g*v3h!!im-_yt=11un_q%$lvkjxebu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'six',
'warehouse',
'account',
'store',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_plantware.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_plantware.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
# SMTP Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'plantware.admn@gmail.com'
EMAIL_HOST_PASSWORD = 'django_admin'
| 24.503448 | 91 | 0.700253 |
aced22aad856957875393102eb31c99b356f4604 | 14,699 | py | Python | utility.py | bfirner/armada-player-demo | c621d421bb34eab01eed4a169da619b5187e54c5 | [
"MIT"
] | null | null | null | utility.py | bfirner/armada-player-demo | c621d421bb34eab01eed4a169da619b5187e54c5 | [
"MIT"
] | null | null | null | utility.py | bfirner/armada-player-demo | c621d421bb34eab01eed4a169da619b5187e54c5 | [
"MIT"
] | null | null | null | #
# Copyright Bernhard Firner, 2019-2020
#
# Utility functions.
import csv
import math
import torch
from game_constants import (ArmadaDimensions, ArmadaTypes)
from ship import Ship
# Array of matching token indexes
def token_index(token, defender):
return [idx for idx in range(len(defender.defense_tokens)) if token in defender.defense_tokens[idx]]
def get_rotation_matrix(heading):
"""Get the rotation matrix for the given heading.
Arguments:
heading (float): Rotation in radians.
Returns:
torch.tensor: 2x2 rotation matrix
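    Example:
        A minimal sketch; a zero heading yields (effectively) the identity:
            get_rotation_matrix(0.0)  # tensor([[1., -0.], [0., 1.]])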
"""
return torch.tensor([[math.cos(heading), -math.sin(heading)],
[math.sin(heading), math.cos(heading)]])
def get_corners(ship):
"""Get the corner coordinates of a ship.
This is a useful function for collisions, distances, etc. Traversing the returned edges in
order will walk the perimeter of the object.
Arguments:
ship (Ship): The ship.
Returns:
torch.tensor: 4x2 matrix with the x,y coordinates of the corners in the order: left rear,
left front, right rear, right front.
"""
# Get the ship location information
location = ship.get_range('location')
heading = ship.get('heading')
size = ArmadaDimensions.ship_bases_feet[ArmadaDimensions.size_names[int(ship.get('size'))]]
# To find the corners first rotate and then add in the current location translation
corners = torch.zeros(4, 2)
for xi, x in enumerate([-1, 1]):
# We flip the order of iteration through the y dimension based upon the x so that the
# corners end up in order. This is helpful for future iterations. The iteration sequence is
# [-1,1], [-1,-1], [1,-1], [1, 1].
for yi, y in enumerate([x * -1, x * 1]):
corners[2*xi + yi, 0] = x * size[0]/2.
corners[2*xi + yi, 1] = y * size[1]/2.
# Now rotate. Translation is done next so this rotation is about the origin.
rotation_matrix = get_rotation_matrix(heading)
# TODO FIXME Corners are not being rotated about the correct reference point and the rectangle
# is being turned into a trapezoid
corners = torch.matmul(corners, rotation_matrix)
# Now translate
corners[:,0] += location[0]
corners[:,1] += location[1]
return corners
def get_edges(corners):
"""Get the equations for the edges (the perimeter) of an object given its corners.
    This is a useful function for collisions, distances, etc. for objects defined by line segments.
    The corners must be ordered so that a walk from index 0, 1, ..., -1, 0 defines the perimeter of
    the object.
    Arguments:
        corners (torch.tensor): Nx2 matrix of corner coordinates, e.g. from get_corners().
    Returns:
        List[tuple(float, float, float, float)]: List of tuples of (x origin, x end, y origin, slope)
"""
    edges = []
    with torch.no_grad():
        perimeter = torch.cat((corners, corners[:1]))
        for i in range(corners.size(0)):
            first = perimeter[i].tolist()
            second = perimeter[i + 1].tolist()
            # Note: a vertical edge (equal x coordinates) would divide by zero here.
            slope = (second[1] - first[1]) / (second[0] - first[0])
            edges.append((first[0], second[0], first[1], slope))
return edges
def find_intersection(line_a, line_b):
"""Find the intersection point (if it exists) of two line segments, a and b.
This uses an algorithm based upon LeMothe's "Tricks of the Windows Game Programming Gurus". We
could have used some fancy cross product stuff here, but this is much more comprehensible.
Each segment is parameterized by variables, s and t, such at when s=0 we are at line_a[0] and
when s[1] we are at line_a[1] (and so on for t). We then solve for the values of s and t where
the equations for line_a and line_b give the same result. If we cannot find values for s and t
that satisfy this then there is no intersection.
Arguments:
line_a (tensor): 2x2 tensor describing line a. line_a[0] is the first point.
        line_b (tensor): The same thing as line_a.
Returns:
None or 2 element torch.tensor
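    Example:
        A minimal sketch; these segments cross at (1, 1):
            a = torch.tensor([[0., 0.], [2., 2.]])
            b = torch.tensor([[0., 2.], [2., 0.]])
            find_intersection(a, b)  # tensor([1., 1.])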
"""
xslope_a = line_a[1,0] - line_a[0,0]
xslope_b = line_b[1,0] - line_b[0,0]
yslope_a = line_a[1,1] - line_a[0,1]
yslope_b = line_b[1,1] - line_b[0,1]
# Find the parameters where the lines intersect
s_numerator = -yslope_a * (line_a[0,0] - line_b[0,0]) + xslope_a * (line_a[0,1] - line_b[0, 1])
t_numerator = -yslope_b * (line_a[0,0] - line_b[0, 0]) + xslope_b * (line_a[0,1] - line_b[0,1])
denominator = (-xslope_b * yslope_a + xslope_a * yslope_b)
# If the lines are parallel then the slope will cancel out and the denominator will be 0.
# For simplicity we will just say that they do not intersect in that case.
if 0 == denominator:
# Early return for efficiency
return None
# Check one range at a time to possibly skip a second division operation
s = s_numerator / denominator
if 0 <= s and s <= 1:
t = t_numerator / denominator
if 0 <= t and t <= 1:
intersection = [line_a[0,0] + t * xslope_a, line_a[0, 1] + t * yslope_a]
return torch.tensor(intersection)
# No intersection
return None
# TODO Write functions to measure distances and ranges between objects
def ruler_distance(a, b):
"""Return the distance between two objects
Distance is measured as an index into ArmadaTypes.ruler_distance_feet or a number outside of the
outside to indicate being outside of the ruler.
Arguments:
a (object): Measure distance from this object
b (object): Measure distance to this object
Returns:
tuple(int, (float, float)): Index into ArmadaTypes.ruler_distance_feet or an index outside
of the table if the distance is greater than the ruler length
and the coordinates of the shortest path.
Raises:
ValueError if a or b are not recognized types.
"""
if not (isinstance(a, Ship) and isinstance(b, Ship)):
raise ValueError("Cannot measure distances between non-Ship types.")
# First check for an overlap. Do this by seeing if any lines overlap. This can be done by taking
# two endpoint of an edge for ship A (ax1, ay1), (ax2, ay2) and two endpoint of an edge of ship
# B (bx1, by1), (bx2, by2) and doing some cross product stuff. If the lines intersect then the
# angle from (ax1, ay1), (ax2, ay2), (bx1, by1) and (ax1, ay1), (ax2, ay2), (bx2, by2) must be
# different because the endpoints of B's line must line on either side of A's line because the
# lines cut their one another. This should also hold true going from B's line to the endpoints
# of A's line since they must cut each other. If not, then the line segment would cut the other
# if it were longer, but it falls short.
a_corners = get_corners(a)
b_corners = get_corners(b)
# Simplify some things by repeating the first point so we go through every edge segment.
a_perimeter = torch.cat((a_corners, a_corners[:1]))
b_perimeter = torch.cat((b_corners, b_corners[:1]))
# Brute force looping.
for ai in range(a_corners.size(0)):
for bi in range(b_corners.size(0)):
a_segment = a_perimeter[ai:ai+2]
b_segment = b_perimeter[bi:bi+2]
intersect = find_intersection(a_segment, b_segment)
if intersect is not None:
return 0, (intersect, intersect)
# No intersection, we will have to find the closest point.
# First notice that if we only draw lines from the corners of objects 'a' and 'b' then this is
# sufficient. If we find the shortest path from each corner to the side of the other object we
# end up with 32 lines.
# There are two cases for each shortest line:
# 1. We can draw a line from the corner that is perpendicular to the side. This is the shortest
# path.
# 2. The perpendicular line does not intersect the side. In this case one of the corners is
# closest.
# Obviously this can be optimized to skip the 2 farther sides. With 3 corners and 2 sides on
# each ship we would only search 12 possible shortest lines, but we would need to do additional
# comparisons to determine the closest sides. Since this is a discussion of constant time none
# of this will have a large impact so this function will be as simple as possible, unlike these
# comments.
# Calculate the corner distances
distance = torch.nn.PairwiseDistance()
# Future proofing this code a bit by checking the size of the corners rather than assuming 4.
a_num_corners = a_corners.size(0)
b_num_corners = b_corners.size(0)
distances = torch.zeros(a_num_corners * b_num_corners)
for corner in range(a_corners.size(0)):
        # The distances tensor is strided by b_num_corners entries per corner of a.
        out_index = corner * b_num_corners
distances[out_index:out_index + b_num_corners] = distance(
a_corners[corner].expand(b_num_corners, 2), b_corners)
# Also need to calculate the perpendicular line distances
# To simplify things we can just calculate the distance from the two sides adjacent to the two
# closest points
min_corner_distance, index = torch.min(distances, 0)
a_corner = int(math.floor(index.item() / b_num_corners))
b_corner = int((index.item() % b_num_corners))
# Now check the distances from the corners to the sides. If a perpendicular line from the side
# of one ship cannot be drawn to the corner of the other then the corner line is shorter.
shortest_path = min_corner_distance
shortest_points = (a_corners[a_corner], b_corners[b_corner])
# First find the distance from a_corner to the sides of b and b_corner to the sides of a
possible_paths = []
for offset in range(b_corner - 1, b_corner + 1):
possible_paths.append((a_corners[a_corner],
(b_corners[offset], b_corners[(offset + 1) % b_num_corners])))
for offset in range(a_corner - 1, a_corner + 1):
possible_paths.append((b_corners[b_corner],
(a_corners[offset], a_corners[(offset + 1) % a_num_corners])))
# TODO Calculate the perpendicular distance from the side to the corner. If a perpendicular line
# cannot be drawn then the corner to corner distance is the best possible.
for corner, side in possible_paths:
        # Calculate where a line perpendicular to the side would run from the side to the corner
side_slope = (side[1][1] - side[0][1]) / (side[1][0] - side[0][0])
# We'll skip being clever here and just make a gigantic perpendicular line and then check
# for an intersection.
if 0. == side_slope:
perp_point_one = torch.tensor([corner[0].item(),
corner[1].item() - ArmadaDimensions.play_area_height_feet])
perp_point_two = torch.tensor([corner[0].item(),
corner[1].item() + ArmadaDimensions.play_area_height_feet])
else:
            # The slope of a line perpendicular to one with slope m is -1/m.
            perp_slope = -1. / side_slope
perp_point_one = torch.tensor([corner[0].item() - ArmadaDimensions.play_area_width_feet,
corner[1].item() - ArmadaDimensions.play_area_width_feet * perp_slope])
perp_point_two = torch.tensor([corner[0].item() + ArmadaDimensions.play_area_width_feet,
corner[1].item() + ArmadaDimensions.play_area_width_feet * perp_slope])
intercept = find_intersection(torch.stack(side), torch.stack((perp_point_one, perp_point_two)))
if intercept is not None:
distance = math.sqrt((corner[0].item() - intercept[0].item())**2 +
(corner[1].item() - intercept[1].item())**2)
if distance < shortest_path:
shortest_path = distance
shortest_points = (corner, intercept)
# Find the ruler index for the shortest past
ruler_index = 0
ruler = ArmadaDimensions.ruler_distance_feet
while ruler_index < len(ruler) and shortest_path > ruler[ruler_index]:
ruler_index += 1
return ruler_index, shortest_points
# TODO Write a function to output the world state in a visualizable format
def tokens_available(token, defender, accuracy_tokens = None):
"""Return a tuple indicating if a red or green token is available.
Arguments:
token (str) : The token types (one of ArmadaTypes.defense_tokens)
defender(Ship): The defending ship whose tokens to check.
Returns:
tuple(bool, bool): True if a green or red token is available, respectively.
"""
token_offset = ArmadaTypes.defense_tokens.index(token)
green_offset, green_size = defender.get_index("green_defense_tokens")
red_offset, red_size = defender.get_index("red_defense_tokens")
green_offset += token_offset
red_offset += token_offset
    green_sum = defender.encoding[green_offset].item()
    red_sum = defender.encoding[red_offset].item()
    if accuracy_tokens:
        green_sum = max(0., green_sum - accuracy_tokens[token_offset])
        red_sum = max(0., red_sum - accuracy_tokens[token_offset + len(ArmadaTypes.defense_tokens)])
return (green_sum, red_sum)
def max_damage_index(pool_faces):
for face in ["hit_crit", "hit_hit", "crit", "hit"]:
if face in pool_faces:
return pool_faces.index(face)
return None
def face_index(face, pool_faces):
return [idx for idx in range(len(pool_faces)) if face in pool_faces[idx]]
def parseShips(filename):
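    """Parse ship templates from a CSV file.
    A sketch of the expected input (a header row followed by one row per ship;
    the exact columns depend on the data file, but a 'Ship Name' column must
    be present):
        Ship Name,Hull,...
        CR90 Corvette A,4,...
    Returns:
        tuple(dict, dict): keys maps the 1-based column index to the header
        name, and ship_templates maps each 'Ship Name' to its row dictionary.
    """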
keys = {}
ship_templates = {}
with open(filename, newline='') as ships:
shipreader = csv.reader(ships, delimiter=',', quotechar='|')
rowcount = 0
for row in shipreader:
# parse the header first to find the column keys
if ( 0 == rowcount ):
count = 0
for key in row:
count = count + 1
keys[count] = key
else:
newship = {}
count = 0
# Fill in all of the information on this vessel
for key in row:
count = count + 1
newship[keys[count]] = key
# Create a new ship template
ship_templates[newship['Ship Name']] = newship
rowcount = rowcount + 1
return keys, ship_templates
def print_roll(colors, roll):
for i in range(0, len(colors)):
print("{}: {} {}".format(i, colors[i], roll[i]))
| 47.263666 | 114 | 0.652153 |
aced23157a9d567d2fb0367c0cd1cb1ee8964b54 | 1,036 | py | Python | src/ggrc/models/pbc_list.py | sriharshakappala/ggrc-core | 7561ce27cd987d73468a44df5b6e2b7425f050ef | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-04-21T12:21:17.000Z | 2019-04-21T12:21:17.000Z | src/ggrc/models/pbc_list.py | sriharshakappala/ggrc-core | 7561ce27cd987d73468a44df5b6e2b7425f050ef | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/models/pbc_list.py | sriharshakappala/ggrc-core | 7561ce27cd987d73468a44df5b6e2b7425f050ef | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from .mixins import deferred, Base
class PbcList(Base, db.Model):
__tablename__ = 'pbc_lists'
audit_cycle_id = deferred(
db.Column(db.Integer, db.ForeignKey('cycles.id'), nullable=False),
'PbcList')
requests = db.relationship(
'Request', backref='pbc_list', cascade='all, delete-orphan')
control_assessments = db.relationship(
'ControlAssessment', backref='pbc_list', cascade='all, delete-orphan')
_publish_attrs = [
'audit_cycle',
'requests',
'control_assessments',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PbcList, cls).eager_query()
return query.options(
orm.joinedload('audit_cycle'),
orm.subqueryload('requests'),
orm.subqueryload('control_assessments'))
| 28.777778 | 78 | 0.69112 |
aced239e48e30b0ba409fa1950a62d262059af60 | 667 | py | Python | web/django/translate/forms.py | rydcormier/translate | fc279ec4a3782845a9fef0bf35ec72dc06200001 | [
"MIT"
] | null | null | null | web/django/translate/forms.py | rydcormier/translate | fc279ec4a3782845a9fef0bf35ec72dc06200001 | [
"MIT"
] | null | null | null | web/django/translate/forms.py | rydcormier/translate | fc279ec4a3782845a9fef0bf35ec72dc06200001 | [
"MIT"
] | null | null | null | # translate/forms.py
from django import forms
from .models import Translation
class TranslationForm(forms.ModelForm):
"""Form for the Translation model."""
class Meta:
model = Translation
fields = [ 'source', 'input', 'target', 'output']
def __init__(self, translation=None, *args, **kwargs):
try:
data = {
'source': translation.source,
'input' : translation.input,
'target': translation.target,
'output': translation.output
}
except AttributeError:
data = translation
super().__init__(data, *args, **kwargs)
| 25.653846 | 58 | 0.565217 |
aced24708afd909828fa354ab76c17bf1e946d35 | 192 | py | Python | saleor/lib/python3.7/site-packages/stripe/api_resources/country_spec.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | null | null | null | saleor/lib/python3.7/site-packages/stripe/api_resources/country_spec.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | [
"BSD-3-Clause"
] | 12 | 2019-12-04T23:48:45.000Z | 2022-03-11T23:53:30.000Z | venv/Lib/site-packages/stripe/api_resources/country_spec.py | ryankibayhan/ryb-ecommerce | 15fa3bcb624be528926458b466ad7fe7fef5158e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
from stripe.api_resources import abstract
class CountrySpec(abstract.ListableAPIResource):
OBJECT_NAME = "country_spec"
| 24 | 64 | 0.833333 |
aced24eddd04dde6bd428f4b55dc8658041c6d05 | 3,956 | py | Python | functional_tests/test_book.py | PMPL-Arieken/django-locallibrary-tutorial | 667cd9d06043718bcfc8c02a3ff71c848803eb06 | [
"CC0-1.0"
] | 1 | 2020-12-05T16:10:20.000Z | 2020-12-05T16:10:20.000Z | functional_tests/test_book.py | PMPL-Arieken/django-locallibrary-tutorial | 667cd9d06043718bcfc8c02a3ff71c848803eb06 | [
"CC0-1.0"
] | 67 | 2020-12-04T13:39:26.000Z | 2022-03-21T15:07:31.000Z | functional_tests/test_book.py | PMPL-Arieken/django-locallibrary-tutorial | 667cd9d06043718bcfc8c02a3ff71c848803eb06 | [
"CC0-1.0"
] | 1 | 2020-11-27T13:47:16.000Z | 2020-11-27T13:47:16.000Z | import time
from .base import FunctionalTest
from catalog.models import Book
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
bookCatalogLink = '/catalog/books/'
bookDetailsLink = '/catalog/book/'
class TestBookPage(FunctionalTest):
submit_selector = 'input[type=submit]'
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
def test_book_page_empty(self):
self.browser.get(self.live_server_url + bookCatalogLink)
self.assertEqual(self.browser.title, 'Local Library')
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertEqual(header_text, 'Book List')
list_text = self.browser.find_element_by_tag_name('p').text
self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_filled(self):
self.setUpBooks()
self.browser.get(self.live_server_url + bookCatalogLink)
time.sleep(1)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Book Title (Smith, John)', [row.text for row in rows])
def test_book_page_create(self):
self.login(self.admin)
self.setUpBooks()
self.browser.get(self.live_server_url + '/book/create/')
time.sleep(10)
title = self.browser.find_element_by_css_selector('input[name=title]')
author_box = Select(self.browser.find_element_by_name('author'))
summary = self.browser.find_element_by_css_selector('textarea[name=summary]')
isbn = self.browser.find_element_by_css_selector('input[name=isbn]')
genre_box = Select(self.browser.find_element_by_name('genre'))
language = Select(self.browser.find_element_by_name('language'))
submit = self.browser.find_element_by_css_selector(self.submit_selector)
title.send_keys('Book Title 2')
author_box.select_by_visible_text('Smith, John')
summary.send_keys('Summary of Book 2')
isbn.send_keys('1234567890123')
genre_box.select_by_visible_text('Fantasy')
language.select_by_visible_text('English')
submit.send_keys(Keys.ENTER)
time.sleep(1)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertEqual(header_text, 'Title: Book Title 2')
def test_book_page_delete(self):
self.setUpBooks()
book = Book.objects.all()[0]
self.login(self.admin)
self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
delete_button = self.browser.find_element_by_link_text('Delete')
delete_button.click()
submit = self.browser.find_element_by_css_selector(self.submit_selector)
submit.send_keys(Keys.ENTER)
time.sleep(1)
self.browser.get(self.live_server_url + bookCatalogLink)
list_text = self.browser.find_element_by_tag_name('p').text
self.assertEqual(list_text, 'There are no books in the library.')
def test_book_page_update(self):
self.setUpBooks()
book = Book.objects.all()[0]
self.login(self.admin)
self.browser.get(self.live_server_url + bookDetailsLink + str(book.id))
delete_button = self.browser.find_element_by_link_text('Update')
delete_button.click()
title = self.browser.find_element_by_css_selector('input[name=title]')
title.clear()
title.send_keys('Laskar')
submit = self.browser.find_element_by_css_selector(self.submit_selector)
submit.send_keys(Keys.ENTER)
time.sleep(1)
self.browser.get(self.live_server_url + bookCatalogLink)
book_list = self.browser.find_element_by_id('book-list')
rows = book_list.find_elements_by_tag_name('li')
self.assertIn('Laskar (Smith, John)', [row.text for row in rows]) | 37.320755 | 85 | 0.690091 |
aced25bf4a931423cdf2eee736b039af1e51fe66 | 49,656 | py | Python | plasmapy/utils/decorators/checks.py | Quettle/PlasmaPy | 9689c83b991832c32158cca8b3f94525b59bde18 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 2 | 2020-09-16T08:53:45.000Z | 2022-01-29T18:00:10.000Z | plasmapy/utils/decorators/checks.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/utils/decorators/checks.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null | """
Decorator for checking input/output arguments of functions.
"""
__all__ = [
"check_values",
"check_units",
"check_relativistic",
"CheckBase",
"CheckUnits",
"CheckValues",
]
import collections
import functools
import inspect
import numpy as np
import warnings
from astropy import units as u
from astropy.constants import c
from functools import reduce
from operator import add
from typing import Any, Dict, List, Tuple, Union
from plasmapy.utils.decorators.helpers import preserve_signature
from plasmapy.utils.exceptions import (
PlasmaPyWarning,
RelativityError,
RelativityWarning,
)
try:
from astropy.units.equivalencies import Equivalency
except ImportError:
# TODO: remove once we have dependency Astropy >= 3.2.1
# astropy defined the Equivalency class in v3.2.1
class Equivalency:
pass
class CheckBase:
"""
Base class for 'Check' decorator classes.
Parameters
----------
checks_on_return
specified checks on the return of the wrapped function
**checks
specified checks on the input arguments of the wrapped function
"""
def __init__(self, checks_on_return=None, **checks):
self._checks = checks
if checks_on_return is not None:
self._checks["checks_on_return"] = checks_on_return
@property
def checks(self):
"""
Requested checks on the decorated function's input arguments
and/or return.
"""
return self._checks
class CheckValues(CheckBase):
"""
A decorator class to 'check' -- limit/control -- the values of input and return
arguments to a function or method.
Parameters
----------
checks_on_return: Dict[str, bool]
Specifications for value checks on the return of the function being wrapped.
(see `check values`_ for valid specifications)
**checks: Dict[str, Dict[str, bool]]
Specifications for value checks on the input arguments of the function
being wrapped. Each keyword argument in `checks` is the name of a function
argument to be checked and the keyword value contains the value check
specifications.
.. _`check values`:
The value check specifications are defined within a dictionary containing
the keys defined below. If the dictionary is empty or omitting keys,
then the default value will be assumed for the missing keys.
================ ======= ================================================
Key Type Description
================ ======= ================================================
can_be_negative `bool` [DEFAULT `True`] values can be negative
can_be_complex `bool` [DEFAULT `False`] values can be complex numbers
can_be_inf `bool` [DEFAULT `True`] values can be :data:`~numpy.inf`
can_be_nan `bool` [DEFAULT `True`] values can be :data:`~numpy.nan`
none_shall_pass `bool` [DEFAULT `False`] values can be a python `None`
can_be_zero `bool` [DEFAULT `True`] values can be zero
================ ======= ================================================
Notes
-----
* Checking of function arguments `*args` and `**kwargs` is not supported.
Examples
--------
.. code-block:: python
from plasmapy.utils.decorators.checks import CheckValues
@CheckValues(arg1={'can_be_negative': False, 'can_be_nan': False},
arg2={'can_be_inf': False},
                     checks_on_return={'none_shall_pass': True})
def foo(arg1, arg2):
return None
# on a method
class Foo:
@CheckValues(arg1={'can_be_negative': False, 'can_be_nan': False},
arg2={'can_be_inf': False},
                         checks_on_return={'none_shall_pass': True})
def bar(self, arg1, arg2):
return None
"""
#: Default values for the possible 'check' keys.
# To add a new check to the class, the following needs to be done:
# 1. Add a key & default value to the `__check_defaults` dictionary
# 2. Add a corresponding if-statement to method `_check_value`
#
__check_defaults = {
"can_be_negative": True,
"can_be_complex": False,
"can_be_inf": True,
"can_be_nan": True,
"none_shall_pass": False,
"can_be_zero": True,
}
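    # For example, supporting a hypothetical 'can_be_empty' check would mean
    # adding "can_be_empty": True to the dictionary above and a matching
    # elif branch in `_check_value` below.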
def __init__(
self, checks_on_return: Dict[str, bool] = None, **checks: Dict[str, bool]
):
super().__init__(checks_on_return=checks_on_return, **checks)
def __call__(self, f):
"""
Decorate a function.
Parameters
----------
f
Function to be wrapped
Returns
-------
function
wrapped function of `f`
"""
self.f = f
wrapped_sign = inspect.signature(f)
@preserve_signature
@functools.wraps(f)
def wrapper(*args, **kwargs):
# map args and kwargs to function parameters
bound_args = wrapped_sign.bind(*args, **kwargs)
bound_args.apply_defaults()
# get checks
checks = self._get_value_checks(bound_args)
# check input arguments
for arg_name in checks:
# skip check of output/return
if arg_name == "checks_on_return":
continue
# check argument
self._check_value(
bound_args.arguments[arg_name], arg_name, checks[arg_name]
)
# call function
_return = f(**bound_args.arguments)
# check function return
if "checks_on_return" in checks:
self._check_value(
_return, "checks_on_return", checks["checks_on_return"]
)
return _return
return wrapper
def _get_value_checks(
self, bound_args: inspect.BoundArguments
) -> Dict[str, Dict[str, bool]]:
"""
Review :attr:`checks` and function bound arguments to build a complete 'checks'
dictionary. If a check key is omitted from the argument checks, then a default
value is assumed (see `check values`_).
Parameters
----------
bound_args: :class:`inspect.BoundArguments`
arguments passed into the function being wrapped
.. code-block:: python
bound_args = inspect.signature(f).bind(*args, **kwargs)
Returns
-------
Dict[str, Dict[str, bool]]
A complete 'checks' dictionary for checking function input arguments
and return.
"""
# initialize validation dictionary
out_checks = {}
# Iterate through function bound arguments + return and build `out_checks:
#
# artificially add "return" to parameters
things_to_check = bound_args.signature.parameters.copy()
things_to_check["checks_on_return"] = inspect.Parameter(
"checks_on_return",
inspect.Parameter.POSITIONAL_ONLY,
annotation=bound_args.signature.return_annotation,
)
for param in things_to_check.values():
# variable arguments are NOT checked
# e.g. in foo(x, y, *args, d=None, **kwargs) variable arguments
# *args and **kwargs will NOT be checked
#
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# grab the checks dictionary for the desired parameter
try:
param_in_checks = self.checks[param.name]
except KeyError:
# checks for parameter not specified
continue
# build `out_checks`
# read checks and/or apply defaults values
out_checks[param.name] = {}
for v_name, v_default in self.__check_defaults.items():
try:
out_checks[param.name][v_name] = param_in_checks.get(
v_name, v_default
)
except AttributeError:
# for the case that checks are defined for an argument,
# but is NOT a dictionary
# (e.g. CheckValues(x=u.cm) ... this scenario could happen
# during subclassing)
out_checks[param.name][v_name] = v_default
# Does `self.checks` indicate arguments not used by f?
missing_params = [
param for param in set(self.checks.keys()) - set(out_checks.keys())
]
if len(missing_params) > 0:
params_str = ", ".join(missing_params)
warnings.warn(
PlasmaPyWarning(
f"Expected to value check parameters {params_str} but they "
f"are missing from the call to {self.f.__name__}"
)
)
return out_checks
def _check_value(self, arg, arg_name: str, arg_checks: Dict[str, bool]):
"""
Perform checks `arg_checks` on function argument `arg`.
Parameters
----------
arg
The argument to be checked
arg_name: str
The name of the argument to be checked
arg_checks: Dict[str, bool]
The requested checks for the argument
Raises
------
ValueError
raised if a check fails
"""
if arg_name == "checks_on_return":
valueerror_msg = "The return value "
else:
valueerror_msg = f"The argument '{arg_name}' "
valueerror_msg += f"to function {self.f.__name__}() can not contain"
# check values
# * 'none_shall_pass' always needs to be checked first
ckeys = list(self.__check_defaults.keys())
ckeys.remove("none_shall_pass")
ckeys = ("none_shall_pass",) + tuple(ckeys)
for ckey in ckeys:
if ckey == "none_shall_pass":
if arg is None and arg_checks[ckey]:
break
elif arg is None:
raise ValueError(f"{valueerror_msg} Nones.")
elif ckey == "can_be_negative":
if not arg_checks[ckey] and np.any(arg < 0):
raise ValueError(f"{valueerror_msg} negative numbers.")
elif ckey == "can_be_complex":
if not arg_checks[ckey] and np.any(np.iscomplexobj(arg)):
raise ValueError(f"{valueerror_msg} complex numbers.")
elif ckey == "can_be_inf":
if not arg_checks[ckey] and np.any(np.isinf(arg)):
raise ValueError(f"{valueerror_msg} infs.")
elif ckey == "can_be_nan":
if not arg_checks["can_be_nan"] and np.any(np.isnan(arg)):
raise ValueError(f"{valueerror_msg} NaNs.")
elif ckey == "can_be_zero":
if not arg_checks[ckey] and np.any(arg == 0):
raise ValueError(f"{valueerror_msg} zeros.")
class CheckUnits(CheckBase):
"""
A decorator class to 'check' -- limit/control -- the units of input and return
arguments to a function or method.
Parameters
----------
checks_on_return: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the return of the function being wrapped.
(see `check units`_ for valid specifications)
**checks: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the input arguments of the function
being wrapped. Each keyword argument in `checks` is the name of a function
argument to be checked and the keyword value contains the unit check
specifications.
.. _`check units`:
Unit checks can be defined by passing one of the astropy
:mod:`~astropy.units`, a list of astropy units, or a dictionary containing
the keys defined below. Units can also be defined with function
annotations, but must be consistent with decorator `**checks` arguments if
used concurrently. If a key is omitted, then the default value will be assumed.
====================== ======= ================================================
Key Type Description
====================== ======= ================================================
units list of desired astropy :mod:`~astropy.units`
    equivalencies                  | [DEFAULT `None`] A list of equivalent pairs
                                   | to try if the units are not directly
                                   | convertible.
                                   | (see :mod:`~astropy.units.equivalencies`,
                                   | and/or `astropy equivalencies`_)
pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units
| to pass
====================== ======= ================================================
Notes
-----
* Checking of function arguments `*args` and `**kwargs` is not supported.
* Decorator does NOT perform any unit conversions.
* If it is desired that `None` values do not raise errors or warnings, then
include `None` in the list of units or as a default value for the function
argument.
* If units are not specified in `checks`, then the decorator will attempt
to identify desired units by examining the function annotations.
Examples
--------
Define units with decorator parameters::
import astropy.units as u
from plasmapy.utils.decorators import CheckUnits
@CheckUnits(arg1={'units': u.cm},
arg2=u.cm,
checks_on_return=[u.cm, u.km])
def foo(arg1, arg2):
return arg1 + arg2
# or on a method
class Foo:
@CheckUnits(arg1={'units': u.cm},
arg2=u.cm,
checks_on_return=[u.cm, u.km])
def bar(self, arg1, arg2):
return arg1 + arg2
Define units with function annotations::
import astropy.units as u
from plasmapy.utils.decorators import CheckUnits
@CheckUnits()
def foo(arg1: u.cm, arg2: u.cm) -> u.cm:
return arg1 + arg2
# or on a method
class Foo:
@CheckUnits()
def bar(self, arg1: u.cm, arg2: u.cm) -> u.cm:
return arg1 + arg2
Allow `None` values to pass, on input and output::
import astropy.units as u
from plasmapy.utils.decorators import CheckUnits
@CheckUnits(checks_on_return=[u.cm, None])
def foo(arg1: u.cm = None):
return arg1
Allow return values to have equivalent units::
import astropy.units as u
from plasmapy.utils.decorators import CheckUnits
@CheckUnits(arg1={'units': u.cm},
checks_on_return={'units': u.km,
'pass_equivalent_units': True})
def foo(arg1):
return arg1
Allow equivalent units to pass with specified equivalencies::
import astropy.units as u
from plasmapy.utils.decorators import CheckUnits
@CheckUnits(arg1={'units': u.K,
'equivalencies': u.temperature_energy(),
'pass_equivalent_units': True})
def foo(arg1):
return arg1
.. _astropy equivalencies:
https://docs.astropy.org/en/stable/units/equivalencies.html
"""
#: Default values for the possible 'check' keys.
    # To add a new check to the class, the following needs to be done:
# 1. Add a key & default value to the `__check_defaults` dictionary
# 2. Add a corresponding conditioning statement to `_get_unit_checks`
# 3. Add a corresponding behavior to `_check_unit`
#
__check_defaults = {
"units": None,
"equivalencies": None,
"pass_equivalent_units": False,
"none_shall_pass": False,
}
def __init__(
self,
checks_on_return: Union[u.Unit, List[u.Unit], Dict[str, Any]] = None,
**checks: Union[u.Unit, List[u.Unit], Dict[str, Any]],
):
super().__init__(checks_on_return=checks_on_return, **checks)
def __call__(self, f):
"""
Decorate a function.
Parameters
----------
f
Function to be wrapped
Returns
-------
function
wrapped function of `f`
"""
self.f = f
wrapped_sign = inspect.signature(f)
@preserve_signature
@functools.wraps(f)
def wrapper(*args, **kwargs):
# combine args and kwargs into dictionary
bound_args = wrapped_sign.bind(*args, **kwargs)
bound_args.apply_defaults()
# get checks
checks = self._get_unit_checks(bound_args)
# check (input) argument units
for arg_name in checks:
# skip check of output/return
if arg_name == "checks_on_return":
continue
# check argument
self._check_unit(
bound_args.arguments[arg_name], arg_name, checks[arg_name]
)
# call function
_return = f(**bound_args.arguments)
# check output
if "checks_on_return" in checks:
self._check_unit(
_return, "checks_on_return", checks["checks_on_return"]
)
return _return
return wrapper
def _get_unit_checks(
self, bound_args: inspect.BoundArguments
) -> Dict[str, Dict[str, Any]]:
"""
Review :attr:`checks` and function bound arguments to build a complete 'checks'
dictionary. If a check key is omitted from the argument checks, then a default
value is assumed (see `check units`_)
Parameters
----------
bound_args: :class:`inspect.BoundArguments`
arguments passed into the function being wrapped
.. code-block:: python
bound_args = inspect.signature(f).bind(*args, **kwargs)
Returns
-------
Dict[str, Dict[str, Any]]
A complete 'checks' dictionary for checking function input arguments
and return.
"""
# initialize validation dictionary
out_checks = {}
# Iterate through function bound arguments + return and build `out_checks`:
#
# artificially add "return" to parameters
things_to_check = bound_args.signature.parameters.copy()
things_to_check["checks_on_return"] = inspect.Parameter(
"checks_on_return",
inspect.Parameter.POSITIONAL_ONLY,
annotation=bound_args.signature.return_annotation,
)
for param in things_to_check.values():
# variable arguments are NOT checked
# e.g. in foo(x, y, *args, d=None, **kwargs) variable arguments
# *args and **kwargs will NOT be checked
#
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# grab the checks dictionary for the desired parameter
try:
param_checks = self.checks[param.name]
except KeyError:
param_checks = None
# -- Determine target units `_units` --
# target units can be defined in one of three ways (in
# preferential order):
# 1. direct keyword pass-through
# i.e. CheckUnits(x=u.cm)
# CheckUnits(x=[u.cm, u.s])
# 2. keyword pass-through via dictionary definition
# i.e. CheckUnits(x={'units': u.cm})
# CheckUnits(x={'units': [u.cm, u.s]})
# 3. function annotations
#
# * if option (3) is used simultaneously with option (1) or (2), then
# checks defined by (3) must be consistent with checks from (1) or (2)
# to avoid raising an error.
# * if None is included in the units list, then None values are allowed
#
_none_shall_pass = False
_units = None
_units_are_from_anno = False
if param_checks is not None:
# checks for argument were defined with decorator
try:
_units = param_checks["units"]
except TypeError:
# if checks is NOT None and is NOT a dictionary, then assume
# only units were specified
# e.g. CheckUnits(x=u.cm)
#
_units = param_checks
except KeyError:
# if checks does NOT have 'units' but is still a dictionary,
# then other check conditions may have been specified and the
# user is relying on function annotations to define desired
# units
_units = None
# If no units have been specified by decorator checks, then look for
# function annotations.
#
# Reconcile units specified by decorator checks and function annotations
_units_anno = None
if param.annotation is not inspect.Parameter.empty:
# unit annotations defined
_units_anno = param.annotation
if _units is None and _units_anno is None and param_checks is None:
# no checks specified and no unit annotations defined
continue
elif _units is None and _units_anno is None:
# checks specified, but NO unit checks
msg = "No astropy.units specified for "
if param.name == "checks_on_return":
msg += "return value "
else:
msg += f"argument {param.name} "
msg += f"of function {self.f.__name__}()."
raise ValueError(msg)
elif _units is None:
_units = _units_anno
_units_are_from_anno = True
_units_anno = None
# Ensure `_units` is an iterable
if not isinstance(_units, collections.abc.Iterable):
_units = [_units]
if not isinstance(_units_anno, collections.abc.Iterable):
_units_anno = [_units_anno]
# Is None allowed?
if None in _units or param.default is None:
_none_shall_pass = True
# Remove Nones
if None in _units:
_units = [t for t in _units if t is not None]
if None in _units_anno:
_units_anno = [t for t in _units_anno if t is not None]
# ensure all _units are astropy.units.Unit or physical types &
# define 'units' for unit checks &
# define 'none_shall_pass' check
_units = self._condition_target_units(
_units, from_annotations=_units_are_from_anno
)
_units_anno = self._condition_target_units(
_units_anno, from_annotations=True
)
if not all(_u in _units for _u in _units_anno):
raise ValueError(
f"For argument '{param.name}', "
f"annotation units ({_units_anno}) are not included in the units "
f"specified by decorator arguments ({_units}). Use either "
f"decorator arguments or function annotations to defined unit "
f"types, or make sure annotation specifications match decorator "
f"argument specifications."
)
if len(_units) == 0 and len(_units_anno) == 0 and param_checks is None:
# annotations did not specify units
continue
elif len(_units) == 0 and len(_units_anno) == 0:
# checks specified, but NO unit checks
msg = "No astropy.units specified for "
if param.name == "checks_on_return":
msg += "return value "
else:
msg += f"argument {param.name} "
msg += f"of function {self.f.__name__}()."
raise ValueError(msg)
out_checks[param.name] = {
"units": _units,
"none_shall_pass": _none_shall_pass,
}
# -- Determine target equivalencies --
# Unit equivalences can be defined by:
# 1. keyword pass-through via dictionary definition
# e.g. CheckUnits(x={'units': u.C,
# 'equivalencies': u.temperature})
#
# initialize equivalencies
try:
_equivs = param_checks["equivalencies"]
except (KeyError, TypeError):
_equivs = self.__check_defaults["equivalencies"]
# ensure equivalences are properly formatted
if _equivs is None or _equivs == [None]:
_equivs = None
elif isinstance(_equivs, Equivalency):
pass
elif isinstance(_equivs, (list, tuple)):
# flatten list to non-list elements
if isinstance(_equivs, tuple):
_equivs = [_equivs]
else:
_equivs = self._flatten_equivalencies_list(_equivs)
# ensure passed equivalencies list is structured properly
# [(), ...]
# or [Equivalency(), ...]
#
# * All equivalencies must be a list of 2, 3, or 4 element tuples
# structured like...
# (from_unit, to_unit, forward_func, backward_func)
#
if all(isinstance(el, Equivalency) for el in _equivs):
_equivs = reduce(add, _equivs)
else:
_equivs = self._normalize_equivalencies(_equivs)
out_checks[param.name]["equivalencies"] = _equivs
# -- Determine if equivalent units pass --
try:
peu = param_checks.get(
"pass_equivalent_units",
self.__check_defaults["pass_equivalent_units"],
)
except (AttributeError, TypeError):
peu = self.__check_defaults["pass_equivalent_units"]
out_checks[param.name]["pass_equivalent_units"] = peu
# Does `self.checks` indicate arguments not used by f?
missing_params = [
param for param in set(self.checks.keys()) - set(out_checks.keys())
]
if len(missing_params) > 0:
params_str = ", ".join(missing_params)
warnings.warn(
PlasmaPyWarning(
f"Expected to unit check parameters {params_str} but they "
f"are missing from the call to {self.f.__name__}"
)
)
return out_checks
def _check_unit(self, arg, arg_name: str, arg_checks: Dict[str, Any]):
"""
Perform unit checks `arg_checks` on function argument `arg`.
Parameters
----------
arg
The argument to be checked
arg_name: str
The name of the argument to be checked
arg_checks: Dict[str, Any]
The requested checks for the argument
Raises
------
ValueError
If `arg` is `None` when `arg_checks['none_shall_pass']=False`
TypeError
If `arg` does not have `units`
:class:`astropy.units.UnitTypeError`
If the units of `arg` do not satisfy conditions of `arg_checks`
"""
arg, unit, equiv, err = self._check_unit_core(arg, arg_name, arg_checks)
if err is not None:
raise err
def _check_unit_core(
self, arg, arg_name: str, arg_checks: Dict[str, Any]
) -> Tuple[
Union[None, u.Quantity],
Union[None, u.Unit],
Union[None, List[Any]],
Union[None, Exception],
]:
"""
Determines if `arg` passes unit checks `arg_checks` and if the units of
`arg` is equivalent to any units specified in `arg_checks`.
Parameters
----------
arg
The argument to be checked
arg_name: str
The name of the argument to be checked
arg_checks: Dict[str, Any]
The requested checks for the argument
Returns
-------
(`arg`, `unit`, `equivalencies`, `error`)
* `arg` is the original input argument `arg` or `None` if unit
checks fail
* `unit` is the identified astropy :mod:`~astropy.units` that `arg`
can be converted to or `None` if none exist
* `equivalencies` is the astropy :mod:`~astropy.units.equivalencies`
used for the unit conversion or `None`
* `error` is the `Exception` associated with the failed unit checks
or `None` for successful unit checks
"""
# initialize str for error messages
if arg_name == "checks_on_return":
err_msg = "The return value "
else:
err_msg = f"The argument '{arg_name}' "
err_msg += f"to function {self.f.__name__}()"
# initialize ValueError message
valueerror_msg = f"{err_msg} can not contain"
# initialize TypeError message
typeerror_msg = f"{err_msg} should be an astropy Quantity with "
if len(arg_checks["units"]) == 1:
typeerror_msg += f"the following unit: {arg_checks['units'][0]}"
else:
typeerror_msg += "one of the following units: "
for unit in arg_checks["units"]:
typeerror_msg += str(unit)
if unit != arg_checks["units"][-1]:
typeerror_msg += ", "
if arg_checks["none_shall_pass"]:
typeerror_msg += "or None "
# pass Nones if allowed
if arg is None:
if arg_checks["none_shall_pass"]:
return arg, None, None, None
else:
return None, None, None, ValueError(f"{valueerror_msg} Nones")
# check units
in_acceptable_units = []
equiv = arg_checks["equivalencies"]
for unit in arg_checks["units"]:
try:
in_acceptable_units.append(
arg.unit.is_equivalent(unit, equivalencies=equiv)
)
except AttributeError:
if hasattr(arg, "unit"):
err_specifier = (
"a 'unit' attribute without an 'is_equivalent' method"
)
else:
err_specifier = "no 'unit' attribute"
msg = (
f"{err_msg} has {err_specifier}. "
f"Use an astropy Quantity instead."
)
return None, None, None, TypeError(msg)
# How many acceptable units?
nacceptable = np.count_nonzero(in_acceptable_units)
unit = None
equiv = None
err = None
if nacceptable == 0:
# NO equivalent units
arg = None
err = u.UnitTypeError(typeerror_msg)
else:
# is there an exact match?
units_arr = np.array(arg_checks["units"])
units_equal_mask = np.equal(units_arr, arg.unit)
units_mask = np.logical_and(units_equal_mask, in_acceptable_units)
if np.count_nonzero(units_mask) == 1:
# matched exactly to a desired unit
unit = units_arr[units_mask][0]
equiv = arg_checks["equivalencies"]
elif nacceptable == 1:
# there is a match to 1 equivalent unit
unit = units_arr[in_acceptable_units][0]
equiv = arg_checks["equivalencies"]
if not arg_checks["pass_equivalent_units"]:
err = u.UnitTypeError(typeerror_msg)
elif arg_checks["pass_equivalent_units"]:
                # the units match more than one equivalent unit
pass
else:
                # the units match more than one equivalent unit, which is not allowed
arg = None
err = u.UnitTypeError(typeerror_msg)
return arg, unit, equiv, err
@staticmethod
def _condition_target_units(targets: List, from_annotations: bool = False):
"""
From a list of target units (either as a string or astropy
:class:`~astropy.units.Unit` objects), return a list of conditioned
:class:`~astropy.units.Unit` objects.
Parameters
----------
targets: list of target units
list of units (either as a string or :class:`~astropy.units.Unit`)
to be conditioned into astropy :class:`~astropy.units.Unit` objects
from_annotations: bool
(Default `False`) Indicates if `targets` originated from function/method
annotations versus decorator input arguments.
Returns
-------
list:
list of `targets` converted into astropy
:class:`~astropy.units.Unit` objects
Raises
------
TypeError
If `target` is not a valid type for :class:`~astropy.units.Unit` when
            `from_annotations == False`.
ValueError
If a `target` is a valid unit type but not a valid value for
:class:`~astropy.units.Unit`.
"""
# Note: this method does not allow for astropy physical types. This is
# done because we expect all use cases of CheckUnits to define the
# exact units desired.
#
allowed_units = []
for target in targets:
try:
target_unit = u.Unit(target)
allowed_units.append(target_unit)
except TypeError as err:
# not a unit type
if not from_annotations:
raise err
continue
return allowed_units
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies to ensure each is in a 4-tuple form::
(from_unit, to_unit, forward_func, backward_func)
`forward_func` maps `from_unit` into `to_unit` and `backward_func` does
the reverse.
Parameters
----------
equivalencies: list of equivalent pairs
list of astropy :mod:`~astropy.units.equivalencies` to be normalized
Raises
------
ValueError
if an equivalency can not be interpreted
Notes
-----
* the code here was copied and modified from
:func:`astropy.units.core._normalize_equivalencies` from AstroPy
version 3.2.3
* this will work on both the old style list equivalencies (pre AstroPy v3.2.1)
and the modern equivalencies defined with the
:class:`~astropy.units.equivalencies.Equivalency` class
"""
if equivalencies is None:
return []
normalized = []
def return_argument(x):
return x
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
from_unit, to_unit = equiv
a = b = return_argument
elif len(equiv) == 3:
from_unit, to_unit, a = equiv
b = a
elif len(equiv) == 4:
from_unit, to_unit, a, b = equiv
else:
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
if not (
from_unit is u.Unit(from_unit)
and (to_unit is None or to_unit is u.Unit(to_unit))
and callable(a)
and callable(b)
):
raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((from_unit, to_unit, a, b))
return normalized
def _flatten_equivalencies_list(self, elist):
"""
Given a list of equivalencies, flatten out any sub-element lists
Parameters
----------
elist: list
list of astropy :mod:`~astropy.units.equivalencies` to be flattened
Returns
-------
list
a flattened list of astropy :mod:`~astropy.units.equivalencies`
"""
new_list = []
for el in elist:
if not isinstance(el, list):
new_list.append(el)
else:
new_list.extend(self._flatten_equivalencies_list(el))
return new_list
def check_units(
func=None, checks_on_return: Dict[str, Any] = None, **checks: Dict[str, Any]
):
"""
A decorator to 'check' -- limit/control -- the units of input and return
arguments to a function or method.
Parameters
----------
func:
The function to be decorated
checks_on_return: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the return of the function being wrapped.
(see `check units`_ for valid specifications)
**checks: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the input arguments of the function
being wrapped. Each keyword argument in `checks` is the name of a function
argument to be checked and the keyword value contains the unit check
specifications.
.. _`check units`:
Unit checks can be defined by passing one of the astropy
:mod:`~astropy.units`, a list of astropy units, or a dictionary containing
the keys defined below. Units can also be defined with function
annotations, but must be consistent with decorator `**checks` arguments if
used concurrently. If a key is omitted, then the default value will be assumed.
    ====================== ======= ================================================
    Key                    Type    Description
    ====================== ======= ================================================
    units                          list of desired astropy :mod:`~astropy.units`
    equivalencies                  | [DEFAULT `None`] A list of equivalent pairs
                                   | to try if the units are not directly
                                   | convertible (see
                                   | :mod:`~astropy.units.equivalencies` and/or
                                   | `astropy equivalencies`_)
    pass_equivalent_units  `bool`  | [DEFAULT `False`] allow equivalent units
                                   | to pass
    ====================== ======= ================================================
Notes
-----
* Checking of function arguments `*args` and `**kwargs` is not supported.
    * Decorator does NOT perform any unit conversions; look to
:func:`~plasmapy.utils.decorators.validate_quantities` if that functionality is
desired.
* If it is desired that `None` values do not raise errors or warnings, then
include `None` in the list of units or as a default value for the function
argument.
* If units are not specified in `checks`, then the decorator will attempt
to identify desired units by examining the function annotations.
* Full functionality is defined by the class :class:`CheckUnits`.
Examples
--------
Define units with decorator parameters::
import astropy.units as u
from plasmapy.utils.decorators import check_units
@check_units(arg1={'units': u.cm},
arg2=u.cm,
checks_on_return=[u.cm, u.km])
def foo(arg1, arg2):
return arg1 + arg2
# or on a method
class Foo:
@check_units(arg1={'units': u.cm},
arg2=u.cm,
checks_on_return=[u.cm, u.km])
def bar(self, arg1, arg2):
return arg1 + arg2
Define units with function annotations::
import astropy.units as u
from plasmapy.utils.decorators import check_units
@check_units
def foo(arg1: u.cm, arg2: u.cm) -> u.cm:
return arg1 + arg2
# or on a method
class Foo:
@check_units
def bar(self, arg1: u.cm, arg2: u.cm) -> u.cm:
return arg1 + arg2
Allow `None` values to pass::
import astropy.units as u
from plasmapy.utils.decorators import check_units
@check_units(checks_on_return=[u.cm, None])
def foo(arg1: u.cm = None):
return arg1
Allow return values to have equivalent units::
import astropy.units as u
from plasmapy.utils.decorators import check_units
@check_units(arg1={'units': u.cm},
checks_on_return={'units': u.km,
'pass_equivalent_units': True})
def foo(arg1):
return arg1
Allow equivalent units to pass with specified equivalencies::
import astropy.units as u
from plasmapy.utils.decorators import check_units
@check_units(arg1={'units': u.K,
'equivalencies': u.temperature(),
'pass_equivalent_units': True})
def foo(arg1):
return arg1
.. _astropy equivalencies:
https://docs.astropy.org/en/stable/units/equivalencies.html
"""
if checks_on_return is not None:
checks["checks_on_return"] = checks_on_return
if func is not None:
# `check_units` called as a function
return CheckUnits(**checks)(func)
else:
# `check_units` called as a decorator "sugar-syntax"
return CheckUnits(**checks)
def check_values(
func=None, checks_on_return: Dict[str, bool] = None, **checks: Dict[str, bool]
):
"""
A decorator to 'check' -- limit/control -- the values of input and return
arguments to a function or method.
Parameters
----------
func:
The function to be decorated
checks_on_return: Dict[str, bool]
Specifications for value checks on the return of the function being wrapped.
(see `check values`_ for valid specifications)
**checks: Dict[str, Dict[str, bool]]
Specifications for value checks on the input arguments of the function
being wrapped. Each keyword argument in `checks` is the name of a function
argument to be checked and the keyword value contains the value check
specifications.
.. _`check values`:
The value check specifications are defined within a dictionary containing
the keys defined below. If the dictionary is empty or omitting keys,
then the default value will be assumed for the missing keys.
    ================ ======= ================================================
    Key              Type    Description
    ================ ======= ================================================
    can_be_negative  `bool`  [DEFAULT `True`] values can be negative
    can_be_complex   `bool`  [DEFAULT `False`] values can be complex numbers
    can_be_inf       `bool`  [DEFAULT `True`] values can be :data:`~numpy.inf`
    can_be_nan       `bool`  [DEFAULT `True`] values can be :data:`~numpy.nan`
    none_shall_pass  `bool`  [DEFAULT `False`] values can be a python `None`
    can_be_zero      `bool`  [DEFAULT `True`] values can be zero
    ================ ======= ================================================
Notes
-----
* Checking of function arguments `*args` and `**kwargs` is not supported.
* Full functionality is defined by the class :class:`CheckValues`.
Examples
--------
.. code-block:: python
from plasmapy.utils.decorators import check_values
@check_values(arg1={'can_be_negative': False, 'can_be_nan': False},
arg2={'can_be_inf': False},
                      checks_on_return={'none_shall_pass': True})
def foo(arg1, arg2):
return None
# on a method
class Foo:
@check_values(arg1={'can_be_negative': False, 'can_be_nan': False},
arg2={'can_be_inf': False},
                          checks_on_return={'none_shall_pass': True})
def bar(self, arg1, arg2):
return None
"""
if checks_on_return is not None:
checks["checks_on_return"] = checks_on_return
if func is not None:
# `check_values` called as a function
return CheckValues(**checks)(func)
else:
# `check_values` called as a decorator "sugar-syntax"
return CheckValues(**checks)
def check_relativistic(func=None, betafrac=0.05):
r"""
Warns or raises an exception when the output of the decorated
function is greater than `betafrac` times the speed of light.
Parameters
----------
func : `function`, optional
The function to decorate.
betafrac : float, optional
The minimum fraction of the speed of light that will raise a
`~plasmapy.utils.RelativityWarning`. Defaults to 5%.
Returns
-------
function
Decorated function.
Raises
------
TypeError
If `V` is not a `~astropy.units.Quantity`.
~astropy.units.UnitConversionError
If `V` is not in units of velocity.
ValueError
If `V` contains any `~numpy.nan` values.
~plasmapy.utils.exceptions.RelativityError
If `V` is greater than or equal to the speed of light.
Warns
-----
: `~plasmapy.utils.exceptions.RelativityWarning`
If `V` is greater than or equal to `betafrac` times the speed of light,
but less than the speed of light.
Examples
--------
>>> from astropy import units as u
>>> @check_relativistic
... def speed():
... return 1 * u.m / u.s
Passing in a custom `betafrac`:
>>> @check_relativistic(betafrac=0.01)
... def speed():
... return 1 * u.m / u.s
"""
def decorator(f):
@preserve_signature
@functools.wraps(f)
def wrapper(*args, **kwargs):
return_ = f(*args, **kwargs)
_check_relativistic(return_, f.__name__, betafrac=betafrac)
return return_
return wrapper
if func:
return decorator(func)
return decorator
def _check_relativistic(V, funcname, betafrac=0.05):
r"""
Warn or raise error for relativistic or superrelativistic
velocities.
Parameters
----------
V : ~astropy.units.Quantity
A velocity.
funcname : str
The name of the original function to be printed in the error
messages.
betafrac : float, optional
The minimum fraction of the speed of light that will generate
a warning. Defaults to 5%.
Raises
------
TypeError
If `V` is not a `~astropy.units.Quantity`.
~astropy.units.UnitConversionError
If `V` is not in units of velocity.
ValueError
If `V` contains any `~numpy.nan` values.
RelativityError
If `V` is greater than or equal to the speed of light.
Warns
-----
~plasmapy.utils.RelativityWarning
If `V` is greater than or equal to the specified fraction of the
speed of light.
Examples
--------
>>> from astropy import units as u
>>> _check_relativistic(1*u.m/u.s, 'function_calling_this')
"""
# TODO: Replace `funcname` with func.__name__?
errmsg = "V must be a Quantity with units of velocity in _check_relativistic"
if not isinstance(V, u.Quantity):
raise TypeError(errmsg)
try:
V_over_c = (V / c).to_value(u.dimensionless_unscaled)
except Exception:
raise u.UnitConversionError(errmsg)
    beta = np.max(np.abs(V_over_c))
if beta == np.inf:
raise RelativityError(f"{funcname} is yielding an infinite velocity.")
elif beta >= 1:
raise RelativityError(
f"{funcname} is yielding a velocity that is {str(round(beta, 3))} "
f"times the speed of light."
)
elif beta >= betafrac:
warnings.warn(
f"{funcname} is yielding a velocity that is "
f"{str(round(beta * 100, 3))}% of the speed of "
f"light. Relativistic effects may be important.",
RelativityWarning,
)
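# Worked sketch (added for illustration; not part of the module). With the
# default betafrac of 0.05, a velocity of 30_000 km/s gives
# beta = 30_000 / 299_792.458 ~= 0.100, so a RelativityWarning is emitted,
# while 1 km/s gives beta ~= 3.3e-6 and passes silently; an infinite
# velocity raises RelativityError instead.
#
#   import astropy.units as u
#   _check_relativistic(30_000 * u.km / u.s, 'demo')   # warns
#   _check_relativistic(1 * u.km / u.s, 'demo')        # silent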
| 35.317212 | 90 | 0.553307 |
aced260a9936c0dfddfb75b90db313db9395ec17 | 2,957 | py | Python | lib/dN_dS_ratio/Utils/Data_Process_Utils.py | kbasecollaborations/dN_dS_ratio | 22b7384aafceda77584f44a134ed0f1e32b99d5c | [
"MIT"
] | null | null | null | lib/dN_dS_ratio/Utils/Data_Process_Utils.py | kbasecollaborations/dN_dS_ratio | 22b7384aafceda77584f44a134ed0f1e32b99d5c | [
"MIT"
] | null | null | null | lib/dN_dS_ratio/Utils/Data_Process_Utils.py | kbasecollaborations/dN_dS_ratio | 22b7384aafceda77584f44a134ed0f1e32b99d5c | [
"MIT"
] | 1 | 2021-03-13T15:13:53.000Z | 2021-03-13T15:13:53.000Z | import json
import csv
import os
import re
import gzip
import logging
import subprocess
class Data_Process_Utils:
def __init__(self):
self.path = "/kb/module/deps"
pass
def run_cmd(self, cmd):
"""
        This function runs a third-party command line tool,
        e.g. bgzip.
        :param cmd: command to be run, as a list of arguments
:return: success
"""
command = " ".join(cmd)
print(command)
logging.info("Running command " + command)
cmdProcess = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
for line in cmdProcess.stdout:
logging.info(line.decode("utf-8").rstrip())
cmdProcess.wait()
logging.info('return code: ' + str(cmdProcess.returncode))
if cmdProcess.returncode != 0:
raise ValueError('Error in running command with return code: '
+ command
+ str(cmdProcess.returncode) + '\n')
logging.info("command " + command + " ran successfully")
return "success"
def bgzip_vcf_file(self, filepath):
'''
:param filepath:
:return: bgzipped file path
'''
bzfilepath = filepath + ".gz"
command = ["bgzip", filepath]
self.run_cmd(command)
return bzfilepath
def index_vcf_file(self, filepath):
'''
:param filepath:
:return:
'''
command = ["tabix", "-p", "vcf", filepath]
self.run_cmd(command)
def validate_params(self, params):
'''
:param params:
:return:
'''
if 'genome_ref' not in params:
raise ValueError('required genome_ref field was not defined')
elif 'variation_ref' not in params:
raise ValueError('required variation_ref field was not defined')
elif 'gene_id' not in params:
raise ValueError('required gene_id field was not defined')
def filter_gff(self, gene_id, gff_path, gff_subsample_path):
'''
:param gene_id:
:param gff_path:
:param gff_subsample_path:
:return:
'''
command = ['grep']
command.append("\'ID=" + gene_id +"\'")
command.append(gff_path)
command.extend(['>>', gff_subsample_path])
self.run_cmd(command)
def tabix_query(self, filepath, chrom, start, end, subsample_vcf):
'''
:param filepath:
:param chrom:
:param start:
:param end:
:param subsample_vcf:
:return:
'''
command = ['tabix']
command.append(filepath)
command.append(chrom + ":" + start + "-" + end)
command.extend(['>', subsample_vcf])
self.run_cmd(command)
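# Usage sketch (added for illustration; not part of the module). The file
# paths are hypothetical, and bgzip/tabix must be available on PATH:
#
#   utils = Data_Process_Utils()
#   gz_path = utils.bgzip_vcf_file('/data/variants.vcf')   # -> '/data/variants.vcf.gz'
#   utils.index_vcf_file(gz_path)                          # creates variants.vcf.gz.tbi
#   utils.tabix_query(gz_path, 'chr1', '10000', '20000', '/data/subsample.vcf')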
| 29.277228 | 78 | 0.538383 |
aced273c8bc553c388d919e143887d8e88dbd380 | 9,966 | py | Python | tests/test_v1/test_task.py | mostaphaRoudsari/hera-workflows | 2f82a7690ca42e48c3ba2f9911dbd4715aa2e093 | [
"MIT"
] | null | null | null | tests/test_v1/test_task.py | mostaphaRoudsari/hera-workflows | 2f82a7690ca42e48c3ba2f9911dbd4715aa2e093 | [
"MIT"
] | null | null | null | tests/test_v1/test_task.py | mostaphaRoudsari/hera-workflows | 2f82a7690ca42e48c3ba2f9911dbd4715aa2e093 | [
"MIT"
] | null | null | null | from typing import Dict, List, Tuple
from argo.workflows.client import V1alpha1Arguments, V1alpha1Inputs, V1Toleration
from pydantic import BaseModel, ValidationError
import pytest
from hera.v1.empty_dir_volume import EmptyDirVolume
from hera.v1.existing_volume import ExistingVolume
from hera.v1.resources import Resources
from hera.v1.retry import Retry
from hera.v1.task import Task
from hera.v1.toleration import GPUToleration
from hera.v1.volume import Volume
class MockModel(BaseModel):
field1: int = 1
field2: int = 2
def noop():
pass
def op(a):
print(a)
def kwarg_op(a: int = 42):
print(a)
def kwarg_multi_op(a: int = 42, b: int = 43):
print(a, b)
def multiop(a, b, c):
print(a, b, c)
def typedop(a) -> List[Dict[str, Tuple[int, int]]]:
print(a)
return [{'a': (a, a)}]
def longop(
very_long_parameter_name,
very_very_long_parameter_name,
very_very_very_long_parameter_name,
very_very_very_very_long_parameter_name,
very_very_very_very_very_long_parameter_name,
):
print(42)
def test_next_and_shifting_set_correct_dependencies():
t1, t2, t3 = Task('t1', noop), Task('t2', noop), Task('t3', noop)
t1.next(t2).next(t3)
assert t2.argo_task.dependencies == ['t1']
assert t3.argo_task.dependencies == ['t2']
t4, t5, t6 = Task('t4', noop), Task('t5', noop), Task('t6', noop)
t4 >> t5 >> t6
assert t5.argo_task.dependencies == ['t4']
assert t6.argo_task.dependencies == ['t5']
def test_retry_limits_fail_validation():
with pytest.raises(ValidationError):
Retry(duration=5, max_duration=4)
def test_func_and_func_param_validation_raises_on_args_not_passed():
with pytest.raises(AssertionError) as e:
Task('t', op, [])
assert str(e.value) == 'no parameters passed for function'
def test_func_and_func_param_validation_raises_on_difference():
with pytest.raises(AssertionError) as e:
Task('t', op, [{'a': 1}, {'b': 1}])
assert str(e.value) == 'mismatched function arguments and passed parameters'
def test_param_getter_returns_empty():
t = Task('t', noop)
assert not t.get_parameters()
def test_param_getter_parses_on_multi_params():
t = Task('t', op, [{'a': 1}, {'a': 2}, {'a': 3}])
params = t.get_parameters()
for p in params:
assert p.name == 'a'
assert p.value == '{{item.a}}'
def test_param_getter_parses_single_param_val_on_json_payload():
t = Task('t', op, [{'a': 1}])
param = t.get_parameters()[0]
assert param.name == 'a'
assert param.value == '1' # from json.dumps
def test_param_getter_parses_single_param_val_on_base_model_payload():
t = Task('t', op, [{'a': MockModel()}])
param = t.get_parameters()[0]
assert param.name == 'a'
assert param.value == '{"field1": 1, "field2": 2}'
def test_param_script_portion_adds_formatted_json_calls():
t = Task('t', op, [{'a': 1}])
script = t.get_param_script_portion()
assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n'
def test_script_getter_returns_expected_string():
t = Task('t', op, [{'a': 1}])
script = t.get_script()
assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\n'
t = Task('t', typedop, [{'a': 1}])
script = t.get_script()
assert script == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\nreturn [{\'a\': (a, a)}]\n'
def test_script_getter_parses_multi_line_function():
t = Task(
't',
longop,
[
{
'very_long_parameter_name': 1,
'very_very_long_parameter_name': 2,
'very_very_very_long_parameter_name': 3,
'very_very_very_very_long_parameter_name': 4,
'very_very_very_very_very_long_parameter_name': 5,
}
],
)
expected_script = """import json
very_long_parameter_name = json.loads('{{inputs.parameters.very_long_parameter_name}}')
very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_long_parameter_name}}')
very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_long_parameter_name}}')
very_very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_very_long_parameter_name}}')
very_very_very_very_very_long_parameter_name = json.loads('{{inputs.parameters.very_very_very_very_very_long_parameter_name}}')
print(42)
"""
script = t.get_script()
assert script == expected_script
def test_resources_returned_with_appropriate_limits():
r = Resources()
t = Task('t', op, [{'a': 1}], resources=r)
resources = t.get_resources()
assert resources.limits['cpu'] == '1'
assert resources.limits['memory'] == '4Gi'
def test_resources_returned_with_gpus():
r = Resources(gpus=2)
t = Task('t', op, [{'a': 1}], resources=r)
resources = t.get_resources()
assert resources.requests['nvidia.com/gpu'] == '2'
assert resources.limits['nvidia.com/gpu'] == '2'
def test_parallel_items_assemble_base_models():
t = Task(
't',
multiop,
[
{'a': 1, 'b': {'d': 2, 'e': 3}, 'c': MockModel()},
{'a': 1, 'b': {'d': 2, 'e': 3}, 'c': MockModel()},
{'a': 1, 'b': {'d': 2, 'e': 3}, 'c': MockModel()},
],
)
items = t.get_parallel_items()
for item in items:
assert item['a'] == '1'
assert item['b'] == '{"d": 2, "e": 3}'
assert item['c'] == '{"field1": 1, "field2": 2}'
def test_volume_mounts_returns_expected_volumes():
r = Resources(
volume=Volume(name='v1', size='1Gi', mount_path='/v1'),
existing_volume=ExistingVolume(name='v2', mount_path='/v2'),
empty_dir_volume=EmptyDirVolume(name='v3'),
)
t = Task('t', noop, resources=r)
vs = t.get_volume_mounts()
assert vs[0].name == 'v1'
assert vs[0].mount_path == '/v1'
assert vs[1].name == 'v2'
assert vs[1].mount_path == '/v2'
assert vs[2].name == 'v3'
assert vs[2].mount_path == '/dev/shm'
def test_gpu_toleration_returns_expected_toleration():
tn = GPUToleration
assert tn.key == 'nvidia.com/gpu'
assert tn.effect == 'NoSchedule'
assert tn.operator == 'Equal'
assert tn.value == 'present'
def test_task_command_parses():
t = Task('t', op, [{'a': MockModel()}])
assert t.get_command() == ['python']
def test_task_spec_returns_with_parallel_items():
t = Task('t', op, [{'a': 1}, {'a': 1}, {'a': 1}])
s = t.get_task_spec()
items = [{'a': '1'}, {'a': '1'}, {'a': '1'}]
assert s.name == 't'
assert s.template == 't'
assert len(s.arguments.parameters) == 1
assert len(s.with_items) == 3
assert s.with_items == items
def test_task_spec_returns_with_single_values():
t = Task('t', op, [{'a': 1}])
s = t.get_task_spec()
assert s.name == 't'
assert s.template == 't'
assert len(s.arguments.parameters) == 1
assert s.arguments.parameters[0].name == 'a'
assert s.arguments.parameters[0].value == '1'
def test_task_template_does_not_contain_gpu_references():
t = Task('t', op, [{'a': 1}], resources=Resources())
tt = t.get_task_template()
assert isinstance(tt.name, str)
assert isinstance(tt.script.source, str)
assert isinstance(tt.arguments, V1alpha1Arguments)
assert isinstance(tt.inputs, V1alpha1Inputs)
assert tt.node_selector is None
assert tt.tolerations is None
assert tt.retry_strategy is None
def test_task_template_contains_expected_field_values_and_types():
t = Task(
't',
op,
[{'a': 1}],
resources=Resources(gpus=1),
tolerations=[GPUToleration],
node_selectors={'abc': '123-gpu'},
retry=Retry(duration=1, max_duration=2),
)
tt = t.get_task_template()
assert isinstance(tt.name, str)
assert isinstance(tt.script.source, str)
assert isinstance(tt.arguments, V1alpha1Arguments)
assert isinstance(tt.inputs, V1alpha1Inputs)
assert isinstance(tt.node_selector, dict)
assert isinstance(tt.tolerations, list)
assert all([isinstance(x, V1Toleration) for x in tt.tolerations])
assert tt.name == 't'
assert tt.script.source == 'import json\na = json.loads(\'{{inputs.parameters.a}}\')\n\nprint(a)\n'
assert tt.arguments.parameters[0].name == 'a'
assert tt.inputs.parameters[0].name == 'a'
assert len(tt.tolerations) == 1
assert tt.tolerations[0].key == 'nvidia.com/gpu'
assert tt.tolerations[0].effect == 'NoSchedule'
assert tt.tolerations[0].operator == 'Equal'
assert tt.tolerations[0].value == 'present'
assert tt.retry_strategy is not None
assert tt.retry_strategy.backoff.duration == '1'
assert tt.retry_strategy.backoff.max_duration == '2'
def test_task_template_contains_expected_retry_strategy():
r = Retry(duration=3, max_duration=9)
t = Task('t', noop, retry=r)
assert t.retry.duration == 3
assert t.retry.max_duration == 9
tt = t.get_task_template()
tr = t.get_retry_strategy()
template_backoff = tt.retry_strategy.backoff
retry_backoff = tr.backoff
assert int(template_backoff.duration) == int(retry_backoff.duration)
assert int(template_backoff.max_duration) == int(retry_backoff.max_duration)
def test_task_get_retry_returns_expected_none():
t = Task('t', noop)
tr = t.get_retry_strategy()
assert tr is None
def test_task_sets_user_kwarg_override():
t = Task('t', kwarg_op, [{'a': 43}])
assert t.parameters[0].name == 'a'
assert t.parameters[0].value == '43'
def test_task_sets_kwarg():
t = Task('t', kwarg_op)
assert t.parameters[0].name == 'a'
assert t.parameters[0].value == '42'
t = Task('t', kwarg_multi_op, [{'a': 50}])
assert t.parameters[0].name == 'a'
assert t.parameters[0].value == '50'
assert t.parameters[1].name == 'b'
assert t.parameters[1].value == '43'
| 30.384146 | 127 | 0.649709 |
aced27d37d9b30a1d256b2322966ac2115925ef3 | 911 | py | Python | app/utils/RequestUtil.py | xiao12417/flask_standard_sample | 8580cb33660247c16a7e63ff7a55f5478ac98a5b | [
"MIT"
] | 5 | 2016-05-13T06:44:52.000Z | 2021-03-21T11:55:31.000Z | app/utils/RequestUtil.py | xiao12417/flask_standard_sample | 8580cb33660247c16a7e63ff7a55f5478ac98a5b | [
"MIT"
] | null | null | null | app/utils/RequestUtil.py | xiao12417/flask_standard_sample | 8580cb33660247c16a7e63ff7a55f5478ac98a5b | [
"MIT"
] | 1 | 2017-03-27T14:48:45.000Z | 2017-03-27T14:48:45.000Z | #coding=utf-8
'''
Created on Aug 21, 2015
@author: hzwangzhiwei
'''
from app.lib.http_accept_language.Parser import AcceptLangParser
# Get a request parameter (POST or GET)
def get_parameter(request, key, default = None):
'''
    info: get a request parameter, covering GET and POST; other request methods are ignored
'''
    # POST parameter
if request.method == 'POST':
param = request.form.get(key, default)
    # GET parameter
elif request.method == 'GET':
param = request.args.get(key, default)
else:
return default
return param
# Client IP address
def get_request_ip(request):
return request.remote_addr
# HTTP method used by the client
def get_request_method(request):
return request.method
def get_request_ua(request):
return request.headers.get('User-Agent', '')
def get_request_accept_lang(request):
    return request.environ.get('HTTP_ACCEPT_LANGUAGE', '')
def get_request_lang(request):
    return AcceptLangParser().get_lang(request.environ.get('HTTP_ACCEPT_LANGUAGE', ''))
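# Usage sketch (added for illustration; not part of the module), inside a
# hypothetical Flask view; `app` is an assumed Flask application:
#
#   from flask import request
#   from app.utils import RequestUtil
#
#   @app.route('/echo', methods=['GET', 'POST'])
#   def echo():
#       name = RequestUtil.get_parameter(request, 'name', default='anonymous')
#       return '{} from {}'.format(name, RequestUtil.get_request_ip(request))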
aced2894d677463324c8826b717c46693f53e624 | 819 | py | Python | tests/components/garages_amsterdam/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/garages_amsterdam/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/garages_amsterdam/conftest.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Test helpers."""
from unittest.mock import Mock, patch
import pytest
@pytest.fixture(autouse=True)
def mock_cases():
"""Mock garages_amsterdam garages."""
with patch(
"garages_amsterdam.GaragesAmsterdam.all_garages",
return_value=[
Mock(
garage_name="IJDok",
free_space_short=100,
free_space_long=10,
short_capacity=120,
long_capacity=60,
state="ok",
),
Mock(
garage_name="Arena",
free_space_short=200,
free_space_long=20,
short_capacity=240,
long_capacity=80,
state="error",
),
],
) as mock_get_garages:
yield mock_get_garages
| 24.818182 | 57 | 0.507937 |
aced28d3475972c2e89892cddaad62115c8b788e | 9,025 | py | Python | stubs.min/System/Windows/Media/__init___parts/GuidelineSet.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Media/__init___parts/GuidelineSet.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Media/__init___parts/GuidelineSet.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class GuidelineSet(Animatable,ISealable,IAnimatable,IResource):
"""
Represents a collection of guide lines that can assist in adjusting rendered figures to a device pixel grid.
GuidelineSet()
GuidelineSet(guidelinesX: Array[float],guidelinesY: Array[float])
"""
def Clone(self):
"""
Clone(self: GuidelineSet) -> GuidelineSet
Creates a modifiable clone of this System.Windows.Media.GuidelineSet,making
deep copies of this object's values. When copying dependency properties,this
method copies resource references and data bindings (but they might no longer
resolve) but not animations or their current values.
Returns: A modifiable clone of the current object. The cloned object's
System.Windows.Freezable.IsFrozen property will be false even if the source's
System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified
System.Windows.Freezable using base (non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValue(self):
"""
CloneCurrentValue(self: GuidelineSet) -> GuidelineSet
Creates a modifiable clone of this System.Windows.Media.GuidelineSet object,
making deep copies of this object's current values. Resource references,data
bindings,and animations are not copied,but their current values are.
Returns: A modifiable clone of the current object. The cloned object's
System.Windows.Freezable.IsFrozen property will be false even if the source's
System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified
System.Windows.Freezable using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
""" CreateInstanceCore(self: GuidelineSet) -> Freezable """
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Animatable,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Animatable object unmodifiable or
determines whether it can be made unmodifiable.
isChecking: true if this method should simply determine whether this instance can be
frozen. false if this instance should actually freeze itself when this method
is called.
Returns: If isChecking is true,this method returns true if this
System.Windows.Media.Animation.Animatable can be made unmodifiable,or false if
it cannot be made unmodifiable. If isChecking is false,this method returns
true if the if this System.Windows.Media.Animation.Animatable is now
unmodifiable,or false if it cannot be made unmodifiable,with the side effect
of having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable
using base (non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified
System.Windows.Freezable. If the object has animated dependency properties,
their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure
and is not intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPrope
rtyChangedEventArgs) to also invoke any System.Windows.Freezable.Changed
handlers in response to a changing dependency property of type
System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old
and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid
thread. Inheritors of System.Windows.Freezable must call this method at the
beginning of any API that reads data members that are not dependency
properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize
the value for the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized;
otherwise,false.
ShouldSerializeProperty(self: Window_16$17,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Label_17$18,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: TextBox_18$19,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Button_19$20,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: CheckBox_20$21,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the
System.Windows.Freezable and invokes its System.Windows.Freezable.OnChanged
method. Classes that derive from System.Windows.Freezable should call this
method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being
accessed from a valid threading context. System.Windows.Freezable inheritors
should call this method at the beginning of any API that writes to data members
that are not dependency properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,guidelinesX=None,guidelinesY=None):
"""
__new__(cls: type)
__new__(cls: type,guidelinesX: Array[float],guidelinesY: Array[float])
"""
pass
GuidelinesX=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a series of coordinate values that represent guide lines on the X-axis.
Get: GuidelinesX(self: GuidelineSet) -> DoubleCollection
Set: GuidelinesX(self: GuidelineSet)=value
"""
GuidelinesY=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a series of coordinate values that represent guide lines on the Y-axis.
Get: GuidelinesY(self: GuidelineSet) -> DoubleCollection
Set: GuidelinesY(self: GuidelineSet)=value
"""
GuidelinesXProperty=None
GuidelinesYProperty=None
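# Usage sketch (added for illustration; not part of the generated stub).
# Assumes an IronPython/WPF host where PresentationCore is loadable; `dc`
# is a hypothetical System.Windows.Media.DrawingContext:
#
#   from System import Array
#   from System.Windows.Media import GuidelineSet
#   gs = GuidelineSet(Array[float]([0.5]), Array[float]([0.5]))
#   dc.PushGuidelineSet(gs)   # snap subsequent drawing to the pixel grid
#   # ... draw figures here ...
#   dc.Pop()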
| 40.111111 | 215 | 0.728199 |
aced2a9b23d547d120ba361afe91d26b3d61cc51 | 3,750 | py | Python | lecture_02/homework2/tasks/text_processing.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | null | null | null | lecture_02/homework2/tasks/text_processing.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | 2 | 2020-12-30T19:39:36.000Z | 2020-12-30T21:49:33.000Z | lecture_02/homework2/tasks/text_processing.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | null | null | null | """
Given a file containing text. Complete using only default collections:
1) Find 10 longest words consisting from largest amount of unique symbols
2) Find rarest symbol for document
3) Count every punctuation char
4) Count every non ascii char
5) Find most common non ascii char for document
"""
from string import punctuation
from typing import List
def get_longest_diverse_words(file_path: str) -> List[str]:
"""Gets 10 longest words consisting from largest amount of unique symbols and
returns them in a list.
Args:
file_path: the pathname (absolute or relative to the current working directory)
of the file to be opened.
Returns:
List of 10 longest unique words.
"""
words_counter = {}
with open(file_path, encoding="unicode-escape") as file:
for line in file:
words = line.split()
for word in words:
word = word.strip(punctuation)
if word not in words_counter:
words_counter[word] = len(frozenset(word))
return sorted(
words_counter, key=lambda dict_key: words_counter[dict_key], reverse=True
)[:10]
def get_rarest_char(file_path: str) -> str:
"""Gets rarest symbol for document and returns it.
Args:
file_path: the pathname (absolute or relative to the current working directory)
of the file to be opened.
Returns:
rarest symbol.
"""
symbols_counter = {}
with open(file_path, encoding="unicode-escape") as file:
for line in file:
for symbol in line:
if symbol not in symbols_counter:
symbols_counter[symbol] = 1
else:
symbols_counter[symbol] += 1
return min(symbols_counter, key=lambda dict_key: symbols_counter[dict_key])
def count_punctuation_chars(file_path: str) -> int:
"""Counts punctuation characters and returns their amount.
Args:
file_path: the pathname (absolute or relative to the current working directory)
of the file to be opened.
Returns:
amount of punctuation characters.
"""
amount = 0
with open(file_path, encoding="unicode-escape") as file:
for line in file:
for symbol in line:
if symbol in punctuation:
amount += 1
return amount
def count_non_ascii_chars(file_path: str) -> int:
"""Counts non ascii characters and returns their amount.
Args:
file_path: the pathname (absolute or relative to the current working directory)
of the file to be opened.
Returns:
amount of non ascii characters.
"""
amount = 0
with open(file_path, encoding="unicode-escape") as file:
for line in file:
for symbol in line:
if not symbol.isascii():
amount += 1
return amount
def get_most_common_non_ascii_char(file_path: str) -> str:
"""Gets the most common non ascii character from a text document and returns it.
Args:
file_path: the pathname (absolute or relative to the current working directory)
of the file to be opened.
Returns:
the most common non ascii character.
"""
symbols_counter = {}
with open(file_path, encoding="unicode-escape") as file:
for line in file:
symbols = (symbol for symbol in line if not symbol.isascii())
for symbol in symbols:
if symbol not in symbols_counter:
symbols_counter[symbol] = 1
else:
symbols_counter[symbol] += 1
return max(symbols_counter, key=lambda dict_key: symbols_counter[dict_key])
| 30.737705 | 87 | 0.629867 |
aced2b822df24c01e3d175974e799f1b41ce92ca | 452 | py | Python | pvm/events/end_event.py | gosion/pyPvm | d7326799c907b660db11b02fd16843fdb4733eb7 | [
"MIT"
] | null | null | null | pvm/events/end_event.py | gosion/pyPvm | d7326799c907b660db11b02fd16843fdb4733eb7 | [
"MIT"
] | null | null | null | pvm/events/end_event.py | gosion/pyPvm | d7326799c907b660db11b02fd16843fdb4733eb7 | [
"MIT"
] | null | null | null | from pvm.node import Node
class EndEvent(Node):
"""流程结束事件
"""
def __init__(self, id=None):
super(EndEvent, self).__init__(id)
def add_outgoing_transition(self, transition) -> None:
raise NotImplemented()
def execute(self, token):
token.process_context.logger.info(
"End Event({}) occurs.".format(self._id)
)
return [t for t in self.outgoing_transitions if t.validate(token)]
| 23.789474 | 74 | 0.628319 |
aced2c4cadcfa439015ccfba315bd92ecdcdae16 | 622 | py | Python | src/roots.py | misssoft/Fan.Python | c9ffffefa44bc9a67dbf39089acc93b14c1d761c | [
"MIT"
] | null | null | null | src/roots.py | misssoft/Fan.Python | c9ffffefa44bc9a67dbf39089acc93b14c1d761c | [
"MIT"
] | null | null | null | src/roots.py | misssoft/Fan.Python | c9ffffefa44bc9a67dbf39089acc93b14c1d761c | [
"MIT"
] | null | null | null | import sys
def sqrt(x):
'''Compute square roots using the method of Heron of Alexandria
Args:
x: The number for which the square root is to be computed.
Returns:
The square root of x.
'''
if x<0:
raise ValueError("Cannot compute square root of a negative number.")
guess = x
i=0
while guess * guess != x and i<20:
guess = (guess + x/guess) / 2.0
i+=1
return guess
def main(x):
try:
print(sqrt(x))
except ValueError as e:
print(e, file=sys.stderr)
print("Continues")
if __name__ == '__main__':
main(sys.argv[1])
| 22.214286 | 76 | 0.583601 |
aced2d43689fd968d0527b03c90e2c96f35b3062 | 1,083 | py | Python | stanford/sms-tools/lectures/03-Fourier-properties/plots-code/zero-padding.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | [
"MIT"
] | 1 | 2021-03-12T18:32:06.000Z | 2021-03-12T18:32:06.000Z | stanford/sms-tools/lectures/03-Fourier-properties/plots-code/zero-padding.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | [
"MIT"
] | null | null | null | stanford/sms-tools/lectures/03-Fourier-properties/plots-code/zero-padding.py | phunc20/dsp | e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 6))
M = 8
N1 = 8
N2 = 16
N3 = 32
x = np.cos(2*np.pi*2/M*np.arange(M)) * np.hanning(M)
plt.subplot(4,1,1)
plt.title('x, M=8')
plt.plot(np.arange(-M/2.0,M/2), x, 'b', marker='x', lw=1.5)
plt.axis([-M/2,M/2-1,-1,1])
mX = 20 * np.log10(np.abs(fftshift(fft(x, N1))))
plt.subplot(4,1,2)
plt.plot(np.arange(-N1/2.0,N1/2), mX, marker='x', color='r', lw=1.5)
plt.axis([-N1/2,N1/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX1, N=8')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N2))))
plt.subplot(4,1,3)
plt.plot(np.arange(-N2/2.0,N2/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N2/2,N2/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX2, N=16')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N3))))
plt.subplot(4,1,4)
plt.plot(np.arange(-N3/2.0,N3/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N3/2,N3/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX3, N=32')
plt.tight_layout()
plt.savefig('zero-padding.png')
plt.show()
| 27.769231 | 68 | 0.641736 |
aced2d7c4f1f933476449b55a4774f7eb3bf15af | 42,856 | py | Python | patroni/postgresql/__init__.py | michael-todorovic/patroni | 09f2f579d79c8afe01afc09b6b5ea3496912cc8d | [
"MIT"
] | null | null | null | patroni/postgresql/__init__.py | michael-todorovic/patroni | 09f2f579d79c8afe01afc09b6b5ea3496912cc8d | [
"MIT"
] | null | null | null | patroni/postgresql/__init__.py | michael-todorovic/patroni | 09f2f579d79c8afe01afc09b6b5ea3496912cc8d | [
"MIT"
] | null | null | null | import logging
import os
import psycopg2
import shlex
import shutil
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from dateutil import tz
from datetime import datetime
from patroni.postgresql.callback_executor import CallbackExecutor
from patroni.postgresql.bootstrap import Bootstrap
from patroni.postgresql.cancellable import CancellableSubprocess
from patroni.postgresql.config import ConfigHandler, mtime
from patroni.postgresql.connection import Connection, get_connection_cursor
from patroni.postgresql.misc import parse_history, parse_lsn, postgres_major_version_to_int
from patroni.postgresql.postmaster import PostmasterProcess
from patroni.postgresql.slots import SlotsHandler
from patroni.exceptions import PostgresConnectionException
from patroni.utils import Retry, RetryFailedError, polling_loop, data_directory_is_empty, parse_int
from psutil import TimeoutExpired
from threading import current_thread, Lock
logger = logging.getLogger(__name__)
ACTION_ON_START = "on_start"
ACTION_ON_STOP = "on_stop"
ACTION_ON_RESTART = "on_restart"
ACTION_ON_RELOAD = "on_reload"
ACTION_ON_ROLE_CHANGE = "on_role_change"
ACTION_NOOP = "noop"
STATE_RUNNING = 'running'
STATE_REJECT = 'rejecting connections'
STATE_NO_RESPONSE = 'not responding'
STATE_UNKNOWN = 'unknown'
STOP_POLLING_INTERVAL = 1
@contextmanager
def null_context():
yield
class Postgresql(object):
POSTMASTER_START_TIME = "pg_catalog.pg_postmaster_start_time()"
TL_LSN = ("CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 "
"ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name("
"pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END, " # master timeline
"CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 "
"ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), '0/0')::bigint END, " # write_lsn
"pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint, "
"pg_catalog.pg_{0}_{1}_diff(COALESCE(pg_catalog.pg_last_{0}_receive_{1}(), '0/0'), '0/0')::bigint, "
"pg_catalog.pg_is_in_recovery() AND pg_catalog.pg_is_{0}_replay_paused()")
def __init__(self, config):
self.name = config['name']
self.scope = config['scope']
self._data_dir = config['data_dir']
self._database = config.get('database', 'postgres')
self._version_file = os.path.join(self._data_dir, 'PG_VERSION')
self._pg_control = os.path.join(self._data_dir, 'global', 'pg_control')
self._major_version = self.get_major_version()
self._state_lock = Lock()
self.set_state('stopped')
self._pending_restart = False
self._connection = Connection()
self.config = ConfigHandler(self, config)
self.config.check_directories()
self._bin_dir = config.get('bin_dir') or ''
self.bootstrap = Bootstrap(self)
self.bootstrapping = False
self.__thread_ident = current_thread().ident
self.slots_handler = SlotsHandler(self)
self._callback_executor = CallbackExecutor()
self.__cb_called = False
self.__cb_pending = None
self.cancellable = CancellableSubprocess()
self._sysid = None
self.retry = Retry(max_tries=-1, deadline=config['retry_timeout']/2.0, max_delay=1,
retry_exceptions=PostgresConnectionException)
# Retry 'pg_is_in_recovery()' only once
self._is_leader_retry = Retry(max_tries=1, deadline=config['retry_timeout']/2.0, max_delay=1,
retry_exceptions=PostgresConnectionException)
self._role_lock = Lock()
self.set_role(self.get_postgres_role_from_data_directory())
self._state_entry_timestamp = None
self._cluster_info_state = {}
self._cached_replica_timeline = None
# Last known running process
self._postmaster_proc = None
if self.is_running():
self.set_state('running')
self.set_role('master' if self.is_leader() else 'replica')
self.config.write_postgresql_conf() # we are "joining" already running postgres
hba_saved = self.config.replace_pg_hba()
ident_saved = self.config.replace_pg_ident()
if hba_saved or ident_saved:
self.reload()
elif self.role == 'master':
self.set_role('demoted')
@property
def create_replica_methods(self):
return self.config.get('create_replica_methods', []) or self.config.get('create_replica_method', [])
@property
def major_version(self):
return self._major_version
@property
def database(self):
return self._database
@property
def data_dir(self):
return self._data_dir
@property
def callback(self):
return self.config.get('callbacks') or {}
@property
def wal_dir(self):
return os.path.join(self._data_dir, 'pg_' + self.wal_name)
@property
def wal_name(self):
return 'wal' if self._major_version >= 100000 else 'xlog'
@property
def lsn_name(self):
return 'lsn' if self._major_version >= 100000 else 'location'
@property
def cluster_info_query(self):
if self._major_version >= 90600:
extra = (", CASE WHEN latest_end_lsn IS NULL THEN NULL ELSE received_tli END,"
" slot_name, conninfo FROM pg_catalog.pg_stat_get_wal_receiver()")
if self.role == 'standby_leader':
extra = "timeline_id" + extra + ", pg_catalog.pg_control_checkpoint()"
else:
extra = "0" + extra
else:
extra = "0, NULL, NULL, NULL"
return ("SELECT " + self.TL_LSN + ", {2}").format(self.wal_name, self.lsn_name, extra)
def _version_file_exists(self):
return not self.data_directory_empty() and os.path.isfile(self._version_file)
def get_major_version(self):
if self._version_file_exists():
try:
with open(self._version_file) as f:
return postgres_major_version_to_int(f.read().strip())
except Exception:
logger.exception('Failed to read PG_VERSION from %s', self._data_dir)
return 0
def pgcommand(self, cmd):
"""Returns path to the specified PostgreSQL command"""
return os.path.join(self._bin_dir, cmd)
def pg_ctl(self, cmd, *args, **kwargs):
"""Builds and executes pg_ctl command
:returns: `!True` when return_code == 0, otherwise `!False`"""
pg_ctl = [self.pgcommand('pg_ctl'), cmd]
return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0
def pg_isready(self):
"""Runs pg_isready to see if PostgreSQL is accepting connections.
        :returns: STATE_RUNNING if PostgreSQL is up, STATE_REJECT while it is starting up,
        STATE_NO_RESPONSE if it is not up, and STATE_UNKNOWN otherwise."""
r = self.config.local_connect_kwargs
cmd = [self.pgcommand('pg_isready'), '-p', r['port'], '-d', self._database]
# Host is not set if we are connecting via default unix socket
if 'host' in r:
cmd.extend(['-h', r['host']])
# We only need the username because pg_isready does not try to authenticate
if 'user' in r:
cmd.extend(['-U', r['user']])
ret = subprocess.call(cmd)
return_codes = {0: STATE_RUNNING,
1: STATE_REJECT,
2: STATE_NO_RESPONSE,
3: STATE_UNKNOWN}
return return_codes.get(ret, STATE_UNKNOWN)
def reload_config(self, config, sighup=False):
self.config.reload_config(config, sighup)
self._is_leader_retry.deadline = self.retry.deadline = config['retry_timeout']/2.0
@property
def pending_restart(self):
return self._pending_restart
def set_pending_restart(self, value):
self._pending_restart = value
@property
def sysid(self):
if not self._sysid and not self.bootstrapping:
data = self.controldata()
self._sysid = data.get('Database system identifier', "")
return self._sysid
def get_postgres_role_from_data_directory(self):
if self.data_directory_empty() or not self.controldata():
return 'uninitialized'
elif self.config.recovery_conf_exists():
return 'replica'
else:
return 'master'
@property
def server_version(self):
return self._connection.server_version
def connection(self):
return self._connection.get()
def set_connection_kwargs(self, kwargs):
self._connection.set_conn_kwargs(kwargs)
def _query(self, sql, *params):
"""We are always using the same cursor, therefore this method is not thread-safe!!!
        You can call it from different threads only if you are holding the explicit `AsyncExecutor` lock,
        because the main thread is always holding this lock when running the HA cycle."""
cursor = None
try:
cursor = self._connection.cursor()
cursor.execute(sql, params)
return cursor
except psycopg2.Error as e:
if cursor and cursor.connection.closed == 0:
                # When connected via unix socket, psycopg2 can't recognize 'connection lost'
                # and leaves `_cursor_holder.connection.closed == 0`, but psycopg2.OperationalError
                # is still raised (which is correct). It doesn't make sense to continue with the existing
# connection and we will close it, to avoid its reuse by the `cursor` method.
if isinstance(e, psycopg2.OperationalError):
self._connection.close()
else:
raise e
if self.state == 'restarting':
raise RetryFailedError('cluster is being restarted')
raise PostgresConnectionException('connection problems')
def query(self, sql, *args, **kwargs):
if not kwargs.get('retry', True):
return self._query(sql, *args)
try:
return self.retry(self._query, sql, *args)
except RetryFailedError as e:
raise PostgresConnectionException(str(e))
def pg_control_exists(self):
return os.path.isfile(self._pg_control)
def data_directory_empty(self):
if self.pg_control_exists():
return False
return data_directory_is_empty(self._data_dir)
def replica_method_options(self, method):
return deepcopy(self.config.get(method, {}))
def replica_method_can_work_without_replication_connection(self, method):
return method != 'basebackup' and self.replica_method_options(method).get('no_master')
def can_create_replica_without_replication_connection(self, replica_methods=None):
""" go through the replication methods to see if there are ones
that does not require a working replication connection.
"""
if replica_methods is None:
replica_methods = self.create_replica_methods
return any(self.replica_method_can_work_without_replication_connection(m) for m in replica_methods)
def reset_cluster_info_state(self):
self._cluster_info_state = {}
def _cluster_info_state_get(self, name):
if not self._cluster_info_state:
try:
result = self._is_leader_retry(self._query, self.cluster_info_query).fetchone()
self._cluster_info_state = dict(zip(['timeline', 'wal_position', 'replayed_location',
'received_location', 'replay_paused', 'pg_control_timeline',
'received_tli', 'slot_name', 'conninfo'], result))
except RetryFailedError as e: # SELECT failed two times
self._cluster_info_state = {'error': str(e)}
if not self.is_starting() and self.pg_isready() == STATE_REJECT:
self.set_state('starting')
if 'error' in self._cluster_info_state:
raise PostgresConnectionException(self._cluster_info_state['error'])
return self._cluster_info_state.get(name)
def replayed_location(self):
return self._cluster_info_state_get('replayed_location')
def received_location(self):
return self._cluster_info_state_get('received_location')
def primary_slot_name(self):
return self._cluster_info_state_get('slot_name')
def primary_conninfo(self):
return self._cluster_info_state_get('conninfo')
def received_timeline(self):
return self._cluster_info_state_get('received_tli')
def is_leader(self):
return bool(self._cluster_info_state_get('timeline'))
def pg_control_timeline(self):
try:
return int(self.controldata().get("Latest checkpoint's TimeLineID"))
except (TypeError, ValueError):
logger.exception('Failed to parse timeline from pg_controldata output')
def latest_checkpoint_location(self):
"""Returns checkpoint location for the cleanly shut down primary"""
data = self.controldata()
lsn = data.get('Latest checkpoint location')
if data.get('Database cluster state') == 'shut down' and lsn:
try:
return str(parse_lsn(lsn))
except (IndexError, ValueError) as e:
logger.error('Exception when parsing lsn %s: %r', lsn, e)
def is_running(self):
"""Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process
is running updates the cached process based on pid file."""
if self._postmaster_proc:
if self._postmaster_proc.is_running():
return self._postmaster_proc
self._postmaster_proc = None
            # we noticed that postgres was restarted, force a resync of replication slots
self.slots_handler.schedule()
self._postmaster_proc = PostmasterProcess.from_pidfile(self._data_dir)
return self._postmaster_proc
@property
def cb_called(self):
return self.__cb_called
def call_nowait(self, cb_name):
""" pick a callback command and call it without waiting for it to finish """
if self.bootstrapping:
return
if cb_name in (ACTION_ON_START, ACTION_ON_STOP, ACTION_ON_RESTART, ACTION_ON_ROLE_CHANGE):
self.__cb_called = True
if self.callback and cb_name in self.callback:
cmd = self.callback[cb_name]
try:
cmd = shlex.split(self.callback[cb_name]) + [cb_name, self.role, self.scope]
self._callback_executor.call(cmd)
except Exception:
logger.exception('callback %s %s %s %s failed', cmd, cb_name, self.role, self.scope)
@property
def role(self):
with self._role_lock:
return self._role
def set_role(self, value):
with self._role_lock:
self._role = value
@property
def state(self):
with self._state_lock:
return self._state
def set_state(self, value):
with self._state_lock:
self._state = value
self._state_entry_timestamp = time.time()
def time_in_state(self):
return time.time() - self._state_entry_timestamp
def is_starting(self):
return self.state == 'starting'
def wait_for_port_open(self, postmaster, timeout):
"""Waits until PostgreSQL opens ports."""
for _ in polling_loop(timeout):
if self.cancellable.is_cancelled:
return False
if not postmaster.is_running():
logger.error('postmaster is not running')
self.set_state('start failed')
return False
isready = self.pg_isready()
if isready != STATE_NO_RESPONSE:
if isready not in [STATE_REJECT, STATE_RUNNING]:
logger.warning("Can't determine PostgreSQL startup status, assuming running")
return True
logger.warning("Timed out waiting for PostgreSQL to start")
return False
def start(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed"""
# make sure we close all connections established against
# the former node, otherwise, we might get a stalled one
# after kill -9, which would report incorrect data to
# patroni.
self._connection.close()
if self.is_running():
logger.error('Cannot start PostgreSQL because one is already running.')
self.set_state('starting')
return True
if not block_callbacks:
self.__cb_pending = ACTION_ON_START
self.set_role(role or self.get_postgres_role_from_data_directory())
self.set_state('starting')
self._pending_restart = False
try:
if not self._major_version:
self.configure_server_parameters()
configuration = self.config.effective_configuration
except Exception:
return None
self.config.check_directories()
self.config.write_postgresql_conf(configuration)
self.config.resolve_connection_addresses()
self.config.replace_pg_hba()
self.config.replace_pg_ident()
options = ['--{0}={1}'.format(p, configuration[p]) for p in self.config.CMDLINE_OPTIONS
if p in configuration and p not in ('wal_keep_segments', 'wal_keep_size')]
if self.cancellable.is_cancelled:
return False
with task or null_context():
if task and task.is_cancelled:
logger.info("PostgreSQL start cancelled.")
return False
self._postmaster_proc = PostmasterProcess.start(self.pgcommand('postgres'),
self._data_dir,
self.config.postgresql_conf,
options)
if task:
task.complete(self._postmaster_proc)
start_timeout = timeout
if not start_timeout:
try:
start_timeout = float(self.config.get('pg_ctl_timeout', 60))
except ValueError:
start_timeout = 60
# We want postmaster to open ports before we continue
if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout):
return False
ret = self.wait_for_startup(start_timeout)
if ret is not None:
return ret
elif timeout is not None:
return False
else:
return None
def checkpoint(self, connect_kwargs=None, timeout=None):
check_not_is_in_recovery = connect_kwargs is not None
connect_kwargs = connect_kwargs or self.config.local_connect_kwargs
for p in ['connect_timeout', 'options']:
connect_kwargs.pop(p, None)
if timeout:
connect_kwargs['connect_timeout'] = timeout
try:
with get_connection_cursor(**connect_kwargs) as cur:
cur.execute("SET statement_timeout = 0")
if check_not_is_in_recovery:
cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
if cur.fetchone()[0]:
return 'is_in_recovery=true'
return cur.execute('CHECKPOINT')
except psycopg2.Error:
logger.exception('Exception during CHECKPOINT')
            return 'not accessible or not healthy'
def stop(self, mode='fast', block_callbacks=False, checkpoint=None, on_safepoint=None, stop_timeout=None):
"""Stop PostgreSQL
Supports a callback when a safepoint is reached. A safepoint is when no user backend can return a successful
commit to users. Currently this means we wait for user backends to close. But in the future alternate mechanisms
could be added.
:param on_safepoint: This callback is called when no user backends are running.
"""
if checkpoint is None:
checkpoint = False if mode == 'immediate' else True
success, pg_signaled = self._do_stop(mode, block_callbacks, checkpoint, on_safepoint, stop_timeout)
if success:
# block_callbacks is used during restart to avoid
# running start/stop callbacks in addition to restart ones
if not block_callbacks:
self.set_state('stopped')
if pg_signaled:
self.call_nowait(ACTION_ON_STOP)
else:
logger.warning('pg_ctl stop failed')
self.set_state('stop failed')
return success
def _do_stop(self, mode, block_callbacks, checkpoint, on_safepoint, stop_timeout):
postmaster = self.is_running()
if not postmaster:
if on_safepoint:
on_safepoint()
return True, False
if checkpoint and not self.is_starting():
self.checkpoint(timeout=stop_timeout)
if not block_callbacks:
self.set_state('stopping')
# Send signal to postmaster to stop
success = postmaster.signal_stop(mode, self.pgcommand('pg_ctl'))
if success is not None:
if success and on_safepoint:
on_safepoint()
return success, True
# We can skip safepoint detection if we don't have a callback
if on_safepoint:
# Wait for our connection to terminate so we can be sure that no new connections are being initiated
self._wait_for_connection_close(postmaster)
postmaster.wait_for_user_backends_to_close()
on_safepoint()
try:
postmaster.wait(timeout=stop_timeout)
except TimeoutExpired:
logger.warning("Timeout during postmaster stop, aborting Postgres.")
if not self.terminate_postmaster(postmaster, mode, stop_timeout):
postmaster.wait()
return True, True
def terminate_postmaster(self, postmaster, mode, stop_timeout):
if mode in ['fast', 'smart']:
try:
success = postmaster.signal_stop('immediate', self.pgcommand('pg_ctl'))
if success:
return True
postmaster.wait(timeout=stop_timeout)
return True
except TimeoutExpired:
pass
logger.warning("Sending SIGKILL to Postmaster and its children")
return postmaster.signal_kill()
def terminate_starting_postmaster(self, postmaster):
"""Terminates a postmaster that has not yet opened ports or possibly even written a pid file. Blocks
until the process goes away."""
postmaster.signal_stop('immediate', self.pgcommand('pg_ctl'))
postmaster.wait()
def _wait_for_connection_close(self, postmaster):
try:
with self.connection().cursor() as cur:
while postmaster.is_running(): # Need a timeout here?
cur.execute("SELECT 1")
time.sleep(STOP_POLLING_INTERVAL)
except psycopg2.Error:
pass
def reload(self, block_callbacks=False):
ret = self.pg_ctl('reload')
if ret and not block_callbacks:
self.call_nowait(ACTION_ON_RELOAD)
return ret
def check_for_startup(self):
"""Checks PostgreSQL status and returns if PostgreSQL is in the middle of startup."""
return self.is_starting() and not self.check_startup_state_changed()
def check_startup_state_changed(self):
"""Checks if PostgreSQL has completed starting up or failed or still starting.
Should only be called when state == 'starting'
:returns: True if state was changed from 'starting'
"""
ready = self.pg_isready()
if ready == STATE_REJECT:
return False
elif ready == STATE_NO_RESPONSE:
ret = not self.is_running()
if ret:
self.set_state('start failed')
self.slots_handler.schedule(False) # TODO: can remove this?
self.config.save_configuration_files(True) # TODO: maybe remove this?
return ret
else:
if ready != STATE_RUNNING:
# Bad configuration or unexpected OS error. No idea of PostgreSQL status.
# Let the main loop of run cycle clean up the mess.
logger.warning("%s status returned from pg_isready",
"Unknown" if ready == STATE_UNKNOWN else "Invalid")
self.set_state('running')
self.slots_handler.schedule()
self.config.save_configuration_files(True)
# TODO: __cb_pending can be None here after PostgreSQL restarts on its own. Do we want to call the callback?
# Previously we didn't even notice.
action = self.__cb_pending or ACTION_ON_START
self.call_nowait(action)
self.__cb_pending = None
return True
def wait_for_startup(self, timeout=None):
"""Waits for PostgreSQL startup to complete or fail.
:returns: True if start was successful, False otherwise"""
if not self.is_starting():
# Should not happen
logger.warning("wait_for_startup() called when not in starting state")
while not self.check_startup_state_changed():
if self.cancellable.is_cancelled or timeout and self.time_in_state() > timeout:
return None
time.sleep(1)
return self.state == 'running'
def restart(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Restarts PostgreSQL.
When timeout parameter is set the call will block either until PostgreSQL has started, failed to start or
timeout arrives.
:returns: True when restart was successful and timeout did not expire when waiting.
"""
self.set_state('restarting')
if not block_callbacks:
self.__cb_pending = ACTION_ON_RESTART
ret = self.stop(block_callbacks=True) and self.start(timeout, task, True, role)
if not ret and not self.is_starting():
self.set_state('restart failed ({0})'.format(self.state))
return ret
def is_healthy(self):
if not self.is_running():
logger.warning('Postgresql is not running.')
return False
return True
def get_guc_value(self, name):
cmd = [self.pgcommand('postgres'), '-D', self._data_dir, '-C', name]
try:
data = subprocess.check_output(cmd)
if data:
return data.decode('utf-8').strip()
except Exception as e:
logger.error('Failed to execute %s: %r', cmd, e)
def controldata(self):
""" return the contents of pg_controldata, or non-True value if pg_controldata call failed """
# Don't try to call pg_controldata during backup restore
if self._version_file_exists() and self.state != 'creating replica':
try:
env = os.environ.copy()
env.update(LANG='C', LC_ALL='C')
data = subprocess.check_output([self.pgcommand('pg_controldata'), self._data_dir], env=env)
if data:
data = filter(lambda e: ':' in e, data.decode('utf-8').splitlines())
                    # pg_controldata output depends on the major version. Some of the parameters are prefixed by 'Current '
return {k.replace('Current ', '', 1): v.strip() for k, v in map(lambda e: e.split(':', 1), data)}
except subprocess.CalledProcessError:
logger.exception("Error when calling pg_controldata")
return {}
@contextmanager
def get_replication_connection_cursor(self, host='localhost', port=5432, **kwargs):
conn_kwargs = self.config.replication.copy()
conn_kwargs.update(host=host, port=int(port) if port else None, user=conn_kwargs.pop('username'),
connect_timeout=3, replication=1, options='-c statement_timeout=2000')
with get_connection_cursor(**conn_kwargs) as cur:
yield cur
def get_replica_timeline(self):
try:
with self.get_replication_connection_cursor(**self.config.local_replication_address) as cur:
cur.execute('IDENTIFY_SYSTEM')
return cur.fetchone()[1]
except Exception:
logger.exception('Can not fetch local timeline and lsn from replication connection')
def replica_cached_timeline(self, master_timeline):
if not self._cached_replica_timeline or not master_timeline or self._cached_replica_timeline != master_timeline:
self._cached_replica_timeline = self.get_replica_timeline()
return self._cached_replica_timeline
def get_master_timeline(self):
return self._cluster_info_state_get('timeline')
def get_history(self, timeline):
history_path = os.path.join(self.wal_dir, '{0:08X}.history'.format(timeline))
history_mtime = mtime(history_path)
if history_mtime:
try:
with open(history_path, 'r') as f:
history = f.read()
history = list(parse_history(history))
if history[-1][0] == timeline - 1:
history_mtime = datetime.fromtimestamp(history_mtime).replace(tzinfo=tz.tzlocal())
history[-1].append(history_mtime.isoformat())
return history
except Exception:
logger.exception('Failed to read and parse %s', (history_path,))
def follow(self, member, role='replica', timeout=None, do_reload=False):
recovery_params = self.config.build_recovery_params(member)
self.config.write_recovery_conf(recovery_params)
        # When we are demoting the master or standby_leader to a replica, or promoting a replica to a standby_leader,
# and we know for sure that postgres was already running before, we will only execute on_role_change
# callback and prevent execution of on_restart/on_start callback.
# If the role remains the same (replica or standby_leader), we will execute on_start or on_restart
change_role = self.cb_called and (self.role in ('master', 'demoted') or
not {'standby_leader', 'replica'} - {self.role, role})
if change_role:
self.__cb_pending = ACTION_NOOP
if self.is_running():
if do_reload:
self.config.write_postgresql_conf()
if self.reload(block_callbacks=change_role) and change_role:
self.set_role(role)
else:
self.restart(block_callbacks=change_role, role=role)
else:
self.start(timeout=timeout, block_callbacks=change_role, role=role)
if change_role:
# TODO: postpone this until start completes, or maybe do even earlier
self.call_nowait(ACTION_ON_ROLE_CHANGE)
return True
def _wait_promote(self, wait_seconds):
for _ in polling_loop(wait_seconds):
data = self.controldata()
if data.get('Database cluster state') == 'in production':
return True
def _pre_promote(self):
"""
Runs a fencing script after the leader lock is acquired but before the replica is promoted.
If the script exits with a non-zero code, promotion does not happen and the leader key is removed from DCS.
"""
cmd = self.config.get('pre_promote')
if not cmd:
return True
ret = self.cancellable.call(shlex.split(cmd))
if ret is not None:
logger.info('pre_promote script `%s` exited with %s', cmd, ret)
return ret == 0
def promote(self, wait_seconds, task, on_success=None, access_is_restricted=False):
if self.role == 'master':
return True
ret = self._pre_promote()
with task:
if task.is_cancelled:
return False
task.complete(ret)
if ret is False:
return False
if self.cancellable.is_cancelled:
logger.info("PostgreSQL promote cancelled.")
return False
ret = self.pg_ctl('promote', '-W')
if ret:
self.set_role('master')
if on_success is not None:
on_success()
if not access_is_restricted:
self.call_nowait(ACTION_ON_ROLE_CHANGE)
ret = self._wait_promote(wait_seconds)
return ret
@staticmethod
def _wal_position(is_leader, wal_position, received_location, replayed_location):
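        # On the leader report the current write position; on a replica report the
        # furthest position known locally (the max of received and replayed LSN).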
return wal_position if is_leader else max(received_location or 0, replayed_location or 0)
def timeline_wal_position(self):
# This method could be called from different threads (simultaneously with some other `_query` calls).
# If it is called not from main thread we will create a new cursor to execute statement.
if current_thread().ident == self.__thread_ident:
timeline = self._cluster_info_state_get('timeline')
wal_position = self._cluster_info_state_get('wal_position')
replayed_location = self.replayed_location()
received_location = self.received_location()
pg_control_timeline = self._cluster_info_state_get('pg_control_timeline')
else:
with self.connection().cursor() as cursor:
cursor.execute(self.cluster_info_query)
(timeline, wal_position, replayed_location,
received_location, _, pg_control_timeline) = cursor.fetchone()[:6]
wal_position = self._wal_position(timeline, wal_position, received_location, replayed_location)
return (timeline, wal_position, pg_control_timeline)
def postmaster_start_time(self):
try:
query = "SELECT " + self.POSTMASTER_START_TIME
if current_thread().ident == self.__thread_ident:
return self.query(query).fetchone()[0].isoformat(sep=' ')
with self.connection().cursor() as cursor:
cursor.execute(query)
return cursor.fetchone()[0].isoformat(sep=' ')
except psycopg2.Error:
return None
def last_operation(self):
return str(self._wal_position(self.is_leader(), self._cluster_info_state_get('wal_position'),
self.received_location(), self.replayed_location()))
def configure_server_parameters(self):
self._major_version = self.get_major_version()
self.config.setup_server_parameters()
return True
def pg_wal_realpath(self):
"""Returns a dict containing the symlink (key) and target (value) for the wal directory"""
links = {}
for pg_wal_dir in ('pg_xlog', 'pg_wal'):
pg_wal_path = os.path.join(self._data_dir, pg_wal_dir)
if os.path.exists(pg_wal_path) and os.path.islink(pg_wal_path):
pg_wal_realpath = os.path.realpath(pg_wal_path)
links[pg_wal_path] = pg_wal_realpath
return links
def pg_tblspc_realpaths(self):
"""Returns a dict containing the symlink (key) and target (values) for the tablespaces"""
links = {}
pg_tblsp_dir = os.path.join(self._data_dir, 'pg_tblspc')
if os.path.exists(pg_tblsp_dir):
for tsdn in os.listdir(pg_tblsp_dir):
pg_tsp_path = os.path.join(pg_tblsp_dir, tsdn)
if parse_int(tsdn) and os.path.islink(pg_tsp_path):
pg_tsp_rpath = os.path.realpath(pg_tsp_path)
links[pg_tsp_path] = pg_tsp_rpath
return links
def move_data_directory(self):
if os.path.isdir(self._data_dir) and not self.is_running():
try:
postfix = time.strftime('%Y-%m-%d-%H-%M-%S')
# let's see if the wal directory is a symlink, in this case we
# should move the target
for (source, pg_wal_realpath) in self.pg_wal_realpath().items():
logger.info('renaming WAL directory and updating symlink: %s', pg_wal_realpath)
new_name = '{0}_{1}'.format(pg_wal_realpath, postfix)
os.rename(pg_wal_realpath, new_name)
os.unlink(source)
os.symlink(new_name, source)
# Move user defined tablespace directory
for (source, pg_tsp_rpath) in self.pg_tblspc_realpaths().items():
logger.info('renaming user defined tablespace directory and updating symlink: %s', pg_tsp_rpath)
new_name = '{0}_{1}'.format(pg_tsp_rpath, postfix)
os.rename(pg_tsp_rpath, new_name)
os.unlink(source)
os.symlink(new_name, source)
new_name = '{0}_{1}'.format(self._data_dir, postfix)
logger.info('renaming data directory to %s', new_name)
os.rename(self._data_dir, new_name)
except OSError:
logger.exception("Could not rename data directory %s", self._data_dir)
def remove_data_directory(self):
self.set_role('uninitialized')
logger.info('Removing data directory: %s', self._data_dir)
try:
if os.path.islink(self._data_dir):
os.unlink(self._data_dir)
elif not os.path.exists(self._data_dir):
return
elif os.path.isfile(self._data_dir):
os.remove(self._data_dir)
elif os.path.isdir(self._data_dir):
# let's see if wal directory is a symlink, in this case we
# should clean the target
for pg_wal_realpath in self.pg_wal_realpath().values():
logger.info('Removing WAL directory: %s', pg_wal_realpath)
shutil.rmtree(pg_wal_realpath)
# Remove user defined tablespace directories
for pg_tsp_rpath in self.pg_tblspc_realpaths().values():
logger.info('Removing user defined tablespace directory: %s', pg_tsp_rpath)
shutil.rmtree(pg_tsp_rpath, ignore_errors=True)
shutil.rmtree(self._data_dir)
except (IOError, OSError):
logger.exception('Could not remove data directory %s', self._data_dir)
self.move_data_directory()
def _get_synchronous_commit_param(self):
return self.query("SHOW synchronous_commit").fetchone()[0]
def pick_synchronous_standby(self, cluster, sync_node_count=1, sync_node_maxlag=-1):
"""Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
        The sync_node_maxlag (maximum_lag_on_syncnode) parameter helps swap out an unhealthy sync replica
        in case it stops responding (or hangs). Set the value high enough so it won't unnecessarily swap
        sync standbys during high load. Any value less than or equal to 0 keeps the behavior backward
        compatible and will never swap. Note that it also will not swap sync standbys when all replicas are hung.
        :returns: tuple of candidates list and synchronous standby list.
"""
if self._major_version < 90600:
sync_node_count = 1
members = {m.name.lower(): m for m in cluster.members}
candidates = []
sync_nodes = []
replica_list = []
# Pick candidates based on who has higher replay/remote_write/flush lsn.
sync_commit_par = self._get_synchronous_commit_param()
sort_col = {'remote_apply': 'replay', 'remote_write': 'write'}.get(sync_commit_par, 'flush')
# pg_stat_replication.sync_state has 4 possible states - async, potential, quorum, sync.
# Sort clause "ORDER BY sync_state DESC" is to get the result in required order and to keep
# the result consistent in case if a synchronous standby member is slowed down OR async node
# receiving changes faster than the sync member (very rare but possible). Such cases would
# trigger sync standby member swapping frequently and the sort on sync_state desc should
# help in keeping the query result consistent.
for app_name, sync_state, replica_lsn in self.query(
"SELECT pg_catalog.lower(application_name), sync_state, pg_{2}_{1}_diff({0}_{1}, '0/0')::bigint"
" FROM pg_catalog.pg_stat_replication"
" WHERE state = 'streaming'"
" ORDER BY sync_state DESC, {0}_{1} DESC".format(sort_col, self.lsn_name, self.wal_name)):
member = members.get(app_name)
if member and not member.tags.get('nosync', False):
replica_list.append((member.name, sync_state, replica_lsn))
max_lsn = max(replica_list, key=lambda x: x[2])[2] if len(replica_list) > 1 else int(str(self.last_operation()))
for app_name, sync_state, replica_lsn in replica_list:
if sync_node_maxlag <= 0 or max_lsn - replica_lsn <= sync_node_maxlag:
candidates.append(app_name)
if sync_state == 'sync':
sync_nodes.append(app_name)
if len(candidates) >= sync_node_count:
break
return candidates, sync_nodes
def schedule_sanity_checks_after_pause(self):
"""
After coming out of pause we have to:
1. configure server parameters if necessary
2. sync replication slots, because it might happen that slots were removed
3. get new 'Database system identifier' to make sure that it wasn't changed
"""
if not self._major_version:
self.configure_server_parameters()
self.slots_handler.schedule()
self._sysid = None
| 41.851563 | 120 | 0.62983 |
aced2dbd67ea36d590e8552083392cc9a5787710 | 2,963 | py | Python | LeetCode/top-interview-questions-medium/2-set-matrix-zeroes.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | LeetCode/top-interview-questions-medium/2-set-matrix-zeroes.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | LeetCode/top-interview-questions-medium/2-set-matrix-zeroes.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | """
URL: https://leetcode.com/explore/interview/card/top-interview-questions-medium/103/array-and-strings/777/
Problem Statement:
------------------
Given an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's, and return the matrix.
You must do it in place.
Example 1:
----------------
1 1 1 1 0 1
1 0 1 -> 0 0 0
1 1 1 1 0 1
----------------
Input: matrix = [[1,1,1],[1,0,1],[1,1,1]]
Output: [[1,0,1],[0,0,0],[1,0,1]]
Example 2:
--------------------
0 1 2 0 0 0 0 0
3 4 5 2 -> 0 4 5 0
1 3 1 5 0 3 1 0
--------------------
Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]
Constraints:
m == matrix.length
n == matrix[0].length
1 <= m, n <= 200
-2^31 <= matrix[i][j] <= 2^31 - 1
Follow up:
A straightforward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
"""
from typing import List
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
# """Approach 1: Using additional memory"""
# m, n = len(matrix), len(matrix[0])
# zeroes = set()
# for i in range(m):
# for j in range(n):
# if matrix[i][j] == 0:
# zeroes.add((i, j))
# for tuple in zeroes:
# i, j = tuple
# # top
# for ii in range(i-1, -1, -1):
# matrix[ii][j] = 0
# # bottom
# for ii in range(i+1, m):
# matrix[ii][j] = 0
# # left
# for jj in range(j-1, -1, -1):
# matrix[i][jj] = 0
# # right
# for jj in range(j+1, n):
# matrix[i][jj] = 0
"""Approach 2: Using constant memory"""
m, n = len(matrix), len(matrix[0])
f_col = False
for i in range(m):
if matrix[i][0] == 0:
f_col = True
for j in range(1, n):
if matrix[i][j] == 0:
matrix[i][0] = 0
matrix[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if 0 in [matrix[i][0], matrix[0][j]]:
matrix[i][j] = 0
if matrix[0][0] == 0:
for j in range(n):
matrix[0][j] = 0
if f_col:
for i in range(m):
matrix[i][0] = 0
"""
Testing:
--------
"""
input_list = [
[[[1, 1, 1], [1, 0, 1], [1, 1, 1]]],
[[[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]]],
]
output_list = [
[[1, 0, 1], [0, 0, 0], [1, 0, 1]],
[[0, 0, 0, 0], [0, 4, 5, 0], [0, 3, 1, 0]],
]
for i in range(len(input_list)):
Solution().setZeroes(*input_list[i])
print(input_list[i][0])
assert input_list[i][0] == output_list[i]
| 26.936364 | 118 | 0.452244 |
aced2e669338882619379d6cea8db9c2be6f3fdb | 1,933 | py | Python | ctrip.py | huobingli/splider | a62f0553160531a0735b249b0dc49747e9c821f9 | [
"MIT"
] | null | null | null | ctrip.py | huobingli/splider | a62f0553160531a0735b249b0dc49747e9c821f9 | [
"MIT"
] | null | null | null | ctrip.py | huobingli/splider | a62f0553160531a0735b249b0dc49747e9c821f9 | [
"MIT"
] | null | null | null | #coding:utf-8
import urllib.request
from lxml import etree
import json
import random
def get_json2(date,rk,CK,r):
    '''Fetch flight data from the constructed URL.'''
url= "http://flights.ctrip.com/domesticsearch/search/SearchFirstRouteFlights?DCity1=SHA&ACity1=SIA&SearchType=S&DDate1=%s&IsNearAirportRecommond=0&rk=%s&CK=%s&r=%s"%(date,rk,CK,r)
headers={'Host':"flights.ctrip.com",'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",'Referer':"http://flights.ctrip.com/booking/hrb-sha-day-1.html?ddate1=2017-04-29"}
headers['Referer']="http://flights.ctrip.com/booking/hrb-sha-day-1.html?ddate1=%s"%date
req=urllib.request.Request(url,headers=headers)
res=urllib.request.urlopen(req)
content=res.read()
    dict_content = json.loads(content.decode("gb2312"))
length = len(dict_content['fis'])
for i in range(length):
        if dict_content['fis'][i][u'lp'] < 600:
            # print price, departure time and arrival time of flights cheaper than 600
            print(dict_content['fis'][i][u'lp'],
                  dict_content['fis'][i][u'dt'],
                  dict_content['fis'][i][u'at'])
            # print(dict_content['fis'][i][u'dpbn'])
def get_parameter(date):
    '''Fetch the key request parameters (rk, CK, r).
    date: date string, e.g. 2016-05-13
    '''
url='http://flights.ctrip.com/booking/hrb-sha-day-1.html?ddate1=%s'%date
    res = urllib.request.urlopen(url).read()
tree=etree.HTML(res)
pp=tree.xpath('''//body/script[1]/text()''')[0].split()
CK_original=pp[3][-34:-2]
CK=CK_original[0:5]+CK_original[13]+CK_original[5:13]+CK_original[14:]
rk=pp[-1][18:24]
num=random.random()*10
num_str="%.15f"%num
rk=num_str+rk
r=pp[-1][27:len(pp[-1])-3]
return rk,CK,r
if __name__=='__main__':
dates=['2017-04-29','2017-04-30','2017-05-01','2017-05-02']
for date in dates:
rk,CK,r=get_parameter(date)
get_json2(date,rk,CK,r)
print("-----") | 34.517857 | 211 | 0.638903 |
aced2eb63ddaa6c6567012e6e75719fffb21df87 | 689 | py | Python | dpaste/migrations/0002_auto_20170119_1038.py | jmoujaes/dpaste | 27d608e5da4b045ea112823ec8d271add42fd89d | [
"MIT"
] | null | null | null | dpaste/migrations/0002_auto_20170119_1038.py | jmoujaes/dpaste | 27d608e5da4b045ea112823ec8d271add42fd89d | [
"MIT"
] | null | null | null | dpaste/migrations/0002_auto_20170119_1038.py | jmoujaes/dpaste | 27d608e5da4b045ea112823ec8d271add42fd89d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-19 10:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dpaste', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='snippet',
name='level',
),
migrations.RemoveField(
model_name='snippet',
name='lft',
),
migrations.RemoveField(
model_name='snippet',
name='rght',
),
migrations.RemoveField(
model_name='snippet',
name='tree_id',
),
]
| 21.53125 | 48 | 0.535559 |
aced2fdd8e0a11a7118a9f2a762bc84b91313b65 | 2,156 | py | Python | vnpy_slim/tqsdk_download.py | xepoo/vnpy | 355fff28879d17734725a6e40df8b4640a7c204e | [
"MIT"
] | null | null | null | vnpy_slim/tqsdk_download.py | xepoo/vnpy | 355fff28879d17734725a6e40df8b4640a7c204e | [
"MIT"
] | null | null | null | vnpy_slim/tqsdk_download.py | xepoo/vnpy | 355fff28879d17734725a6e40df8b4640a7c204e | [
"MIT"
] | null | null | null | from datetime import datetime
from contextlib import closing
from tqsdk import TqApi, TqSim
from tqsdk.tools import DataDownloader
api = TqApi(TqSim())
download_tasks = {}
# Download SR901 daily bars from 2018-01-01 to 2018-09-01
# download_tasks["SR_daily"] = DataDownloader(api, symbol_list="CZCE.SR901", dur_sec=24 * 60 * 60,
# start_dt=date(2018, 1, 1), end_dt=date(2018, 9, 1),
# csv_file_name="SR901_daily.csv")
# Download 5-minute bars of the rb main continuous contract from 2017-01-01 to 2018-09-01
# download_tasks["rb_5min"] = DataDownloader(api, symbol_list="KQ.m@SHFE.rb", dur_sec=5 * 60,
# start_dt=date(2017, 1, 1), end_dt=date(2018, 9, 1),
# csv_file_name="rb_5min.csv")
# Download minute bars of cu1805, cu1807 and IC1803 from 2018-01-01 06:00 to 2018-06-01 16:00,
# with all data aligned to cu1805's timestamps.
# E.g. during cu1805's night trading session every IC1803 field is N/A.
# E.g. cu1805 does not trade between 13:00 and 13:30, so IC1803 bars in that window are skipped.
download_tasks["cu_min"] = DataDownloader(api, symbol_list=["KQ.i@SHFE.bu"], dur_sec=1,
start_dt=datetime(2017, 1, 1, 6, 0, 0), end_dt=datetime(2017, 6, 1, 6, 0, 0),
csv_file_name="bu_min.csv")
# Download T1809 order-book tick data from 2018-05-01 00:00 to 2018-06-01 00:00
# download_tasks["T_tick"] = DataDownloader(api, symbol_list=["SHFE.cu1801"], dur_sec=0,
# start_dt=datetime(2017, 1, 1), end_dt=datetime(2018, 1, 1),
# csv_file_name="SHFE.cu1801_tick.csv")
# Use the closing() context manager to make sure resources are released once the downloads finish
with closing(api):
while not all([v.is_finished() for v in download_tasks.values()]):
api.wait_update()
print("progress: ", {k: ("%.2f%%" % v.get_progress()) for k, v in download_tasks.items()})
# SHFE.cu1901 - SHFE (Shanghai Futures Exchange) cu1901 futures contract
# DCE.m1901 - DCE (Dalian Commodity Exchange) m1901 futures contract
# CZCE.SR901 - CZCE (Zhengzhou Commodity Exchange) SR901 futures contract
# CFFEX.IF1901 - CFFEX (China Financial Futures Exchange) IF1901 futures contract
#
# CZCE.SPD SR901&SR903 - CZCE SR901&SR903 calendar-spread contract
# DCE.SP a1709&a1801 - DCE a1709&a1801 calendar-spread contract
#
# DCE.m1807-C-2450 - DCE soybean meal option
#
# KQ.m@CFFEX.IF - main continuous contract of the CFFEX IF product
# KQ.i@SHFE.bu - index of the SHFE bu product
| 45.87234 | 119 | 0.598794 |
aced30231f868c6a8c6fcd2dea253e31b092c2d9 | 113,237 | py | Python | torch/_tensor_docs.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | 1 | 2021-10-06T18:29:18.000Z | 2021-10-06T18:29:18.000Z | torch/_tensor_docs.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | null | null | null | torch/_tensor_docs.py | sanchitintel/pytorch | 416f59308023b5d98f6ea4ecdd0bcd3829edb7a7 | [
"Intel"
] | 1 | 2019-05-10T15:48:19.000Z | 2019-05-10T15:48:19.000Z | """Adds docstrings to Tensor functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
from ._torch_docs import reproducibility_notes
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
common_args = parse_kwargs("""
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
""")
new_common_args = parse_kwargs("""
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
Default: if None, same :class:`torch.dtype` as this tensor.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, same :class:`torch.device` as this tensor.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
""")
add_docstr_all('new_tensor',
r"""
new_tensor(data, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
.. warning::
:func:`new_tensor` always copies :attr:`data`. If you have a Tensor
``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
or :func:`torch.Tensor.detach`.
If you have a numpy array and want to avoid a copy, use
:func:`torch.from_numpy`.
.. warning::
When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
data (array_like): The returned Tensor copies :attr:`data`.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.int8)
>>> data = [[0, 1], [2, 3]]
>>> tensor.new_tensor(data)
tensor([[ 0, 1],
[ 2, 3]], dtype=torch.int8)
""".format(**new_common_args))
add_docstr_all('new_full',
r"""
new_full(size, fill_value, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
fill_value (scalar): the number to fill the output tensor with.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.float64)
>>> tensor.new_full((3, 4), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64)
""".format(**new_common_args))
add_docstr_all('new_empty',
r"""
new_empty(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones(())
>>> tensor.new_empty((2, 3))
tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
[ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
""".format(**new_common_args))
add_docstr_all('new_empty_strided',
r"""
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.
Args:
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones(())
>>> tensor.new_empty_strided((2, 3), (3, 1))
tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
[ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
""".format(**new_common_args))
add_docstr_all('new_ones',
r"""
new_ones(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.int32)
>>> tensor.new_ones((2, 3))
tensor([[ 1, 1, 1],
[ 1, 1, 1]], dtype=torch.int32)
""".format(**new_common_args))
add_docstr_all('new_zeros',
r"""
new_zeros(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.float64)
>>> tensor.new_zeros((2, 3))
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=torch.float64)
""".format(**new_common_args))
add_docstr_all('abs',
r"""
abs() -> Tensor
See :func:`torch.abs`
""")
add_docstr_all('abs_',
r"""
abs_() -> Tensor
In-place version of :meth:`~Tensor.abs`
""")
add_docstr_all('absolute',
r"""
absolute() -> Tensor
Alias for :func:`abs`
""")
add_docstr_all('absolute_',
r"""
absolute_() -> Tensor
In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""")
add_docstr_all('acos',
r"""
acos() -> Tensor
See :func:`torch.acos`
""")
add_docstr_all('acos_',
r"""
acos_() -> Tensor
In-place version of :meth:`~Tensor.acos`
""")
add_docstr_all('arccos', r"""
arccos() -> Tensor
See :func:`torch.arccos`
""")
add_docstr_all('arccos_', r"""
arccos_() -> Tensor
In-place version of :meth:`~Tensor.arccos`
""")
add_docstr_all('acosh',
r"""
acosh() -> Tensor
See :func:`torch.acosh`
""")
add_docstr_all('acosh_',
r"""
acosh_() -> Tensor
In-place version of :meth:`~Tensor.acosh`
""")
add_docstr_all('arccosh', r"""
arccosh() -> Tensor
See :func:`torch.arccosh`
""")
add_docstr_all('arccosh_', r"""
arccosh_() -> Tensor
In-place version of :meth:`~Tensor.arccosh`
""")
add_docstr_all('add',
r"""
add(other, *, alpha=1) -> Tensor
Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.
When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor
See :func:`torch.add`
""")
add_docstr_all('add_',
r"""
add_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.add`
""")
add_docstr_all('addbmm',
r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addbmm`
""")
add_docstr_all('addbmm_',
r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addbmm`
""")
add_docstr_all('addcdiv',
r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor
See :func:`torch.addcdiv`
""")
add_docstr_all('addcdiv_',
r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor
In-place version of :meth:`~Tensor.addcdiv`
""")
add_docstr_all('addcmul',
r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor
See :func:`torch.addcmul`
""")
add_docstr_all('addcmul_',
r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor
In-place version of :meth:`~Tensor.addcmul`
""")
add_docstr_all('addmm',
r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addmm`
""")
add_docstr_all('addmm_',
r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addmm`
""")
add_docstr_all('addmv',
r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addmv`
""")
add_docstr_all('addmv_',
r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addmv`
""")
add_docstr_all('sspaddmm',
r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.sspaddmm`
""")
add_docstr_all('smm',
r"""
smm(mat) -> Tensor
See :func:`torch.smm`
""")
add_docstr_all('addr',
r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addr`
""")
add_docstr_all('addr_',
r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addr`
""")
add_docstr_all('align_as',
r"""
align_as(other) -> Tensor
Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.
This operation is useful for explicit broadcasting by names (see examples).
All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.
All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.
To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
Examples::
# Example 1: Applying a mask
>>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
>>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
>>> imgs.masked_fill_(mask.align_as(imgs), 0)
# Example 2: Applying a per-channel-scale
>>> def scale_channels(input, scale):
>>> scale = scale.refine_names('C')
>>> return input * scale.align_as(input)
>>> num_channels = 3
>>> scale = torch.randn(num_channels, names=('C',))
>>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
>>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
>>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
# scale_channels is agnostic to the dimension order of the input
>>> scale_channels(imgs, scale)
>>> scale_channels(more_imgs, scale)
>>> scale_channels(videos, scale)
.. warning::
The named tensor API is experimental and subject to change.
""")
add_docstr_all('all',
r"""
all(dim=None, keepdim=False) -> Tensor
See :func:`torch.all`
""")
add_docstr_all('allclose',
r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
See :func:`torch.allclose`
""")
add_docstr_all('angle',
r"""
angle() -> Tensor
See :func:`torch.angle`
""")
add_docstr_all('any',
r"""
any(dim=None, keepdim=False) -> Tensor
See :func:`torch.any`
""")
add_docstr_all('apply_',
r"""
apply_(callable) -> Tensor
Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.
.. note::
This function only works with CPU tensors and should not be used in code
sections that require high performance.
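Example (a minimal illustrative sketch)::
    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda x: x * 2)  # modifies t in place and returns it
    tensor([2., 4., 6.])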
""")
add_docstr_all('asin', r"""
asin() -> Tensor
See :func:`torch.asin`
""")
add_docstr_all('asin_',
r"""
asin_() -> Tensor
In-place version of :meth:`~Tensor.asin`
""")
add_docstr_all('arcsin', r"""
arcsin() -> Tensor
See :func:`torch.arcsin`
""")
add_docstr_all('arcsin_', r"""
arcsin_() -> Tensor
In-place version of :meth:`~Tensor.arcsin`
""")
add_docstr_all('asinh', r"""
asinh() -> Tensor
See :func:`torch.asinh`
""")
add_docstr_all('asinh_',
r"""
asinh_() -> Tensor
In-place version of :meth:`~Tensor.asinh`
""")
add_docstr_all('arcsinh', r"""
arcsinh() -> Tensor
See :func:`torch.arcsinh`
""")
add_docstr_all('arcsinh_', r"""
arcsinh_() -> Tensor
In-place version of :meth:`~Tensor.arcsinh`
""")
add_docstr_all('as_strided', r"""
as_strided(size, stride, storage_offset=0) -> Tensor
See :func:`torch.as_strided`
""")
add_docstr_all('atan', r"""
atan() -> Tensor
See :func:`torch.atan`
""")
add_docstr_all('atan_', r"""
atan_() -> Tensor
In-place version of :meth:`~Tensor.atan`
""")
add_docstr_all('arctan', r"""
arctan() -> Tensor
See :func:`torch.arctan`
""")
add_docstr_all('arctan_', r"""
arctan_() -> Tensor
In-place version of :meth:`~Tensor.arctan`
""")
add_docstr_all('atan2', r"""
atan2(other) -> Tensor
See :func:`torch.atan2`
""")
add_docstr_all('atan2_', r"""
atan2_(other) -> Tensor
In-place version of :meth:`~Tensor.atan2`
""")
add_docstr_all('atanh', r"""
atanh() -> Tensor
See :func:`torch.atanh`
""")
add_docstr_all('atanh_', r"""
atanh_() -> Tensor
In-place version of :meth:`~Tensor.atanh`
""")
add_docstr_all('arctanh', r"""
arctanh() -> Tensor
See :func:`torch.arctanh`
""")
add_docstr_all('arctanh_', r"""
arctanh_() -> Tensor
In-place version of :meth:`~Tensor.arctanh`
""")
add_docstr_all('baddbmm',
r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.baddbmm`
""")
add_docstr_all('baddbmm_',
r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.baddbmm`
""")
add_docstr_all('bernoulli',
r"""
bernoulli(*, generator=None) -> Tensor
Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.
See :func:`torch.bernoulli`
""")
add_docstr_all('bernoulli_',
r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor
Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.
:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.
If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.
See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
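Example (a minimal deterministic sketch; with ``p=1`` every draw is 1)::
    >>> torch.zeros(3).bernoulli_(1.0)
    tensor([1., 1., 1.])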
""")
add_docstr_all('bincount',
r"""
bincount(weights=None, minlength=0) -> Tensor
See :func:`torch.bincount`
""")
add_docstr_all('bitwise_not',
r"""
bitwise_not() -> Tensor
See :func:`torch.bitwise_not`
""")
add_docstr_all('bitwise_not_',
r"""
bitwise_not_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_not`
""")
add_docstr_all('bitwise_and',
r"""
bitwise_and() -> Tensor
See :func:`torch.bitwise_and`
""")
add_docstr_all('bitwise_and_',
r"""
bitwise_and_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_and`
""")
add_docstr_all('bitwise_or',
r"""
bitwise_or() -> Tensor
See :func:`torch.bitwise_or`
""")
add_docstr_all('bitwise_or_',
r"""
bitwise_or_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_or`
""")
add_docstr_all('bitwise_xor',
r"""
bitwise_xor() -> Tensor
See :func:`torch.bitwise_xor`
""")
add_docstr_all('bitwise_xor_',
r"""
bitwise_xor_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_xor`
""")
add_docstr_all('bitwise_left_shift',
r"""
bitwise_left_shift(other) -> Tensor
See :func:`torch.bitwise_left_shift`
""")
add_docstr_all('bitwise_left_shift_',
r"""
bitwise_left_shift_(other) -> Tensor
In-place version of :meth:`~Tensor.bitwise_left_shift`
""")
add_docstr_all('bitwise_right_shift',
r"""
bitwise_right_shift(other) -> Tensor
See :func:`torch.bitwise_right_shift`
""")
add_docstr_all('bitwise_right_shift_',
r"""
bitwise_right_shift_(other) -> Tensor
In-place version of :meth:`~Tensor.bitwise_right_shift`
""")
add_docstr_all('broadcast_to',
r"""
broadcast_to(shape) -> Tensor
See :func:`torch.broadcast_to`.
""")
add_docstr_all('logical_and',
r"""
logical_and() -> Tensor
See :func:`torch.logical_and`
""")
add_docstr_all('logical_and_',
r"""
logical_and_() -> Tensor
In-place version of :meth:`~Tensor.logical_and`
""")
add_docstr_all('logical_not',
r"""
logical_not() -> Tensor
See :func:`torch.logical_not`
""")
add_docstr_all('logical_not_',
r"""
logical_not_() -> Tensor
In-place version of :meth:`~Tensor.logical_not`
""")
add_docstr_all('logical_or',
r"""
logical_or() -> Tensor
See :func:`torch.logical_or`
""")
add_docstr_all('logical_or_',
r"""
logical_or_() -> Tensor
In-place version of :meth:`~Tensor.logical_or`
""")
add_docstr_all('logical_xor',
r"""
logical_xor() -> Tensor
See :func:`torch.logical_xor`
""")
add_docstr_all('logical_xor_',
r"""
logical_xor_() -> Tensor
In-place version of :meth:`~Tensor.logical_xor`
""")
add_docstr_all('bmm',
r"""
bmm(batch2) -> Tensor
See :func:`torch.bmm`
""")
add_docstr_all('cauchy_',
r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
Fills the tensor with numbers drawn from the Cauchy distribution:
.. math::
f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
""")
add_docstr_all('ceil',
r"""
ceil() -> Tensor
See :func:`torch.ceil`
""")
add_docstr_all('ceil_',
r"""
ceil_() -> Tensor
In-place version of :meth:`~Tensor.ceil`
""")
add_docstr_all('cholesky',
r"""
cholesky(upper=False) -> Tensor
See :func:`torch.cholesky`
""")
add_docstr_all('cholesky_solve',
r"""
cholesky_solve(input2, upper=False) -> Tensor
See :func:`torch.cholesky_solve`
""")
add_docstr_all('cholesky_inverse',
r"""
cholesky_inverse(upper=False) -> Tensor
See :func:`torch.cholesky_inverse`
""")
add_docstr_all('clamp',
r"""
clamp(min=None, max=None) -> Tensor
See :func:`torch.clamp`
""")
add_docstr_all('clamp_',
r"""
clamp_(min=None, max=None) -> Tensor
In-place version of :meth:`~Tensor.clamp`
""")
add_docstr_all('clip', r"""
clip(min=None, max=None) -> Tensor
Alias for :meth:`~Tensor.clamp`.
""")
add_docstr_all('clip_', r"""
clip_(min=None, max=None) -> Tensor
Alias for :meth:`~Tensor.clamp_`.
""")
add_docstr_all('clone', r"""
clone(*, memory_format=torch.preserve_format) -> Tensor
See :func:`torch.clone`
""".format(**common_args))
add_docstr_all('coalesce',
r"""
coalesce() -> Tensor
Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
Returns :attr:`self` if :attr:`self` is a coalesced tensor.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
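Example (illustrative; when coalescing, values at duplicate coordinates are summed)::

    >>> i = torch.tensor([[0, 0], [1, 1]])   # two entries at the same position (0, 1)
    >>> v = torch.tensor([3., 4.])
    >>> s = torch.sparse_coo_tensor(i, v, (2, 2))
    >>> s.is_coalesced()
    False
    >>> sc = s.coalesce()
    >>> sc.is_coalesced()
    True
    >>> sc.values()   # the duplicate values have been summed
    tensor([7.])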
""")
add_docstr_all('contiguous',
r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor
Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is already in the specified memory format, this function returns the
:attr:`self` tensor.
Args:
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
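Example (illustrative, using the default ``torch.contiguous_format``)::

    >>> x = torch.arange(6).reshape(2, 3).t()   # transposing produces a non-contiguous view
    >>> x.is_contiguous()
    False
    >>> y = x.contiguous()                      # y holds the same values in contiguous memory
    >>> y.is_contiguous()
    True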
""")
add_docstr_all('copy_',
r"""
copy_(src, non_blocking=False) -> Tensor
Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.
The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.
Args:
src (Tensor): the source tensor to copy from
non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other
cases, this argument has no effect.
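Example (illustrative; note the dtype conversion and broadcasting)::

    >>> t = torch.zeros(2, 2)
    >>> src = torch.tensor([1, 2])   # integer dtype, shape broadcastable to (2, 2)
    >>> t.copy_(src)
    tensor([[1., 2.],
            [1., 2.]])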
""")
add_docstr_all('conj',
r"""
conj() -> Tensor
See :func:`torch.conj`
""")
add_docstr_all('conj_physical',
r"""
conj_physical() -> Tensor
See :func:`torch.conj_physical`
""")
add_docstr_all('conj_physical_',
r"""
conj_physical_() -> Tensor
In-place version of :meth:`~Tensor.conj_physical`
""")
add_docstr_all('resolve_conj',
r"""
resolve_conj() -> Tensor
See :func:`torch.resolve_conj`
""")
add_docstr_all('resolve_neg',
r"""
resolve_neg() -> Tensor
See :func:`torch.resolve_neg`
""")
add_docstr_all('copysign',
r"""
copysign(other) -> Tensor
See :func:`torch.copysign`
""")
add_docstr_all('copysign_', r"""
copysign_(other) -> Tensor
In-place version of :meth:`~Tensor.copysign`
""")
add_docstr_all('cos',
r"""
cos() -> Tensor
See :func:`torch.cos`
""")
add_docstr_all('cos_',
r"""
cos_() -> Tensor
In-place version of :meth:`~Tensor.cos`
""")
add_docstr_all('cosh',
r"""
cosh() -> Tensor
See :func:`torch.cosh`
""")
add_docstr_all('cosh_',
r"""
cosh_() -> Tensor
In-place version of :meth:`~Tensor.cosh`
""")
add_docstr_all('cpu',
r"""
cpu(memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in CPU memory.
If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('count_nonzero',
r"""
count_nonzero(dim=None) -> Tensor
See :func:`torch.count_nonzero`
""")
add_docstr_all('cov', r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor
See :func:`torch.cov`
""")
add_docstr_all('corrcoef', r"""
corrcoef() -> Tensor
See :func:`torch.corrcoef`
""")
add_docstr_all('cross',
r"""
cross(other, dim=-1) -> Tensor
See :func:`torch.cross`
""")
add_docstr_all('cuda',
r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination GPU device.
Defaults to the current CUDA device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
{memory_format}
""".format(**common_args))
add_docstr_all('xpu',
r"""
xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in XPU memory.
If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination XPU device.
Defaults to the current XPU device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
{memory_format}
""".format(**common_args))
add_docstr_all('logcumsumexp',
r"""
logcumsumexp(dim) -> Tensor
See :func:`torch.logcumsumexp`
""")
add_docstr_all('cummax',
r"""
cummax(dim) -> (Tensor, Tensor)
See :func:`torch.cummax`
""")
add_docstr_all('cummin',
r"""
cummin(dim) -> (Tensor, Tensor)
See :func:`torch.cummin`
""")
add_docstr_all('cumprod',
r"""
cumprod(dim, dtype=None) -> Tensor
See :func:`torch.cumprod`
""")
add_docstr_all('cumprod_',
r"""
cumprod_(dim, dtype=None) -> Tensor
In-place version of :meth:`~Tensor.cumprod`
""")
add_docstr_all('cumsum',
r"""
cumsum(dim, dtype=None) -> Tensor
See :func:`torch.cumsum`
""")
add_docstr_all('cumsum_',
r"""
cumsum_(dim, dtype=None) -> Tensor
In-place version of :meth:`~Tensor.cumsum`
""")
add_docstr_all('data_ptr',
r"""
data_ptr() -> int
Returns the address of the first element of :attr:`self` tensor.
""")
add_docstr_all('dequantize',
r"""
dequantize() -> Tensor
Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
""")
add_docstr_all('dense_dim',
r"""
dense_dim() -> int
Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
""")
add_docstr_all('diag',
r"""
diag(diagonal=0) -> Tensor
See :func:`torch.diag`
""")
add_docstr_all('diag_embed',
r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
See :func:`torch.diag_embed`
""")
add_docstr_all('diagflat',
r"""
diagflat(offset=0) -> Tensor
See :func:`torch.diagflat`
""")
add_docstr_all('diagonal',
r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor
See :func:`torch.diagonal`
""")
add_docstr_all('fill_diagonal_',
r"""
fill_diagonal_(fill_value, wrap=False) -> Tensor
Fill the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place, and returns the input tensor.
Arguments:
fill_value (Scalar): the fill value
wrap (bool): whether the diagonal should 'wrap' around after N columns for tall matrices.
Example::
>>> a = torch.zeros(3, 3)
>>> a.fill_diagonal_(5)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.]])
>>> b = torch.zeros(7, 3)
>>> b.fill_diagonal_(5)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> c = torch.zeros(7, 3)
>>> c.fill_diagonal_(5, wrap=True)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.],
[0., 0., 0.],
[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.]])
""")
add_docstr_all('floor_divide',
r"""
floor_divide(value) -> Tensor
See :func:`torch.floor_divide`
""")
add_docstr_all('floor_divide_',
r"""
floor_divide_(value) -> Tensor
In-place version of :meth:`~Tensor.floor_divide`
""")
add_docstr_all('diff',
r"""
diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
See :func:`torch.diff`
""")
add_docstr_all('digamma',
r"""
digamma() -> Tensor
See :func:`torch.digamma`
""")
add_docstr_all('digamma_',
r"""
digamma_() -> Tensor
In-place version of :meth:`~Tensor.digamma`
""")
add_docstr_all('dim',
r"""
dim() -> int
Returns the number of dimensions of :attr:`self` tensor.
""")
add_docstr_all('dist',
r"""
dist(other, p=2) -> Tensor
See :func:`torch.dist`
""")
add_docstr_all('div', r"""
div(value, *, rounding_mode=None) -> Tensor
See :func:`torch.div`
""")
add_docstr_all('div_', r"""
div_(value, *, rounding_mode=None) -> Tensor
In-place version of :meth:`~Tensor.div`
""")
add_docstr_all('divide', r"""
divide(value, *, rounding_mode=None) -> Tensor
See :func:`torch.divide`
""")
add_docstr_all('divide_', r"""
divide_(value, *, rounding_mode=None) -> Tensor
In-place version of :meth:`~Tensor.divide`
""")
add_docstr_all('dot',
r"""
dot(other) -> Tensor
See :func:`torch.dot`
""")
add_docstr_all('eig',
r"""
eig(eigenvectors=False) -> (Tensor, Tensor)
See :func:`torch.eig`
""")
add_docstr_all('element_size',
r"""
element_size() -> int
Returns the size in bytes of an individual element.
Example::
>>> torch.tensor([]).element_size()
4
>>> torch.tensor([], dtype=torch.uint8).element_size()
1
""")
add_docstr_all('eq',
r"""
eq(other) -> Tensor
See :func:`torch.eq`
""")
add_docstr_all('eq_',
r"""
eq_(other) -> Tensor
In-place version of :meth:`~Tensor.eq`
""")
add_docstr_all('equal',
r"""
equal(other) -> bool
See :func:`torch.equal`
""")
add_docstr_all('erf',
r"""
erf() -> Tensor
See :func:`torch.erf`
""")
add_docstr_all('erf_',
r"""
erf_() -> Tensor
In-place version of :meth:`~Tensor.erf`
""")
add_docstr_all('erfc',
r"""
erfc() -> Tensor
See :func:`torch.erfc`
""")
add_docstr_all('erfc_',
r"""
erfc_() -> Tensor
In-place version of :meth:`~Tensor.erfc`
""")
add_docstr_all('erfinv',
r"""
erfinv() -> Tensor
See :func:`torch.erfinv`
""")
add_docstr_all('erfinv_',
r"""
erfinv_() -> Tensor
In-place version of :meth:`~Tensor.erfinv`
""")
add_docstr_all('exp',
r"""
exp() -> Tensor
See :func:`torch.exp`
""")
add_docstr_all('exp_',
r"""
exp_() -> Tensor
In-place version of :meth:`~Tensor.exp`
""")
add_docstr_all('exp2',
r"""
exp2() -> Tensor
See :func:`torch.exp2`
""")
add_docstr_all('exp2_',
r"""
exp2_() -> Tensor
In-place version of :meth:`~Tensor.exp2`
""")
add_docstr_all('expm1',
r"""
expm1() -> Tensor
See :func:`torch.expm1`
""")
add_docstr_all('expm1_',
r"""
expm1_() -> Tensor
In-place version of :meth:`~Tensor.expm1`
""")
add_docstr_all('exponential_',
r"""
exponential_(lambd=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the exponential distribution:
.. math::
f(x) = \lambda e^{-\lambda x}
""")
add_docstr_all('fill_',
r"""
fill_(value) -> Tensor
Fills :attr:`self` tensor with the specified value.
""")
add_docstr_all('floor',
r"""
floor() -> Tensor
See :func:`torch.floor`
""")
add_docstr_all('flip',
r"""
flip(dims) -> Tensor
See :func:`torch.flip`
""")
add_docstr_all('fliplr',
r"""
fliplr() -> Tensor
See :func:`torch.fliplr`
""")
add_docstr_all('flipud',
r"""
flipud() -> Tensor
See :func:`torch.flipud`
""")
add_docstr_all('roll',
r"""
roll(shifts, dims) -> Tensor
See :func:`torch.roll`
""")
add_docstr_all('floor_',
r"""
floor_() -> Tensor
In-place version of :meth:`~Tensor.floor`
""")
add_docstr_all('fmod',
r"""
fmod(divisor) -> Tensor
See :func:`torch.fmod`
""")
add_docstr_all('fmod_',
r"""
fmod_(divisor) -> Tensor
In-place version of :meth:`~Tensor.fmod`
""")
add_docstr_all('frac',
r"""
frac() -> Tensor
See :func:`torch.frac`
""")
add_docstr_all('frac_',
r"""
frac_() -> Tensor
In-place version of :meth:`~Tensor.frac`
""")
add_docstr_all('frexp',
r"""
frexp() -> (Tensor mantissa, Tensor exponent)
See :func:`torch.frexp`
""")
add_docstr_all('flatten',
r"""
flatten(start_dim=0, end_dim=-1) -> Tensor
See :func:`torch.flatten`
""")
add_docstr_all('gather',
r"""
gather(dim, index) -> Tensor
See :func:`torch.gather`
""")
add_docstr_all('gcd',
r"""
gcd(other) -> Tensor
See :func:`torch.gcd`
""")
add_docstr_all('gcd_',
r"""
gcd_(other) -> Tensor
In-place version of :meth:`~Tensor.gcd`
""")
add_docstr_all('ge', r"""
ge(other) -> Tensor
See :func:`torch.ge`.
""")
add_docstr_all('ge_', r"""
ge_(other) -> Tensor
In-place version of :meth:`~Tensor.ge`.
""")
add_docstr_all('greater_equal', r"""
greater_equal(other) -> Tensor
See :func:`torch.greater_equal`.
""")
add_docstr_all('greater_equal_', r"""
greater_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.greater_equal`.
""")
add_docstr_all('geometric_',
r"""
geometric_(p, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the geometric distribution:
.. math::
f(X = k) = (1 - p)^{k - 1} p, \quad k = 1, 2, \dots
""")
add_docstr_all('geqrf',
r"""
geqrf() -> (Tensor, Tensor)
See :func:`torch.geqrf`
""")
add_docstr_all('ger',
r"""
ger(vec2) -> Tensor
See :func:`torch.ger`
""")
add_docstr_all('inner', r"""
inner(other) -> Tensor
See :func:`torch.inner`.
""")
add_docstr_all('outer', r"""
outer(vec2) -> Tensor
See :func:`torch.outer`.
""")
add_docstr_all('hypot',
r"""
hypot(other) -> Tensor
See :func:`torch.hypot`
""")
add_docstr_all('hypot_',
r"""
hypot_(other) -> Tensor
In-place version of :meth:`~Tensor.hypot`
""")
add_docstr_all('i0',
r"""
i0() -> Tensor
See :func:`torch.i0`
""")
add_docstr_all('i0_',
r"""
i0_() -> Tensor
In-place version of :meth:`~Tensor.i0`
""")
add_docstr_all('igamma',
r"""
igamma(other) -> Tensor
See :func:`torch.igamma`
""")
add_docstr_all('igamma_',
r"""
igamma_(other) -> Tensor
In-place version of :meth:`~Tensor.igamma`
""")
add_docstr_all('igammac',
r"""
igammac(other) -> Tensor
See :func:`torch.igammac`
""")
add_docstr_all('igammac_',
r"""
igammac_(other) -> Tensor
In-place version of :meth:`~Tensor.igammac`
""")
add_docstr_all('indices',
r"""
indices() -> Tensor
Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See also :meth:`Tensor.values`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('get_device',
r"""
get_device() -> Device ordinal (Integer)
For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, an error is thrown.
Example::
>>> x = torch.randn(3, 4, 5, device='cuda:0')
>>> x.get_device()
0
>>> x.cpu().get_device() # RuntimeError: get_device is not implemented for type torch.FloatTensor
""")
add_docstr_all('values',
r"""
values() -> Tensor
Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See also :meth:`Tensor.indices`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('gt', r"""
gt(other) -> Tensor
See :func:`torch.gt`.
""")
add_docstr_all('gt_', r"""
gt_(other) -> Tensor
In-place version of :meth:`~Tensor.gt`.
""")
add_docstr_all('greater', r"""
greater(other) -> Tensor
See :func:`torch.greater`.
""")
add_docstr_all('greater_', r"""
greater_(other) -> Tensor
In-place version of :meth:`~Tensor.greater`.
""")
add_docstr_all('has_names',
r"""
Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
""")
add_docstr_all('hardshrink',
r"""
hardshrink(lambd=0.5) -> Tensor
See :func:`torch.nn.functional.hardshrink`
""")
add_docstr_all('heaviside',
r"""
heaviside(values) -> Tensor
See :func:`torch.heaviside`
""")
add_docstr_all('heaviside_',
r"""
heaviside_(values) -> Tensor
In-place version of :meth:`~Tensor.heaviside`
""")
add_docstr_all('histc',
r"""
histc(bins=100, min=0, max=0) -> Tensor
See :func:`torch.histc`
""")
add_docstr_all('histogram',
r"""
histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
See :func:`torch.histogram`
""")
add_docstr_all('index_add_',
r"""
index_add_(dim, index, tensor, *, alpha=1) -> Tensor
Accumulate the elements of :attr:`alpha` times :attr:`tensor` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
:attr:`tensor` is subtracted from the ``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
Note:
{forward_reproducibility_note}
Args:
dim (int): dimension along which to index
index (IntTensor or LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to add
Keyword args:
alpha (Number): the scalar multiplier for :attr:`tensor`
Example::
>>> x = torch.ones(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_add_(0, index, t)
tensor([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]])
>>> x.index_add_(0, index, t, alpha=-1)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
""".format(**reproducibility_notes))
add_docstr_all('index_copy_',
r"""
index_copy_(dim, index, tensor) -> Tensor
Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
.. note::
If :attr:`index` contains duplicate entries, multiple elements from
:attr:`tensor` will be copied to the same index of :attr:`self`. The result
is nondeterministic since it depends on which copy occurs last.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to copy
Example::
>>> x = torch.zeros(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_copy_(0, index, t)
tensor([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 7., 8., 9.],
[ 0., 0., 0.],
[ 4., 5., 6.]])
""")
add_docstr_all('index_fill_',
r"""
index_fill_(dim, index, value) -> Tensor
Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`self` tensor to fill in
value (float): the value to fill with
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 2])
>>> x.index_fill_(1, index, -1)
tensor([[-1., 2., -1.],
[-1., 5., -1.],
[-1., 8., -1.]])
""")
add_docstr_all('index_put_',
r"""
index_put_(indices, values, accumulate=False) -> Tensor
Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, values)`` is equivalent to
``tensor[indices] = values``. Returns :attr:`self`.
If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.
Args:
indices (tuple of LongTensor): tensors used to index into `self`.
values (Tensor): tensor of same dtype as `self`.
accumulate (bool): whether to accumulate into self
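Example (illustrative)::

    >>> x = torch.zeros(3, 3)
    >>> indices = (torch.tensor([0, 2]), torch.tensor([1, 2]))
    >>> x.index_put_(indices, torch.tensor([1., 2.]))   # same as x[indices] = values
    tensor([[0., 1., 0.],
            [0., 0., 0.],
            [0., 0., 2.]])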
""")
add_docstr_all('index_put',
r"""
index_put(indices, values, accumulate=False) -> Tensor
Out-place version of :meth:`~Tensor.index_put_`.
""")
add_docstr_all('index_select',
r"""
index_select(dim, index) -> Tensor
See :func:`torch.index_select`
""")
add_docstr_all('sparse_mask',
r"""
sparse_mask(mask) -> Tensor
Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.
.. note::
The returned sparse tensor has the same indices as the sparse tensor
:attr:`mask`, even when the corresponding values in :attr:`self` are
zeros.
Args:
mask (Tensor): a sparse tensor whose indices are used as a filter
Example::
>>> nse = 5
>>> dims = (5, 5, 2, 2)
>>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
>>> V = torch.randn(nse, dims[2], dims[3])
>>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
>>> D = torch.randn(dims)
>>> D.sparse_mask(S)
tensor(indices=tensor([[0, 0, 0, 2],
[0, 1, 4, 3]]),
values=tensor([[[ 1.6550, 0.2397],
[-0.1611, -0.0779]],
[[ 0.2326, -1.0558],
[ 1.4711, 1.9678]],
[[-0.5138, -0.0411],
[ 1.9417, 0.5158]],
[[ 0.0793, 0.0036],
[-0.2569, -0.1055]]]),
size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""")
add_docstr_all('inverse',
r"""
inverse() -> Tensor
See :func:`torch.inverse`
""")
add_docstr_all('isnan',
r"""
isnan() -> Tensor
See :func:`torch.isnan`
""")
add_docstr_all('isinf',
r"""
isinf() -> Tensor
See :func:`torch.isinf`
""")
add_docstr_all('isposinf',
r"""
isposinf() -> Tensor
See :func:`torch.isposinf`
""")
add_docstr_all('isneginf',
r"""
isneginf() -> Tensor
See :func:`torch.isneginf`
""")
add_docstr_all('isfinite',
r"""
isfinite() -> Tensor
See :func:`torch.isfinite`
""")
add_docstr_all('isclose',
r"""
isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
See :func:`torch.isclose`
""")
add_docstr_all('isreal',
r"""
isreal() -> Tensor
See :func:`torch.isreal`
""")
add_docstr_all('is_coalesced',
r"""
is_coalesced() -> bool
Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
""")
add_docstr_all('is_contiguous',
r"""
is_contiguous(memory_format=torch.contiguous_format) -> bool
Returns True if :attr:`self` tensor is contiguous in memory in the order specified
by memory format.
Args:
memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
order. Default: ``torch.contiguous_format``.
""")
add_docstr_all('is_pinned',
r"""
Returns ``True`` if this tensor resides in pinned memory.
""")
add_docstr_all('is_floating_point',
r"""
is_floating_point() -> bool
Returns True if the data type of :attr:`self` is a floating point data type.
""")
add_docstr_all('is_complex',
r"""
is_complex() -> bool
Returns True if the data type of :attr:`self` is a complex data type.
""")
add_docstr_all('is_inference',
r"""
is_inference() -> bool
See :func:`torch.is_inference`
""")
add_docstr_all('is_conj',
r"""
is_conj() -> bool
Returns True if the conjugate bit of :attr:`self` is set to true.
""")
add_docstr_all('is_neg',
r"""
is_neg() -> bool
Returns True if the negative bit of :attr:`self` is set to true.
""")
add_docstr_all('is_signed',
r"""
is_signed() -> bool
Returns True if the data type of :attr:`self` is a signed data type.
""")
add_docstr_all('is_set_to',
r"""
is_set_to(tensor) -> bool
Returns True if both tensors are pointing to the exact same memory (same
storage, offset, size and stride).
""")
add_docstr_all('item', r"""
item() -> number
Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
This operation is not differentiable.
Example::
>>> x = torch.tensor([1.0])
>>> x.item()
1.0
""")
add_docstr_all('kron',
r"""
kron(other) -> Tensor
See :func:`torch.kron`
""")
add_docstr_all('kthvalue',
r"""
kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.kthvalue`
""")
add_docstr_all('ldexp',
r"""
ldexp(other) -> Tensor
See :func:`torch.ldexp`
""")
add_docstr_all('ldexp_',
r"""
ldexp_(other) -> Tensor
In-place version of :meth:`~Tensor.ldexp`
""")
add_docstr_all('lcm',
r"""
lcm(other) -> Tensor
See :func:`torch.lcm`
""")
add_docstr_all('lcm_',
r"""
lcm_(other) -> Tensor
In-place version of :meth:`~Tensor.lcm`
""")
add_docstr_all('le', r"""
le(other) -> Tensor
See :func:`torch.le`.
""")
add_docstr_all('le_', r"""
le_(other) -> Tensor
In-place version of :meth:`~Tensor.le`.
""")
add_docstr_all('less_equal', r"""
less_equal(other) -> Tensor
See :func:`torch.less_equal`.
""")
add_docstr_all('less_equal_', r"""
less_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.less_equal`.
""")
add_docstr_all('lerp',
r"""
lerp(end, weight) -> Tensor
See :func:`torch.lerp`
""")
add_docstr_all('lerp_',
r"""
lerp_(end, weight) -> Tensor
In-place version of :meth:`~Tensor.lerp`
""")
add_docstr_all('lgamma',
r"""
lgamma() -> Tensor
See :func:`torch.lgamma`
""")
add_docstr_all('lgamma_', r"""
lgamma_() -> Tensor
In-place version of :meth:`~Tensor.lgamma`
""")
add_docstr_all('log',
r"""
log() -> Tensor
See :func:`torch.log`
""")
add_docstr_all('log_', r"""
log_() -> Tensor
In-place version of :meth:`~Tensor.log`
""")
add_docstr_all('log10',
r"""
log10() -> Tensor
See :func:`torch.log10`
""")
add_docstr_all('log10_',
r"""
log10_() -> Tensor
In-place version of :meth:`~Tensor.log10`
""")
add_docstr_all('log1p',
r"""
log1p() -> Tensor
See :func:`torch.log1p`
""")
add_docstr_all('log1p_',
r"""
log1p_() -> Tensor
In-place version of :meth:`~Tensor.log1p`
""")
add_docstr_all('log2',
r"""
log2() -> Tensor
See :func:`torch.log2`
""")
add_docstr_all('log2_',
r"""
log2_() -> Tensor
In-place version of :meth:`~Tensor.log2`
""")
add_docstr_all('logaddexp',
r"""
logaddexp(other) -> Tensor
See :func:`torch.logaddexp`
""")
add_docstr_all('logaddexp2',
r"""
logaddexp2(other) -> Tensor
See :func:`torch.logaddexp2`
""")
add_docstr_all('log_normal_', r"""
log_normal_(mean=1, std=2, *, generator=None)
Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:
.. math::
f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
""")
add_docstr_all('logsumexp',
r"""
logsumexp(dim, keepdim=False) -> Tensor
See :func:`torch.logsumexp`
""")
add_docstr_all('lstsq',
r"""
lstsq(A) -> (Tensor, Tensor)
See :func:`torch.lstsq`
""")
add_docstr_all('lt', r"""
lt(other) -> Tensor
See :func:`torch.lt`.
""")
add_docstr_all('lt_', r"""
lt_(other) -> Tensor
In-place version of :meth:`~Tensor.lt`.
""")
add_docstr_all('less', r"""
less(other) -> Tensor
See :func:`torch.less`.
""")
add_docstr_all('less_', r"""
less_(other) -> Tensor
In-place version of :meth:`~Tensor.less`.
""")
add_docstr_all('lu_solve',
r"""
lu_solve(LU_data, LU_pivots) -> Tensor
See :func:`torch.lu_solve`
""")
add_docstr_all('map_',
r"""
map_(tensor, callable)
Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
The :attr:`callable` should have the signature::
def callable(a, b) -> number
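Example (a small illustrative sketch; the callable is applied elementwise)::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.tensor([10., 20., 30.])
    >>> a.map_(b, lambda x, y: x + y)
    tensor([11., 22., 33.])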
""")
add_docstr_all('masked_scatter_',
r"""
masked_scatter_(mask, source)
Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.
Args:
mask (BoolTensor): the boolean mask
source (Tensor): the tensor to copy from
.. note::
The :attr:`mask` operates on the :attr:`self` tensor, not on the given
:attr:`source` tensor.
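Example (illustrative; elements of :attr:`source` are consumed in order)::

    >>> t = torch.zeros(2, 3)
    >>> mask = torch.tensor([[True, False, True], [False, True, False]])
    >>> source = torch.tensor([1., 2., 3., 4., 5., 6.])
    >>> t.masked_scatter_(mask, source)
    tensor([[1., 0., 2.],
            [0., 3., 0.]])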
""")
add_docstr_all('masked_fill_',
r"""
masked_fill_(mask, value)
Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
Args:
mask (BoolTensor): the boolean mask
value (float): the value to fill in with
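Example (illustrative)::

    >>> t = torch.zeros(2, 3)
    >>> mask = torch.tensor([[True, False, True], [False, True, False]])
    >>> t.masked_fill_(mask, 7.0)
    tensor([[7., 0., 7.],
            [0., 7., 0.]])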
""")
add_docstr_all('masked_select',
r"""
masked_select(mask) -> Tensor
See :func:`torch.masked_select`
""")
add_docstr_all('matrix_power', r"""
matrix_power(n) -> Tensor
.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
Alias for :func:`torch.linalg.matrix_power`
""")
add_docstr_all('matrix_exp',
r"""
matrix_exp() -> Tensor
See :func:`torch.matrix_exp`
""")
add_docstr_all('max',
r"""
max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.max`
""")
add_docstr_all('amax',
r"""
amax(dim=None, keepdim=False) -> Tensor
See :func:`torch.amax`
""")
add_docstr_all('maximum',
r"""
maximum(other) -> Tensor
See :func:`torch.maximum`
""")
add_docstr_all('fmax',
r"""
fmax(other) -> Tensor
See :func:`torch.fmax`
""")
add_docstr_all('argmax',
r"""
argmax(dim=None, keepdim=False) -> LongTensor
See :func:`torch.argmax`
""")
add_docstr_all('mean', r"""
mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
See :func:`torch.mean`
""")
add_docstr_all('nanmean', r"""
nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
See :func:`torch.nanmean`
""")
add_docstr_all('median',
r"""
median(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.median`
""")
add_docstr_all('nanmedian',
r"""
nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.nanmedian`
""")
add_docstr_all('min',
r"""
min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.min`
""")
add_docstr_all('amin',
r"""
amin(dim=None, keepdim=False) -> Tensor
See :func:`torch.amin`
""")
add_docstr_all('minimum',
r"""
minimum(other) -> Tensor
See :func:`torch.minimum`
""")
add_docstr_all('aminmax', r"""
aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
See :func:`torch.aminmax`
""")
add_docstr_all('fmin',
r"""
fmin(other) -> Tensor
See :func:`torch.fmin`
""")
add_docstr_all('argmin',
r"""
argmin(dim=None, keepdim=False) -> LongTensor
See :func:`torch.argmin`
""")
add_docstr_all('mm',
r"""
mm(mat2) -> Tensor
See :func:`torch.mm`
""")
add_docstr_all('mode',
r"""
mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.mode`
""")
add_docstr_all('movedim', r"""
movedim(source, destination) -> Tensor
See :func:`torch.movedim`
""")
add_docstr_all('moveaxis', r"""
moveaxis(source, destination) -> Tensor
See :func:`torch.moveaxis`
""")
add_docstr_all('mul', r"""
mul(value) -> Tensor
See :func:`torch.mul`.
""")
add_docstr_all('mul_', r"""
mul_(value) -> Tensor
In-place version of :meth:`~Tensor.mul`.
""")
add_docstr_all('multiply', r"""
multiply(value) -> Tensor
See :func:`torch.multiply`.
""")
add_docstr_all('multiply_', r"""
multiply_(value) -> Tensor
In-place version of :meth:`~Tensor.multiply`.
""")
add_docstr_all('multinomial',
r"""
multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
See :func:`torch.multinomial`
""")
add_docstr_all('mv',
r"""
mv(vec) -> Tensor
See :func:`torch.mv`
""")
add_docstr_all('mvlgamma',
r"""
mvlgamma(p) -> Tensor
See :func:`torch.mvlgamma`
""")
add_docstr_all('mvlgamma_',
r"""
mvlgamma_(p) -> Tensor
In-place version of :meth:`~Tensor.mvlgamma`
""")
add_docstr_all('narrow',
r"""
narrow(dimension, start, length) -> Tensor
See :func:`torch.narrow`
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> x.narrow(0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> x.narrow(1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""")
add_docstr_all('narrow_copy',
r"""
narrow_copy(dimension, start, length) -> Tensor
Same as :meth:`Tensor.narrow` except returning a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method. Calling ``narrow_copy``
with ``dimension > self.sparse_dim()`` will return a copy with the
relevant dense dimension narrowed, and ``self.shape`` updated accordingly.
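Example (illustrative; compare with :meth:`Tensor.narrow`, which returns a view)::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> y = x.narrow_copy(0, 0, 2)
    >>> y
    tensor([[1, 2, 3],
            [4, 5, 6]])
    >>> y[0, 0] = 0    # modifying the copy leaves x unchanged
    >>> x[0, 0]
    tensor(1)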
""")
add_docstr_all('ndimension',
r"""
ndimension() -> int
Alias for :meth:`~Tensor.dim()`
""")
add_docstr_all('nan_to_num', r"""
nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
See :func:`torch.nan_to_num`.
""")
add_docstr_all('nan_to_num_', r"""
nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
In-place version of :meth:`~Tensor.nan_to_num`.
""")
add_docstr_all('ne', r"""
ne(other) -> Tensor
See :func:`torch.ne`.
""")
add_docstr_all('ne_', r"""
ne_(other) -> Tensor
In-place version of :meth:`~Tensor.ne`.
""")
add_docstr_all('not_equal', r"""
not_equal(other) -> Tensor
See :func:`torch.not_equal`.
""")
add_docstr_all('not_equal_', r"""
not_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.not_equal`.
""")
add_docstr_all('neg',
r"""
neg() -> Tensor
See :func:`torch.neg`
""")
add_docstr_all('negative',
r"""
negative() -> Tensor
See :func:`torch.negative`
""")
add_docstr_all('neg_',
r"""
neg_() -> Tensor
In-place version of :meth:`~Tensor.neg`
""")
add_docstr_all('negative_',
r"""
negative_() -> Tensor
In-place version of :meth:`~Tensor.negative`
""")
add_docstr_all('nelement',
r"""
nelement() -> int
Alias for :meth:`~Tensor.numel`
""")
add_docstr_all('nextafter',
r"""
nextafter(other) -> Tensor
See :func:`torch.nextafter`
""")
add_docstr_all('nextafter_',
r"""
nextafter_(other) -> Tensor
In-place version of :meth:`~Tensor.nextafter`
""")
add_docstr_all('nonzero',
r"""
nonzero() -> LongTensor
See :func:`torch.nonzero`
""")
add_docstr_all('norm',
r"""
norm(p=2, dim=None, keepdim=False) -> Tensor
See :func:`torch.norm`
""")
add_docstr_all('normal_',
r"""
normal_(mean=0, std=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
""")
add_docstr_all('numel',
r"""
numel() -> int
See :func:`torch.numel`
""")
add_docstr_all('numpy',
r"""
numpy() -> numpy.ndarray
Returns :attr:`self` tensor as a NumPy :class:`ndarray`. This tensor and the
returned :class:`ndarray` share the same underlying storage. Changes to
:attr:`self` tensor will be reflected in the :class:`ndarray` and vice versa.
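Example (illustrating the shared storage)::

    >>> t = torch.ones(3)
    >>> a = t.numpy()
    >>> a
    array([1., 1., 1.], dtype=float32)
    >>> t.add_(1)          # an in-place change to the tensor...
    tensor([2., 2., 2.])
    >>> a                  # ...is visible through the ndarray
    array([2., 2., 2.], dtype=float32)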
""")
add_docstr_all('orgqr',
r"""
orgqr(input2) -> Tensor
See :func:`torch.orgqr`
""")
add_docstr_all('ormqr',
r"""
ormqr(input2, input3, left=True, transpose=False) -> Tensor
See :func:`torch.ormqr`
""")
add_docstr_all('permute',
r"""
permute(*dims) -> Tensor
See :func:`torch.permute`
""")
add_docstr_all('polygamma',
r"""
polygamma(n) -> Tensor
See :func:`torch.polygamma`
""")
add_docstr_all('polygamma_',
r"""
polygamma_(n) -> Tensor
In-place version of :meth:`~Tensor.polygamma`
""")
add_docstr_all('positive',
r"""
positive() -> Tensor
See :func:`torch.positive`
""")
add_docstr_all('pow',
r"""
pow(exponent) -> Tensor
See :func:`torch.pow`
""")
add_docstr_all('pow_',
r"""
pow_(exponent) -> Tensor
In-place version of :meth:`~Tensor.pow`
""")
add_docstr_all('float_power',
r"""
float_power(exponent) -> Tensor
See :func:`torch.float_power`
""")
add_docstr_all('float_power_',
r"""
float_power_(exponent) -> Tensor
In-place version of :meth:`~Tensor.float_power`
""")
add_docstr_all('prod',
r"""
prod(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.prod`
""")
add_docstr_all('put_',
r"""
put_(index, source, accumulate=False) -> Tensor
Copies the elements from :attr:`source` into the positions specified by
:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.
:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
the same shape.
If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
contains duplicate elements.
Args:
index (LongTensor): the indices into self
source (Tensor): the tensor containing values to copy from
accumulate (bool): whether to accumulate into self
Example::
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
tensor([[ 4, 9, 5],
[ 10, 7, 8]])
""")
add_docstr_all('put',
r"""
put(input, index, source, accumulate=False) -> Tensor
Out-of-place version of :meth:`torch.Tensor.put_`.
`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
""")
add_docstr_all('qr',
r"""
qr(some=True) -> (Tensor, Tensor)
See :func:`torch.qr`
""")
add_docstr_all('qscheme',
r"""
qscheme() -> torch.qscheme
Returns the quantization scheme of a given QTensor.
""")
add_docstr_all('quantile', r"""
quantile(q, dim=None, keepdim=False) -> Tensor
See :func:`torch.quantile`
""")
add_docstr_all('nanquantile', r"""
nanquantile(q, dim=None, keepdim=False) -> Tensor
See :func:`torch.nanquantile`
""")
add_docstr_all('q_scale',
r"""
q_scale() -> float
Given a Tensor quantized by linear(affine) quantization,
returns the scale of the underlying quantizer().
""")
add_docstr_all('q_zero_point',
r"""
q_zero_point() -> int
Given a Tensor quantized by linear(affine) quantization,
returns the zero_point of the underlying quantizer().
""")
add_docstr_all('q_per_channel_scales',
r"""
q_per_channel_scales() -> Tensor
Given a Tensor quantized by linear (affine) per-channel quantization,
returns a Tensor of scales of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""")
add_docstr_all('q_per_channel_zero_points',
r"""
q_per_channel_zero_points() -> Tensor
Given a Tensor quantized by linear (affine) per-channel quantization,
returns a tensor of zero_points of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""")
add_docstr_all('q_per_channel_axis',
r"""
q_per_channel_axis() -> int
Given a Tensor quantized by linear (affine) per-channel quantization,
returns the index of dimension on which per-channel quantization is applied.
""")
add_docstr_all('random_',
r"""
random_(from=0, to=None, *, generator=None) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
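Example (a degenerate but deterministic case: with ``to - 1 == from``, every
element takes the single value ``from``)::

    >>> torch.empty(4, dtype=torch.int64).random_(3, 4)
    tensor([3, 3, 3, 3])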
""")
add_docstr_all('rad2deg',
r"""
rad2deg() -> Tensor
See :func:`torch.rad2deg`
""")
add_docstr_all('rad2deg_',
r"""
rad2deg_() -> Tensor
In-place version of :meth:`~Tensor.rad2deg`
""")
add_docstr_all('deg2rad',
r"""
deg2rad() -> Tensor
See :func:`torch.deg2rad`
""")
add_docstr_all('deg2rad_',
r"""
deg2rad_() -> Tensor
In-place version of :meth:`~Tensor.deg2rad`
""")
add_docstr_all('ravel',
r"""
ravel() -> Tensor
See :func:`torch.ravel`
""")
add_docstr_all('reciprocal',
r"""
reciprocal() -> Tensor
See :func:`torch.reciprocal`
""")
add_docstr_all('reciprocal_',
r"""
reciprocal_() -> Tensor
In-place version of :meth:`~Tensor.reciprocal`
""")
add_docstr_all('record_stream',
r"""
record_stream(stream)
Ensures that the tensor memory is not reused for another tensor until all
current work queued on :attr:`stream` is complete.
.. note::
The caching allocator is aware of only the stream on which a tensor was
allocated, so it correctly manages the tensor's life cycle only for work
submitted to that stream. If a tensor is used on a stream different from
its stream of origin, the allocator might reuse the memory unexpectedly.
Calling this method lets the allocator know which streams have used the tensor.
""")
add_docstr_all('remainder',
r"""
remainder(divisor) -> Tensor
See :func:`torch.remainder`
""")
add_docstr_all('remainder_',
r"""
remainder_(divisor) -> Tensor
In-place version of :meth:`~Tensor.remainder`
""")
add_docstr_all('renorm',
r"""
renorm(p, dim, maxnorm) -> Tensor
See :func:`torch.renorm`
""")
add_docstr_all('renorm_',
r"""
renorm_(p, dim, maxnorm) -> Tensor
In-place version of :meth:`~Tensor.renorm`
""")
add_docstr_all('repeat',
r"""
repeat(*sizes) -> Tensor
Repeats this tensor along the specified dimensions.
Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
.. warning::
:meth:`~Tensor.repeat` behaves differently from
`numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
but is more similar to
`numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
Args:
sizes (torch.Size or int...): The number of times to repeat this tensor along each
dimension
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat(4, 2)
tensor([[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3]])
>>> x.repeat(4, 2, 1).size()
torch.Size([4, 2, 3])
""")
add_docstr_all('repeat_interleave',
r"""
repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
See :func:`torch.repeat_interleave`.
""")
add_docstr_all('requires_grad_',
r"""
requires_grad_(requires_grad=True) -> Tensor
Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.
:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.
Args:
requires_grad (bool): If autograd should record operations on this tensor.
Default: ``True``.
Example::
>>> # Let's say we want to preprocess some saved weights and use
>>> # the result as new weights.
>>> saved_weights = [0.1, 0.2, 0.3, 0.25]
>>> loaded_weights = torch.tensor(saved_weights)
>>> weights = preprocess(loaded_weights) # some function
>>> weights
tensor([-0.5503, 0.4926, -2.1158, -0.8303])
>>> # Now, start to record operations done to weights
>>> weights.requires_grad_()
>>> out = weights.pow(2).sum()
>>> out.backward()
>>> weights.grad
tensor([-1.1007, 0.9853, -4.2316, -1.6606])
""")
add_docstr_all('reshape',
r"""
reshape(*shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.
See :func:`torch.reshape`
Args:
shape (tuple of ints or int...): the desired shape
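Example (illustrative; one dimension may be inferred by passing ``-1``)::

    >>> x = torch.arange(6)
    >>> x.reshape(2, 3)
    tensor([[0, 1, 2],
            [3, 4, 5]])
    >>> x.reshape(3, -1)
    tensor([[0, 1],
            [2, 3],
            [4, 5]])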
""")
add_docstr_all('reshape_as',
r"""
reshape_as(other) -> Tensor
Returns this tensor as the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
This method returns a view if ``other.sizes()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
Please see :meth:`reshape` for more information about ``reshape``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same shape
as :attr:`other`.
""")
add_docstr_all('resize_',
r"""
resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.
.. warning::
This is a low-level method. The storage is reinterpreted as C-contiguous,
ignoring the current strides (unless the target size equals the current
size, in which case the tensor is left unchanged). For most purposes, you
will instead want to use :meth:`~Tensor.view()`, which checks for
contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
Args:
sizes (torch.Size or int...): the desired size
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
Tensor. Default: ``torch.contiguous_format``. Note that memory format of
:attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
Example::
>>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
>>> x.resize_(2, 2)
tensor([[ 1, 2],
[ 3, 4]])
""")
add_docstr_all('resize_as_',
r"""
resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
Args:
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
Tensor. Default: ``torch.contiguous_format``. Note that memory format of
:attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
""")
add_docstr_all('rot90',
r"""
rot90(k, dims) -> Tensor
See :func:`torch.rot90`
""")
add_docstr_all('round',
r"""
round() -> Tensor
See :func:`torch.round`
""")
add_docstr_all('round_',
r"""
round_() -> Tensor
In-place version of :meth:`~Tensor.round`
""")
add_docstr_all('rsqrt',
r"""
rsqrt() -> Tensor
See :func:`torch.rsqrt`
""")
add_docstr_all('rsqrt_',
r"""
rsqrt_() -> Tensor
In-place version of :meth:`~Tensor.rsqrt`
""")
add_docstr_all('scatter_',
r"""
scatter_(dim, index, src, reduce=None) -> Tensor
Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive.
.. warning::
When indices are not unique, the behavior is non-deterministic (one of the
values from ``src`` will be picked arbitrarily) and the gradient will be
incorrect (it will be propagated to all locations in the source that
correspond to the same index)!
.. note::
The backward pass is implemented only for ``src.shape == index.shape``.
Additionally accepts an optional :attr:`reduce` argument that allows
specification of a reduction operation, which is applied to all values being
scattered from the tensor :attr:`src` into :attr:`self` at the indices
specified in :attr:`index`. For each value in :attr:`src`, the reduction
operation is applied to an index in :attr:`self` which is specified by
its index in :attr:`src` for ``dimension != dim`` and by the corresponding
value in :attr:`index` for ``dimension = dim``.
Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
is updated as::
self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
Reducing with the addition operation is the same as using
:meth:`~torch.Tensor.scatter_add_`.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter, can be either empty
or of the same dimensionality as ``src``. When empty, the operation
returns ``self`` unchanged.
src (Tensor or float): the source element(s) to scatter.
reduce (str, optional): reduction operation to apply, can be either
``'add'`` or ``'multiply'``.
Example::
>>> src = torch.arange(1, 11).reshape((2, 5))
>>> src
tensor([[ 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10]])
>>> index = torch.tensor([[0, 1, 2, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
tensor([[1, 0, 0, 4, 0],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
>>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
tensor([[1, 2, 3, 0, 0],
[6, 7, 0, 0, 8],
[0, 0, 0, 0, 0]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
... 1.23, reduce='multiply')
tensor([[2.0000, 2.0000, 2.4600, 2.0000],
[2.0000, 2.0000, 2.0000, 2.4600]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
... 1.23, reduce='add')
tensor([[2.0000, 2.0000, 3.2300, 2.0000],
[2.0000, 2.0000, 2.0000, 3.2300]])
""")
add_docstr_all('scatter_add_',
r"""
scatter_add_(dim, index, src) -> Tensor
Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`src`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
:attr:`self`, :attr:`index` and :attr:`src` should have same number of
dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``. Note that ``index`` and ``src`` do not broadcast.
Note:
{forward_reproducibility_note}
.. note::
The backward pass is implemented only for ``src.shape == index.shape``.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and add, can be
either empty or of the same dimensionality as ``src``. When empty, the
operation returns ``self`` unchanged.
src (Tensor): the source elements to scatter and add
Example::
>>> src = torch.ones((2, 5))
>>> index = torch.tensor([[0, 1, 2, 0, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[1., 0., 0., 1., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]])
>>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[2., 0., 0., 1., 1.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 1., 1.]])
""".format(**reproducibility_notes))
add_docstr_all('select',
r"""
select(dim, index) -> Tensor
Slices the :attr:`self` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
Args:
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
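Example (illustrative)::

    >>> x = torch.arange(12).reshape(3, 4)
    >>> x.select(0, 1)     # second row
    tensor([4, 5, 6, 7])
    >>> x.select(1, 2)     # third column
    tensor([ 2,  6, 10])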
""")
add_docstr_all('set_',
r"""
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.
If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.
Args:
source (Tensor or Storage): the tensor or storage to use
storage_offset (int, optional): the offset in the storage
size (torch.Size, optional): the desired size. Defaults to the size of the source.
stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
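Example (illustrative; after the call both tensors view the same storage)::

    >>> src = torch.tensor([[1., 2.], [3., 4.]])
    >>> t = torch.tensor([0.])
    >>> t.set_(src)
    tensor([[1., 2.],
            [3., 4.]])
    >>> t.data_ptr() == src.data_ptr()
    True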
""")
add_docstr_all('sigmoid',
r"""
sigmoid() -> Tensor
See :func:`torch.sigmoid`
""")
add_docstr_all('sigmoid_',
r"""
sigmoid_() -> Tensor
In-place version of :meth:`~Tensor.sigmoid`
""")
add_docstr_all('logit',
r"""
logit() -> Tensor
See :func:`torch.logit`
""")
add_docstr_all('logit_',
r"""
logit_() -> Tensor
In-place version of :meth:`~Tensor.logit`
""")
add_docstr_all('sign',
r"""
sign() -> Tensor
See :func:`torch.sign`
""")
add_docstr_all('sign_',
r"""
sign_() -> Tensor
In-place version of :meth:`~Tensor.sign`
""")
add_docstr_all('signbit',
r"""
signbit() -> Tensor
See :func:`torch.signbit`
""")
add_docstr_all('sgn',
r"""
sgn() -> Tensor
See :func:`torch.sgn`
""")
add_docstr_all('sgn_',
r"""
sgn_() -> Tensor
In-place version of :meth:`~Tensor.sgn`
""")
add_docstr_all('sin',
r"""
sin() -> Tensor
See :func:`torch.sin`
""")
add_docstr_all('sin_',
r"""
sin_() -> Tensor
In-place version of :meth:`~Tensor.sin`
""")
add_docstr_all('sinc',
r"""
sinc() -> Tensor
See :func:`torch.sinc`
""")
add_docstr_all('sinc_',
r"""
sinc_() -> Tensor
In-place version of :meth:`~Tensor.sinc`
""")
add_docstr_all('sinh',
r"""
sinh() -> Tensor
See :func:`torch.sinh`
""")
add_docstr_all('sinh_',
r"""
sinh_() -> Tensor
In-place version of :meth:`~Tensor.sinh`
""")
add_docstr_all('size',
r"""
size(dim=None) -> torch.Size or int
Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
If ``dim`` is specified, returns an int holding the size of that dimension.
Args:
dim (int, optional): The dimension for which to retrieve the size.
Example::
>>> t = torch.empty(3, 4, 5)
>>> t.size()
torch.Size([3, 4, 5])
>>> t.size(dim=1)
4
""")
add_docstr_all('solve',
r"""
solve(A) -> (Tensor, Tensor)
See :func:`torch.solve`
""")
add_docstr_all('sort',
r"""
sort(dim=-1, descending=False) -> (Tensor, LongTensor)
See :func:`torch.sort`
""")
add_docstr_all('msort',
r"""
msort() -> Tensor
See :func:`torch.msort`
""")
add_docstr_all('argsort',
r"""
argsort(dim=-1, descending=False) -> LongTensor
See :func:`torch.argsort`
""")
add_docstr_all('sparse_dim',
r"""
sparse_dim() -> int
Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
""")
add_docstr_all('sparse_resize_',
r"""
sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
size and the number of sparse and dense dimensions.
.. note::
If the number of specified elements in :attr:`self` is zero, then
:attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
size and positive integers such that ``len(size) == sparse_dim +
dense_dim``.
If :attr:`self` specifies one or more elements, however, then each
dimension in :attr:`size` must not be smaller than the corresponding
dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
equal the number of dense dimensions in :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
Args:
size (torch.Size): the desired size. If :attr:`self` is non-empty
sparse tensor, the desired size cannot be smaller than the
original size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions
""")
add_docstr_all('sparse_resize_and_clear_',
r"""
sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
Removes all specified elements from a :ref:`sparse tensor
<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
size and the number of sparse and dense dimensions.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
Args:
size (torch.Size): the desired size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions
""")
add_docstr_all('sqrt',
r"""
sqrt() -> Tensor
See :func:`torch.sqrt`
""")
add_docstr_all('sqrt_',
r"""
sqrt_() -> Tensor
In-place version of :meth:`~Tensor.sqrt`
""")
add_docstr_all('square',
r"""
square() -> Tensor
See :func:`torch.square`
""")
add_docstr_all('square_',
r"""
square_() -> Tensor
In-place version of :meth:`~Tensor.square`
""")
add_docstr_all('squeeze',
r"""
squeeze(dim=None) -> Tensor
See :func:`torch.squeeze`
""")
add_docstr_all('squeeze_',
r"""
squeeze_(dim=None) -> Tensor
In-place version of :meth:`~Tensor.squeeze`
""")
add_docstr_all('std',
r"""
std(dim, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.std`
.. function:: std(unbiased=True) -> Tensor
:noindex:
See :func:`torch.std`
""")
add_docstr_all('storage_offset',
r"""
storage_offset() -> int
Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5])
>>> x.storage_offset()
0
>>> x[3:].storage_offset()
3
""")
add_docstr_all('stride',
r"""
stride(dim) -> tuple or int
Returns the stride of :attr:`self` tensor.
Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.
Args:
dim (int, optional): the desired dimension in which stride is required
Example::
>>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> x.stride()
(5, 1)
>>> x.stride(0)
5
>>> x.stride(-1)
1
""")
add_docstr_all('sub', r"""
sub(other, *, alpha=1) -> Tensor
See :func:`torch.sub`.
""")
add_docstr_all('sub_',
r"""
sub_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.sub`
""")
add_docstr_all('subtract', r"""
subtract(other, *, alpha=1) -> Tensor
See :func:`torch.subtract`.
""")
add_docstr_all('subtract_', r"""
subtract_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.subtract`.
""")
add_docstr_all('sum',
r"""
sum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.sum`
""")
add_docstr_all('nansum',
r"""
nansum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.nansum`
""")
add_docstr_all('svd',
r"""
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
See :func:`torch.svd`
""")
add_docstr_all('symeig',
r"""
symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor)
See :func:`torch.symeig`
""")
add_docstr_all('swapdims', r"""
swapdims(dim0, dim1) -> Tensor
See :func:`torch.swapdims`
""")
add_docstr_all('swapdims_',
r"""
swapdims_(dim0, dim1) -> Tensor
In-place version of :meth:`~Tensor.swapdims`
""")
add_docstr_all('swapaxes', r"""
swapaxes(axis0, axis1) -> Tensor
See :func:`torch.swapaxes`
""")
add_docstr_all('swapaxes_', r"""
swapaxes_(axis0, axis1) -> Tensor
In-place version of :meth:`~Tensor.swapaxes`
""")
add_docstr_all('t',
r"""
t() -> Tensor
See :func:`torch.t`
""")
add_docstr_all('t_',
r"""
t_() -> Tensor
In-place version of :meth:`~Tensor.t`
""")
add_docstr_all('tile',
r"""
tile(*reps) -> Tensor
See :func:`torch.tile`
""")
add_docstr_all('to',
r"""
to(*args, **kwargs) -> Tensor
Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.
.. note::
If the ``self`` Tensor already
has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired
:class:`torch.dtype` and :class:`torch.device`.
Here are the ways to call ``to``:
.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
:noindex:
Returns a Tensor with the specified :attr:`dtype`
Args:
{memory_format}
.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
:noindex:
Returns a Tensor with the specified :attr:`device` and (optional)
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking`, tries to convert asynchronously with respect to
the host if possible, e.g., converting a CPU Tensor with pinned memory to a
CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Args:
{memory_format}
.. method:: to(other, non_blocking=False, copy=False) -> Tensor
:noindex:
Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
asynchronously with respect to the host if possible, e.g., converting a CPU
Tensor with pinned memory to a CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Example::
>>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
>>> tensor.to(torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64)
>>> cuda0 = torch.device('cuda:0')
>>> tensor.to(cuda0)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], device='cuda:0')
>>> tensor.to(cuda0, dtype=torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
>>> other = torch.randn((), dtype=torch.float64, device=cuda0)
>>> tensor.to(other, non_blocking=True)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
""".format(**common_args))
add_docstr_all('byte',
r"""
byte(memory_format=torch.preserve_format) -> Tensor
``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('bool',
r"""
bool(memory_format=torch.preserve_format) -> Tensor
``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('char',
r"""
char(memory_format=torch.preserve_format) -> Tensor
``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('bfloat16',
r"""
bfloat16(memory_format=torch.preserve_format) -> Tensor
``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('double',
r"""
double(memory_format=torch.preserve_format) -> Tensor
``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('float',
r"""
float(memory_format=torch.preserve_format) -> Tensor
``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('cdouble',
r"""
cdouble(memory_format=torch.preserve_format) -> Tensor
``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('cfloat',
r"""
cfloat(memory_format=torch.preserve_format) -> Tensor
``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('half',
r"""
half(memory_format=torch.preserve_format) -> Tensor
``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('int',
r"""
int(memory_format=torch.preserve_format) -> Tensor
``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('int_repr',
r"""
int_repr() -> Tensor
Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
underlying uint8_t values of the given Tensor.
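Example (illustrative)::
    >>> q = torch.quantize_per_tensor(torch.tensor([1.0]), 0.1, 0, torch.quint8)
    >>> q.int_repr()
    tensor([10], dtype=torch.uint8)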
""")
add_docstr_all('long',
r"""
long(memory_format=torch.preserve_format) -> Tensor
``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('short',
r"""
short(memory_format=torch.preserve_format) -> Tensor
``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
Args:
{memory_format}
""".format(**common_args))
add_docstr_all('take',
r"""
take(indices) -> Tensor
See :func:`torch.take`
""")
add_docstr_all('take_along_dim',
r"""
take_along_dim(indices, dim) -> Tensor
See :func:`torch.take_along_dim`
""")
add_docstr_all('tan',
r"""
tan() -> Tensor
See :func:`torch.tan`
""")
add_docstr_all('tan_',
r"""
tan_() -> Tensor
In-place version of :meth:`~Tensor.tan`
""")
add_docstr_all('tanh',
r"""
tanh() -> Tensor
See :func:`torch.tanh`
""")
add_docstr_all('tanh_',
r"""
tanh_() -> Tensor
In-place version of :meth:`~Tensor.tanh`
""")
add_docstr_all('tolist',
r"""
tolist() -> list or number
Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.
This operation is not differentiable.
Examples::
>>> a = torch.randn(2, 2)
>>> a.tolist()
[[0.012766935862600803, 0.5415473580360413],
[-0.08909505605697632, 0.7729271650314331]]
>>> a[0,0].tolist()
0.012766935862600803
""")
add_docstr_all('topk',
r"""
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
See :func:`torch.topk`
""")
add_docstr_all('to_dense',
r"""
to_dense() -> Tensor
Creates a strided copy of :attr:`self`.
.. warning::
Throws an error if :attr:`self` is a strided tensor.
Example::
>>> s = torch.sparse_coo_tensor(
... torch.tensor([[1, 1],
... [0, 2]]),
... torch.tensor([9, 10]),
... size=(3, 3))
>>> s.to_dense()
tensor([[ 0, 0, 0],
[ 9, 0, 10],
[ 0, 0, 0]])
""")
add_docstr_all('to_sparse',
r"""
to_sparse(sparseDims) -> Tensor
Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-coo-docs>`.
Args:
sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
Example::
>>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
>>> d
tensor([[ 0, 0, 0],
[ 9, 0, 10],
[ 0, 0, 0]])
>>> d.to_sparse()
tensor(indices=tensor([[1, 1],
[0, 2]]),
values=tensor([ 9, 10]),
size=(3, 3), nnz=2, layout=torch.sparse_coo)
>>> d.to_sparse(1)
tensor(indices=tensor([[1]]),
values=tensor([[ 9, 0, 10]]),
size=(3, 3), nnz=1, layout=torch.sparse_coo)
""")
add_docstr_all('to_mkldnn',
r"""
to_mkldnn() -> Tensor
Returns a copy of the tensor in ``torch.mkldnn`` layout.
""")
add_docstr_all('trace',
r"""
trace() -> Tensor
See :func:`torch.trace`
""")
add_docstr_all('transpose',
r"""
transpose(dim0, dim1) -> Tensor
See :func:`torch.transpose`
""")
add_docstr_all('transpose_',
r"""
transpose_(dim0, dim1) -> Tensor
In-place version of :meth:`~Tensor.transpose`
""")
add_docstr_all('triangular_solve',
r"""
triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
See :func:`torch.triangular_solve`
""")
add_docstr_all('tril',
r"""
tril(k=0) -> Tensor
See :func:`torch.tril`
""")
add_docstr_all('tril_',
r"""
tril_(k=0) -> Tensor
In-place version of :meth:`~Tensor.tril`
""")
add_docstr_all('triu',
r"""
triu(k=0) -> Tensor
See :func:`torch.triu`
""")
add_docstr_all('triu_',
r"""
triu_(k=0) -> Tensor
In-place version of :meth:`~Tensor.triu`
""")
add_docstr_all('true_divide',
r"""
true_divide(value) -> Tensor
See :func:`torch.true_divide`
""")
add_docstr_all('true_divide_',
r"""
true_divide_(value) -> Tensor
In-place version of :meth:`~Tensor.true_divide`
""")
add_docstr_all('trunc',
r"""
trunc() -> Tensor
See :func:`torch.trunc`
""")
add_docstr_all('fix',
r"""
fix() -> Tensor
See :func:`torch.fix`.
""")
add_docstr_all('trunc_',
r"""
trunc_() -> Tensor
In-place version of :meth:`~Tensor.trunc`
""")
add_docstr_all('fix_',
r"""
fix_() -> Tensor
In-place version of :meth:`~Tensor.fix`
""")
add_docstr_all('type',
r"""
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
Returns the type if `dtype` is not provided, else casts this object to
the specified type.
If this is already of the correct type, no copy is performed and the
original object is returned.
Args:
dtype (type or string): The desired type
non_blocking (bool): If ``True``, and the source is in pinned memory
and destination is on the GPU or vice versa, the copy is performed
asynchronously with respect to the host. Otherwise, the argument
has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
the ``non_blocking`` argument. The ``async`` arg is deprecated.
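Example (illustrative; assumes the default ``torch.float32`` dtype)::
    >>> x = torch.randn(2)
    >>> x.type()
    'torch.FloatTensor'
    >>> x.type(torch.float64).dtype
    torch.float64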
""")
add_docstr_all('type_as',
r"""
type_as(tensor) -> Tensor
Returns this tensor cast to the type of the given tensor.
This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``
Args:
tensor (Tensor): the tensor which has the desired type
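Example (illustrative)::
    >>> a = torch.randn(2)
    >>> b = torch.zeros(3, dtype=torch.float64)
    >>> a.type_as(b).dtype
    torch.float64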
""")
add_docstr_all('unfold',
r"""
unfold(dimension, size, step) -> Tensor
Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.
Step between two slices is given by :attr:`step`.
If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.
An additional dimension of size :attr:`size` is appended in the returned tensor.
Args:
dimension (int): dimension in which unfolding happens
size (int): the size of each slice that is unfolded
step (int): the step between each slice
Example::
>>> x = torch.arange(1., 8)
>>> x
tensor([ 1., 2., 3., 4., 5., 6., 7.])
>>> x.unfold(0, 2, 1)
tensor([[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.],
[ 5., 6.],
[ 6., 7.]])
>>> x.unfold(0, 2, 2)
tensor([[ 1., 2.],
[ 3., 4.],
[ 5., 6.]])
""")
add_docstr_all('uniform_',
r"""
uniform_(from=0, to=1) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:
.. math::
P(x) = \dfrac{1}{\text{to} - \text{from}}
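Example (values are random; the output below is only indicative)::
    >>> torch.empty(3).uniform_(0, 1)
    tensor([0.5307, 0.9160, 0.2344])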
""")
add_docstr_all('unsqueeze',
r"""
unsqueeze(dim) -> Tensor
See :func:`torch.unsqueeze`
""")
add_docstr_all('unsqueeze_',
r"""
unsqueeze_(dim) -> Tensor
In-place version of :meth:`~Tensor.unsqueeze`
""")
add_docstr_all('var',
r"""
var(dim, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.var`
.. function:: var(unbiased=True) -> Tensor
:noindex:
See :func:`torch.var`
""")
add_docstr_all('vdot',
r"""
vdot(other) -> Tensor
See :func:`torch.vdot`
""")
add_docstr_all('view',
r"""
view(*shape) -> Tensor
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.
The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
.. math::
\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.
Args:
shape (torch.Size or int...): the desired size
Example::
>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])
>>> a = torch.randn(1, 2, 3, 4)
>>> a.size()
torch.Size([1, 2, 3, 4])
>>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
>>> b.size()
torch.Size([1, 3, 2, 4])
>>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
>>> c.size()
torch.Size([1, 3, 2, 4])
>>> torch.equal(b, c)
False
.. method:: view(dtype) -> Tensor
:noindex:
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`. :attr:`dtype` must have the same number of bytes per
element as :attr:`self`'s dtype.
.. warning::
This overload is not supported by TorchScript, and using it in a Torchscript
program will cause undefined behavior.
Args:
dtype (:class:`torch.dtype`): the desired dtype
Example::
>>> x = torch.randn(4, 4)
>>> x
tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
[-0.1520, 0.7472, 0.5617, -0.8649],
[-2.4724, -0.0334, -0.2976, -0.8499],
[-0.2109, 1.9913, -0.9607, -0.6123]])
>>> x.dtype
torch.float32
>>> y = x.view(torch.int32)
>>> y
tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
[-1105482831, 1061112040, 1057999968, -1084397505],
[-1071760287, -1123489973, -1097310419, -1084649136],
[-1101533110, 1073668768, -1082790149, -1088634448]],
dtype=torch.int32)
>>> y[0, 0] = 1000000000
>>> x
tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
[-0.1520, 0.7472, 0.5617, -0.8649],
[-2.4724, -0.0334, -0.2976, -0.8499],
[-0.2109, 1.9913, -0.9607, -0.6123]])
>>> x.view(torch.int16)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
RuntimeError: Viewing a tensor as a new dtype with a different number of bytes per element is not supported.
""")
add_docstr_all('view_as',
r"""
view_as(other) -> Tensor
View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
Please see :meth:`~Tensor.view` for more information about ``view``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""")
add_docstr_all('expand',
r"""
expand(*sizes) -> Tensor
Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.
Passing -1 as the size for a dimension means not changing the size of
that dimension.
Tensor can be also expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.
Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.
Args:
*sizes (torch.Size or int...): the desired expanded size
.. warning::
More than one element of an expanded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensors, please clone them first.
Example::
>>> x = torch.tensor([[1], [2], [3]])
>>> x.size()
torch.Size([3, 1])
>>> x.expand(3, 4)
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
>>> x.expand(-1, 4) # -1 means not changing the size of that dimension
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
""")
add_docstr_all('expand_as',
r"""
expand_as(other) -> Tensor
Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
Please see :meth:`~Tensor.expand` for more information about ``expand``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""")
add_docstr_all('sum_to_size',
r"""
sum_to_size(*size) -> Tensor
Sum ``this`` tensor to :attr:`size`.
:attr:`size` must be broadcastable to ``this`` tensor size.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
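Example (illustrative)::
    >>> t = torch.ones(2, 3)
    >>> t.sum_to_size(1, 3)
    tensor([[2., 2., 2.]])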
""")
add_docstr_all('zero_',
r"""
zero_() -> Tensor
Fills :attr:`self` tensor with zeros.
""")
add_docstr_all('matmul',
r"""
matmul(tensor2) -> Tensor
See :func:`torch.matmul`
""")
add_docstr_all('chunk',
r"""
chunk(chunks, dim=0) -> List of Tensors
See :func:`torch.chunk`
""")
add_docstr_all('unsafe_chunk',
r"""
unsafe_chunk(chunks, dim=0) -> List of Tensors
See :func:`torch.unsafe_chunk`
""")
add_docstr_all('unsafe_split',
r"""
unsafe_split(split_size, dim=0) -> List of Tensors
See :func:`torch.unsafe_split`
""")
add_docstr_all('tensor_split',
r"""
tensor_split(indices_or_sections, dim=0) -> List of Tensors
See :func:`torch.tensor_split`
""")
add_docstr_all('hsplit',
r"""
hsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.hsplit`
""")
add_docstr_all('vsplit',
r"""
vsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.vsplit`
""")
add_docstr_all('dsplit',
r"""
dsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.dsplit`
""")
add_docstr_all('stft',
r"""
stft(frame_length, hop, fft_size=None, return_onesided=True, window=None, pad_end=0) -> Tensor
See :func:`torch.stft`
""")
add_docstr_all('istft',
r"""
istft(n_fft, hop_length=None, win_length=None, window=None,
center=True, normalized=False, onesided=True, length=None) -> Tensor
See :func:`torch.istft`
""")
add_docstr_all('det',
r"""
det() -> Tensor
See :func:`torch.det`
""")
add_docstr_all('where',
r"""
where(condition, y) -> Tensor
``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
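Example (illustrative)::
    >>> t = torch.tensor([-1., 2.])
    >>> t.where(t > 0, torch.tensor(0.))
    tensor([0., 2.])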
""")
add_docstr_all('logdet',
r"""
logdet() -> Tensor
See :func:`torch.logdet`
""")
add_docstr_all('slogdet',
r"""
slogdet() -> (Tensor, Tensor)
See :func:`torch.slogdet`
""")
add_docstr_all('unbind',
r"""
unbind(dim=0) -> seq
See :func:`torch.unbind`
""")
add_docstr_all('pin_memory',
r"""
pin_memory() -> Tensor
Copies the tensor to pinned memory, if it's not already pinned.
""")
add_docstr_all('pinverse',
r"""
pinverse() -> Tensor
See :func:`torch.pinverse`
""")
add_docstr_all('index_add',
r"""
index_add(dim, index, tensor2) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_add_`.
""")
add_docstr_all('index_copy',
r"""
index_copy(dim, index, tensor2) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_copy_`.
""")
add_docstr_all('index_fill',
r"""
index_fill(dim, index, value) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_fill_`.
""")
add_docstr_all('scatter',
r"""
scatter(dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
""")
add_docstr_all('scatter_add',
r"""
scatter_add(dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""")
add_docstr_all('masked_scatter',
r"""
masked_scatter(mask, tensor) -> Tensor
Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
""")
add_docstr_all('xlogy',
r"""
xlogy(other) -> Tensor
See :func:`torch.xlogy`
""")
add_docstr_all('xlogy_',
r"""
xlogy_(other) -> Tensor
In-place version of :meth:`~Tensor.xlogy`
""")
add_docstr_all('masked_fill',
r"""
masked_fill(mask, value) -> Tensor
Out-of-place version of :meth:`torch.Tensor.masked_fill_`
""")
add_docstr_all('retain_grad',
r"""
retain_grad() -> None
Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.
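Example (illustrative)::
    >>> x = torch.tensor([1.], requires_grad=True)
    >>> y = x * 2   # non-leaf tensor
    >>> y.retain_grad()
    >>> y.sum().backward()
    >>> y.grad
    tensor([1.])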
""")
add_docstr_all('retains_grad',
r"""
Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.
""")
add_docstr_all('requires_grad',
r"""
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
.. note::
    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
attribute will be populated, see :attr:`is_leaf` for more details.
""")
add_docstr_all('is_leaf',
r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.
Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
Example::
>>> a = torch.rand(10, requires_grad=True)
>>> a.is_leaf
True
>>> b = torch.rand(10, requires_grad=True).cuda()
>>> b.is_leaf
False
# b was created by the operation that cast a cpu Tensor into a cuda Tensor
>>> c = torch.rand(10, requires_grad=True) + 2
>>> c.is_leaf
False
# c was created by the addition operation
>>> d = torch.rand(10).cuda()
>>> d.is_leaf
True
# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
>>> e = torch.rand(10).cuda().requires_grad_()
>>> e.is_leaf
True
# e requires gradients and has no operations creating it
>>> f = torch.rand(10, requires_grad=True, device="cuda")
>>> f.is_leaf
True
# f requires grad, has no operation creating it
""")
add_docstr_all('names',
r"""
Stores names for each of this tensor's dimensions.
``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.
Dimension names may contain letters or underscores. Furthermore, a dimension
name must be a valid Python variable name (i.e., does not start with underscore).
Tensors may not have two named dimensions with the same name.
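Example (named tensor API, illustrative)::
    >>> t = torch.zeros(2, 3, names=('N', 'C'))
    >>> t.names
    ('N', 'C')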
.. warning::
The named tensor API is experimental and subject to change.
""")
add_docstr_all('is_cuda',
r"""
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
""")
add_docstr_all('is_xpu',
r"""
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
""")
add_docstr_all('is_quantized',
r"""
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
""")
add_docstr_all('is_meta',
r"""
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
are like normal tensors, but they carry no data.
""")
add_docstr_all('is_sparse',
r"""
Is ``True`` if the Tensor uses sparse storage layout, ``False`` otherwise.
""")
add_docstr_all('is_sparse_csr',
r"""
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
""")
add_docstr_all('device',
r"""
Is the :class:`torch.device` where this Tensor is.
""")
add_docstr_all('ndim',
r"""
Alias for :meth:`~Tensor.dim()`
""")
add_docstr_all('T',
r"""
Is this Tensor with its dimensions reversed.
If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
""")
add_docstr_all('real',
r"""
Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`real` is only supported for tensors with complex dtypes.
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""")
add_docstr_all('imag',
r"""
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""")
add_docstr_all('as_subclass',
r"""
as_subclass(cls) -> Tensor
Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
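Example (illustrative; ``MyTensor`` is a hypothetical subclass)::
    >>> class MyTensor(torch.Tensor):
    ...     pass
    >>> x = torch.randn(2)
    >>> type(x.as_subclass(MyTensor)).__name__
    'MyTensor'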
""")
add_docstr_all('crow_indices',
r"""
crow_indices() -> IntTensor
Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.
Example::
>>> csr = torch.eye(5,5).to_sparse_csr()
>>> csr.crow_indices()
tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
""")
add_docstr_all('col_indices',
r"""
col_indices() -> IntTensor
Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.
Example::
>>> csr = torch.eye(5,5).to_sparse_csr()
>>> csr.col_indices()
tensor([0, 1, 2, 3, 4], dtype=torch.int32)
""")
| 23.029693 | 118 | 0.620689 |
aced30461851a50af350bdd0a65536fc0e785818 | 586 | py | Python | src/setup.py | hpcn-uam/ccore | d3b1cd709d9c72eb0d89d8b3fabf54c8e47603d1 | [
"Apache-2.0"
] | null | null | null | src/setup.py | hpcn-uam/ccore | d3b1cd709d9c72eb0d89d8b3fabf54c8e47603d1 | [
"Apache-2.0"
] | 1 | 2017-10-02T15:27:02.000Z | 2017-10-02T15:27:34.000Z | src/setup.py | hpcn-uam/ccore | d3b1cd709d9c72eb0d89d8b3fabf54c8e47603d1 | [
"Apache-2.0"
] | null | null | null | from distutils.core import setup, Extension
from numpy.distutils.misc_util import get_info
module1 = Extension('mockparser', sources=['mockparser.c'])
module2 = Extension(
'cparser',
sources=['cparser.c', 'cparser-python.c', 'pandas_interact.c', 'type_interact.c', 'cparser-iterator.c'],
#extra_compile_args = [ '-Wall', '-std=gnu99', '-march=native', '-Ofast'],
extra_compile_args=['-Wall', '-std=gnu99', '-O3'],
undef_macros=['NDEBUG'],
**get_info("npymath"))
setup(name='hpat', version='1.0', description='C Core for the FERMIN application', ext_modules=[module1, module2])
| 39.066667 | 114 | 0.711604 |
aced3075caca7e4703ccc0056462c882c19cf9e3 | 1,450 | py | Python | changedetectionio/tests/conftest.py | Unpublished/changedetection.io | 4834fb44fb5f2d93bbba51fc3f31a0468cd34625 | [
"Apache-2.0"
] | 1 | 2022-03-21T07:30:25.000Z | 2022-03-21T07:30:25.000Z | changedetectionio/tests/conftest.py | Excloudx6/changedetection.io | 82d1a7f73e038a551807a00d503f5674ad135804 | [
"Apache-2.0"
] | null | null | null | changedetectionio/tests/conftest.py | Excloudx6/changedetection.io | 82d1a7f73e038a551807a00d503f5674ad135804 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import pytest
from changedetectionio import changedetection_app
from changedetectionio import store
import os
# https://github.com/pallets/flask/blob/1.1.2/examples/tutorial/tests/test_auth.py
# Much better boilerplate than the docs
# https://www.python-boilerplate.com/py3+flask+pytest/
global app
def cleanup(datastore_path):
# Unlink test output files
files = ['output.txt',
'url-watches.json',
'notification.txt',
'count.txt',
'endpoint-content.txt'
]
for file in files:
try:
os.unlink("{}/{}".format(datastore_path, file))
except FileNotFoundError:
pass
@pytest.fixture(scope='session')
def app(request):
"""Create application for the tests."""
datastore_path = "./test-datastore"
try:
os.mkdir(datastore_path)
except FileExistsError:
pass
cleanup(datastore_path)
app_config = {'datastore_path': datastore_path}
cleanup(app_config['datastore_path'])
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False)
app = changedetection_app(app_config, datastore)
app.config['STOP_THREADS'] = True
def teardown():
datastore.stop_thread = True
app.config.exit.set()
cleanup(app_config['datastore_path'])
request.addfinalizer(teardown)
yield app
| 26.363636 | 118 | 0.664828 |
aced313b59a96931a81a10ffed27ee709dd4b366 | 3,929 | py | Python | numbers.py | Lana-Pa/Some-projects | e12d7945f76a74ed7c134b595aa89a96364d1845 | [
"Apache-2.0"
] | null | null | null | numbers.py | Lana-Pa/Some-projects | e12d7945f76a74ed7c134b595aa89a96364d1845 | [
"Apache-2.0"
] | null | null | null | numbers.py | Lana-Pa/Some-projects | e12d7945f76a74ed7c134b595aa89a96364d1845 | [
"Apache-2.0"
] | null | null | null | # Write a function that converts letter numbers into difital numbers
def word_to_number(string_num):
numbers = {
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"eleven": 11,
"twelve": 12,
"thirteen": 13,
"fourteen": 14,
"fifteen": 15,
"sixteen": 16,
"seventeen": 17,
"eighteen": 18,
"nineteen": 19,
"twenty": 20,
"thirty": 30,
"fourty": 40,
"fifty": 50,
"sixty": 60,
"seventy": 70,
"eighty" : 80,
"ninety": 90
}
# Dividing string
millions = None
thousands = None
hundreds = None
tens = None
nothing = 'nothing'
if 'million' in string_num:
millions_pos = string_num.find('million')
millions_end = string_num.find(' ',millions_pos)
millions = string_num[:millions_end]
string_num_new = string_num[millions_end+1:]
if millions_end < 0:
string_num_new = nothing #if there is only millions, ignore the rest of the code
else:
millions_pos = 0
string_num_new = string_num
if 'thousand' in string_num_new:
thousands_pos = string_num_new.find('thousand')
thousands_end = string_num_new.find(' ',thousands_pos)
thousands = string_num_new[:thousands_end]
string_num_new = string_num_new[thousands_end+1:]
if thousands_end < 0:
string_num_new = nothing
else:
thousands_pos = 0
if 'hundred' in string_num_new:
hundreds_pos = string_num_new.find('hundred')
hundreds_end = string_num_new.find(' ',hundreds_pos)
if hundreds_end < 0:
hundreds = string_num_new
tens = None
else:
hundreds = string_num_new[:hundreds_end]
tens = string_num_new[hundreds_end+1:]
else:
tens = string_num_new
res = 0
mil = 0
thous = 0
hund = 0
ten = 0
# Converting words in each section to numbers
if millions is not None:
millions_list = millions.split()
# case when we have 'hundred' in millions part
if millions_list[1] == 'hundred':
mil = mil + numbers[millions_list[0]]*100
millions_list = millions_list[2:]
for word in millions_list:
if word in numbers.keys():
mil = mil + numbers[word]
mil = mil * 1000000
        print('millions =', mil)
else:
millions = 0
# case when we have 'hundred' in thousands part
if thousands is not None:
thousands_list = thousands.split()
if thousands_list[1] == 'hundred':
thous = thous + numbers[thousands_list[0]]*100
thousands_list = thousands_list[2:]
for word in thousands_list:
if word in numbers.keys():
thous = thous + numbers[word]
thous = thous * 1000
        print('thousands =', thous)
else:
thousands = 0
if hundreds is not None:
hundreds_list = hundreds.split()
for word in hundreds_list:
if word in numbers.keys():
hund = hund + numbers[word]
hund = hund * 100
        print('hundreds =', hund)
else:
hundreds = 0
if tens is not None:
tens_list = tens.split()
for word in tens_list:
if word in numbers.keys():
ten = ten + numbers[word]
        print('tens =', ten)
else:
tens = 0
res = mil + thous + hund + ten
    if string_num == 'zero':
        print('The number <' + string_num + '> is', 0)
    else:
        print('The number <' + string_num + '> is', res)
# end of function -------------------------------------------------------------
word_to_number("one hundred thirty five million two hundred eighty nine thousand three hundred sixty five") | 31.432 | 107 | 0.558157 |
aced317b15ee2e0ff5dcc86cf7b9a897e429f6a1 | 5,258 | py | Python | main.py | itsidorkin/telegram-bot | 7df8bebfe4f461ee3c554387eece3965388dbfcd | [
"MIT"
] | null | null | null | main.py | itsidorkin/telegram-bot | 7df8bebfe4f461ee3c554387eece3965388dbfcd | [
"MIT"
] | null | null | null | main.py | itsidorkin/telegram-bot | 7df8bebfe4f461ee3c554387eece3965388dbfcd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from json import load, dump
from logging import basicConfig, INFO, getLogger
from requests import get
from telegram import ReplyKeyboardMarkup, Update, ReplyKeyboardRemove
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler, CallbackContext
basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=INFO)
logger = getLogger(__name__)
CHECK_ADD, ADD, DELETE = range(3)
personal_data = load(open("personal_data.json"))
def get_user(user_id, api_key):
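    # Query the legacy osu! API v1 get_user endpoint. It returns a JSON list of
    # user dicts (an empty list when no such user exists), which callers use as
    # a simple truthiness check.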
url = "https://osu.ppy.sh/api/get_user?k={}&u={}"
ready_url = url.format(api_key, user_id)
return get(ready_url).json()
def write_data_json(name_json, data_json):
with open(name_json, "w") as j:
dump(obj=data_json, fp=j, indent=2)
def start(update: Update, context: CallbackContext):
first_name = update.message.from_user.first_name
chat_id = update.effective_chat.id
logger.info("Чел стартанул: %s / chat: %s", first_name, chat_id)
update.message.reply_text(
'Привет! Я бот на стадии теста. '
'Моя задача уведомлять тебя, когда кто-нибудь будет играть твои карты в мультиплеере. '
'Чтобы прекратить со мной разговор отправь /cancel.',
reply_markup=ReplyKeyboardRemove()
)
if str(update.effective_chat.id) in load(open("db.json")):
logger.info("Чел уже есть в базе: %s / chat: %s", first_name, chat_id)
reply_keyboard = [['Прекратить']]
update.message.reply_text(
'Ты уже есть в базе. Хочешь прекратить отслеживание? ',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
)
return DELETE
logger.info("Чела нет в базе: %s / chat: %s", first_name, chat_id)
update.message.reply_text('Начинаем? Введи имя своего профиля osu или ID', reply_markup=ReplyKeyboardRemove())
return CHECK_ADD
def check_add(update: Update, context: CallbackContext):
first_name = update.message.from_user.first_name
chat_id = update.effective_chat.id
osu_profil = update.message.text
if get_user(osu_profil.encode('ascii', errors='ignore').decode(), personal_data["api_key"]):
context.user_data['osu_profile'] = osu_profil
        reply_keyboard = [['Yes, that is correct'], ['No, I made a mistake']]
        s = "https://osu.ppy.sh/users/" + osu_profil
        update.message.reply_text(
            s + '\nIs this your profile?',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
)
return ADD
logger.info("Чела ошибся: %s / chat: %s / osu profile: %s", first_name, chat_id, osu_profil)
update.message.reply_text(
'Такого профиля несуществует. '
'Нажмите /start, чтобы начать с начала',
reply_markup=ReplyKeyboardRemove()
)
return ConversationHandler.END
def add(update: Update, context: CallbackContext):
first_name = update.message.from_user.first_name
chat_id = str(update.effective_chat.id)
osu_profile = context.user_data['osu_profile']
    if update.message.text == 'No, I made a mistake':
        logger.info("User made a mistake: %s / chat: %s / osu profile: %s", first_name, chat_id, osu_profile)
        update.message.reply_text('Press /start to start over', reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
logger.info("Добавился чел: %s / chat: %s / osu profile: %s", first_name, chat_id, osu_profile)
db = load(open("db.json"))
db[chat_id] = osu_profile
write_data_json("db.json", db)
    update.message.reply_text('You have been added. Welcome!', reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def delete(update: Update, context: CallbackContext):
first_name = update.message.from_user.first_name
chat_id = str(update.effective_chat.id)
db = load(open("db.json"))
logger.info("Удалился чел: %s / chat: %s / osu profile: %s", first_name, chat_id, db[chat_id])
del db[chat_id]
write_data_json("db.json", db)
    update.message.reply_text('Tracking has been stopped. Sorry to see you go', reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def cancel(update: Update, context: CallbackContext):
first_name = update.message.from_user.first_name
chat_id = str(update.effective_chat.id)
logger.info("Челу %s что-то непонравилось / chat: %s", first_name, chat_id)
update.message.reply_text('До встречи', reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def main():
updater = Updater(personal_data["token_telegram_bot"])
dispatcher = updater.dispatcher
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
CHECK_ADD: [MessageHandler(Filters.text & ~Filters.command, check_add)],
ADD: [MessageHandler(Filters.text & ~Filters.command, add)],
DELETE: [MessageHandler(Filters.text & ~Filters.command, delete)],
},
fallbacks=[CommandHandler('cancel', cancel)],
)
dispatcher.add_handler(conv_handler)
updater.start_polling()
# every(2).seconds.do(osu)
# while True:
# run_pending()
# sleep(1)
if __name__ == '__main__':
main()
| 40.446154 | 119 | 0.6938 |
aced31b0a6c6d9014dff0c931836b187038492df | 3,052 | py | Python | Round712/C_Balance_the_Bits.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | Round712/C_Balance_the_Bits.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | Round712/C_Balance_the_Bits.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from io import BytesIO, IOBase
import math
from collections import Counter
def func(n, array):
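    # Codeforces "Balance the Bits": build two balanced bracket sequences a and b
    # that agree exactly at positions where array[i] == 1.
    # Construction: the first half of the '1' positions get '(' in both strings
    # and the rest get ')', while '0' positions alternate between ('(', ')') and
    # (')', '('). This fails only when the number of zeros is odd or when the
    # string starts or ends with a zero.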
counter = Counter(array)
if 0 not in array:
print("YES")
print("(" * (n // 2) + ")" * (n // 2))
print("(" * (n // 2) + ")" * (n // 2))
return
if counter[0] % 2 != 0:
print("NO")
return
if array[0] == 0 or array[-1] == 0:
print("NO")
return
half_zero = counter[0] // 2
a = [None] * n
b = [None] * n
left = n // 2 - half_zero
curr_a = True
for i, val in enumerate(array):
if val == 0:
if curr_a:
a[i] = "("
b[i] = ")"
else:
a[i] = ")"
b[i] = "("
curr_a = not curr_a
else:
if left > 0:
a[i] = "("
b[i] = "("
left -= 1
else:
a[i] = ")"
b[i] = ")"
print("YES")
print("".join(a))
print("".join(b))
def main():
num_test = int(parse_input())
for _ in range(num_test):
n = int(parse_input())
array = [int(i) for i in parse_input()]
func(n, array)
# region fastio
# BUFSIZE = 8192
# class FastIO(IOBase):
# newlines = 0
# def __init__(self, file):
# self._fd = file.fileno()
# self.buffer = BytesIO()
# self.writable = "x" in file.mode or "r" not in file.mode
# self.write = self.buffer.write if self.writable else None
# def read(self):
# while True:
# b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# if not b:
# break
# ptr = self.buffer.tell()
# self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# self.newlines = 0
# return self.buffer.read()
# def readline(self):
# while self.newlines == 0:
# b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
# self.newlines = b.count(b"\n") + (not b)
# ptr = self.buffer.tell()
# self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
# self.newlines -= 1
# return self.buffer.readline()
# def flush(self):
# if self.writable:
# os.write(self._fd, self.buffer.getvalue())
# self.buffer.truncate(0), self.buffer.seek(0)
# class IOWrapper(IOBase):
# def __init__(self, file):
# self.buffer = FastIO(file)
# self.flush = self.buffer.flush
# self.writable = self.buffer.writable
# self.write = lambda s: self.buffer.write(s.encode("ascii"))
# self.read = lambda: self.buffer.read().decode("ascii")
# self.readline = lambda: self.buffer.readline().decode("ascii")
# sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
parse_input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
if __name__ == "__main__":
main()
| 27.00885 | 81 | 0.500328 |
aced31c64266b0a9bf0b4fcc0c3b83c7376d28bc | 200 | py | Python | Python-Scripts/get_hostname.py | johncorbin36/Admin-Scripts | 3fc41686c2db22f2f2d5bd6f5699b1aeec623506 | [
"MIT"
] | 1 | 2020-09-21T14:21:10.000Z | 2020-09-21T14:21:10.000Z | Python-Scripts/get_hostname.py | johncorbin36/Admin-Scripts | 3fc41686c2db22f2f2d5bd6f5699b1aeec623506 | [
"MIT"
] | null | null | null | Python-Scripts/get_hostname.py | johncorbin36/Admin-Scripts | 3fc41686c2db22f2f2d5bd6f5699b1aeec623506 | [
"MIT"
] | null | null | null | import platform, socket
# Checks if host name is different
if platform.node() != socket.gethostname():
print(platform.node() + " " + socket.gethostname())
else:
print(socket.gethostname())
| 25 | 55 | 0.69 |
aced32a95e41d8e8e29d883f6c8da2bc9d639876 | 1,150 | py | Python | src/core/schemas/entries/performance.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 6 | 2019-06-19T12:56:42.000Z | 2021-12-26T07:22:47.000Z | src/core/schemas/entries/performance.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 13 | 2019-12-20T10:39:44.000Z | 2022-02-10T09:11:09.000Z | src/core/schemas/entries/performance.py | nefarius/portfolio-backend | f595041354eedee71a4aa5b761501be030b81d09 | [
"Apache-2.0"
] | 1 | 2021-12-01T12:03:29.000Z | 2021-12-01T12:03:29.000Z | from ...schemas import ICON_EVENT
from ...skosmos import get_collection_members
from ..base import BaseSchema
from ..general import (
get_contributors_field,
get_contributors_field_for_role,
get_date_range_time_range_location_group_field,
get_format_field,
get_material_field,
get_url_field,
)
from ..utils import years_from_date_range_time_range_location_group_field
ICON = ICON_EVENT
TYPES = get_collection_members('http://base.uni-ak.ac.at/portfolio/taxonomy/collection_performance', use_cache=False)
class PerformanceSchema(BaseSchema):
artists = get_contributors_field_for_role('artist', {'order': 1})
contributors = get_contributors_field({'order': 2})
date_range_time_range_location = get_date_range_time_range_location_group_field({'order': 3})
material = get_material_field({'order': 4})
format = get_format_field({'order': 5})
url = get_url_field({'order': 6, 'field_format': 'half'})
def year_display(self, data):
if data.get('date_range_time_range_location'):
return years_from_date_range_time_range_location_group_field(data['date_range_time_range_location'])
| 38.333333 | 117 | 0.773043 |
aced344d694457c6337fcd918c235c5221be3698 | 1,385 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlBeschermingMaaischade.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlBeschermingMaaischade.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Datatypes/KlBeschermingMaaischade.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlBeschermingMaaischade(KeuzelijstField):
"""De middelen als bescherming tegen maaischade."""
naam = 'KlBeschermingMaaischade'
label = 'Bescherming maaischade'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlBeschermingMaaischade'
definition = 'De middelen als bescherming tegen maaischade.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlBeschermingMaaischade'
options = {
'houten-paal': KeuzelijstWaarde(invulwaarde='houten-paal',
label='houten paal',
definitie='Bescherming dmv een houten paal.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlBeschermingMaaischade/houten-paal'),
'kunststof': KeuzelijstWaarde(invulwaarde='kunststof',
label='kunststof',
definitie='Bescherming in kunststof.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlBeschermingMaaischade/kunststof')
}
| 55.4 | 142 | 0.652708 |
aced3474c5312cf8ee378fa1573dea0c92f72e75 | 1,439 | py | Python | Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/__init__.py | anekeallen/Sublime-Text-3 | 8502b9089ca4223f8ba7ff168626a0dbe67713cb | [
"MIT"
] | 182 | 2017-03-05T07:43:13.000Z | 2022-03-15T13:09:07.000Z | Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/__init__.py | anekeallen/Sublime-Text-3 | 8502b9089ca4223f8ba7ff168626a0dbe67713cb | [
"MIT"
] | 3 | 2021-05-10T18:59:14.000Z | 2021-09-02T01:50:15.000Z | Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/__init__.py | anekeallen/Sublime-Text-3 | 8502b9089ca4223f8ba7ff168626a0dbe67713cb | [
"MIT"
] | 16 | 2017-03-07T11:01:27.000Z | 2022-01-08T09:21:01.000Z | """Backrefs package."""
# (major, minor, micro, release type, pre-release build, post-release build)
version_info = (3, 5, 0, 'final', 0, 0)
def _version():
"""
Get the version (PEP 440).
Version structure
(major, minor, micro, release type, pre-release build, post-release build)
    Release names are chosen in such a way that they are sortable and easily comparable.
(alpha | beta | candidate | final)
- "final" should never have a pre-release build number
- pre-releases should have a pre-release build number greater than 0
- post-release is only applied if post-release build is greater than 0
"""
releases = {"alpha": 'a', "beta": 'b', "candidate": 'rc', "final": ''}
# Version info should be proper length
assert len(version_info) == 6
# Should be a valid release
assert version_info[3] in releases
# Pre-release releases should have a pre-release value
assert version_info[3] == 'final' or version_info[4] > 0
# Final should not have a pre-release value
assert version_info[3] != 'final' or version_info[4] == 0
    main = '.'.join(str(x) for x in (version_info[0:2] if version_info[2] == 0 else version_info[0:3]))
prerel = releases[version_info[3]]
prerel += str(version_info[4]) if prerel else ''
postrel = '.post%d' % version_info[5] if version_info[5] > 0 else ''
return ''.join((main, prerel, postrel))
version = _version()
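# For example, version_info == (3, 5, 0, 'final', 0, 0) yields version == "3.5".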
| 35.975 | 102 | 0.659486 |
aced35e0946fb748d910a12a4ebbfddc5f736ef9 | 3,922 | py | Python | src/.ycm_extra_conf.py | zhuzhenpeng/sparrow | 3c75c7b09b27a2e5acb1b82590005c8785cd36fa | [
"MIT"
] | 17 | 2017-05-15T22:45:11.000Z | 2021-08-15T00:12:29.000Z | src/.ycm_extra_conf.py | zhuzhenpeng/sparrow | 3c75c7b09b27a2e5acb1b82590005c8785cd36fa | [
"MIT"
] | null | null | null | src/.ycm_extra_conf.py | zhuzhenpeng/sparrow | 3c75c7b09b27a2e5acb1b82590005c8785cd36fa | [
"MIT"
] | 8 | 2017-05-15T22:45:13.000Z | 2021-08-15T00:11:46.000Z | import os
import ycm_core
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-I',
'./test/gtest-1.7.0/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 31.126984 | 79 | 0.701173 |