Dataset schema (one row per source file):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

Each data row below lists these fields in this order, with the file `content` printed in full between the repository metadata and the three trailing statistics columns.
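Working with a dump like this is easier programmatically than visually. A minimal sketch, assuming the table has been saved as a Parquet file (the `code_files.parquet` name is hypothetical) and that pandas is available, of filtering it down to starred Python files and the per-file statistics that close each row:

```python
import pandas as pd

# Hypothetical local copy of the dump; any columnar format with the schema above works.
df = pd.read_parquet("code_files.parquet")

# Keep Python files that have at least one star and a non-empty license list.
py_files = df[
    (df["ext"] == "py")
    & (df["max_stars_count"].fillna(0) >= 1)
    & (df["max_stars_repo_licenses"].str.len() > 0)
]

# The three statistics printed at the end of every row below.
print(py_files[["max_stars_repo_path", "avg_line_length",
                "max_line_length", "alphanum_fraction"]].head())
```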
6aa7857c4b70601dfd0bce5e705e8a1137663a78 | 1,052 | py | Python | examples/video/ex_play_rx.py | indigits/indigits-vision | 317fbf70c558e8f9563c3d0ba3bebbc5f84af622 | ["Apache-2.0"] | 2 | 2021-11-02T10:09:47.000Z | 2021-12-10T04:23:14.000Z | examples/video/ex_play_rx.py | indigits/indigits-vision | 317fbf70c558e8f9563c3d0ba3bebbc5f84af622 | ["Apache-2.0"] | null | null | null | examples/video/ex_play_rx.py | indigits/indigits-vision | 317fbf70c558e8f9563c3d0ba3bebbc5f84af622 | ["Apache-2.0"] | null | null | null |
import os
import cv2
from cr import vision as vision
from dirsetup import VIDEO_DIR
import skvideo.io
import rx
from rx import operators as ops
import asyncio

loop = asyncio.get_event_loop()
done = loop.create_future()

filepath = os.path.join(VIDEO_DIR, 'nascar_01.mp4')
inputparameters = {}
outputparameters = {}
videogen = skvideo.io.vreader(filepath,
                              inputdict=inputparameters,
                              outputdict=outputparameters)

source = rx.from_iterable(videogen).pipe(
    ops.map(vision.rgb_to_bgr))

dm = vision.DisplayManager('Nascar')


class Player:

    def __init__(self):
        self.count = 0

    def on_next(self, frame):
        self.count += 1
        print("frame: {} {}".format(self.count, frame.shape))
        dm.show(frame)
        vision.wait_for_esc_key(40)

    def on_error(self, error):
        pass

    def on_completed(self):
        print("completed")
        done.set_result(0)


subscription = source.subscribe(Player())
loop.run_until_complete(done)
loop.close()
subscription.dispose()
| 19.849057 | 61 | 0.675856 |
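The script above feeds video frames through RxPY's observer protocol: `rx.from_iterable` wraps the frame generator, `ops.map` converts each frame, and the subscribed object receives `on_next` for every frame and `on_completed` at the end. A minimal sketch of the same pattern on plain integers, assuming RxPY 3 and mirroring the `subscribe` call above, with no video dependencies:

```python
import rx
from rx import operators as ops


class Printer:
    """Bare-bones observer: RxPY calls these three methods."""

    def on_next(self, value):
        print("got", value)

    def on_error(self, error):
        print("failed:", error)

    def on_completed(self):
        print("done")


# from_iterable emits each item, map transforms it, subscribe starts the flow.
source = rx.from_iterable(range(3)).pipe(ops.map(lambda x: x * 10))
subscription = source.subscribe(Printer())
subscription.dispose()
```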
ad45d3f5c979b21f7ff4dbb9f306858c9afd42b1 | 13,796 | py | Python | src/tidynotes/notebook.py | JosephMcGrath/markdown_notebook | 94f3ae584c5dfb57df56ea0a357147df47a24976 | ["MIT"] | 1 | 2021-02-12T00:56:27.000Z | 2021-02-12T00:56:27.000Z | src/tidynotes/notebook.py | JosephMcGrath/tidynotes | 94f3ae584c5dfb57df56ea0a357147df47a24976 | ["MIT"] | null | null | null | src/tidynotes/notebook.py | JosephMcGrath/tidynotes | 94f3ae584c5dfb57df56ea0a357147df47a24976 | ["MIT"] | null | null | null |
"""
Utility for managing an entire notebook.
There should be no direct modification of markdown here; that is handled in MarkdownPart.
"""
import datetime
import glob
import hashlib
import json
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import jinja2
import pkg_resources
from .logs import LOG_NAME
from .mardown_document import MarkdownPart
class Notebook:
"""
A notebook of markdown documents, creates, cleans and renders the notebook to HTML.
"""
template_dir = "templates"
note_dir = "notes"
working_dir = "working"
config_name = "config.json"
output_dir = "rendered"
def __init__(self, notebook_dir: str) -> None:
logger = self._make_logger()
self.root_dir = os.path.abspath(notebook_dir)
self.config = self._read_config()
if "notebook_name" not in self.config:
self.set_config("notebook_name", "TidyNotes notebook.")
_ = jinja2.FileSystemLoader(os.path.join(self.root_dir, self.template_dir))
self.env = jinja2.Environment(loader=_)
self.notes = self.read_notes()
logger.info("Set up notebook in %s.", self.root_dir)
@classmethod
def initialise(cls, notebook_dir: str) -> "Notebook":
"""
Create a notebook in the provided directory.
"""
logger = logging.getLogger(LOG_NAME)
logger.info("Creating notebook in [%s].", notebook_dir)
template_list = {
cls.config_name: "",
"corrections.json": cls.working_dir,
"note.css": cls.template_dir,
"note.md": cls.template_dir,
"page.html": cls.template_dir,
"render_changes.json": cls.working_dir,
}
for template, folder in template_list.items():
dst_dir = os.path.join(notebook_dir, folder)
dst_path = os.path.join(dst_dir, template)
if os.path.exists(dst_path):
logger.warning('"%s" already exists.', dst_path)
continue
logger.debug('Creating "%s".', template)
os.makedirs(dst_dir, exist_ok=True)
src_path = os.path.join("templates", template)
raw = pkg_resources.resource_string(__name__, src_path)
with open(dst_path, "wb") as file_out:
    file_out.write(raw)
return cls(notebook_dir)
@classmethod
def is_notebook(cls, notebook_dir: str) -> bool:
"""Checks if the directory is a valid notebook."""
config_path = os.path.join(notebook_dir, cls.config_name)
if not os.path.exists(config_path):
return False
config = read_json(config_path)
if not isinstance(config, dict):
return False
return "notebook_name" in read_json(config_path)
def read_notes(self) -> List[MarkdownPart]:
"""Read all notes from files."""
logger = self._make_logger()
logger.debug("Reading notes.")
note_pattern = os.path.join(self.root_dir, self.note_dir, "**", "*.md")
notes = []
for path in glob.glob(note_pattern, recursive=True):
temp = MarkdownPart.from_file(path)
if temp.is_stub():
logger.info('"%s" is a stub.', path)
notes.append(temp)
logger.debug("Loaded %s notes.", len(notes))
return notes
def refresh(self) -> None:
"""Reload all of the notes for the notebook."""
self.notes = self.read_notes()
def make_note(
self, date: datetime.datetime = datetime.datetime.today(), force: bool = False
) -> None:
"""Generates and writes a note for the specified date."""
logger = self._make_logger("Generation")
logger.debug("Generating a note for %s.", date)
date_format = str(self.config["note_file_format"])
dst_path = os.path.join(
self.root_dir, self.note_dir, date.strftime(date_format)
)
os.makedirs(os.path.split(dst_path)[0], exist_ok=True)
if force or not os.path.exists(dst_path):
template = self.env.get_template("note.md")
output = MarkdownPart(template.render(date=date))
output.meta["note_for"] = date
output.meta["notebook"] = self.config["notebook_name"]
output.to_file(dst_path)
logger.debug("Generated note")
self.notes.append(MarkdownPart.from_file(dst_path))
else:
logger.debug("Note already exists - skipping.")
logger.debug("Finished writing note.")
def make_series(
self,
days: int = 7,
starting: datetime.datetime = datetime.datetime.today(),
force: bool = False,
) -> None:
"""Generate a series of notes for `days` number of days."""
for _ in range(days):
self.make_note(starting, force=force)
starting += datetime.timedelta(days=1)
def _make_logger(self, sub_log: Optional[str] = None) -> logging.Logger:
"""Get the logger for the notebook, with an optional sub-log name."""
if isinstance(sub_log, str):
log_name = f"{LOG_NAME}.{sub_log}"
else:
log_name = LOG_NAME
return logging.getLogger(log_name)
def _read_config(self) -> Dict[str, Union[str, int]]:
logger = self._make_logger()
config_path = os.path.join(self.root_dir, self.config_name)
logger.debug("Reading config from %s.", config_path)
if not os.path.exists(config_path):
message = (
"Config path doesn't exist. Maybe it needs to be initialised first?"
f" The path given was {config_path}"
)
logger.error(message)
raise RuntimeError(message)
config = read_json(config_path)
logger.debug("Finished reading config.")
return config
def set_config(self, item_name: str, item_value: Any) -> None:
"""Set an item in the notebook config (including config file)."""
self.config[item_name] = item_value
write_json(self.config, os.path.join(self.root_dir, self.config_name))
def clean(self) -> None:
"""General cleanup operations on the notebook."""
logger = self._make_logger("Cleanup")
logger.info("Cleaning up all notes.")
self.update_projects_and_tasks()
self.text_corrections()
for this_note in self.notes:
this_note.to_file(this_note.meta[".file"]["path"])
logger.info("Finished cleaning notes.")
def extract_project(self, pattern: str) -> List[MarkdownPart]:
"""Extract all entries for a project."""
logger = self._make_logger()
logger.debug('Extracting notes for "%s".', pattern)
output = []
for this_note in self.notes:
for part in this_note.extract_parts(pattern):
part.title = this_note.title
part.set_level(2)
output.append(part)
return output
def update_projects_and_tasks(self) -> None:
"""
Build a list of projects/tasks and replace existing ones based on a mapping.
The mappings are stored in JSON files called "projects" and "tasks".
"""
projects = read_json(self._working_path("projects.json"))
tasks = read_json(self._working_path("tasks.json"))
new_projects, new_tasks = self._make_part_list()
for this_project in new_projects:
if this_project not in projects:
projects[this_project] = this_project
for this_tasks in new_tasks:
if this_tasks not in tasks:
tasks[this_tasks] = this_tasks
for this_note in self.notes:
this_note.replace_title(projects, level=2)
this_note.replace_title(tasks, level=3)
write_json(projects, self._working_path("projects.json"))
write_json(tasks, self._working_path("tasks.json"))
def text_corrections(self) -> None:
"""Apply each regex replacement pattern in corrections.json to all notes."""
corrections = read_json(self._working_path("corrections.json"))
for pattern, replacement in corrections.items():
for this_note in self.notes:
this_note.make_replacement(pattern, replacement)
def _make_part_list(self) -> Tuple[List[str], List[str]]:
"""Generate a list of projects and tasks in the notebook."""
projects = set()
tasks = set()
for this_note in self.notes:
projects.update([x.title for x in this_note.parts])
for this_project in this_note.parts:
tasks.update([x.title for x in this_project.parts])
return sorted(list({x for x in projects if x is not None})), sorted(
list({x for x in tasks if x is not None})
)
def _working_path(self, file_name: str) -> str:
return os.path.join(self.root_dir, self.working_dir, file_name)
def render_full(self, dst_path: Optional[str] = None) -> None:
"""Render all notes into a single HTML file."""
logger = self._make_logger("Rendering")
logger.info('Rendering full notes to HTML at "%s".', dst_path)
if dst_path is None:
dst_path = os.path.join(
self.root_dir, self.output_dir, f"{self.config['notebook_name']}.html"
)
self._render(
notes=self.notes, title=str(self.config["notebook_name"]), dst_path=dst_path
)
logger.info("Finished rendering full notes.")
def render_project(self, project_name: str, dst_path: Optional[str] = None) -> None:
"""Render a single project to a HTML file."""
logger = self._make_logger("Rendering")
logger.info('Rendering project "%s" to HTML at "%s".', project_name, dst_path)
if dst_path is None:
dst_path = os.path.join(
self.root_dir, self.output_dir, f"{project_name}.html"
)
self._render(
notes=self.extract_project(project_name),
title=project_name,
dst_path=dst_path,
)
logger.info("Finished rendering project.")
def render_all_projects(self, dst_dir: Optional[str] = None) -> None:
"""Render all projects to their own HTML file."""
logger = self._make_logger("Rendering")
logger.info("Rendering all projects to their own output.")
if dst_dir is None:
dst_dir = os.path.join(self.root_dir, self.output_dir)
projects, _ = self._make_part_list()
for this_project in projects:
dst_path = os.path.join(dst_dir, f"{this_project}.html")
self.render_project(this_project, dst_path)
logger.info("Finished rendering all projects.")
def _render(self, notes: List[MarkdownPart], title: str, dst_path: str) -> None:
logger = self._make_logger("Rendering")
logger.debug("Combining %s parts for rendering.", len(notes))
document = MarkdownPart(f"# {title}")
for part in notes:
document.add_part(part)
logger.debug("Making render-time corrections.")
corrections = read_json(self._working_path("render_changes.json"))
for pattern, replacement in corrections.items():
document.make_replacement(pattern, replacement)
logger.debug("Rendering template")
output = self.env.get_template("page.html").render(
**self.config, document=document, title=title
)
logger.debug("Writing to disk.")
with open(dst_path, "w", encoding="utf-8") as file:
file.write(output)
self._log_file_info(dst_path)
logger.debug("Finished rendering.")
def _log_file_info(self, file_path: str) -> None:
"""Logs information about a file (called after rendering an output)."""
logger = self._make_logger()
logger.debug("Collating information on %s.", file_path)
dst_path = self._working_path("hash_log.csv")
file_info = os.stat(file_path)
output = [
'"' + os.path.relpath(file_path, self.root_dir) + '"',
datetime.datetime.fromtimestamp(file_info.st_mtime).isoformat(),
calc_sha256(file_path),
calc_md5(file_path),
str(file_info.st_size),
]
with open(dst_path, mode="a", encoding="utf-8") as file:
file.write(",".join(output) + "\n")
logger.debug("SHA256 was %s.", output[2])
def write_json(data: Dict[str, Any], path: str) -> None:
"""
Write a dictionary to a JSON file.
"""
with open(path, "w", encoding="utf-8") as file:
json.dump(data, file, indent=4)
def read_json(path: str) -> Dict[str, Any]:
"""
Read a JSON file (assumed to be a dictionary).
"""
if os.path.exists(path):
with open(path, "r", encoding="utf-8") as file:
return json.load(file)
else:
return {}
def calc_sha256(path: str, buffer_size: int = 65536) -> str:
"Calculates the SHA256 of a file."
algorithm = hashlib.sha256()
with open(path, "rb") as file_in:
while True:
data = file_in.read(buffer_size)
if not data:
break
algorithm.update(data)
return algorithm.hexdigest()
def calc_md5(path: str, buffer_size: int = 65536) -> str:
"Calculates the MD5 of a file."
algorithm = hashlib.md5()
with open(path, "rb") as file_in:
while True:
data = file_in.read(buffer_size)
if not data:
break
algorithm.update(data)
return algorithm.hexdigest()
| 37.387534 | 88 | 0.611699 |
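The `Notebook` class above is driven through a short lifecycle: initialise a directory with the bundled templates, generate dated note stubs, clean them, and render HTML into the `rendered` folder. A minimal usage sketch, assuming the package is importable as `tidynotes` (inferred from the `src/tidynotes/notebook.py` path) and using a hypothetical directory name:

```python
import datetime

from tidynotes.notebook import Notebook  # import path assumed from the file location above

# "my_notes" is a hypothetical directory; initialise() copies config and templates into it.
book = Notebook.initialise("my_notes")

# Generate a week of daily note stubs starting today, then tidy and render them.
book.make_series(days=7, starting=datetime.datetime.today())
book.clean()
book.render_full()          # one HTML file named after the notebook
book.render_all_projects()  # plus one HTML file per project
```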
3a475d54774130f2b8c79e963573d33c9d8862a7 | 13,660 | py | Python | pysnmp/HPSWITCH-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HPSWITCH-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HPSWITCH-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module HPSWITCH-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPSWITCH-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:30:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, Counter32, Gauge32, ModuleIdentity, ObjectIdentity, Bits, IpAddress, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, enterprises, TimeTicks, iso, MibIdentifier, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Counter32", "Gauge32", "ModuleIdentity", "ObjectIdentity", "Bits", "IpAddress", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "enterprises", "TimeTicks", "iso", "MibIdentifier", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class MacAddr(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
    fixedLength = 6
hp = MibIdentifier((1, 3, 6, 1, 4, 1, 11))
nm = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2))
icf = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14))
icfEswitch = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6))
hpEs = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1))
hpEsMain = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1))
hpEsConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1))
hpEsFwVer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsFwVer.setStatus('mandatory')
hpEsHwVer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsHwVer.setStatus('mandatory')
hpEsIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsIpAddr.setStatus('mandatory')
hpEsNetMask = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsNetMask.setStatus('mandatory')
hpEsDefaultGateway = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsDefaultGateway.setStatus('mandatory')
hpEsTrapRcvrMaxEnt = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsTrapRcvrMaxEnt.setStatus('mandatory')
hpEsTrapRcvrCurEnt = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsTrapRcvrCurEnt.setStatus('mandatory')
hpEsTrapRcvrNext = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 655535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsTrapRcvrNext.setStatus('mandatory')
hpEsTrapRcvrTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9), )
if mibBuilder.loadTexts: hpEsTrapRcvrTable.setStatus('mandatory')
hpEsTrapRcvrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9, 1), ).setIndexNames((0, "HPSWITCH-MIB", "hpEsTrapRcvrIndex"))
if mibBuilder.loadTexts: hpEsTrapRcvrEntry.setStatus('mandatory')
hpEsTrapRcvrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsTrapRcvrIndex.setStatus('mandatory')
hpEsTrapRcvrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("invalid", 3), ("create", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsTrapRcvrStatus.setStatus('mandatory')
hpEsTrapRcvrIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsTrapRcvrIpAddress.setStatus('mandatory')
hpEsTrapRcvrComm = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 9, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsTrapRcvrComm.setStatus('mandatory')
hpEsSys = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2))
hpEsNumPorts = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsNumPorts.setStatus('mandatory')
hpEsNumStations = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsNumStations.setStatus('mandatory')
hpEsMostStations = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsMostStations.setStatus('mandatory')
hpEsMaxStations = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsMaxStations.setStatus('mandatory')
hpEsReset = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("running", 2), ("softReset", 3), ("hardReset", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsReset.setStatus('mandatory')
hpEsNumResets = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsNumResets.setStatus('mandatory')
hpEsAddrAgingTime = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsAddrAgingTime.setStatus('mandatory')
hpEsSysStaTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 9), )
if mibBuilder.loadTexts: hpEsSysStaTable.setStatus('mandatory')
hpEsSysStaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 9, 1), ).setIndexNames((0, "HPSWITCH-MIB", "hpEsSysStaMacAddr"))
if mibBuilder.loadTexts: hpEsSysStaEntry.setStatus('mandatory')
hpEsSysStaMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 9, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsSysStaMacAddr.setStatus('mandatory')
hpEsSysStaPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 9, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsSysStaPort.setStatus('mandatory')
hpEsSysStaTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 2, 9, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsSysStaTraffic.setStatus('mandatory')
hpEsTop = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 2))
hpEsPort = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3))
hpEsPortTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1), )
if mibBuilder.loadTexts: hpEsPortTable.setStatus('mandatory')
hpEsPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1), ).setIndexNames((0, "HPSWITCH-MIB", "hpEsPortIndex"))
if mibBuilder.loadTexts: hpEsPortEntry.setStatus('mandatory')
hpEsPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortIndex.setStatus('mandatory')
hpEsPortOprStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsPortOprStatus.setStatus('mandatory')
hpEsPortExtConn = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("aui", 2), ("rj45", 3), ("noExternal", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortExtConn.setStatus('mandatory')
hpEsPortDuplex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fullDuplex", 1), ("halfDuplex", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortDuplex.setStatus('mandatory')
hpEsPortRcvLocalFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortRcvLocalFrames.setStatus('mandatory')
hpEsPortForwardedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortForwardedFrames.setStatus('mandatory')
hpEsPortMostStations = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortMostStations.setStatus('mandatory')
hpEsPortMaxStations = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortMaxStations.setStatus('mandatory')
hpEsPortSWHandledFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortSWHandledFrames.setStatus('mandatory')
hpEsPortLocalStations = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortLocalStations.setStatus('mandatory')
hpEsPortRemoteStations = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortRemoteStations.setStatus('mandatory')
hpEsPortUnknownStaFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortUnknownStaFrames.setStatus('mandatory')
hpEsPortResetStats = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("running", 2), ("reset", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsPortResetStats.setStatus('mandatory')
hpEsPortResetTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 17), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortResetTimer.setStatus('mandatory')
hpEsPortResetAddrs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("running", 2), ("reset", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEsPortResetAddrs.setStatus('mandatory')
hpEsPortRcvBcasts = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortRcvBcasts.setStatus('mandatory')
hpEsPortSwitchedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 3, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEsPortSwitchedFrames.setStatus('mandatory')
mibBuilder.exportSymbols("HPSWITCH-MIB", hpEs=hpEs, hpEsPortResetStats=hpEsPortResetStats, hpEsPortRemoteStations=hpEsPortRemoteStations, hpEsConfig=hpEsConfig, hpEsNumPorts=hpEsNumPorts, hpEsPortOprStatus=hpEsPortOprStatus, hpEsPortForwardedFrames=hpEsPortForwardedFrames, hpEsPortResetTimer=hpEsPortResetTimer, hpEsTrapRcvrTable=hpEsTrapRcvrTable, hpEsPortSwitchedFrames=hpEsPortSwitchedFrames, hpEsTrapRcvrMaxEnt=hpEsTrapRcvrMaxEnt, hpEsPortUnknownStaFrames=hpEsPortUnknownStaFrames, hpEsPortMaxStations=hpEsPortMaxStations, hpEsReset=hpEsReset, hpEsNetMask=hpEsNetMask, hpEsMostStations=hpEsMostStations, hpEsPortRcvBcasts=hpEsPortRcvBcasts, hpEsPortEntry=hpEsPortEntry, hpEsTrapRcvrCurEnt=hpEsTrapRcvrCurEnt, hpEsTrapRcvrStatus=hpEsTrapRcvrStatus, MacAddr=MacAddr, hpEsSysStaTraffic=hpEsSysStaTraffic, hpEsSysStaPort=hpEsSysStaPort, hpEsTrapRcvrComm=hpEsTrapRcvrComm, hpEsHwVer=hpEsHwVer, hpEsSys=hpEsSys, hpEsSysStaMacAddr=hpEsSysStaMacAddr, hpEsTrapRcvrNext=hpEsTrapRcvrNext, hpEsTrapRcvrIpAddress=hpEsTrapRcvrIpAddress, hpEsPortIndex=hpEsPortIndex, hpEsFwVer=hpEsFwVer, hpEsTop=hpEsTop, hpEsPortResetAddrs=hpEsPortResetAddrs, hpEsDefaultGateway=hpEsDefaultGateway, hpEsSysStaTable=hpEsSysStaTable, hpEsPortLocalStations=hpEsPortLocalStations, hpEsMaxStations=hpEsMaxStations, hpEsIpAddr=hpEsIpAddr, hpEsTrapRcvrEntry=hpEsTrapRcvrEntry, hpEsNumResets=hpEsNumResets, hpEsPortRcvLocalFrames=hpEsPortRcvLocalFrames, hpEsPort=hpEsPort, hpEsSysStaEntry=hpEsSysStaEntry, nm=nm, hpEsAddrAgingTime=hpEsAddrAgingTime, icf=icf, hpEsPortTable=hpEsPortTable, hpEsTrapRcvrIndex=hpEsTrapRcvrIndex, hpEsPortExtConn=hpEsPortExtConn, hpEsPortSWHandledFrames=hpEsPortSWHandledFrames, icfEswitch=icfEswitch, hpEsPortMostStations=hpEsPortMostStations, hpEsPortDuplex=hpEsPortDuplex, hp=hp, hpEsMain=hpEsMain, hpEsNumStations=hpEsNumStations)
| 114.789916 | 1,828 | 0.737628 |
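The MIB module above is mostly a flat sequence of `MibIdentifier`, `MibScalar` and table declarations; the hierarchy is easier to see as dotted OIDs, where each child appends one sub-identifier to its parent's tuple. A small self-contained sketch (plain Python, no pysnmp needed) using tuples copied from the declarations above:

```python
# OID tuples copied verbatim from the MIB declarations above.
NODES = {
    "hp": (1, 3, 6, 1, 4, 1, 11),
    "nm": (1, 3, 6, 1, 4, 1, 11, 2),
    "icf": (1, 3, 6, 1, 4, 1, 11, 2, 14),
    "icfEswitch": (1, 3, 6, 1, 4, 1, 11, 2, 14, 6),
    "hpEs": (1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1),
    "hpEsFwVer": (1, 3, 6, 1, 4, 1, 11, 2, 14, 6, 1, 1, 1, 1),
}

for name, oid in NODES.items():
    # Render each node as a dotted OID string, e.g. hpEs -> 1.3.6.1.4.1.11.2.14.6.1
    print(f"{name:12s} {'.'.join(str(n) for n in oid)}")
```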
cc98392f4db84ca2d69d848bf8624fe6cc3b5c23 | 464 | py | Python | PyOpenGL-3.0.2/OpenGL/raw/GL/SGIX/vertex_preclip.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | ["BSD-2-Clause"] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/SGIX/vertex_preclip.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | ["BSD-2-Clause"] | null | null | null | PyOpenGL-3.0.2/OpenGL/raw/GL/SGIX/vertex_preclip.py | frederica07/Dragon_Programming_Process | c0dff2e20c1be6db5adc6f9977efae8f7f888ef5 | ["BSD-2-Clause"] | null | null | null |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_SGIX_vertex_preclip'
_p.unpack_constants( """GL_VERTEX_PRECLIP_SGIX 0x83EE
GL_VERTEX_PRECLIP_HINT_SGIX 0x83EF""", globals())
def glInitVertexPreclipSGIX():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| 35.692308 | 71 | 0.797414 |
9e9733033b2195d971ba927306012bd2e03d27f2 | 1,616 | py | Python | samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_test_cases_export_test_cases_sync.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | ["Apache-2.0"] | null | null | null | samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_test_cases_export_test_cases_sync.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | ["Apache-2.0"] | null | null | null | samples/generated_samples/dialogflow_generated_dialogflowcx_v3beta1_test_cases_export_test_cases_sync.py | nicain/python-dialogflow-cx | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportTestCases
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_generated_dialogflowcx_v3beta1_TestCases_ExportTestCases_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_export_test_cases():
    # Create a client
    client = dialogflowcx_v3beta1.TestCasesClient()

    # Initialize request argument(s)
    request = dialogflowcx_v3beta1.ExportTestCasesRequest(
        gcs_uri="gcs_uri_value",
        parent="parent_value",
    )

    # Make the request
    operation = client.export_test_cases(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()
    print(response)

# [END dialogflow_generated_dialogflowcx_v3beta1_TestCases_ExportTestCases_sync]
| 32.979592 | 85 | 0.764233 |
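`export_test_cases` returns a long-running operation, and `operation.result()` blocks until it finishes. A small sketch of wrapping the generated sample with real-looking arguments and a bounded wait; the resource names are placeholders, and the `timeout` argument is assumed from `google-api-core`'s `Operation.result` signature:

```python
from google.cloud import dialogflowcx_v3beta1


def export_test_cases_with_timeout(parent: str, gcs_uri: str, timeout_s: float = 300.0):
    client = dialogflowcx_v3beta1.TestCasesClient()
    request = dialogflowcx_v3beta1.ExportTestCasesRequest(parent=parent, gcs_uri=gcs_uri)
    operation = client.export_test_cases(request=request)
    # result() blocks until the export finishes; the timeout bounds the wait.
    return operation.result(timeout=timeout_s)


# Placeholder resource names, not real project values:
# export_test_cases_with_timeout(
#     parent="projects/my-project/locations/us-central1/agents/my-agent",
#     gcs_uri="gs://my-bucket/exported-test-cases.json",
# )
```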
e6f58ca87c7ebbc0e1000949ffbdaa5be6183632 | 2,619 | py | Python | setup.py | pierodesenzi/noronha | ee7cba8d0d29d0dc5484d2000e1a42c9954c20e4 | ["Apache-2.0"] | null | null | null | setup.py | pierodesenzi/noronha | ee7cba8d0d29d0dc5484d2000e1a42c9954c20e4 | ["Apache-2.0"] | null | null | null | setup.py | pierodesenzi/noronha | ee7cba8d0d29d0dc5484d2000e1a42c9954c20e4 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright Noronha Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Noronha is a framework that helps you adopt
DataOps practices in Machine Learning projects
"""
import os
from setuptools import find_packages, setup
from distutils.dir_util import copy_tree
if os.environ.get('include_tests'):
    copy_tree('tests', 'noronha/resources/tests')
    copy_tree('examples', 'noronha/resources/examples')

setup(
    name='noronha-dataops',
    version='1.6.2',
    url='https://github.com/noronha-dataops/noronha',
    author='Noronha Development Team',
    author_email='noronha.mlops@everis.com',
    description='DataOps for Machine Learning',
    long_description=__doc__,
    zip_safe=False,
    platforms=['Unix'],
    license='Apache-2.0',
    install_requires=open('./requirements/{}_reqs.txt'.format(
        'on_board' if os.environ.get('AM_I_ON_BOARD') else 'off_board'
    )).read().split('\n'),
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    package_data={
        'noronha.resources': [
            'nha.yaml',
            'entrypoint.sh',
            'isle/*/*',
            'tests/*/*',
            'examples/*/*',
            'examples/*/*/*'
        ]
    },
    entry_points={
        'console_scripts': [
            '{alias}={entry_pont}'.format(alias=alias, entry_pont='noronha.cli.main:nha')
            for alias in ['noronha', 'nha']
        ],
        'papermill.engine': [
            'noronha_engine=noronha.tools.main:NoronhaEngine'
        ]
    },
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Unix',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Application Frameworks'
    ]
)
| 33.151899 | 89 | 0.646048 |
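The `console_scripts` list in the `setup()` call above is built with a comprehension rather than written out. A short sketch of the same expression, showing what it expands to:

```python
# Same comprehension as in the setup() call above (the 'entry_pont' spelling is from the source).
aliases = ['noronha', 'nha']
console_scripts = [
    '{alias}={entry_pont}'.format(alias=alias, entry_pont='noronha.cli.main:nha')
    for alias in aliases
]
print(console_scripts)
# ['noronha=noronha.cli.main:nha', 'nha=noronha.cli.main:nha']
```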
f0eebfd7cc08d4572d89b164b5632b4900475115 | 631 | py | Python | zink/templatetags/version.py | jw/zink | e890e3bcff56a07a265506350d41af8bce633b05 | ["MIT"] | 2 | 2016-01-25T09:01:51.000Z | 2016-05-08T11:29:40.000Z | zink/templatetags/version.py | jw/zink | e890e3bcff56a07a265506350d41af8bce633b05 | ["MIT"] | 51 | 2018-02-14T05:14:40.000Z | 2022-01-14T08:01:05.000Z | zink/templatetags/version.py | jw/zink | e890e3bcff56a07a265506350d41af8bce633b05 | ["MIT"] | 1 | 2020-05-03T09:15:21.000Z | 2020-05-03T09:15:21.000Z |
import pkg_resources
import sys
from django import template
register = template.Library()
@register.simple_tag
def version(module):
    """
    Display the version number of the given module

    {% version("django") %}
    """
    if module == "python":
        python_version = sys.version_info
        return f"{python_version[0]}.{python_version[1]}.{python_version[2]}"
    else:
        try:
            package_version = pkg_resources.get_distribution(module).version
        except pkg_resources.DistributionNotFound:
            package_version = "unknown"
        return package_version


register.filter(version)
| 23.37037 | 77 | 0.670365 |
976a0f24ceebe0c3b84dc3b95e4b844b48e0d8cc | 15,225 | py | Python | tests/unittests/exceptions/StoryError.py | chrisstpierre/storyscript | 2f0ed1819eb3be5377732514c33cf72fe9da5e2f | ["Apache-2.0"] | null | null | null | tests/unittests/exceptions/StoryError.py | chrisstpierre/storyscript | 2f0ed1819eb3be5377732514c33cf72fe9da5e2f | ["Apache-2.0"] | null | null | null | tests/unittests/exceptions/StoryError.py | chrisstpierre/storyscript | 2f0ed1819eb3be5377732514c33cf72fe9da5e2f | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import click
from lark.exceptions import UnexpectedCharacters, UnexpectedToken
from lark.lexer import Token
from pytest import fixture, mark
from storyscript.ErrorCodes import ErrorCodes
from storyscript.Intention import Intention
from storyscript.exceptions import CompilerError, StoryError
@fixture
def error(magic):
return magic()
@fixture
def storyerror(error, magic):
story = magic()
return StoryError(error, story)
def test_storyerror_init(storyerror, error):
assert storyerror.error == error
assert storyerror.story is not None
assert storyerror.path is None
assert storyerror.error_tuple is None
assert issubclass(StoryError, SyntaxError)
def test_storyerror_init_path():
storyerror = StoryError('error', 'story', path='hello.story')
assert storyerror.path == 'hello.story'
def test_storyerror_name(storyerror):
assert storyerror.name() == 'story'
def test_storyerror_name_path(patch, storyerror):
patch.object(os, 'getcwd', return_value='/abspath')
storyerror.path = 'hello.story'
assert storyerror.name() == 'hello.story'
def test_storyerror_name_reduce_path(patch, storyerror):
"""
Ensures that paths are simplified for stories in the current working
directory.
"""
patch.object(os, 'getcwd', return_value='/abspath')
storyerror.path = '/abspath/hello.story'
assert storyerror.name() == 'hello.story'
def test_storyerror_int_line(patch, storyerror, error):
"""
Ensures int_line returns the correct line number
"""
storyerror.error.line = 1
assert storyerror.int_line() == 1
def test_storyerror_int_line_string(patch, storyerror, error):
"""
Ensures int_line returns the correct line number with string lines
"""
storyerror.error.line = '1'
assert storyerror.int_line() == 1
def test_storyerror_int_line_fake_tree(patch, storyerror, error):
"""
Ensures int_line returns the correct line number with fake tree lines
"""
storyerror.error.line = '1.2.3'
assert storyerror.int_line() == 1
def test_storyerror_get_line(patch, storyerror, error):
"""
Ensures get_line returns the error line
"""
patch.object(StoryError, 'int_line', return_value=1)
storyerror.story.line.return_value = 'x = 0'
assert storyerror.get_line() == 'x = 0'
storyerror.story.line.assert_called_with(1)
def test_storyerror_header(patch, storyerror, error):
"""
Ensures StoryError.header returns the correct text.
"""
patch.object(click, 'style')
patch.many(StoryError, ['name', 'int_line'])
template = 'Error: syntax error in {} at line {}, column {}'
result = storyerror.header()
click.style.assert_called_with(StoryError.name(), bold=True)
assert result == template.format(click.style(),
storyerror.int_line(), error.column)
def test_storyerror_symbols(patch, storyerror, error):
"""
Ensures StoryError.symbols creates one symbol when there is no end column.
"""
patch.object(click, 'style')
del error.end_column
error.column = '1'
result = storyerror.symbols(line=' a')
click.style.assert_called_with(' ^', fg='red')
assert result == click.style()
def test_storyerror_symbols_tabs(patch, storyerror, error):
"""
Ensures StoryError.symbols deals correctly with tabs.
"""
patch.object(click, 'style')
del error.end_column
error.column = '1'
result = storyerror.symbols(line='\ta')
click.style.assert_called_with(' ^', fg='red')
assert result == click.style()
def test_story_error_symbols_end_column(patch, storyerror, error):
"""
Ensures StoryError.symbols creates many symbols when there is an end
column.
"""
patch.object(click, 'style')
error.end_column = '4'
error.column = '1'
result = storyerror.symbols(line=' abc')
click.style.assert_called_with(' ^^^', fg='red')
assert result == click.style()
storyerror.with_color = False
result = storyerror.symbols(line=' abc')
assert result == ' ^^^'
def test_story_error_symbols_end_column_tabs(patch, storyerror, error):
"""
Ensures StoryError.symbols deals correctly with tabs.
"""
patch.object(click, 'style')
error.end_column = '4'
error.column = '1'
storyerror.with_color = False
result = storyerror.symbols(line='\ta\tc')
assert result == ' ^^^^'
def test_storyerror_highlight(patch, storyerror, error):
"""
Ensures StoryError.highlight produces the correct text.
"""
patch.many(StoryError, ['get_line', 'int_line', 'symbols'])
error.column = '1'
result = storyerror.highlight()
highlight = StoryError.symbols()
args = (storyerror.int_line(), StoryError.get_line().replace(), highlight)
assert result == '{}| {}\n{}'.format(*args)
def test_storyerror_error_code(storyerror):
storyerror.error_tuple = ('code', 'hint')
assert storyerror.error_code() == 'code'
def test_storyerror_hint(storyerror):
storyerror.error_tuple = ('code', 'hint')
assert storyerror.hint() == 'hint'
def test_storyerror_hint_unidentified_error(storyerror, patch):
patch.object(StoryError, '_internal_error')
storyerror.error_tuple = ErrorCodes.unidentified_error
storyerror.error = Exception('Custom.Error')
assert storyerror.hint() == storyerror._internal_error()
def test_storyerror_hint_unidentified_compiler_error(storyerror, patch):
patch.object(StoryError, '_internal_error')
storyerror.error_tuple = ErrorCodes.unidentified_error
storyerror.error = CompilerError(None)
assert storyerror.hint() == storyerror._internal_error()
def test_storyerror_hint_invalid_character(patch, storyerror):
storyerror.error = UnexpectedCharacters('seq', 0, line=1, column=5)
storyerror.error_tuple = ErrorCodes.invalid_character
storyerror._format = {'character': '$'}
assert storyerror.hint() == '`$` is not allowed here'
def test_storyerror_hint_redeclared(patch, storyerror, magic):
storyerror.error = CompilerError(
'reserved_keyword',
format_args={'keyword': 'foo'})
storyerror.error_tuple = ErrorCodes.reserved_keyword
assert storyerror.hint() == '`foo` is a reserved keyword'
def test_storyerror_hint_unexpected_token(patch, storyerror, ):
expected = ['a', 'b', 'c']
storyerror.error = UnexpectedToken(token='and', expected=expected)
storyerror.error_tuple = ErrorCodes.unexpected_token
storyerror._format = {'token': 'and', 'allowed': str(expected)}
assert storyerror.hint() == ('`and` is not allowed here. '
f'Allowed: {str(expected)}')
def test_storyerror_unexpected_token_code(patch, call_count, storyerror):
patch.init(Intention)
patch.object(Intention, 'assignment', return_value=False)
patch.object(Intention, 'unnecessary_colon', return_value=False)
result = storyerror.unexpected_token_code()
Intention.__init__.assert_called_with(storyerror.get_line())
call_count(Intention, ['assignment', 'unnecessary_colon'])
assert result == ErrorCodes.unexpected_token
assert storyerror._format == {
'token': str(storyerror.error.token),
'allowed': str(storyerror.error.expected),
}
def test_storyerror_unexpected_token_code_nl(patch, call_count, storyerror):
"""
Test an unexpected token error with _NL
"""
patch.init(Intention)
patch.object(Intention, 'assignment', return_value=False)
patch.object(Intention, 'unnecessary_colon', return_value=False)
storyerror.error.token.type = '_NL'
result = storyerror.unexpected_token_code()
Intention.__init__.assert_called_with(storyerror.get_line())
call_count(Intention, ['assignment', 'unnecessary_colon'])
assert result == ErrorCodes.unexpected_end_of_line
assert storyerror._format == {
'allowed': str(storyerror.error.expected),
}
def test_storyerror_unexpected_token_code_assignment(patch, storyerror):
patch.init(Intention)
patch.object(storyerror, 'get_line')
patch.object(Intention, 'assignment')
patch.object(Intention, 'unnecessary_colon', return_value=False)
result = storyerror.unexpected_token_code()
assert result == ErrorCodes.assignment_incomplete
def test_storyerror_unexpected_token_code_colon(patch, storyerror):
patch.init(Intention)
patch.object(Intention, 'assignment', return_value=False)
patch.object(Intention, 'unnecessary_colon')
assert storyerror.unexpected_token_code() == ErrorCodes.unnecessary_colon
def test_storyerror_unexpected_token_expected_block_after(patch, storyerror):
patch.init(Intention)
patch.object(storyerror, 'get_line')
patch.object(Intention, 'assignment', return_value=False)
patch.object(Intention, 'unnecessary_colon', return_value=False)
and_ = Token('and', 'and')
storyerror.error = UnexpectedToken(token=and_, expected=['_INDENT'])
assert storyerror.unexpected_token_code() == \
ErrorCodes.block_expected_after
def test_storyerror_unexpected_characters_code(patch, call_count, storyerror):
patch.init(Intention)
patch.object(Intention, 'is_function', return_value=False)
patch.object(Intention, 'unnecessary_colon', return_value=False)
patch.object(storyerror, 'get_line', return_value='x = $')
storyerror.error.column = 5
result = storyerror.unexpected_characters_code()
Intention.__init__.assert_called_with(storyerror.get_line())
call_count(Intention, ['is_function', 'unnecessary_colon'])
assert result == ErrorCodes.invalid_character
assert storyerror._format == {'character': '$'}
def test_storyerror_unexpected_characters_code_function(patch, storyerror):
patch.init(Intention)
patch.object(Intention, 'is_function')
result = storyerror.unexpected_characters_code()
assert result == ErrorCodes.function_misspell
def test_storyerror_unexpected_characters_code_colon(patch, storyerror):
patch.init(Intention)
patch.object(Intention, 'is_function', return_value=False)
patch.object(Intention, 'unnecessary_colon')
result = storyerror.unexpected_characters_code()
assert result == ErrorCodes.unnecessary_colon
def test_storyerror_unexpected_characters_expected_block_before(patch,
storyerror):
patch.init(Intention)
patch.object(Intention, 'is_function', return_value=False)
patch.object(StoryError, 'is_valid_name_start', return_value=True)
patch.object(Intention, 'unnecessary_colon', return_value=False)
storyerror.error = UnexpectedCharacters(seq='abc', lex_pos=0, line=0,
column=0, allowed=None)
result = storyerror.unexpected_characters_code()
assert result == ErrorCodes.block_expected_before
@mark.parametrize('name_char', [
'a', 'c', 'z', 'A', 'G', 'Z', '_',
])
def test_storyerror_is_valid_name_start(storyerror, name_char):
assert storyerror.is_valid_name_start(name_char)
@mark.parametrize('name_char', [
'.', '$', ':', '+', '/', '%', '-', '0', '5', '9'
])
def test_storyerror_is_invalid_name_start(storyerror, name_char):
assert not storyerror.is_valid_name_start(name_char)
def test_storyerror_identify(storyerror):
storyerror.error.error = 'none'
assert storyerror.identify() == ErrorCodes.unidentified_error
@mark.parametrize('name', [
'service_name', 'arguments_noservice', 'return_outside',
'variables_backslash', 'variables_dash'
])
def test_storyerror_identify_codes(storyerror, error, name):
error.error = name
assert storyerror.identify() == getattr(ErrorCodes, name)
def test_storyerror_identify_unexpected_token(patch, storyerror):
"""
Ensures that StoryError.identify can find the error code for unidentified
token errors
"""
patch.init(UnexpectedToken)
patch.object(StoryError, 'unexpected_token_code')
storyerror.error = UnexpectedToken('seq', 'lex', 0, 0)
assert storyerror.identify() == storyerror.unexpected_token_code()
def test_storyerror_identify_unexpected_characters(patch, storyerror):
patch.init(UnexpectedCharacters)
patch.object(StoryError, 'unexpected_characters_code')
storyerror.error = UnexpectedCharacters('seq', 'lex', 0, 0)
assert storyerror.identify() == storyerror.unexpected_characters_code()
def test_storyerror_process(patch, storyerror):
patch.object(StoryError, 'identify')
storyerror.process()
assert storyerror.error_tuple == storyerror.identify()
def test_storyerror_message(patch, storyerror):
patch.many(StoryError,
['process', 'header', 'highlight', 'error_code', 'hint'])
result = storyerror.message()
assert storyerror.process.call_count == 1
args = (storyerror.header(), storyerror.highlight(),
storyerror.error_code(), storyerror.hint())
assert result == '{}\n\n{}\n\n{}: {}'.format(*args)
def test_story_storyerror_short_message(patch, storyerror):
patch.many(StoryError, ['process', 'error_code', 'hint'])
result = storyerror.short_message()
assert result == f'{storyerror.error_code()}: {storyerror.hint()}'
def test_storyerror_echo(patch, storyerror):
"""
Ensures StoryError.echo prints StoryError.message
"""
patch.object(click, 'echo')
patch.object(StoryError, 'message')
storyerror.echo()
click.echo.assert_called_with(StoryError.message())
def test_storyerror_create_error(patch):
"""
Ensures that Errors without Tokens can be created
"""
patch.init(StoryError)
patch.init(CompilerError)
error = StoryError.create_error('error_code')
assert isinstance(error, StoryError)
CompilerError.__init__.assert_called_with('error_code', format_args={})
assert isinstance(StoryError.__init__.call_args[0][0], CompilerError)
assert StoryError.__init__.call_args[0][1] is None
def test_storyerror_create_error_kwargs(patch):
"""
Ensures that Errors without Tokens can be created and kwargs are passed on.
"""
patch.init(StoryError)
patch.init(CompilerError)
error = StoryError.create_error('error_code', a=0)
assert isinstance(error, StoryError)
CompilerError.__init__.assert_called_with('error_code',
format_args={'a': 0})
assert isinstance(StoryError.__init__.call_args[0][0], CompilerError)
assert StoryError.__init__.call_args[0][1] is None
def test_storyerror_internal(patch):
"""
Ensures that an internal error gets properly constructed
"""
patch.init(StoryError)
e = Exception('.ICE.')
error = StoryError.internal_error(e)
assert isinstance(error, StoryError)
StoryError.__init__.assert_called_with(e, story=None)
def test_storyerror_internal_message(patch):
"""
Ensures that the internal error message gets properly built
"""
error = Exception('.ICE.')
expected = (
'Internal error occured: .ICE.\n'
'Please report at https://github.com/storyscript/storyscript/issues')
assert StoryError._internal_error(error) == expected
| 34.445701 | 79 | 0.71225 |
2212b0b0872da6b75abb91f0e1a3cad5e18310ca | 6,321 | py | Python | src/ScreenCompareLibrary/keywords/_compare.py | alans09/ScreenCompareLibrary | 7f06e3fe5ffe8735698ee1652db39bc4265cd4bb | ["Apache-2.0"] | null | null | null | src/ScreenCompareLibrary/keywords/_compare.py | alans09/ScreenCompareLibrary | 7f06e3fe5ffe8735698ee1652db39bc4265cd4bb | ["Apache-2.0"] | null | null | null | src/ScreenCompareLibrary/keywords/_compare.py | alans09/ScreenCompareLibrary | 7f06e3fe5ffe8735698ee1652db39bc4265cd4bb | ["Apache-2.0"] | 1 | 2021-01-28T16:21:29.000Z | 2021-01-28T16:21:29.000Z |
import imutils
import glob
import os
import cv2
import numpy
from robot.api import logger
from skimage.metrics import structural_similarity as compare_ssim  # SSIM scorer called by _compare() below (current scikit-image location)
class _Compare:
""" Comparision keywords """
def _compare(self, image_a, image_b, diff_name=None, threshold=1):
""" Internal compare function that is responsible for almost everything.
Function gets all required information as:
`image_a` first image
`image_b` second image
`diff_name` default None, if set it will create file on specified path
`threshold` default 1. if changed it will use it for computations
"""
if self.resize:
dim = (int(self.resize.split(",")[0]), int(self.resize.split(",")[1]))
logger.debug(f"Dimensions: {dim}")
image_a = cv2.resize(image_a, dim, interpolation=cv2.INTER_AREA)
image_b = cv2.resize(image_b, dim, interpolation=cv2.INTER_AREA)
gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gray_a, gray_b, full=True)
diff = (diff * 255).astype("uint8")
thresh = cv2.threshold(
diff, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(
thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(image_a, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.rectangle(image_b, (x, y), (x + w, y + h), (0, 0, 255), 2)
logger.debug(f"Image comparision score: {score}")
logger.debug(f"Threshold set to: {threshold}")
if score >= float(threshold):
return True
else:
if diff_name:
cv2.imwrite(diff_name, image_b)
return False
def compare_screenshots(self, image_a_path, image_b_path, diff_name=None, threshold=1):
"""Compare two screenshots and get True/False according to match
`image_a_path` first image to compare
`image_b_path` second image to compare (on this image diff will be shown
`diff_name` default None, specify name of image to be stored and diff will be shown on it
`threshold` default 1, specify threshold that could be used make comparision (0.0 - 1.0:
1 means 100% the same image)
Example:
| Compare Screenshots | test.png | test2.png |
| Compare Screenshots | test.png | test2.png | diff.png |
"""
logger.debug(f"Image A path: {image_a_path}")
logger.debug(f"Image B path: {image_b_path}")
image_a = cv2.imread(image_a_path) if os.path.isfile(image_a_path) else False
image_b = cv2.imread(image_b_path) if os.path.isfile(image_b_path) else False
if not isinstance(image_a, numpy.ndarray):
raise AssertionError(f"Image {image_a_path} does not exists!")
if not isinstance(image_b, numpy.ndarray):
raise AssertionError(f"Image {image_b_path} does not exists!")
return self._compare(image_a, image_b, diff_name, threshold)
def compare_folders(self, folder_a, folder_b, diff=False, end_on_error=False, threshold=1):
"""Compare two folders one to one
Beware, the name of images must be the same in order to zip them to list structure
`folder_a` first folder
`folder_b` second folder
`diff` default False. If set to True 'DIFF' folder in actual directory
will be created and diffs will be stored there
`end_on_error` default False. If set on first occurrence of different images script is stopped.
`threshold` default 1. Set the threshold value to use for comparision
Example:
| Compare Folders | Actual | Original |
| Compare Folders | Actual | Original | diff=True |
| Compare Folders | Actual | Original | diff=True | end_on_error=True |
| Compare Folders | Actual | Original | threshold=0.5 |
"""
list_of_files = zip(glob.glob(f"{folder_a}/*.*"), glob.glob(f"{folder_b}/*.*"))
for pair in list_of_files:
if diff:
if not os.path.isdir("DIFF"):
os.mkdir("DIFF")
_, file1 = os.path.split(pair[0])
_, file2 = os.path.split(pair[1])
res = self.compare_screenshots(
pair[0], pair[1],
diff_name=f"DIFF/{file1}-{file2}.png",
threshold=threshold
)
else:
res = self.compare_screenshots(pair[0], pair[1], threshold)
if end_on_error:
if not res:
return False
return True
@staticmethod
def contained_within_image(
image_original,
image_within,
result=None,
threshold=0.8):
"""Tries to find if image contains another image
`image_original` image to search in
`image_within` image to find within image_original
`result` default None. If set, save image and show where image_in is contained
`threshold` default 0.8. Threshold that is used to match
Example:
| Contained Within Image | ORIGINAL.png | TO_FIND.png |
| Contained Within Image | ORIGINAL.png | TO_FIND.png | result=RESULT.png |
| Contained Within Image | ORIGINAL.png | TO_FIND.png | result=RESULT.png | threshold=0.4 |
"""
threshold = float(threshold)
img_original = cv2.imread(image_original)
img_to_find = cv2.imread(image_within)
w, h = img_to_find.shape[:-1]
res = cv2.matchTemplate(img_original, img_to_find,
cv2.TM_CCOEFF_NORMED)
loc = numpy.where(res >= threshold)
if loc[0].size == 0:
return False
for pt in zip(*loc[::-1]):
cv2.rectangle(img_original, pt, (pt[0] + h, pt[1] + w), (0, 0, 255), 2)
if result:
logger.debug(f"Going to write image: {result}")
cv2.imwrite(result, img_original)
return True
| 39.26087 | 106 | 0.59595 |
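`_compare` scores the two grayscale frames with SSIM and uses the returned difference map to find the regions to box. A minimal sketch of the same call on synthetic images, assuming scikit-image 0.16 or newer, where `structural_similarity` is the current name of the SSIM routine:

```python
import numpy as np
from skimage.metrics import structural_similarity

# Two synthetic grayscale "screenshots" that differ in one 16x16 block.
image_a = np.zeros((64, 64), dtype=np.uint8)
image_b = image_a.copy()
image_b[16:32, 16:32] = 255

# full=True also returns the per-pixel difference map used for the bounding boxes.
score, diff = structural_similarity(image_a, image_b, full=True)
print(f"SSIM score: {score:.3f}")       # 1.0 would mean identical images
print(f"diff map shape: {diff.shape}")  # same shape as the inputs
```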
1d693ae544eee2b9264633b5cf6944be2c56f33f | 3,625 | py | Python | app/livestreams/models.py | jctissier/myfootylinks | cd625d8c6a6e017418cf6f7c34add31f5bc6f45a | ["MIT"] | null | null | null | app/livestreams/models.py | jctissier/myfootylinks | cd625d8c6a6e017418cf6f7c34add31f5bc6f45a | ["MIT"] | 1 | 2021-03-31T18:35:58.000Z | 2021-03-31T18:35:58.000Z | app/livestreams/models.py | jctissier/myfootylinks | cd625d8c6a6e017418cf6f7c34add31f5bc6f45a | ["MIT"] | 1 | 2018-01-26T16:19:08.000Z | 2018-01-26T16:19:08.000Z |
import collections
from random import randint
import re
import json
import praw
from app.util import log, compare_times
r = praw.Reddit(user_agent="FootyLinks" + str(randint(0, 100000)))
subreddit = r.get_subreddit("soccerstreams")
FILTER_SIZE = 50
def get_streams():
"""
Extracts list of current live streams
:return: JSON formatted list of live streams, size of the list
"""
streams = collections.OrderedDict({}) # Use an OrderedDict to keep the order of the streams
for i, submission in enumerate(subreddit.get_hot(limit=FILTER_SIZE)):
if "vs" in submission.title.lower():
if "GMT" in submission.title: # Parse the title and extra game time and name
stream_time, stream_name = parse_stream_title(title=submission.title, has_time=True)
else:
stream_time, stream_name = parse_stream_title(title=submission.title, has_time=False)
if stream_time and stream_name: # if time and name not false
# Update the Ordered Dict with the details of the stream
streams.update({str(i): [{
"stream_name": stream_name,
"stream_time": stream_time.strip(),
"submission_id": submission.id,
"competition": submission.selftext,
"status": compare_times(stream_time.strip()),
}]
})
# print(json.dumps(streams), len(streams))
return json.dumps(streams), len(streams)
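# The first return value is a JSON object keyed by the submission's position in
# the subreddit's hot listing, e.g. (illustrative values):
#     {"3": [{"stream_name": "Arsenal vs Chelsea", "stream_time": "16:00 GMT",
#             "submission_id": "abc123", "competition": "...", "status": "..."}]}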
def parse_stream_title(title, has_time):
"""
Parses the post title to extract game time and name (if exists)
:param title: submission.title
:param has_time: true if "GMT" in title, false otherwise
:return: formatted game time and game name
"""
try:
# Parses the submission.title, try to account for all human errors (not reading subreddit rules...)
if has_time:
game_name = title[title.index(']') + 1:].strip()
game_time = title[:title.index(']') + 1].strip().replace("[", "").replace("]", "")
elif not has_time:
game_name = title
game_time = ''
else:
# Stream post is not formatted properly so skip it
game_time, game_name = '', ''
return game_time, game_name
except ValueError as e:
# Something went wrong in parsing the title (malformed or not valid) -> skip to the next title
log("Error msg: " + str(e))
return False, False
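# Illustrative examples of the parsing above (titles are made up):
#     parse_stream_title("[16:00 GMT] Arsenal vs Chelsea", has_time=True)
#     -> ('16:00 GMT', 'Arsenal vs Chelsea')
#     parse_stream_title("Arsenal vs Chelsea", has_time=False)
#     -> ('', 'Arsenal vs Chelsea')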
def parse_submission(reddit_id):
"""
Extracts stream links from top 10 comment of the post
:param reddit_id: specific reddit post in r/soccerstreams
:return: JSON formatted list of stream links for a particular match, size of the list
"""
regex = r"\[(.*?)\]*\((.*?)\)" # Extracts markdown hyperlinks + names
r_submission = r.get_submission(submission_id=reddit_id)
stream_links = collections.OrderedDict({})
for i, comment in enumerate(r_submission.comments[:10]):
matches = re.findall(regex, comment.body)
for x, link in enumerate(matches):
stream_links.update({str(i) + "-" + str(x): [{ # creates an entry for every hyperlink found
"stream_title": link[0].strip(),
"stream_link": link[1],
"upvotes": comment.score
}]
})
# print(json.dumps(stream_links), len(stream_links))
return json.dumps(stream_links), len(stream_links)
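# Illustrative example of the markdown-link regex used above (the URL is made up):
#     re.findall(r"\[(.*?)\]*\((.*?)\)", "[Stream 1](http://example.com/live)")
#     -> [('Stream 1', 'http://example.com/live')]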
| 38.157895
| 115
| 0.599448
|
e6ee4b37e0f83df1f07ed24afe46e6aedc49c9d0
| 239
|
py
|
Python
|
ListaDeExercicios/Exercicio09.py
|
LucasAlmeida0/Estudos
|
ae5b498c0bf3dee94f761a5fe49c77b0e270d483
|
[
"MIT"
] | null | null | null |
ListaDeExercicios/Exercicio09.py
|
LucasAlmeida0/Estudos
|
ae5b498c0bf3dee94f761a5fe49c77b0e270d483
|
[
"MIT"
] | null | null | null |
ListaDeExercicios/Exercicio09.py
|
LucasAlmeida0/Estudos
|
ae5b498c0bf3dee94f761a5fe49c77b0e270d483
|
[
"MIT"
] | null | null | null |
# 9 Write a program that asks for a temperature in degrees Fahrenheit, converts it, and displays the temperature in degrees Celsius.
F = float(input("Digite a temperatura em °F:"));
C = 5 * ((F-32) / 9);
print("{}°F equivale a {:.2f}°C ".format(F, C));
| 34.142857
| 116
| 0.661088
|
b2189619c64ee4bb2eaa1fbd1eae2f107a868d46
| 7,167
|
py
|
Python
|
Boundary Hunter Ideas/TensorFlow/TH-ONLY-BH-WITHB.py
|
garibaldu/boundary-seekers
|
441fea01e93de882bf22e0deb411f0b10602fa37
|
[
"MIT"
] | null | null | null |
Boundary Hunter Ideas/TensorFlow/TH-ONLY-BH-WITHB.py
|
garibaldu/boundary-seekers
|
441fea01e93de882bf22e0deb411f0b10602fa37
|
[
"MIT"
] | null | null | null |
Boundary Hunter Ideas/TensorFlow/TH-ONLY-BH-WITHB.py
|
garibaldu/boundary-seekers
|
441fea01e93de882bf22e0deb411f0b10602fa37
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
import math
np.random.seed(1234)
random.seed(1234)
plt.switch_backend("TkAgg")
def plotScatter(points, color):
xs = [x[0] for x in points]
ys = [y[1] for y in points]
plt.scatter(xs, ys, c=color)
def plot_weights(weights, gate, color):
plot_line(weights, color)
plot_line(gate, 'r')
#print("B: " + str(byas))
#print("XCoef: " + str(Xcoef))
def plot_line(weights, color):
n = weights
byas = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]
plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], '{}-'.format(color))
def plot_centroid(centroid):
plt.plot(centroid[0], centroid[1], markersize=10, marker='x', color='g', mew=5)
def plot_incorrect(point):
plt.plot(point[0], point[1], markersize=5, marker='x', color='r', mew=5)
def generateChevronData():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generateChevronDataWithNoise():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
elif x <= y:
points.append([x/50.0,y/50.0])
targets.append(float(np.random.randint(2)))
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_split_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x < 25 and x > -25 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_clumps():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, int(totalPoints/2.0)):
x = random.randint(xBounds[0], 0)
y = random.randint(yBounds[0], 0)
if -x - 30 < y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
for i in range(0, int(totalPoints/2.0)):
x = random.randint(0, xBounds[1])
y = random.randint(0, yBounds[1])
if -x + 30 > y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
return np.array(points), np.array(targets)
def generate_rectangle_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if np.abs(x) < 30 and np.abs(y) < 30 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def sigmoid(phi):
return 1.0/(1.0 + tf.exp(-phi))
points, out = generate_clumps()#generateChevronDataWithNoise()#generateChevronData()#generate_split_data()#generate_rectangle_data()#generateChevronDataWithNoise()#
in_size = 2
out_size = 1
num_centroids = 1
num_outputs = 1
np.random.seed(21354)
random.seed(5324)
inputs = tf.placeholder('float64', [in_size])
targets = tf.placeholder('float64', [out_size])
hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids, in_size+1)))
gate_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids, in_size+1)))
byas = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids)))
output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_outputs, num_centroids + 1)))
inputs_prime = tf.concat([[1.0], inputs], axis=0)
# Perform computation
prob = tf.reduce_sum(tf.multiply(inputs_prime, hidden_weights), 1)
g = sigmoid(tf.reduce_sum(tf.multiply(inputs_prime, gate_weights), 1))
#hidden_out = tf.add(byas, tf.multiply(g, tf.subtract(prob, byas)))
hidden_out = sigmoid(tf.add(g * prob, (1-g) * byas))
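# The gate g blends the boundary response with a learned constant: where g is close
# to 1 the unit passes the linear boundary output (prob) through the sigmoid, and
# where g is close to 0 it falls back to the bias term byas instead.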
targets_prime = tf.expand_dims(targets, 1)
output = hidden_out
errors = -(targets_prime * tf.log(output) + (1 -targets_prime) * tf.log(1 - output))#tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)
error = tf.reduce_sum(errors)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
#clip_byas = tf.assign(byas, tf.clip_by_value(byas, 0, 1))
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
for e in range(6000):
for d in range(len(points)):
session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
#session.run(clip_byas)
#session.run(clip_op_betas)
if e % 10 == 0:
print(session.run(byas))
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
#print(session.run(prob, feed_dict={inputs: points[d], targets: [out[d]]}))
#print(session.run(g, feed_dict={inputs: points[d], targets: [out[d]]}))
print(err)
#print(session.run(betas))
incorrect = []
for d in range(len(points)):
o = session.run(output, feed_dict={inputs: points[d], targets: [out[d]]})
if not int(round(o[0])) == out[d]:
incorrect.append(points[d])
gates = session.run(gate_weights)
byas = session.run(byas)
boundarys = session.run(hidden_weights)
print(byas)
# Plot points on graph
c1 = []
c2 = []
for i in range(0, len(points)):
if out[i] == 0:
c1.append(points[i])
else:
c2.append(points[i])
print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
plotScatter(c1,'y')
plotScatter(c2, 'b')
for i in range(len(boundarys)):
plot_weights(boundarys[i], gates[i], 'g')
for point in incorrect:
plot_incorrect(point)
plt.gca().set_aspect('equal')
plt.xlim(xmin=-1.5, xmax=1.5)
plt.ylim(ymin=-1.5, ymax=1.5)
plt.show()
| 27.45977
| 164
| 0.583229
|
3da68b3bc1029f351ee13fb046fff736e7e4663f
| 3,026
|
py
|
Python
|
text_tool_gui.py
|
stimulu/kicad-text-tool
|
c0d9ff77dbd74e248023dac4e53b632b9f55cf5b
|
[
"Apache-2.0"
] | 3
|
2020-04-28T22:28:24.000Z
|
2021-04-27T06:56:30.000Z
|
text_tool_gui.py
|
stimulu/kicad-text-tool
|
c0d9ff77dbd74e248023dac4e53b632b9f55cf5b
|
[
"Apache-2.0"
] | 2
|
2020-07-06T19:01:40.000Z
|
2020-10-25T08:53:18.000Z
|
text_tool_gui.py
|
stimulu/kicad-text-tool
|
c0d9ff77dbd74e248023dac4e53b632b9f55cf5b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class TextToolDialog
###########################################################################
class TextToolDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Text Tool", pos = wx.DefaultPosition, size = wx.Size( 581,250 ), style = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER )
self.SetSizeHints( wx.Size( 510,250 ), wx.DefaultSize )
gbSizer1 = wx.GridBagSizer( 0, 0 )
gbSizer1.SetFlexibleDirection( wx.BOTH )
gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
gbSizer1.SetMinSize( wx.Size( 510,250 ) )
font_listChoices = []
self.font_list = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, font_listChoices, wx.CB_SORT )
self.font_list.SetSelection( 1 )
gbSizer1.Add( self.font_list, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
self.size_spin = wx.SpinCtrlDouble( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 110,-1 ), wx.SP_ARROW_KEYS, 1, 400, 12.000000, 0.1 )
self.size_spin.SetDigits( 1 )
gbSizer1.Add( self.size_spin, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.EXPAND, 5 )
self.text_field = wx.TextCtrl( self, wx.ID_ANY, u"KiCad", wx.DefaultPosition, wx.DefaultSize, wx.TE_DONTWRAP )
gbSizer1.Add( self.text_field, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
layer_listChoices = []
self.layer_list = wx.ListBox( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 130,-1 ), layer_listChoices, wx.LB_EXTENDED|wx.LB_MULTIPLE )
gbSizer1.Add( self.layer_list, wx.GBPosition( 0, 3 ), wx.GBSpan( 6, 1 ), wx.ALL|wx.EXPAND, 5 )
self.add_button = wx.Button( self, wx.ID_ANY, u"Add", wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer1.Add( self.add_button, wx.GBPosition( 3, 2 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.EXPAND, 5 )
self.size_status = wx.StaticText( self, wx.ID_ANY, u"pt (3.88 mm)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.size_status.Wrap( -1 )
gbSizer1.Add( self.size_status, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
gbSizer1.AddGrowableCol( 0 )
gbSizer1.AddGrowableRow( 5 )
self.SetSizer( gbSizer1 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.on_close )
self.size_spin.Bind( wx.EVT_TEXT, self.on_size_change )
self.add_button.Bind( wx.EVT_BUTTON, self.run )
def __del__( self ):
pass
# Virtual event handlers, overide them in your derived class
def on_close( self, event ):
event.Skip()
def on_size_change( self, event ):
event.Skip()
def run( self, event ):
event.Skip()
| 36.902439
| 178
| 0.630535
|
64c1ea1a276da4782516666258346ea3886f5d0e
| 1,396
|
py
|
Python
|
setup.py
|
Olen/python-hole
|
ad2c2e03f85beb85ae6f3eac225cac0e70a74554
|
[
"MIT"
] | null | null | null |
setup.py
|
Olen/python-hole
|
ad2c2e03f85beb85ae6f3eac225cac0e70a74554
|
[
"MIT"
] | null | null | null |
setup.py
|
Olen/python-hole
|
ad2c2e03f85beb85ae6f3eac225cac0e70a74554
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Setup file for the *hole API Python client."""
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as desc:
long_description = desc.read()
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload')
sys.exit()
setup(
name='hole',
version='0.5.1',
description='Python API for interacting with *hole.',
long_description=long_description,
url='https://github.com/home-assistant-ecosystem/python-hole',
download_url='https://github.com/home-assistant-ecosystem/python-hole/releases',
author='Fabian Affolter',
author_email='fabian@affolter-engineering.ch',
license='MIT',
install_requires=['aiohttp<4', 'async_timeout<4'],
packages=['hole'],
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
)
| 30.347826
| 84
| 0.655444
|
ce39ac99c271576e4ea9844002bcccafd18a5e1e
| 32,473
|
py
|
Python
|
keg_elements/forms/__init__.py
|
level12/keg-elements
|
69003d374cd402f64b612e291d49049dd923ec85
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T09:07:22.000Z
|
2017-04-27T09:07:22.000Z
|
keg_elements/forms/__init__.py
|
level12/keg-elements
|
69003d374cd402f64b612e291d49049dd923ec85
|
[
"BSD-3-Clause"
] | 142
|
2015-06-04T18:46:28.000Z
|
2022-03-04T21:10:58.000Z
|
keg_elements/forms/__init__.py
|
level12/keg-elements
|
69003d374cd402f64b612e291d49049dd923ec85
|
[
"BSD-3-Clause"
] | 4
|
2015-06-04T20:54:24.000Z
|
2015-10-21T17:36:36.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import inspect
import logging
from operator import attrgetter
import flask
from decimal import Decimal
from flask_wtf import FlaskForm as BaseForm
from keg.db import db
import sqlalchemy as sa
from markupsafe import Markup
from sqlalchemy_utils import ArrowType, get_class_by_table
import six
import wtforms.fields
import wtforms.form
from wtforms.validators import InputRequired, Optional, StopValidation, NumberRange
from wtforms_alchemy import (
FormGenerator as FormGeneratorBase,
model_form_factory,
model_form_meta_factory,
)
from wtforms_components.fields import (
SelectField as SelectFieldBase,
SelectMultipleField as SelectMultipleFieldBase,
)
from keg_elements.db.columns import DBEnum
from keg_elements.db.utils import has_column
from keg_elements.extensions import lazy_gettext as _
from keg_elements.forms.validators import NumberScale
form_element = flask.Blueprint('form_element', __name__)
log = logging.getLogger(__name__)
def to_title_case(x):
""" underscore or dash to title case notation """
return x.replace('_', ' ').replace('-', ' ').title()
# sentinel
_not_given = ()
class FieldMeta(object):
"""Meta information for fields to override model-generated info.
Rather than forcing all-or-nothing acceptance of model-generated meta info from wtforms,
FieldMeta may be provided in the FieldsMeta nested class of the form to override specifics.
All modifications are applied to the field instance during the form generation process.
Example::
class PersonForm(ModelForm):
class Meta:
model = Person
class FieldsMeta:
name = FieldMeta('Full Name')
:param label_text: Force a label value.
:param description: Force a description value.
:param label_modifier: Callable to be called with the default label. label_text takes
precedence if both are provided. But, this modifier will apply to choices as well if
applicable and a choices_modifier is not given.
:param choices_modifier: Callable to be called with the label value for choices.
:param required: Force validators to be added/removed for requirement.
:param widget: Force a specific widget to be used for the field in render.
:param extra_validators: Add the given validators to those existing on the field.
:param coerce: Applies a specific coerce for field values. Applicable to select fields only.
:param default: Forces a default value on the field.
"""
def __init__(self, label_text=_not_given, description=_not_given, label_modifier=_not_given,
choices_modifier=_not_given, choices=None, required=_not_given, widget=_not_given,
extra_validators=tuple(), coerce=_not_given, default=_not_given):
self.label_text = label_text
self.label_modifier = label_modifier
self.description = description
self.choices_modifier = choices_modifier
self.choices = choices
self.required = required
self.widget = widget
self.extra_validators = extra_validators
self.coerce = coerce
self.default = default
assert self.required in (_not_given, False, True)
def apply_to_field(self, field):
# field is a wtforms.fields.core.UnboundField instance
self.apply_to_label(field)
self.apply_to_description(field)
self.apply_to_choices(field)
self.apply_required(field)
self.apply_widget(field)
self.apply_extra_validators(field)
self.apply_coerce(field)
self.apply_default(field)
def apply_to_label(self, field):
default_label = field.kwargs['label']
if self.label_text is not _not_given:
label_text = self.label_text
elif self.label_modifier is None:
label_text = default_label
elif self.label_modifier is _not_given:
label_text = to_title_case(default_label)
else:
label_text = self.label_modifier(default_label)
field.kwargs['label'] = self.modify_label(label_text)
def modify_label(self, label_text):
""" for subclasses to easily modify the final label text value """
return label_text
def apply_to_description(self, field):
default_description = field.kwargs.get('description')
if self.description is _not_given:
description = default_description
else:
description = self.description
field.kwargs['description'] = self.modify_description(description)
def modify_description(self, description):
""" for subclasses to easily modify the final label text value """
return description
def apply_to_choices(self, field):
default_choices = field.kwargs.get('choices', None)
if default_choices is None:
# this isn't a field that has choices
return
if self.choices_modifier is None:
modifier = None
elif self.choices_modifier is not _not_given:
modifier = self.choices_modifier
elif self.label_modifier is None:
# no choices modifier and the label modifier is explicit, so no label modifier
modifier = None
elif self.label_modifier is _not_given:
# title case to labels by default
modifier = to_title_case
else:
# a label modifier was given, use that since no choices modifier was given to override
modifier = self.label_modifier
if self.choices is not None:
choices = self.choices
elif modifier is None:
choices = default_choices
else:
choices = [(v, modifier(l)) for v, l in default_choices]
field.kwargs['choices'] = self.modify_choices(choices)
def modify_choices(self, choices):
return choices
def apply_coerce(self, field):
if self.coerce is _not_given:
return
if not issubclass(field.field_class, wtforms.SelectField):
raise ValueError('`coerce` argument may only be used for select fields')
field.kwargs['coerce'] = self.coerce
def apply_default(self, field):
if self.default is _not_given:
return
field.kwargs['default'] = self.default
def apply_required(self, field):
validators = field.kwargs.get('validators', [])
if self.required == _not_given:
# required value not given on FieldMeta, don't make any changes
pass
elif self.required:
# If a required validator isn't present, we need to add one.
req_val_test = lambda val: hasattr(val, 'field_flags') and 'required' in val.field_flags
if not list(filter(req_val_test, validators)):
validators.append(InputRequired())
# If an optional validator is present, we need to remove it.
not_opt_val_test = lambda val: not hasattr(val, 'field_flags') or \
'optional' not in val.field_flags
not_opt_validators = list(filter(not_opt_val_test, validators))
field.kwargs['validators'] = not_opt_validators
else:
# If an optional validator isn't present, we need to add one.
opt_val_test = lambda val: hasattr(val, 'field_flags') and 'optional' in val.field_flags
if not list(filter(opt_val_test, validators)):
validators.append(Optional())
# If a required validator is present, we need to remove it.
non_req_val_test = lambda val: not hasattr(val, 'field_flags') or \
'required' not in val.field_flags
not_req_validators = list(filter(non_req_val_test, validators))
field.kwargs['validators'] = not_req_validators
def apply_widget(self, field):
if self.widget != _not_given:
field.kwargs['widget'] = self.widget
def apply_extra_validators(self, field):
field.kwargs.setdefault('validators', [])
field.kwargs['validators'] += self.extra_validators
def select_coerce(es_pass_thru, coerce, value):
if es_pass_thru and value == '':
return value
if coerce is not _not_given:
return coerce(value)
# try coercing to int first. If not valid, fall back to default behavior
try:
return int(value)
except ValueError as e:
if 'invalid literal for int()' not in six.text_type(e):
raise
return six.text_type(value)
class SelectMixin:
def __init__(self, *args, **kwargs):
self.add_blank_choice = kwargs.pop('add_blank_choice', True)
coerce_arg = kwargs.pop('coerce', _not_given)
super().__init__(*args, **kwargs)
if self.add_blank_choice:
# If we are adding a blank choice, and it is selected, we want the value that comes back
# in .data to be None -> as if no value was selected.
#
# self.filters is a tuple, so have to do some extra work.
self.filters = [lambda x: None if x == '' else x] + list(self.filters)
self.coerce = functools.partial(select_coerce, self.add_blank_choice, coerce_arg)
def iter_choices(self):
if self.add_blank_choice:
yield ('', '', (self.coerce, False))
for value in super().iter_choices():
yield value
@property
def choice_values(self):
values = super().choice_values
if self.add_blank_choice:
return [''] + values
return values
@property
def selected_choice_label(self):
value_dict = dict(self.concrete_choices)
return value_dict.get(self.data)
class SelectField(SelectMixin, SelectFieldBase):
"""
Provides helpful features above wtforms_components SelectField which it is based on:
1) Adds a blank choice by default at the front of the choices. If the field is
required, this forces the user to make an explicit selection, which avoids the
first value in the field being silently submitted as a default.
2) The coerce function used for the choices will automatically convert to int if possible,
falling back to unicode if the value is not an integer.
"""
class SelectMultipleField(SelectMixin, SelectMultipleFieldBase):
"""
Provides helpful features above wtforms_components SelectMultipleField which it is
based on:
The coerce function used for the choices will automatically convert to int if possible,
falling back to unicode if the value is not an integer.
"""
def __init__(self, *args, **kwargs):
kwargs['add_blank_choice'] = kwargs.get('add_blank_choice', False)
super().__init__(*args, **kwargs)
class MultiCheckboxField(wtforms.fields.SelectMultipleField):
"""
A multiple-select, except displays a list of checkboxes.
"""
class RequiredBoolRadioField(wtforms.fields.RadioField):
"""A radio group field with true/false labels and a required validator.
:param true_label: Optional, defaults to Yes.
:param false_label: Optional, defaults to No.
:param validators: Optional. Any provided validators will be added to InputRequired.
"""
def __init__(self, *args, **kwargs):
true_label = kwargs.pop('true_label', _('Yes'))
false_label = kwargs.pop('false_label', _('No'))
def bool_coerce(val):
if val == u'True':
return True
if val == u'False':
return False
return val
kwargs['choices'] = [(True, true_label), (False, false_label)]
kwargs['coerce'] = bool_coerce
kwargs['validators'] = [InputRequired()] + kwargs.get('validators', [])
super(RequiredBoolRadioField, self).__init__(*args, **kwargs)
self.type = 'RadioField'
class RelationshipFieldBase:
"""Common base for single/multiple select fields that reference ORM relationships.
Handles one-to-many and many-to-many patterns.
Note, must use the wtforms-components fields as a base, because we depend on
lazy-loaded choices. At import time, a field's choices may not be fully available
in the data. In addition, when pairing the form with an existing record, we need
to ensure that the option from the record is present even if it would normally
be filtered (e.g. an inactive option).
"""
def __init__(self, label=None, orm_cls=None, label_attr=None, fk_attr='id',
query_filter=None, coerce=_not_given, **kwargs):
label = self.field_label_modifier(label)
self.orm_cls = orm_cls
self.label_attr = label_attr
if self.label_attr is None:
self.label_attr = self.get_best_label_attr()
self.fk_attr = fk_attr
self.query_filter = query_filter
if not self.fk_attr and not coerce:
def coerce_to_orm_obj(value):
"""Coerce ID to relationship object."""
# If coming form formdata, we'll get a string ID.
if isinstance(value, str):
return self.orm_cls.query.get(value)
# If coming from object data, we'll get an ORM instance.
return value
coerce = coerce_to_orm_obj
super().__init__(label=label, choices=self.get_choices, coerce=coerce, **kwargs)
def field_label_modifier(self, label):
"""Modifies the label to something more human-friendly.
One-to-many relationships often have a field name like "foo_id", which title cases
as "Foo Id". Form should only show "Foo", though, so we trim it that way here.
"""
if label.lower().endswith(' id'):
return label.rsplit(' ', 1)[0]
return label
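# e.g. a "bar_id" column title-cases to "Bar Id", which this trims to "Bar";
# labels without the trailing " id" are returned unchanged.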
def build_query(self):
query = self.query_base()
query = self.filter_query(query)
return query
def query_base(self):
return self.orm_cls.query.order_by(self.label_attr)
def get_data_filter(self):
if self.fk_attr:
return getattr(self.orm_cls, self.fk_attr) == self.data
else:
return self.orm_cls.id == self.data.id
def filter_query(self, query):
filter_terms = []
# Get supplied filters.
if callable(self.query_filter):
filter_terms.append(self.query_filter(self))
elif self.query_filter is not None:
filter_terms.append(self.query_filter)
# Having an existing value should filter the query.
if self.data is not None:
data_filter = self.get_data_filter()
if data_filter is not None:
filter_terms.append(data_filter)
# Apply filter terms with or_, or directly, depending on length.
if len(filter_terms) == 1:
query = query.filter(*filter_terms)
elif len(filter_terms) > 1:
query = query.filter(sa.sql.or_(*filter_terms))
return query
def get_best_label_attr(self):
if has_column(self.orm_cls, 'label'):
return 'label'
if has_column(self.orm_cls, 'name'):
return 'name'
return None
def get_option_label(self, obj):
if self.label_attr:
return getattr(obj, self.label_attr)
return str(obj)
def get_choices(self):
query = self.build_query()
def get_value(obj):
if self.fk_attr:
return str(getattr(obj, self.fk_attr))
return str(obj.id)
return [(get_value(obj), self.get_option_label(obj)) for obj in query]
@property
def choice_values(self):
# coerce values used for validation, because the data we're matching will
# be int type
return [self.coerce(v) for v in super().choice_values]
class RelationshipField(RelationshipFieldBase, SelectField):
"""SelectField for relationships.
Args:
orm_cls (class): Model class of the relationship attribute. Used to query
records for populating select options.
relationship_attr (str): Name of the attribute on form model that refers to
the relationship object. Typically this is a foreign key ID.
label_attr (str): Name of attribute on relationship class to use for select
option labels.
fk_attr (str): Optional name of foreign key column of ORM class. If set to
None, coerce values to instances of ORM class. Otherwise, coerce values to
the attribute of ORM class the foreign key belongs to. Default is 'id'.
query_filter (callable): Optional SA query filter criterion for querying select
options. Can be a function that returns a filter criterion. Function is
called with the RelationshipField instance it belongs to.
coerce (callable): Optional function used to coerce form values. By default,
if fk_attr is set to None, values are coerced to instances of ORM class.
Otherwise, the default select coercion is applied. Setting this overrides
default behavior.
kwargs: Passed to ``SelectField.__init__``.
Example::
class Bar(Model):
name = Column(Unicode(255))
foos = relationship('Foo', foreign_keys='foos.id')
class Foo(Model):
name = Column(Unicode(255))
bar_id = Column(sa.ForeignKey(Bar.id))
bar = relationship(Bar, foreign_keys=bar_id)
class FooForm(ModelForm):
bar_id = RelationshipField('Bar Label', Bar, 'name')
"""
class RelationshipMultipleField(RelationshipFieldBase, SelectMultipleField):
"""SelectMultipleField for relationships.
Args:
orm_cls (class): Model class of the relationship attribute. Used to query
records for populating select options.
relationship_attr (str): Name of the collection on form model that refers to
the relationship object.
label_attr (str): Name of attribute on relationship class to use for select
option labels.
query_filter (callable): Optional SA query filter criterion for querying select
options. Can be a function that returns a filter criterion. Function is
called with the RelationshipField instance it belongs to.
coerce (callable): Optional function used to coerce form values. By default,
values are coerced to instances of ORM class. Setting this overrides
default behavior.
kwargs: Passed to ``SelectMultipleField.__init__``.
Example::
class Bar(Model):
name = Column(Unicode(255))
foos = relationship('Foo', foreign_keys='foos.id')
class Foo(Model):
name = Column(Unicode(255))
bar_id = Column(sa.ForeignKey(Bar.id))
bar = relationship(Bar, foreign_keys=bar_id)
class BarForm(ModelForm):
foos = RelationshipMultipleField('Foos', Foo, 'name')
"""
def __init__(self, label, orm_cls, label_attr=None,
query_filter=None, coerce=_not_given, **kwargs):
super().__init__(label, orm_cls, label_attr, None, query_filter, coerce, **kwargs)
def get_data_filter(self):
if not self.data:
# Empty set should not add any options. Returning in_ in this case has
# undesirable results.
return
existing_ids = [obj.id for obj in self.data]
return self.orm_cls.id.in_(existing_ids)
class _TypeHintingTextInputBase(wtforms.widgets.TextInput):
def __init__(self, prefix=None, suffix=None):
self.prefix = prefix
self.suffix = suffix
super().__init__()
class TypeHintingTextInputB3(_TypeHintingTextInputBase):
"""
A text input widget with a prefix and/or suffix to hint at the expected type or units.
For use with bootstrap 3
"""
def __call__(self, field, **kwargs):
def make_addon(txt):
return Markup(
'<span class="input-group-addon">{}</span>'.format(wtforms.widgets.core.escape(txt))
)
return Markup(
'<div class="input-group">{pre}{field}{post}</div>'.format(
pre=make_addon(self.prefix) if self.prefix else '',
field=super().__call__(field, **kwargs).__html__(),
post=make_addon(self.suffix) if self.suffix else ''
)
)
class TypeHintingTextInputB4(_TypeHintingTextInputBase):
"""
A text input widget with a prefix and/or suffix to hint at the expected type or units.
For use with bootstrap 4
"""
def __call__(self, field, **kwargs):
def make_addon(txt, addon_type):
return Markup(
'<div class="input-group-{type}">'
' <span class="input-group-text">{txt}</span>'
"</div>".format(type=addon_type, txt=wtforms.widgets.core.escape(txt))
)
return Markup(
'<div class="input-group">{pre}{field}{post}</div>'.format(
pre=make_addon(self.prefix, "prepend") if self.prefix else "",
field=super().__call__(field, **kwargs).__html__(),
post=make_addon(self.suffix, "append") if self.suffix else "",
)
)
def _max_for_numeric(digits, scale):
return Decimal('{}.{}'.format('9' * (digits - scale), '9' * scale))
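# e.g. _max_for_numeric(5, 2) == Decimal('999.99'), the largest value a
# Numeric(5, 2) column can hold.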
class FormGenerator(FormGeneratorBase):
"""Model form generator that applies field meta info, provides validators, etc.
Meta nested class directives (in addition to wtforms-alchemy):
- include_datetimes_with_default
- include_required_foreign_keys
Field class overrides:
- Use our SelectField instead of the WTForms default
- Use our RequiredBoolRadioField for non-nullable boolean fields
- Use RelationshipField for foreign key fields
Meta info modifiers:
- Use FieldsMeta.<field_name> if provided
- Falls back to FieldsMeta.__default__
- If none of the above, uses a blank FieldsMeta object, which will title case the label.
Validators:
- Applies range/scale numeric validators when applicable.
"""
def __init__(self, form_class):
super(FormGenerator, self).__init__(form_class)
self.fields_meta = getattr(self.form_class, 'FieldsMeta', None)
def skip_column(self, column):
# Verify the key is not also in exclude=[] so we don't break compatibility with forms
# that already manually excluded these fields
if (not self.meta.include_datetimes_with_default
and isinstance(column.type, ArrowType)
and column.default
and column.key not in self.meta.exclude):
return True
# include_foreign_keys will pull in all foreign keys on the object. If we want the
# form to include only required keys, we use include_required_foreign_keys.
include_required_fks = getattr(self.meta, 'include_required_foreign_keys', False)
if (include_required_fks and column.foreign_keys and column.nullable is False):
return False
return super().skip_column(column)
def get_field_class(self, column):
field_cls = super(FormGenerator, self).get_field_class(column)
if field_cls is SelectFieldBase:
return SelectField
is_required_boolean = (field_cls is wtforms.fields.BooleanField
and not column.nullable
and not column.default)
if is_required_boolean:
return RequiredBoolRadioField
return field_cls
def get_field_modifier(self, prop):
# is there an entry in FieldsMeta?
if hasattr(self.fields_meta, prop.key):
field_modifier = getattr(self.fields_meta, prop.key)
else:
field_modifier = getattr(self.fields_meta, '__default__', _not_given)
if field_modifier is _not_given:
field_modifier = FieldMeta
return field_modifier() if inspect.isclass(field_modifier) else field_modifier
def create_field(self, prop, column):
if column.foreign_keys:
foreign_key = next(iter(column.foreign_keys))
orm_cls = get_class_by_table(db.Model, foreign_key.column.table)
validators = self.create_validators(prop, column)
field = RelationshipField(
label=to_title_case(str(column.key)),
orm_cls=orm_cls,
validators=validators,
)
else:
field = super(FormGenerator, self).create_field(prop, column)
modifier = self.get_field_modifier(prop)
if modifier is not None:
modifier.apply_to_field(field)
return field
def create_validators(self, prop, column):
validators = super(FormGenerator, self).create_validators(prop, column)
if isinstance(column.type, sa.Numeric) and not isinstance(column.type, sa.Float):
if column.type.precision is None or column.type.scale is None:
raise ValueError('Numeric fields must specify precision and scale')
max_ = _max_for_numeric(column.type.precision, column.type.scale)
validators.append(NumberRange(min=-max_, max=max_))
validators.append(NumberScale(column.type.scale))
return validators
def length_validator(self, column):
if isinstance(column.type, sa.types.Enum):
return None
return super(FormGenerator, self).length_validator(column)
def select_field_kwargs(self, column):
enum_cls = getattr(column.type, 'enum_class', None)
if enum_cls and issubclass(enum_cls, DBEnum):
return {
'coerce': enum_cls.coerce,
'choices': enum_cls.form_options()
}
return super().select_field_kwargs(column)
def field_to_dict(field):
if isinstance(field, wtforms.fields.FormField):
return form_fields_to_dict(field)
if isinstance(field, wtforms.fields.FieldList):
return [field_to_dict(subfield) for subfield in field]
return {
'data': field.data,
'errors': field.errors,
'label': field.label.text,
'required': field.flags.required,
}
def form_fields_to_dict(form):
return dict((six.text_type(name), field_to_dict(field))
for name, field in six.iteritems(form._fields))
___validator_creation_counter = 0
def form_validator(func=None, only_when_fields_valid=False):
"""Decorator used to mark a method as a form level validator.
:param only_when_fields_valid: Use to disable validator if form already has errors.
"""
if func is None:
return functools.partial(form_validator, only_when_fields_valid=only_when_fields_valid)
@functools.wraps(func)
def wrapper(form):
if not only_when_fields_valid or not form.field_errors:
return func(form)
global ___validator_creation_counter
wrapper.___form_validator = True
___validator_creation_counter += 1
wrapper.___creation_counter = ___validator_creation_counter
return wrapper
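# Hedged usage sketch (the form and field names below are illustrative):
#
#     class DateRangeForm(Form):
#         start_date = wtforms.fields.DateField()
#         end_date = wtforms.fields.DateField()
#
#         @form_validator(only_when_fields_valid=True)
#         def check_range(self):
#             if self.start_date.data > self.end_date.data:
#                 raise ValueError('start date must not be after end date')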
class Form(BaseForm):
"""Base form with a bunch of QoL improvements
:param _field_order: Relying on the default field ordering can lead to unintuitive forms. It is
possible to override this by adding the ``_field_order`` class attribute. Set this class
variable to a tuple or list of field names (addressable via Form._fields['name_of_field'])
and the form will render in that order. You must include all the fields, except CSRF.
Forgetting a field or adding one which doesn't exist will cause the form to raise a
``ValueError`` and the form will not be rendered.
class MyForm(Form):
_field_order = ('field1', 'field2',)
field1 = String('field1_label') # Note that we don't use the label in the ordering
field2 = String()
"""
def __init__(self, *args, **kwargs):
super(Form, self).__init__(*args, **kwargs)
self._form_level_errors = []
self._errors = None
self.after_init(args, kwargs)
def __iter__(self):
order = getattr(self, '_field_order', None)
if order is None:
return super().__iter__()
has_csrf = hasattr(self, 'csrf_token')
order = (['csrf_token'] if has_csrf else []) + list(order)
declared = set(self._fields.keys())
ordered = set(order)
if declared != ordered:
not_ordered = declared - ordered
extra_ordered = ordered - declared
raise ValueError(
'Custom field ordering for {} is incorrect.'.format(self.__class__.__name__),
' Missing fields: {} '.format(not_ordered),
' Extra fields: {} '.format(extra_ordered),
)
return (self._fields[f] for f in order)
def after_init(self, args, kwargs):
"""Hook for providing customization on the form after fields are initialized."""
pass
def fields_todict(self):
"""Turns a form into dicts and lists with both data and errors for each field."""
return form_fields_to_dict(self)
def validate(self):
"""Applies validators and returns bool.
Methods decorated as form-level validators are run after WTForms generic validation.
"""
fields_valid = super(Form, self).validate()
form_validators = {}
# Traverse the MRO so we can get validators in parent classes.
# Do so in reverse order so child classes can override parents' validators.
# WTForms will not include the methods on form instances so we get them from the classes.
for cls in reversed(self.__class__.__mro__):
cls_validators = {
name: attr for name, attr in six.iteritems(cls.__dict__)
if getattr(attr, '___form_validator', False)
}
form_validators.update(cls_validators)
self._form_level_errors = []
for validator in sorted(form_validators.values(), key=attrgetter('___creation_counter')):
try:
validator(self)
except StopValidation as e:
if e.args and e.args[0]:
self._form_level_errors.append(e.args[0])
break
except ValueError as e:
self._form_level_errors.append(e.args[0])
return fields_valid and not self._form_level_errors
@property
def form_errors(self):
"""Form-level validator errors will be logged in this list."""
return self._form_level_errors
@property
def field_errors(self):
"""Field-level validator errors come from WTForms' errors."""
return super().errors
@property
def errors(self):
"""Field-level errors, plus form-level errors under the key "_form"."""
errors = self.field_errors
if self.form_errors:
errors['_form'] = self.form_errors
return errors
BaseModelFormMeta = model_form_meta_factory()
class ModelFormMeta(BaseModelFormMeta):
"""Base model form metaclass that handles nested inheritance issues.
The default metaclass here will handle the nested Meta class. A form
subclass with a Meta nested class will treat the form's superclass' Meta
as a parent.
This metaclass does the same thing for FieldsMeta, allowing superclasses
to define a FieldsMeta that may reasonably be passed down to the subclass.
"""
def __init__(cls, *args, **kwargs):
bases = []
for class_ in cls.__mro__:
if 'FieldsMeta' in class_.__dict__:
bases.append(getattr(class_, 'FieldsMeta'))
if object not in bases:
bases.append(object)
cls.FieldsMeta = type('FieldsMeta', tuple(bases), {})
BaseModelFormMeta.__init__(cls, *args, **kwargs)
BaseModelForm = model_form_factory(Form, meta=ModelFormMeta, form_generator=FormGenerator)
class ModelForm(BaseModelForm):
"""Base model-generated form class that applies KegElements generator and meta."""
@classmethod
def get_session(cls):
return db.session
| 37.497691
| 100
| 0.653312
|
45e4615d3b265df9534720c07390c73f8d1bc2c2
| 38
|
py
|
Python
|
pymkeviewer/pymkeviewer/pymkeviewer/__init__.py
|
MagikEyeInc/SDK
|
e4a3ac92a7ec5bf57978d15d43feb81b1f595653
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"Unlicense"
] | 12
|
2021-07-14T21:30:53.000Z
|
2021-11-01T08:39:30.000Z
|
pymkeviewer/pymkeviewer/pymkeviewer/__init__.py
|
MagikEyeInc/SDK
|
e4a3ac92a7ec5bf57978d15d43feb81b1f595653
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"Unlicense"
] | 3
|
2021-07-23T06:30:29.000Z
|
2021-08-03T11:37:24.000Z
|
pymkeviewer/pymkeviewer/pymkeviewer/__init__.py
|
MagikEyeInc/SDK
|
e4a3ac92a7ec5bf57978d15d43feb81b1f595653
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"Unlicense"
] | 4
|
2021-07-23T11:30:39.000Z
|
2022-01-12T03:39:13.000Z
|
__version__ = '@PYMKEVIEWER_VERSION@'
| 19
| 37
| 0.789474
|
0946c01886ebf21667088a2ea4ab8af8d0658040
| 1,785
|
py
|
Python
|
drf_lastactivity/middlewares.py
|
sassoo/drf_lastactivity
|
cd1ed006d3726eee6ebe40402b2f7a9878e271ac
|
[
"0BSD"
] | null | null | null |
drf_lastactivity/middlewares.py
|
sassoo/drf_lastactivity
|
cd1ed006d3726eee6ebe40402b2f7a9878e271ac
|
[
"0BSD"
] | null | null | null |
drf_lastactivity/middlewares.py
|
sassoo/drf_lastactivity
|
cd1ed006d3726eee6ebe40402b2f7a9878e271ac
|
[
"0BSD"
] | null | null | null |
"""
middlewares
~~~~~~~~~~~
All of our custom DRF middlewares.
"""
from django.utils import timezone
class UpdateLastActivityMiddleware:
""" Update the last_login timestamp every SPLAY_TIME (seconds)
DRF doesn't take a stance here & I want something more
frequent than every username & password submission.
Ideally every access would update it, but that would be a big
performance hit, so the operation is debounced: the timestamp is only
rewritten if the last update happened more than SPLAY_TIME seconds ago.
By default that is 5 minutes.
"""
LAST_LOGIN_FIELD = 'last_login'
SPLAY_TIME = 300
def __init__(self, get_response):
""" Reuquired by django 2.x middlewares """
self.get_response = get_response
def __call__(self, request):
""" Taken directly from django's docs """
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
user = self.get_authenticated_user(request)
if user:
self.update_last_login(user)
return response
def get_authenticated_user(self, request):
""" Return an authenticated user model """
if request.user and request.user.is_authenticated:
return request.user
return None
def update_last_login(self, user):
""" Update the user models LAST_LOGIN_FIELD """
now = timezone.now()
last = getattr(user, self.LAST_LOGIN_FIELD, None)
if not last or (now - last).seconds > self.SPLAY_TIME:
attrs = {self.LAST_LOGIN_FIELD: now}
user.__class__.objects.filter(pk=user.pk).update(**attrs)
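# Hedged usage sketch: wiring this into a Django project is assumed to look
# roughly like the snippet below (the dotted path is inferred from this module's
# location and may differ in a real settings file):
#
#     MIDDLEWARE = [
#         # ... the usual Django / DRF middleware ...
#         'drf_lastactivity.middlewares.UpdateLastActivityMiddleware',
#     ]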
| 28.790323
| 69
| 0.653782
|
dcfb03ec43448fbd228dc0b3989332a2d5cb79c1
| 2,395
|
py
|
Python
|
tests/test_checker.py
|
isac322/flake8-force-keyword-arguments
|
32b5daa0f3183475abba3610adc02878c56d4f11
|
[
"MIT"
] | 1
|
2021-11-05T21:16:10.000Z
|
2021-11-05T21:16:10.000Z
|
tests/test_checker.py
|
isac322/flake8-force-keyword-arguments
|
32b5daa0f3183475abba3610adc02878c56d4f11
|
[
"MIT"
] | 24
|
2021-11-05T21:41:07.000Z
|
2022-03-25T16:25:36.000Z
|
tests/test_checker.py
|
isac322/flake8-force-keyword-arguments
|
32b5daa0f3183475abba3610adc02878c56d4f11
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
def test_builtins_inspected(flake8_path):
(flake8_path / 'example.py').write_text(
'getattr(object(), \'test\')',
)
result = flake8_path.run_flake8(['--kwargs-max-positional-arguments', '2'])
assert result.out_lines == []
def test_kwargs_inspect_module_overwrites(flake8_path):
(flake8_path / 'example.py').write_text(
'getattr(object(), \'test\')',
)
result = flake8_path.run_flake8(['--kwargs-max-positional-arguments', '2', '--kwargs-inspect-module', 'os'])
assert result.out_lines == [
'./example.py:1:1: FKA100 getattr\'s call uses 2 positional arguments, use keyword arguments.'
]
def test_kwargs_inspect_module_extended(flake8_path):
(flake8_path / 'example.py').write_text(
'getattr(object(), os.path.join(\'test\', \'arg\'))',
)
result = flake8_path.run_flake8(['--kwargs-max-positional-arguments', '2', '--kwargs-inspect-module-extend', 'os'])
assert result.out_lines == []
def test_default_ignore_function_pattern(flake8_path):
(flake8_path / 'example.py').write_text(
'object.__getattr__(object(), \'test\')',
)
result = flake8_path.run_flake8(['--kwargs-max-positional-arguments', '2'])
assert result.out_lines == []
def test_default_ignore_function_pattern_typing_cast(flake8_path):
(flake8_path / 'example.py').write_text(
'''
typing.cast(object, \'test\')
cast(object, 1)
''',
)
result = flake8_path.run_flake8(['--kwargs-max-positional-arguments', '2'])
assert result.out_lines == []
def test_ignore_function_pattern_extended(flake8_path):
(flake8_path / 'example.py').write_text(
'tt = lambda a, b, c: None',
)
result = flake8_path.run_flake8(
['--kwargs-max-positional-arguments', '2', '--kwargs-ignore-function-pattern-extend', '^tt$']
)
assert result.out_lines == []
def test_ignore_function_pattern_extended_multiple(flake8_path):
(flake8_path / 'example.py').write_text(
dedent(
'''
tt = lambda a, b, c: None
o = object()
o.test_function(1, 2, 3)
'''
),
)
result = flake8_path.run_flake8(
['--kwargs-max-positional-arguments', '2', '--kwargs-ignore-function-pattern-extend', '(:?^tt$|test_function$)']
)
assert result.out_lines == []
| 32.808219
| 120
| 0.63382
|
025086f1aea4bba517f4bb27d159ea1960b12908
| 4,862
|
py
|
Python
|
test/script/regression_utils.py
|
raman-bt/autopsy
|
e80ad2c732fd827c558fb088d444313fd999c717
|
[
"Apache-2.0"
] | null | null | null |
test/script/regression_utils.py
|
raman-bt/autopsy
|
e80ad2c732fd827c558fb088d444313fd999c717
|
[
"Apache-2.0"
] | null | null | null |
test/script/regression_utils.py
|
raman-bt/autopsy
|
e80ad2c732fd827c558fb088d444313fd999c717
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import subprocess
from time import localtime, strftime
import traceback
# Returns a Windows style path starting with the cwd and
# ending with the list of directories given
def make_local_path(*dirs):
path = wgetcwd().decode("utf-8")
for dir in dirs:
path += ("\\" + str(dir))
return path_fix(path)
# Returns a Windows style path based only off the given directories
def make_path(*dirs):
path = dirs[0]
for dir in dirs[1:]:
path += ("\\" + str(dir))
return path_fix(path)
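# Example: make_path("C:/test", "input", "image.img") returns the Windows style
# path 'C:\\test\\input\\image.img' (shown as a Python string literal).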
# Fix a standard os.path by making it Windows format
def path_fix(path):
return path.replace("/", "\\")
# Gets the true current working directory instead of Cygwin's
def wgetcwd():
proc = subprocess.Popen(("cygpath", "-m", os.getcwd()), stdout=subprocess.PIPE)
out,err = proc.communicate()
tst = out.rstrip()
if os.getcwd() == tst:
return os.getcwd()
else:
proc = subprocess.Popen(("cygpath", "-m", os.getcwd()), stdout=subprocess.PIPE)
out,err = proc.communicate()
return out.rstrip()
# Verifies a file's existence
def file_exists(file):
try:
if os.path.exists(file):
return os.path.isfile(file)
except:
return False
# Verifies a directory's existence
def dir_exists(dir):
try:
return os.path.exists(dir)
except:
return False
# Returns the nth word in the given string or "" if n is out of bounds
# n starts at 0 for the first word
def get_word_at(string, n):
words = string.split(" ")
if len(words) > n:
return words[n]
else:
return ""
# Returns true if the given file is one of the required input files
# for ingest testing
def required_input_file(name):
if ((name == "notablehashes.txt-md5.idx") or
(name == "notablekeywords.xml") or
(name == "nsrl.txt-md5.idx")):
return True
else:
return False
def image_type(image_file):
ext_start = image_file.rfind(".")
if (ext_start == -1):
return IMGTYPE.UNKNOWN
ext = image_file[ext_start:].lower()
if (ext == ".img" or ext == ".dd"):
return IMGTYPE.RAW
elif (ext == ".e01"):
return IMGTYPE.ENCASE
elif (ext == ".aa" or ext == ".001"):
return IMGTYPE.SPLIT
else:
return IMGTYPE.UNKNOWN
# Enumeration of image file types, as returned by image_type() based on the extension
class IMGTYPE:
RAW, ENCASE, SPLIT, UNKNOWN = range(4)
def get_image_name(image_file):
path_end = image_file.rfind("/")
path_end2 = image_file.rfind("\\")
ext_start = image_file.rfind(".")
if(ext_start == -1):
name = image_file
if(path_end2 != -1):
name = image_file[path_end2+1:ext_start]
elif(ext_start == -1):
name = image_file[path_end+1:]
elif(path_end == -1):
name = image_file[:ext_start]
elif(path_end!=-1 and ext_start!=-1):
name = image_file[path_end+1:ext_start]
else:
name = image_file[path_end2+1:ext_start]
return name
def usage():
"""Return the usage description of the test script."""
return """
Usage: ./regression.py [-f FILE] [OPTIONS]
Run RegressionTest.java, and compare the result with a gold standard.
By default, the script tests every image in ../input
When the -f flag is set, this script only tests a single given image.
When the -l flag is set, the script looks for a configuration file,
which may point to a new input directory and to individual images.
Expected files:
An NSRL database at: ../input/nsrl.txt-md5.idx
A notable hash database at: ../input/notablehashes.txt-md5.idx
A notable keyword file at: ../input/notablekeywords.xml
Options:
-r Rebuild the gold standards for the image(s) tested.
-i Ignores the ../input directory and all files within it.
-u Tells Autopsy not to ingest unallocated space.
-k Keeps each image's Solr index instead of deleting it.
-v Verbose mode; prints all errors to the screen.
-e ex Prints out all errors containing ex.
-l cfg Runs from configuration file cfg.
-c Runs in a loop over the configuration file until canceled. Must be used in conjunction with -l
-fr Will not try download gold standard images
"""
#####
# Enumeration definition (python 3.2 doesn't have enumerations, this is a common solution
# that allows you to access a named enum in a Java-like style, i.e. Numbers.ONE)
#####
def enum(*seq, **named):
enums = dict(zip(seq, range(len(seq))), **named)
return type('Enum', (), enums)
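# Example: Colors = enum('RED', 'GREEN', BLUE=10) gives Colors.RED == 0,
# Colors.GREEN == 1 and Colors.BLUE == 10.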
def get_files_by_ext(dir_path, ext):
"""Get a list of all the files with a given extenstion in the directory.
Args:
dir: a pathto_Dir, the directory to search.
ext: a String, the extension to search for. i.e. ".html"
"""
return [ os.path.join(dir_path, file) for file in os.listdir(dir_path) if
file.endswith(ext) ]
| 31.166667
| 110
| 0.655903
|
bd4ed9494f2066883851331f6ce4a58d448c616c
| 5,838
|
py
|
Python
|
src/autoupdater/__init__.py
|
arnehilmann/k8s-auto-updater
|
8118507390fd82e2f0443d8798706ff31251d640
|
[
"MIT"
] | 4
|
2019-04-16T18:52:00.000Z
|
2020-12-28T23:59:19.000Z
|
src/autoupdater/__init__.py
|
arnehilmann/k8s-auto-updater
|
8118507390fd82e2f0443d8798706ff31251d640
|
[
"MIT"
] | 1
|
2021-03-12T01:30:53.000Z
|
2021-03-12T02:13:59.000Z
|
src/autoupdater/__init__.py
|
arnehilmann/k8s-auto-updater
|
8118507390fd82e2f0443d8798706ff31251d640
|
[
"MIT"
] | null | null | null |
import base64
import json
import re
import subprocess
from kubepy import api as kubectl
def header(text, style="-", min_length=60):
print()
print(style * max(min_length, len(text)))
print(text)
print(style * max(min_length, len(text)))
def get_first_owner(resource):
owners = resource["metadata"].get("ownerReferences", [])
if not owners:
return None
return kubectl.get(owners[0]["kind"], owners[0]["name"])
def matches_pod(selectors, labels, name, verbose=False):
selected = True
for selector in selectors.split(","):
if not selected:
break
if "!=" in selector:
key, value = selector.split("!=")
if labels.get(key) == value:
selected = False
elif "==" in selector:
key, value = selector.split("==")
if labels.get(key) != value:
selected = False
elif "=" in selector:
key, value = selector.split("=")
if labels.get(key) != value:
selected = False
else:
key = selector
if key and key not in labels:
selected = False
if verbose:
if not selected:
print("skipped: pod/{} not selected because unmet criteria '{}'".format(name, selector))
return selected
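# Illustrative examples (label values are made up):
#     matches_pod("app==web,tier!=debug", {"app": "web", "tier": "prod"}, "web-1")  -> True
#     matches_pod("app==db", {"app": "web"}, "web-1")                               -> False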
def fetch_credentials(digest2pods):
creds = ""
for _, pods in digest2pods.items():
if creds:
break
for pod in pods:
if creds:
break
pull_secrets = pod["spec"].get("imagePullSecrets", "null")
if pull_secrets != "null":
for pull_secret in pull_secrets:
token_name = pull_secret["name"]
token = kubectl.get("secret", token_name)
secret_base64 = token["data"].get(".dockerconfigjson", "")
if not secret_base64:
continue
secret_dict = json.loads(base64.b64decode(secret_base64))
hostname = list(secret_dict["auths"].keys())[0]
username = secret_dict["auths"][hostname]["username"]
password = secret_dict["auths"][hostname]["password"]
creds = "{}:{}".format(username, password)
break
return creds
def split_image_name(image_name):
host = namespace = repo = tag = ""
repo, tag = image_name.rsplit(":", 1)
if "/" in repo:
namespace, repo = repo.rsplit("/", 1)
if "/" in namespace:
host, namespace = namespace.rsplit("/", 1)
return host, namespace, repo, tag
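# Illustrative examples (the image references are made up):
#     split_image_name("registry.example.com/team/app:1.2.3")
#     -> ('registry.example.com', 'team', 'app', '1.2.3')
#     split_image_name("nginx:latest") -> ('', '', 'nginx', 'latest')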
def matches_image(regexp, name, verbose=False):
if not re.match(regexp, name):
if verbose:
print("skipped: docker-image/{} skipped because missed regexp '{}'".format(name, regexp))
return False
return True
def collect_data(image_regexp, pod_selectors, verbose=False):
image2digest2pods = {}
for pod in kubectl.get("pods")["items"]:
for container in pod["status"]["containerStatuses"]:
image_name = container["image"]
if not matches_image(image_regexp, image_name, verbose):
continue
if not matches_pod(pod_selectors,
pod["metadata"].get("labels", {}),
pod["metadata"]["name"],
verbose):
continue
digest = re.sub("^.*@", "", container.get("imageID", ""))
if image_name not in image2digest2pods:
image2digest2pods[image_name] = {}
if digest not in image2digest2pods[image_name]:
image2digest2pods[image_name][digest] = []
image2digest2pods[image_name][digest].append(pod)
if verbose:
for image in image2digest2pods:
print("selected: docker-image/{}".format(image))
return image2digest2pods
def query_repodigst(host, namespace, repo, tag, creds):
raw_result = subprocess.run(
filter(None,
["skopeo",
"inspect",
"--creds={}".format(creds) if creds else None,
"docker://{}/{}/{}:{}".format(
host if host else "docker.io",
namespace if namespace else "library",
repo,
tag
)]),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if raw_result.returncode != 0:
print("\t[WARN] {}".format(raw_result.stderr))
return None
result = json.loads(raw_result.stdout)
return result["Digest"]
def check_pods(image2digest2pods, strategy, verbose=False):
for image_name in image2digest2pods:
print()
print(image_name)
host, namespace, repo, tag = split_image_name(image_name)
creds = fetch_credentials(image2digest2pods[image_name])
        repodigest = query_repodigest(host, namespace, repo, tag, creds)
if not repodigest:
continue
if verbose:
for pod in image2digest2pods[image_name].get(repodigest, []):
print("\tuptodate: pod/{}".format(pod["metadata"]["name"]))
for digest in image2digest2pods.get(image_name, {}):
if digest == repodigest:
continue
for pod in image2digest2pods[image_name][digest]:
pod_name = pod["metadata"]["name"]
print("\toutdated: pod/{}".format(pod_name))
if verbose:
print("\t\trepodigest of pod: {}".format(digest))
print("\t\tnewest repodigest: {}".format(repodigest))
if not strategy(**locals()):
print("\t[WARN] something went wrong...")
| 34.75
| 104
| 0.546591
|
731d8726d5cf12a31e3853e63b3d3ba356c3f0ea
| 6,953
|
py
|
Python
|
confluent_server/confluent/asynchttp.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 27
|
2015-02-11T13:56:46.000Z
|
2021-12-28T14:17:20.000Z
|
confluent_server/confluent/asynchttp.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 32
|
2015-09-23T13:19:04.000Z
|
2022-03-15T13:50:45.000Z
|
confluent_server/confluent/asynchttp.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 24
|
2015-07-14T20:41:55.000Z
|
2021-07-15T04:18:51.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2018 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Overall, the result of this shall be:
# - Web clients can create the same out-of-order responsiveness as socket
# clients (but with more complexity on their end)
# - Web clients can share single request among console sessions
# - Web clients can get async notify of things like node add/remove, events
# This provides an async strategy to http clients. The design is that a http
# session may have an 'async' resource. In such a case, any requests are
# queued and immediately the response is given accepting the queued request.
# A request flags itself as queue-compatible through an HTTP header indicating
# the identifier of the async thread. As responses happen to the queued
# request, data is dispatched to the first registered poller for data on
# the session. This way, a client may elect to provide multiple pollers
# to mitigate general choppiness of http network pattern. It may not be
# worth it, but it's possible.
# Additionally support console session multiplexing, to mitigate needed
# connection count.
# Also, this should allow a client to register for notifications of things
# like node add/delete or an event firing, ultimately.
# Much like console sessions, these will be reaped if a client stays away
# for too long.
import collections
import confluent.exceptions as exc
import confluent.messages as messages
import confluent.util as util
import eventlet
import greenlet
import time
_asyncsessions = {}
_cleanthread = None
_consolesessions = None
def _assign_asyncid(asyncsession):
sessid = util.randomstring(32)
while sessid in _asyncsessions:
sessid = util.randomstring(32)
_asyncsessions[sessid] = {'asyncsession': asyncsession}
return sessid
class AsyncTermRelation(object):
# Need to keep an association of term object to async
# This allows the async handler to know the context of
# outgoing data to provide to calling code
def __init__(self, termid, asynchdl):
self.asynchdl = asynchdl
self.termid = termid
def got_data(self, data):
self.asynchdl.add(self.termid, data)
class AsyncSession(object):
def __init__(self):
self.asyncid = _assign_asyncid(self)
self.responses = collections.deque()
self._evt = None
self.termrelations = []
self.consoles = set([])
self.reaper = eventlet.spawn_after(15, self.destroy)
def add(self, requestid, rsp):
if self.responses is None:
return
self.responses.append((requestid, rsp))
if self._evt:
self._evt.send()
self._evt = None
def set_term_relation(self, env):
# need a term relation to keep track of what data belongs
# to what object (since the callback does not provide context
# for data, and here ultimately the client is responsible
        # for sorting out which is which).
termrel = AsyncTermRelation(env['HTTP_CONFLUENTREQUESTID'], self)
self.termrelations.append(termrel)
return termrel
def add_console_session(self, sessionid):
self.consoles.add(sessionid)
def destroy(self):
if self._evt:
self._evt.send()
self._evt = None
for console in self.consoles:
_consolesessions[console]['session'].destroy()
self.consoles = None
self.responses = None
del _asyncsessions[self.asyncid]
def run_handler(self, handler, requestid):
try:
for rsp in handler:
self.add(requestid, rsp)
self.add(requestid, messages.AsyncCompletion())
except Exception as e:
self.add(requestid, e)
def get_responses(self, timeout=25):
self.reaper.cancel()
self.reaper = eventlet.spawn_after(timeout + 15, self.destroy)
nextexpiry = time.time() + 90
for csess in list(self.consoles):
try:
_consolesessions[csess]['expiry'] = nextexpiry
except KeyError: # session has been closed elsewhere
self.consoles.discard(csess)
if self._evt:
# TODO(jjohnson2): This precludes the goal of 'double barreled'
# access.... revisit if this could matter
raise Exception('get_responses is not re-entrant')
if not self.responses: # wait to accumulate some
self._evt = eventlet.event.Event()
with eventlet.Timeout(timeout, False):
self._evt.wait()
self._evt = None
while self.responses:
yield self.responses.popleft()
def run_handler(hdlr, env):
asyncsessid = env['HTTP_CONFLUENTASYNCID']
try:
asyncsession = _asyncsessions[asyncsessid]['asyncsession']
requestid = env['HTTP_CONFLUENTREQUESTID']
except KeyError:
raise exc.InvalidArgumentException(
'Invalid Session ID or missing request id')
eventlet.spawn_n(asyncsession.run_handler, hdlr, requestid)
return requestid
def get_async(env, querydict):
global _cleanthread
return _asyncsessions[env['HTTP_CONFLUENTASYNCID']]['asyncsession']
def handle_async(env, querydict, threadset):
global _cleanthread
# This may be one of two things, a request for a new async stream
# or a request for next data from async stream
# httpapi otherwise handles requests an injecting them to queue
if 'asyncid' not in querydict or not querydict['asyncid']:
# This is a new request, create a new multiplexer
currsess = AsyncSession()
yield messages.AsyncSession(currsess.asyncid)
return
if querydict['asyncid'] not in _asyncsessions:
raise exc.InvalidArgumentException(
'Invalid or expired async id')
mythreadid = greenlet.getcurrent()
threadset.add(mythreadid)
loggedout = None
currsess = None
try:
currsess = _asyncsessions[querydict['asyncid']]['asyncsession']
for rsp in currsess.get_responses():
yield messages.AsyncMessage(rsp)
except greenlet.GreenletExit as ge:
loggedout = ge
threadset.discard(mythreadid)
if loggedout is not None:
currsess.destroy()
raise exc.LoggedOut()
def set_console_sessions(consolesessions):
global _consolesessions
_consolesessions = consolesessions
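The header comments describe the client side of this protocol only in prose. The sketch below illustrates the intended flow with requests; it is not taken from confluent's documentation. The base URL and the async resource path are hypothetical, the header spellings are inferred from the WSGI variables HTTP_CONFLUENTASYNCID and HTTP_CONFLUENTREQUESTID read above, and the JSON payload shapes are likewise assumptions.

import uuid

import requests

BASE = "https://confluent.example.com"          # hypothetical server
ASYNC_URL = BASE + "/sessions/current/async"    # hypothetical async resource path

client = requests.Session()

# 1. Ask for a new async multiplexer (handle_async with no asyncid).
rsp = client.get(ASYNC_URL)
asyncid = rsp.json()["asyncid"]                 # assumed payload shape

# 2. Queue an ordinary request against it; the server accepts it immediately
#    and dispatches the real response through the async session later.
requestid = str(uuid.uuid4())
client.get(BASE + "/nodes/", headers={
    "ConfluentAsyncId": asyncid,                # becomes HTTP_CONFLUENTASYNCID
    "ConfluentRequestId": requestid,            # becomes HTTP_CONFLUENTREQUESTID
})

# 3. Long-poll the async resource; queued responses for any request id
#    registered on this session arrive here (see get_responses()).
for _ in range(3):
    rsp = client.get(ASYNC_URL, params={"asyncid": asyncid})
    for item in rsp.json().get("responses", []):    # assumed payload shape
        print(item)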
| 36.025907
| 78
| 0.685747
|
8ab45b19c20dc31dcd7101aec117c119de3c48db
| 1,134
|
py
|
Python
|
src/users/tests/management/merge_same_email_users/test_single_email_merge.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 62
|
2021-09-22T18:38:26.000Z
|
2022-03-29T06:09:42.000Z
|
src/users/tests/management/merge_same_email_users/test_single_email_merge.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 50
|
2021-09-16T07:17:31.000Z
|
2022-03-26T12:06:58.000Z
|
src/users/tests/management/merge_same_email_users/test_single_email_merge.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 16
|
2021-10-17T17:43:31.000Z
|
2022-03-26T11:22:45.000Z
|
import pytest
from pytest_mock import MockerFixture
pytestmark = pytest.mark.django_db
def test_target_user_lower_email(bob_a, bob_b, command, mocker: MockerFixture):
command.merge_user = mocker.MagicMock()
command.handle_single_email(bob_a.email)
bob_b.refresh_from_db()
assert bob_b.email == bob_b.email.lower()
def test_target_user_lower_username(bob_a, bob_b, command, mocker: MockerFixture):
command.merge_user = mocker.MagicMock()
command.handle_single_email(bob_a.email)
bob_b.refresh_from_db()
assert bob_b.username == bob_b.username.lower()
def test_two_users_merge_into_latest(bob_a, bob_b, command, mocker: MockerFixture):
command.merge_user = mocker.MagicMock()
command.handle_single_email(bob_a.email)
command.merge_user.assert_called_once_with(bob_a, bob_b)
def test_three_users_merge_into_latest(bob_a, bob_b, bob_c, command, mocker: MockerFixture):
command.merge_user = mocker.MagicMock()
command.handle_single_email(bob_a.email)
command.merge_user.assert_has_calls((
mocker.call(bob_b, bob_c),
mocker.call(bob_a, bob_c),
))
| 27
| 92
| 0.761905
|
e95bfb57744b776c95c3623cf3b96e90357c24cb
| 3,014
|
py
|
Python
|
zips_to_latlong.py
|
alchemydc/c19vaxfinder
|
86dae2543109a7fd9fb4e210711ce1c1a4d805b2
|
[
"MIT"
] | null | null | null |
zips_to_latlong.py
|
alchemydc/c19vaxfinder
|
86dae2543109a7fd9fb4e210711ce1c1a4d805b2
|
[
"MIT"
] | null | null | null |
zips_to_latlong.py
|
alchemydc/c19vaxfinder
|
86dae2543109a7fd9fb4e210711ce1c1a4d805b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# libs
import requests
from requests_toolbelt.utils import dump
import json
import os
import sys
from dotenv import load_dotenv
from pathlib import Path
# create a custom requests object; modifying the global module throws an error
# provides intrinsic exception handling for requests without try/except
http = requests.Session()
assert_status_hook = lambda response, *args, **kwargs: response.raise_for_status()
http.hooks["response"] = [assert_status_hook]
# dump verbose request and response data to stdout
def logging_hook(response, *args, **kwargs):
data = dump.dump_all(response)
print(data.decode('utf-8'))
#setup Requests to log request and response to stdout verbosely
#http.hooks["response"] = [logging_hook]
# read secrets from env vars
env_path = Path('.') / '.env'
load_dotenv(dotenv_path = env_path)
GOOG_GEOCODING_APIKEY = os.getenv('GOOG_GEOCODING_APIKEY')
# constants
GOOG_LOC_API_BASE = 'https://maps.googleapis.com/maps/api/geocode/json?key=' + GOOG_GEOCODING_APIKEY + '&components=postal_code:'
#ZIPCODE_FILE = './static_data/walgreens_zipcodes.txt'
ZIPCODE_FILE = './static_data/walgreens_zips_four_corners_CO.txt'
#LATLONG_FILE = './static_data/latlong_data.json'
LATLONG_FILE = './static_data/latlong_data_co.json'
http_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
# fcns
def readFile(filename):
#print('hello readFile')
print('opening file %s for reading' % filename)
with open(filename, 'r') as infile:
data=infile.readlines()
return(data)
def writeFile(data, filename):
print('opening file %s for writing' % filename)
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def getData(zipcodes):
decoratedZipcodes = []
for zipcode in zipcodes:
url = GOOG_LOC_API_BASE + zipcode.rstrip()
print(url)
response = http.get(url, headers=http_headers)
if response.status_code == 200:
print("http/%s" % response.status_code)
data = response.json()
if data['status'] == "OK":
decoratedZipcodes.append({
'zip' : data['results'][0]['address_components'][0]['long_name'],
'latitude' : data['results'][0]['geometry']['location']['lat'],
'longitude' : data['results'][0]['geometry']['location']['lng'],
'name' : data['results'][0]['formatted_address']
})
else:
print("Exception http/%s requesting latlong for ZIP %s" % (response.status_code, zipcode))
return(decoratedZipcodes)
def main():
print("using GOOG apikey %s" % GOOG_GEOCODING_APIKEY)
zipcodes = readFile(ZIPCODE_FILE)
decoratedZipcodes = getData(zipcodes)
print(decoratedZipcodes)
writeFile(decoratedZipcodes, LATLONG_FILE)
if __name__ == "__main__":
main()
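getData() reaches fairly deep into the Geocoding response, so a trimmed example of the payload shape it relies on makes the indexing easier to follow. Only the keys the script actually touches are shown; the values are made up and a real response carries many more fields.

sample = {
    "status": "OK",
    "results": [{
        "address_components": [{"long_name": "81321"}],
        "formatted_address": "Cortez, CO 81321, USA",
        "geometry": {"location": {"lat": 37.35, "lng": -108.59}},
    }],
}
assert sample["results"][0]["address_components"][0]["long_name"] == "81321"
assert sample["results"][0]["geometry"]["location"]["lat"] == 37.35
assert sample["results"][0]["formatted_address"].endswith("USA")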
| 34.25
| 133
| 0.657266
|
11fd10e76a77932a11cde92efa3b25eb48e00986
| 12,507
|
py
|
Python
|
auth_token/migrations/0011_auto_20210119_1308.py
|
druids/django-token-authorization
|
bcd029e86d3edb49784bfa7e9329bcd1be4c511e
|
[
"BSD-3-Clause"
] | null | null | null |
auth_token/migrations/0011_auto_20210119_1308.py
|
druids/django-token-authorization
|
bcd029e86d3edb49784bfa7e9329bcd1be4c511e
|
[
"BSD-3-Clause"
] | null | null | null |
auth_token/migrations/0011_auto_20210119_1308.py
|
druids/django-token-authorization
|
bcd029e86d3edb49784bfa7e9329bcd1be4c511e
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1 on 2021-01-19 12:08
import auth_token.enums
import auth_token.models
from django.conf import settings
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import enumfields.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('auth_token', '0010_auto_20190723_1410'),
]
operations = [
migrations.CreateModel(
name='AuthorizationToken',
fields=[
('created_at', models.DateTimeField(db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(db_index=True, verbose_name='changed at')),
('key', models.CharField(max_length=128, primary_key=True, serialize=False, verbose_name='key')),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('user_agent', models.CharField(blank=True, max_length=256, null=True, verbose_name='user agent')),
('expires_at', models.DateTimeField(default=auth_token.models.compute_authorization_token_expires_at,
verbose_name='expires at')),
('ip', models.GenericIPAddressField(verbose_name='IP')),
('auth_slug', models.SlugField(blank=True, null=True, verbose_name='slug')),
('backend', models.CharField(max_length=250, verbose_name='backend')),
('allowed_cookie', models.BooleanField(default=True, verbose_name='is allowed cookie')),
('allowed_header', models.BooleanField(default=True, verbose_name='is allowed header')),
('is_authenticated', models.BooleanField(default=False, verbose_name='is authenticated')),
('preserve_cookie', models.BooleanField(default=False, verbose_name='preserve cookie')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='authorization_tokens',
to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'authorization token',
'verbose_name_plural': 'authorization tokens',
},
),
migrations.CreateModel(
name='AuthorizationRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('slug', models.SlugField(blank=True, null=True, verbose_name='slug')),
('title', models.CharField(max_length=250, verbose_name='title')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('result', enumfields.fields.IntegerEnumField(
blank=True, enum=auth_token.enums.AuthorizationRequestResult, null=True, verbose_name='result'
)),
('backend', models.CharField(max_length=250, verbose_name='backend')),
('data', models.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True,
verbose_name='data')),
('expires_at', models.DateTimeField(blank=True, null=True, verbose_name='expires at')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='authorization_requests',
to=settings.AUTH_USER_MODEL, verbose_name='user')),
('granted_at', models.DateTimeField(null=True, blank=True, verbose_name='granted at')),
],
options={
'verbose_name': 'authorization request',
'verbose_name_plural': 'authorization requests',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='AuthorizationRequestGenericManyToManyRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('object_id', models.TextField(db_index=True, verbose_name='ID of the related object')),
('authorization_request',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_related_objects',
related_query_name='related_objects', to='auth_token.authorizationrequest')),
('object_ct',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype',
verbose_name='content type of the related object')),
],
options={
'db_tablespace': '',
'unique_together': {('authorization_request', 'object_ct', 'object_id')},
},
),
migrations.CreateModel(
name='AuthorizationTokenGenericManyToManyRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('object_id', models.TextField(db_index=True, verbose_name='ID of the related object')),
('authorization_token',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_related_objects',
related_query_name='related_objects', to='auth_token.authorizationtoken')),
('object_ct',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype',
verbose_name='content type of the related object')),
],
options={
'db_tablespace': '',
'unique_together': {('authorization_token', 'object_ct', 'object_id')},
},
),
migrations.RenameModel('DeviceKey', 'MobileDevice'),
migrations.CreateModel(
name='OneTimePassword',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('key', models.CharField(max_length=128, primary_key=True, serialize=False, verbose_name='key')),
('expires_at', models.DateTimeField(blank=True, null=True, verbose_name='expires at')),
('slug', models.SlugField(verbose_name='slug')),
('is_active', models.BooleanField(default=True, verbose_name='is active')),
('data', models.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True,
verbose_name='data')),
],
options={
'verbose_name': 'one time password',
'verbose_name_plural': 'one time passwords',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='OneTimePasswordGenericManyToManyRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('object_id', models.TextField(db_index=True, verbose_name='ID of the related object')),
('object_ct',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype',
verbose_name='content type of the related object')),
('one_time_password',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_related_objects',
related_query_name='related_objects', to='auth_token.onetimepassword')),
],
options={
'db_tablespace': '',
'unique_together': {('one_time_password', 'object_ct', 'object_id')},
},
),
migrations.CreateModel(
name='UserAuthorizationTokenTakeover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='changed at')),
('is_active', models.BooleanField()),
('token', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_takeovers',
to='auth_token.authorizationtoken', verbose_name='authorization token')),
('user',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_token_takeovers',
to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'authorization takeover',
'verbose_name_plural': 'authorization takeovers',
},
),
migrations.AddField(
model_name='authorizationrequest',
name='authorization_token',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='authorization_requests', to='auth_token.authorizationtoken',
verbose_name='authorization token'),
),
migrations.AddField(
model_name='mobiledevice',
name='changed_at',
field=models.DateTimeField(null=True, blank=True, db_index=True, verbose_name='changed at'),
),
migrations.AddField(
model_name='mobiledevice',
name='name',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='name'),
),
migrations.AddField(
model_name='mobiledevice',
name='slug',
field=models.SlugField(blank=True, null=True, verbose_name='slug'),
),
migrations.AddField(
model_name='mobiledevice',
name='is_primary',
field=models.BooleanField(default=False, verbose_name='is primary'),
),
migrations.AlterField(
model_name='mobiledevice',
name='uuid',
field=models.CharField(max_length=36, verbose_name='UUID'),
),
migrations.AlterField(
model_name='mobiledevice',
name='user',
field=models.ForeignKey(
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name='user',
related_name='mobile_devices'
)
),
migrations.AddField(
model_name='authorizationtoken',
name='mobile_device',
field=models.ForeignKey(
verbose_name='mobile device',
to='auth_token.mobiledevice',
related_name='authorization_tokens',
null=True,
blank=True,
on_delete=models.CASCADE
),
),
]
| 56.59276
| 120
| 0.585032
|
73e4c840a7cbb1d542494d3099f9ad9bfb817268
| 399
|
py
|
Python
|
jobs/helloworld.py
|
vstenby/01005WakeWord
|
c7e2ca4e4102e66b851c517d4a746d87be42423c
|
[
"MIT"
] | 2
|
2021-02-09T10:14:33.000Z
|
2021-09-20T07:43:33.000Z
|
jobs/helloworld.py
|
vstenby/01005WakeWord
|
c7e2ca4e4102e66b851c517d4a746d87be42423c
|
[
"MIT"
] | null | null | null |
jobs/helloworld.py
|
vstenby/01005WakeWord
|
c7e2ca4e4102e66b851c517d4a746d87be42423c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 5 11:44:43 2021
@author: s174483
"""
import sys
def main():
argin = sys.argv
if len(argin) > 1:
filename = 'test'+str(argin[1])+'.txt'
else:
filename = 'test.txt'
f = open(filename,"w")
f.write(str(argin))
f.close()
return
if __name__ == "__main__":
main()
| 14.777778
| 46
| 0.52381
|
2083e4fe7fb5079bdaf8aa1a356f8b17eaa79128
| 10,838
|
py
|
Python
|
main.py
|
AccomplishedCode/Mapping-CDW-in-NYC
|
d8e74faf991c6debabf3fea4e3b0d557210e1cb8
|
[
"MIT"
] | null | null | null |
main.py
|
AccomplishedCode/Mapping-CDW-in-NYC
|
d8e74faf991c6debabf3fea4e3b0d557210e1cb8
|
[
"MIT"
] | null | null | null |
main.py
|
AccomplishedCode/Mapping-CDW-in-NYC
|
d8e74faf991c6debabf3fea4e3b0d557210e1cb8
|
[
"MIT"
] | 1
|
2021-08-05T19:54:57.000Z
|
2021-08-05T19:54:57.000Z
|
import streamlit as st
import pydeck as pdk
import pandas as pd
import numpy as np
import base64
from maps import *
from utils import *
from annotated_text import annotated_text
#Loading some dataset for beta expander
categories= pd.read_csv('data/categories.csv')
regions= pd.read_csv('data/regions.csv')
#Setting page to wide config mode
st.set_page_config(layout='wide')
st.title('Mapping Construction+Demolition Waste Flows for Recovered C+DW Use in NYC’s Capital Program')
st.write("Welcome! This is a tool created by a team of MS students at NYU Center for Urban Sciences & Progress to visualize CDW Waste in NYC. The dataset used for these visualizations was extracted from handwritten forms provided by NYS DEP, and converted into machine readable format by the team over a period of 3 months.")
st.write("The dataset used is easily explored through the sidepane.Please choose the desired view checkbox to display the visualization, and uncheck to clear the view.")
def first_decision():
sidebar_chosen= st.checkbox("Yes!")
desc_chosen= st.checkbox("Not yet, I want to learn more about this project.")
return sidebar_chosen,desc_chosen
s_chosen, desc_chosen= first_decision()
#Selecting whether or not to display sidebar config controls
data_dir= 'data/' #Input own directory if needed
#Loading the data
datafile= 'dataset_snake_v2.csv'
df= pd.read_csv(data_dir + datafile)
df= df.drop(columns=['Unnamed: 0'], axis=1)
df_capacities= pd.read_csv('data/LF_caps.csv')
#Separating into Transfers and Landfills
df_transfers= df.loc[df['Facility Type']=='Transfer Facility']
df_landfills= df.loc[df['Facility Type']=='Landfill']
#Defining filter functions and map functions
def filter_data(dataframe, year, material):
df1= dataframe.loc[dataframe['Year']==year]
df1= df1.loc[df1['Material']==material]
df_in= df1.loc[df1['Direction']=='Incoming']
df_out= df1.loc[df1['Direction']=='Outgoing']
return df_in, df_out
#Defining sidebar panel
if s_chosen:
st.sidebar.subheader('Please check the box on the dataset you wish to visualize')
data_chosen= st.sidebar.radio('We currently provide exploration of only one type', ['Transfer Facility', 'Landfills'])
if data_chosen:
st.sidebar.subheader('How would you like to explore the dataset?')
tab_view= st.sidebar.checkbox('Tabular View') #Defining views for the page to display
map_view= st.sidebar.checkbox('Map View') #Bool variable for Mapping view
stat_view= st.sidebar.checkbox('Statistical Visualizations') #Exploratory dataframe analysis to be implemented
st.sidebar.subheader('Please choose year of interest')
year_chosen = st.sidebar.radio(
'Please select year of interest:',
(2019, 2020))
if tab_view:
#Allow data downloading
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings
linko= f'<a href="data:file/csv;base64,{b64}" download="myfilename.csv">Download Dataset file</a>'
st.markdown(linko, unsafe_allow_html=True)
csv_monthly= df_monthly.to_csv(index=False)
b64 = base64.b64encode(csv_monthly.encode()).decode() # some strings
linko_monthly= f'<a href="data:file/csv;base64,{b64}" download="myfilename.csv">Download Monthly Breakdown file</a>'
st.markdown(linko_monthly, unsafe_allow_html=True)
csv_capacities= df_capacities.to_csv(index=False)
b64 = base64.b64encode(csv_capacities.encode()).decode() # some strings
linko_capacities= f'<a href="data:file/csv;base64,{b64}" download="myfilename.csv">Download Remaining Landfill Capacities file</a>'
st.markdown(linko_capacities, unsafe_allow_html=True)
if data_chosen=='Transfer Facility':
st.subheader('Dataframe of the Transfer facilities dataset')
st.write(df_transfers)
if data_chosen=='Landfills':
st.subheader('Dataframe of the Landfills dataset')
st.write(df_landfills)
if map_view:
direction_chosen= st.sidebar.selectbox(
'Please choose the type of CDW flows to visualize:',
('Incoming', 'Outgoing', 'Both')
)
st.write("This map is fully interactive. You can zoom in and out, pan by holding the right mouse click and get information about each trip by hovering over the arc.")
if data_chosen=='Transfer Facility':
st.subheader('Map of Incoming Construction & Demolition Waste to Transfer Facilities')
annotated_text(
"Legend:",
("Incoming", "To Transfer Facility", "#ee6123"),
("Outgoing", "From Transfer Facility", "#047634" )
)
material_chosen= st.sidebar.selectbox(
'Please choose the material of interest:',
(df_transfers['Material'].unique()))
df_in, df_out= filter_data(df_transfers, year_chosen, material_chosen)
if direction_chosen=='Incoming':
map(df_in,COPPER_RGB)
elif direction_chosen=='Outgoing':
map(df_out, DARK_GREEN_RGB)
elif direction_chosen=='Both':
dual_map(df_in, df_out, COPPER_RGB, DARK_GREEN_RGB)
elif data_chosen=='Landfills':
st.subheader('Map of Incoming Construction & Demolition Waste to Landfills')
annotated_text(
"Legend:",
("Incoming",),
("RECYCLED","FROM LANDFILL", "#f50029" )
)
material_chosen= st.sidebar.selectbox(
'Please choose the material of interest:',
(df_landfills['Material'].dropna().unique()))
df_landfill_filtered= df_landfills.loc[df_landfills['Material']==material_chosen]
map_landfill(df_landfill_filtered)
st.subheader("Map of Materials reused onsite Landfills")
column_map(df_landfill_filtered)
if stat_view:
st.sidebar.subheader('We provide two different visualizations for this data')
st.sidebar.write('Please choose the graph types below: ')
monthly= st.sidebar.checkbox('Monthly CDW breakdown')
sankey= st.sidebar.checkbox('Regional Material Flows')
if monthly:
if data_chosen:
st.subheader('Monthly Breakdown of CDW')
st.write('The graph is fully interactive, please feel free to select/deselect relevant materials')
st.plotly_chart(timeline_2020(df_monthly, material_list, region_list, year_chosen), use_container_width=True)
if year_chosen==2020:
st.subheader('Monthly Breakdown of Other Waste')
st.write('Please check disclaimer section for all materials included under the category: Other')
st.plotly_chart(timeline_2020(df_monthly,['Other'], region_list, year_chosen), use_container_width=True)
if sankey:
if data_chosen=='Transfer Facility':
region_chosen= st.sidebar.selectbox('Please choose the region of interest:', [1,2,8])
st.subheader('Regional Flow Graph of CDW For Transfer Facilities')
st.plotly_chart(sankey_destiny_2(df_transfers, region_chosen,year_chosen,data_chosen ), use_container_width=True)
else:
region_chosen= st.sidebar.selectbox('Please choose the region of interest:', (df_landfills['facility_region'].unique()))
st.subheader('Regional Flow Graph of CDW For Landfills')
st.plotly_chart(sankey_destiny_2(df_landfills, region_chosen,2020,'Landfill' ), use_container_width=True)
if desc_chosen:
st.subheader(" A Bit About This Project")
st.write("Thanks for clicking to learn more about our project! We believe that reuse of materials in the construction industry is critical in reducing Greenhouse Gas Emmissions, and it's important that there exist a way to track and map these material flows in order to create a market for secondary use. ")
st.write("To read more about our Capstone Project and check out the code, please click on the link below:")
st.write("https://github.com/AccomplishedCode/Mapping-CDW-in-NYC", unsafe_allow_html=True)
#Expander for disclaimers
st.subheader("A Few Pointers About the Data and the Maps")
with st.beta_expander("See important disclaimers about our maps and data"):
st.write(
"""
- This data has been aggregated by humans from handwritten, scanned PDF forms.
- The coordinate locations for transfer facilities and landfills were extracted using a mix of Google Maps API as well as other open source API's. Due to facility names being fluid due to change of ownership/new registrations, we've confirmed that the names listed on this dataset are the latest available as of April, 2021.
- We rely on the level of spatial granularity that is provided in the reporting forms. In the cases where the “Service Area” refers to a state or county, our map portrays that where the respective end of the arc falls at the center of the town/county as per the NYS Municipal boundary map. In some cases (in and around Suffolk County, for example) that center could coincidentally lie in water. Please note the “Coming from/Going to” details as you hover over the arcs.
- This tool is just a visualization of the data we were able to gather. This does not aim to inculpate or put blame on any particular facility or region for their activities.
- The following table defines the final 21 material types by categorizing the numerous different types of reported materials:
"""
)
st.dataframe(categories)
st.write("""
- The following table shows the available data in terms of facility type, region and year:
""")
st.dataframe(regions)
st.image('data/regions.jpg')
st.write("Thank you for using our webapp! We really hope you liked it! Feel free to check out the dark theme if you'd like, it should automatically turn on if your system already has it. If not, you can change it in the settings on the top right! :-)")
| 51.856459
| 479
| 0.653626
|
049b754a8d3ea36ab14c1891666bfdc449eb7238
| 19,879
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations_async/_express_route_connections_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations_async/_express_route_connections_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations_async/_express_route_connections_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations:
"""ExpressRouteConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "models.ExpressRouteConnection",
**kwargs
) -> "models.ExpressRouteConnection":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "models.ExpressRouteConnection",
**kwargs
) -> "models.ExpressRouteConnection":
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2020_03_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRouteConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> "models.ExpressRouteConnection":
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> None:
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def list(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs
) -> "models.ExpressRouteConnectionList":
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteConnectionList"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
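As the class docstring notes, this operations group is not instantiated directly; it is reached through the management client. A hedged usage sketch follows: the import paths, credential type and resource names reflect common azure-identity / azure-mgmt-network usage but are assumptions here, since they do not appear in this file.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # begin_* methods return an AsyncLROPoller; awaiting .result()
            # blocks until the long-running operation completes.
            poller = await client.express_route_connections.begin_delete(
                resource_group_name="my-rg",                  # hypothetical names
                express_route_gateway_name="my-er-gateway",
                connection_name="my-connection",
            )
            await poller.result()

asyncio.run(main())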
| 50.454315
| 250
| 0.689924
|
9875e9b44e09dd981e9b2be43ec5b18d1df30e01
| 950
|
py
|
Python
|
accounts/views.py
|
PatrickLeonard/superlists
|
c99cdb5ed32009a878016da9e3bb2659a267f851
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
PatrickLeonard/superlists
|
c99cdb5ed32009a878016da9e3bb2659a267f851
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
PatrickLeonard/superlists
|
c99cdb5ed32009a878016da9e3bb2659a267f851
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.contrib import auth, messages
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from accounts.models import Token
import sys
def send_login_email(request):
email = request.POST['email']
token = Token.objects.create(email=email)
url = request.build_absolute_uri(
reverse('login') + '?token={uid}'.format(uid=str(token.uid))
)
message_body = 'Use this link to log in: \n\n{url}'.format(url=url)
send_mail(
'Your login link for Superlists',
message_body,
'p.leonard.example@gmail.com',
[email],
)
messages.success(
request,
"Check your email, we've sent you a link you can use to log in."
)
return redirect('/')
def login(request):
user = auth.authenticate(uid=request.GET.get('token'))
if user:
auth.login(request, user)
return redirect('/')
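login() calls auth.authenticate(uid=...), which only works if an authentication backend accepting a uid keyword is listed in AUTHENTICATION_BACKENDS; that backend is not part of this file. Below is a sketch of the kind of backend the view implies, using the Token model imported above; the class name and module location are assumptions.

from django.contrib.auth import get_user_model

from accounts.models import Token

class PasswordlessAuthenticationBackend(object):
    def authenticate(self, uid):
        User = get_user_model()
        try:
            token = Token.objects.get(uid=uid)
        except Token.DoesNotExist:
            return None
        # assumes a custom user model that can be created from an email alone
        user, _ = User.objects.get_or_create(email=token.email)
        return user

    def get_user(self, user_id):
        User = get_user_model()
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None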
| 29.6875
| 72
| 0.665263
|
702333142c8ec70337d86e5a5d5f918c17f131f0
| 54,045
|
py
|
Python
|
dynaconf/vendor_src/ruamel/yaml/main.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 2,293
|
2015-08-14T22:39:31.000Z
|
2022-03-31T12:44:49.000Z
|
dynaconf/vendor_src/ruamel/yaml/main.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 676
|
2015-08-20T19:29:56.000Z
|
2022-03-31T13:45:51.000Z
|
dynaconf/vendor_src/ruamel/yaml/main.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 255
|
2015-12-02T21:16:33.000Z
|
2022-03-20T22:03:46.000Z
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals, print_function
import sys
import os
import warnings
import glob
from importlib import import_module
import dynaconf.vendor.ruamel as ruamel
from .error import UnsafeLoaderWarning, YAMLError # NOQA
from .tokens import * # NOQA
from .events import * # NOQA
from .nodes import * # NOQA
from .loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
from .dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
from .compat import StringIO, BytesIO, with_metaclass, PY3, nprint
from .resolver import VersionedResolver, Resolver # NOQA
from .representer import (
BaseRepresenter,
SafeRepresenter,
Representer,
RoundTripRepresenter,
)
from .constructor import (
BaseConstructor,
SafeConstructor,
Constructor,
RoundTripConstructor,
)
from .loader import Loader as UnsafeLoader
if False: # MYPY
from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
from .compat import StreamType, StreamTextType, VersionType # NOQA
if PY3:
from pathlib import Path
else:
Path = Any
try:
from _ruamel_yaml import CParser, CEmitter # type: ignore
except: # NOQA
CParser = CEmitter = None
# import io
enforce = object()
# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
# subset of abbreviations, which should be all caps according to PEP8
class YAML(object):
def __init__(
self, _kw=enforce, typ=None, pure=False, output=None, plug_ins=None # input=None,
):
# type: (Any, Optional[Text], Any, Any, Any) -> None
"""
_kw: not used, forces keyword arguments in 2.7 (in 3 you can do (*, safe_load=..)
typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default)
'safe' -> SafeLoader/SafeDumper,
'unsafe' -> normal/unsafe Loader/Dumper
'base' -> baseloader
pure: if True only use Python modules
input/output: needed to work as context manager
plug_ins: a list of plug-in files
"""
if _kw is not enforce:
raise TypeError(
'{}.__init__() takes no positional argument but at least '
'one was given ({!r})'.format(self.__class__.__name__, _kw)
)
self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
self.pure = pure
# self._input = input
self._output = output
self._context_manager = None # type: Any
self.plug_ins = [] # type: List[Any]
for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
file_name = pu.replace(os.sep, '.')
self.plug_ins.append(import_module(file_name))
self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
self.allow_unicode = True
self.Reader = None # type: Any
self.Representer = None # type: Any
self.Constructor = None # type: Any
self.Scanner = None # type: Any
self.Serializer = None # type: Any
self.default_flow_style = None # type: Any
typ_found = 1
setup_rt = False
if 'rt' in self.typ:
setup_rt = True
elif 'safe' in self.typ:
self.Emitter = (
ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
)
self.Representer = ruamel.yaml.representer.SafeRepresenter
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.SafeConstructor
elif 'base' in self.typ:
self.Emitter = ruamel.yaml.emitter.Emitter
self.Representer = ruamel.yaml.representer.BaseRepresenter
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.BaseConstructor
elif 'unsafe' in self.typ:
self.Emitter = (
ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
)
self.Representer = ruamel.yaml.representer.Representer
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.Constructor
else:
setup_rt = True
typ_found = 0
if setup_rt:
self.default_flow_style = False
# no optimized rt-dumper yet
self.Emitter = ruamel.yaml.emitter.Emitter
self.Serializer = ruamel.yaml.serializer.Serializer
self.Representer = ruamel.yaml.representer.RoundTripRepresenter
self.Scanner = ruamel.yaml.scanner.RoundTripScanner
# no optimized rt-parser yet
self.Parser = ruamel.yaml.parser.RoundTripParser
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
del setup_rt
self.stream = None
self.canonical = None
self.old_indent = None
self.width = None
self.line_break = None
self.map_indent = None
self.sequence_indent = None
self.sequence_dash_offset = 0
self.compact_seq_seq = None
self.compact_seq_map = None
self.sort_base_mapping_type_on_output = None # default: sort
self.top_level_colon_align = None
self.prefix_colon = None
self.version = None
self.preserve_quotes = None
self.allow_duplicate_keys = False # duplicate keys in map, set
self.encoding = 'utf-8'
self.explicit_start = None
self.explicit_end = None
self.tags = None
self.default_style = None
self.top_level_block_style_scalar_no_indent_error_1_1 = False
# directives end indicator with single scalar document
self.scalar_after_indicator = None
# [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
self.brace_single_entry_mapping_in_flow_sequence = False
for module in self.plug_ins:
if getattr(module, 'typ', None) in self.typ:
typ_found += 1
module.init_typ(self)
break
if typ_found == 0:
raise NotImplementedError(
'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
)
@property
def reader(self):
# type: () -> Any
try:
return self._reader # type: ignore
except AttributeError:
self._reader = self.Reader(None, loader=self)
return self._reader
@property
def scanner(self):
# type: () -> Any
try:
return self._scanner # type: ignore
except AttributeError:
self._scanner = self.Scanner(loader=self)
return self._scanner
@property
def parser(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
if self.Parser is not CParser:
setattr(self, attr, self.Parser(loader=self))
else:
if getattr(self, '_stream', None) is None:
# wait for the stream
return None
else:
# if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
# # pathlib.Path() instance
# setattr(self, attr, CParser(self._stream))
# else:
setattr(self, attr, CParser(self._stream))
# self._parser = self._composer = self
# nprint('scanner', self.loader.scanner)
return getattr(self, attr)
@property
def composer(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
setattr(self, attr, self.Composer(loader=self))
return getattr(self, attr)
@property
def constructor(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
cnst.allow_duplicate_keys = self.allow_duplicate_keys
setattr(self, attr, cnst)
return getattr(self, attr)
@property
def resolver(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
setattr(self, attr, self.Resolver(version=self.version, loader=self))
return getattr(self, attr)
@property
def emitter(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
if self.Emitter is not CEmitter:
_emitter = self.Emitter(
None,
canonical=self.canonical,
indent=self.old_indent,
width=self.width,
allow_unicode=self.allow_unicode,
line_break=self.line_break,
prefix_colon=self.prefix_colon,
brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA
dumper=self,
)
setattr(self, attr, _emitter)
if self.map_indent is not None:
_emitter.best_map_indent = self.map_indent
if self.sequence_indent is not None:
_emitter.best_sequence_indent = self.sequence_indent
if self.sequence_dash_offset is not None:
_emitter.sequence_dash_offset = self.sequence_dash_offset
# _emitter.block_seq_indent = self.sequence_dash_offset
if self.compact_seq_seq is not None:
_emitter.compact_seq_seq = self.compact_seq_seq
if self.compact_seq_map is not None:
_emitter.compact_seq_map = self.compact_seq_map
else:
if getattr(self, '_stream', None) is None:
# wait for the stream
return None
return None
return getattr(self, attr)
@property
def serializer(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
setattr(
self,
attr,
self.Serializer(
encoding=self.encoding,
explicit_start=self.explicit_start,
explicit_end=self.explicit_end,
version=self.version,
tags=self.tags,
dumper=self,
),
)
return getattr(self, attr)
@property
def representer(self):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
repres = self.Representer(
default_style=self.default_style,
default_flow_style=self.default_flow_style,
dumper=self,
)
if self.sort_base_mapping_type_on_output is not None:
repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
setattr(self, attr, repres)
return getattr(self, attr)
# separate output resolver?
# def load(self, stream=None):
# if self._context_manager:
# if not self._input:
# raise TypeError("Missing input stream while dumping from context manager")
# for data in self._context_manager.load():
# yield data
# return
# if stream is None:
# raise TypeError("Need a stream argument when not loading from context manager")
# return self.load_one(stream)
def load(self, stream):
# type: (Union[Path, StreamTextType]) -> Any
"""
at this point you either have the non-pure Parser (which has its own reader and
scanner) or you have the pure Parser.
If the pure Parser is set, then set the Reader and Scanner, if not already set.
If either the Scanner or Reader are set, you cannot use the non-pure Parser,
so reset it to the pure parser and set the Reader resp. Scanner if necessary
"""
if not hasattr(stream, 'read') and hasattr(stream, 'open'):
# pathlib.Path() instance
with stream.open('rb') as fp:
return self.load(fp)
constructor, parser = self.get_constructor_parser(stream)
try:
return constructor.get_single_data()
finally:
parser.dispose()
try:
self._reader.reset_reader()
except AttributeError:
pass
try:
self._scanner.reset_scanner()
except AttributeError:
pass
def load_all(self, stream, _kw=enforce): # , skip=None):
# type: (Union[Path, StreamTextType], Any) -> Any
if _kw is not enforce:
raise TypeError(
'{}.__init__() takes no positional argument but at least '
'one was given ({!r})'.format(self.__class__.__name__, _kw)
)
if not hasattr(stream, 'read') and hasattr(stream, 'open'):
# pathlib.Path() instance
with stream.open('r') as fp:
for d in self.load_all(fp, _kw=enforce):
yield d
return
# if skip is None:
# skip = []
# elif isinstance(skip, int):
# skip = [skip]
constructor, parser = self.get_constructor_parser(stream)
try:
while constructor.check_data():
yield constructor.get_data()
finally:
parser.dispose()
try:
self._reader.reset_reader()
except AttributeError:
pass
try:
self._scanner.reset_scanner()
except AttributeError:
pass
def get_constructor_parser(self, stream):
# type: (StreamTextType) -> Any
"""
the old cyaml needs special setup, and therefore the stream
"""
if self.Parser is not CParser:
if self.Reader is None:
self.Reader = ruamel.yaml.reader.Reader
if self.Scanner is None:
self.Scanner = ruamel.yaml.scanner.Scanner
self.reader.stream = stream
else:
if self.Reader is not None:
if self.Scanner is None:
self.Scanner = ruamel.yaml.scanner.Scanner
self.Parser = ruamel.yaml.parser.Parser
self.reader.stream = stream
elif self.Scanner is not None:
if self.Reader is None:
self.Reader = ruamel.yaml.reader.Reader
self.Parser = ruamel.yaml.parser.Parser
self.reader.stream = stream
else:
# combined C level reader>scanner>parser
# does some calls to the resolver, e.g. BaseResolver.descend_resolver
# if you just initialise the CParser, to much of resolver.py
# is actually used
rslvr = self.Resolver
# if rslvr is ruamel.yaml.resolver.VersionedResolver:
# rslvr = ruamel.yaml.resolver.Resolver
class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
def __init__(selfx, stream, version=self.version, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA
CParser.__init__(selfx, stream)
selfx._parser = selfx._composer = selfx
self.Constructor.__init__(selfx, loader=selfx)
selfx.allow_duplicate_keys = self.allow_duplicate_keys
rslvr.__init__(selfx, version=version, loadumper=selfx)
self._stream = stream
loader = XLoader(stream)
return loader, loader
return self.constructor, self.parser
def dump(self, data, stream=None, _kw=enforce, transform=None):
# type: (Any, Union[Path, StreamType], Any, Any) -> Any
if self._context_manager:
if not self._output:
raise TypeError('Missing output stream while dumping from context manager')
if _kw is not enforce:
raise TypeError(
'{}.dump() takes one positional argument but at least '
'two were given ({!r})'.format(self.__class__.__name__, _kw)
)
if transform is not None:
raise TypeError(
'{}.dump() in the context manager cannot have transform keyword '
''.format(self.__class__.__name__)
)
self._context_manager.dump(data)
else: # old style
if stream is None:
raise TypeError('Need a stream argument when not dumping from context manager')
return self.dump_all([data], stream, _kw, transform=transform)
def dump_all(self, documents, stream, _kw=enforce, transform=None):
# type: (Any, Union[Path, StreamType], Any, Any) -> Any
if self._context_manager:
raise NotImplementedError
if _kw is not enforce:
raise TypeError(
'{}.dump(_all) takes two positional argument but at least '
'three were given ({!r})'.format(self.__class__.__name__, _kw)
)
self._output = stream
self._context_manager = YAMLContextManager(self, transform=transform)
for data in documents:
self._context_manager.dump(data)
self._context_manager.teardown_output()
self._output = None
self._context_manager = None
def Xdump_all(self, documents, stream, _kw=enforce, transform=None):
# type: (Any, Union[Path, StreamType], Any, Any) -> Any
"""
Serialize a sequence of Python objects into a YAML stream.
"""
if not hasattr(stream, 'write') and hasattr(stream, 'open'):
# pathlib.Path() instance
with stream.open('w') as fp:
return self.dump_all(documents, fp, _kw, transform=transform)
if _kw is not enforce:
raise TypeError(
'{}.dump(_all) takes two positional argument but at least '
'three were given ({!r})'.format(self.__class__.__name__, _kw)
)
# The stream should have the methods `write` and possibly `flush`.
if self.top_level_colon_align is True:
tlca = max([len(str(x)) for x in documents[0]]) # type: Any
else:
tlca = self.top_level_colon_align
if transform is not None:
fstream = stream
if self.encoding is None:
stream = StringIO()
else:
stream = BytesIO()
serializer, representer, emitter = self.get_serializer_representer_emitter(
stream, tlca
)
try:
self.serializer.open()
for data in documents:
try:
self.representer.represent(data)
except AttributeError:
# nprint(dir(dumper._representer))
raise
self.serializer.close()
finally:
try:
self.emitter.dispose()
except AttributeError:
raise
# self.dumper.dispose() # cyaml
delattr(self, '_serializer')
delattr(self, '_emitter')
if transform:
val = stream.getvalue()
if self.encoding:
val = val.decode(self.encoding)
if fstream is None:
transform(val)
else:
fstream.write(transform(val))
return None
def get_serializer_representer_emitter(self, stream, tlca):
# type: (StreamType, Any) -> Any
# we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
if self.Emitter is not CEmitter:
if self.Serializer is None:
self.Serializer = ruamel.yaml.serializer.Serializer
self.emitter.stream = stream
self.emitter.top_level_colon_align = tlca
if self.scalar_after_indicator is not None:
self.emitter.scalar_after_indicator = self.scalar_after_indicator
return self.serializer, self.representer, self.emitter
if self.Serializer is not None:
# cannot set serializer with CEmitter
self.Emitter = ruamel.yaml.emitter.Emitter
self.emitter.stream = stream
self.emitter.top_level_colon_align = tlca
if self.scalar_after_indicator is not None:
self.emitter.scalar_after_indicator = self.scalar_after_indicator
return self.serializer, self.representer, self.emitter
# C routines
rslvr = (
ruamel.yaml.resolver.BaseResolver
if 'base' in self.typ
else ruamel.yaml.resolver.Resolver
)
class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
def __init__(
selfx,
stream,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=None,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
CEmitter.__init__(
selfx,
stream,
canonical=canonical,
indent=indent,
width=width,
encoding=encoding,
allow_unicode=allow_unicode,
line_break=line_break,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
)
selfx._emitter = selfx._serializer = selfx._representer = selfx
self.Representer.__init__(
selfx, default_style=default_style, default_flow_style=default_flow_style
)
rslvr.__init__(selfx)
self._stream = stream
dumper = XDumper(
stream,
default_style=self.default_style,
default_flow_style=self.default_flow_style,
canonical=self.canonical,
indent=self.old_indent,
width=self.width,
allow_unicode=self.allow_unicode,
line_break=self.line_break,
explicit_start=self.explicit_start,
explicit_end=self.explicit_end,
version=self.version,
tags=self.tags,
)
self._emitter = self._serializer = dumper
return dumper, dumper, dumper
# basic types
def map(self, **kw):
# type: (Any) -> Any
if 'rt' in self.typ:
from dynaconf.vendor.ruamel.yaml.comments import CommentedMap
return CommentedMap(**kw)
else:
return dict(**kw)
def seq(self, *args):
# type: (Any) -> Any
if 'rt' in self.typ:
from dynaconf.vendor.ruamel.yaml.comments import CommentedSeq
return CommentedSeq(*args)
else:
return list(*args)
# helpers
def official_plug_ins(self):
# type: () -> Any
bd = os.path.dirname(__file__)
gpbd = os.path.dirname(os.path.dirname(bd))
res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
return res
def register_class(self, cls):
# type:(Any) -> Any
"""
        register a class for dumping/loading
        - if it has attribute yaml_tag use that to register, else use the class name
        - if it has methods to_yaml/from_yaml use those to dump/load, else dump the
          attributes as a mapping
"""
tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
try:
self.representer.add_representer(cls, cls.to_yaml)
except AttributeError:
def t_y(representer, data):
# type: (Any, Any) -> Any
return representer.represent_yaml_object(
tag, data, cls, flow_style=representer.default_flow_style
)
self.representer.add_representer(cls, t_y)
try:
self.constructor.add_constructor(tag, cls.from_yaml)
except AttributeError:
def f_y(constructor, node):
# type: (Any, Any) -> Any
return constructor.construct_yaml_object(node, cls)
self.constructor.add_constructor(tag, f_y)
return cls
def parse(self, stream):
# type: (StreamTextType) -> Any
"""
Parse a YAML stream and produce parsing events.
"""
_, parser = self.get_constructor_parser(stream)
try:
while parser.check_event():
yield parser.get_event()
finally:
parser.dispose()
try:
self._reader.reset_reader()
except AttributeError:
pass
try:
self._scanner.reset_scanner()
except AttributeError:
pass
# ### context manager
def __enter__(self):
# type: () -> Any
self._context_manager = YAMLContextManager(self)
return self
def __exit__(self, typ, value, traceback):
# type: (Any, Any, Any) -> None
if typ:
nprint('typ', typ)
self._context_manager.teardown_output()
# self._context_manager.teardown_input()
self._context_manager = None
# ### backwards compatibility
def _indent(self, mapping=None, sequence=None, offset=None):
# type: (Any, Any, Any) -> None
if mapping is not None:
self.map_indent = mapping
if sequence is not None:
self.sequence_indent = sequence
if offset is not None:
self.sequence_dash_offset = offset
@property
def indent(self):
# type: () -> Any
return self._indent
@indent.setter
def indent(self, val):
# type: (Any) -> None
self.old_indent = val
@property
def block_seq_indent(self):
# type: () -> Any
return self.sequence_dash_offset
@block_seq_indent.setter
def block_seq_indent(self, val):
# type: (Any) -> None
self.sequence_dash_offset = val
def compact(self, seq_seq=None, seq_map=None):
# type: (Any, Any) -> None
self.compact_seq_seq = seq_seq
self.compact_seq_map = seq_map
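# A minimal usage sketch of the YAML facade defined above. The document text,
# buffer and helper name are illustrative only; with the default encoding
# ('utf-8') the emitter writes bytes, hence the BytesIO buffer.
def _example_yaml_facade():
    from io import BytesIO
    yaml = YAML(typ='safe', pure=True)       # pure-Python safe loader/dumper
    data = yaml.load(u"a: 1\nb: [2, 3]\n")   # -> {'a': 1, 'b': [2, 3]}
    yaml.default_flow_style = False
    buf = BytesIO()
    yaml.dump(data, buf)                     # dump() always needs a stream here
    return buf.getvalue().decode('utf-8')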
class YAMLContextManager(object):
def __init__(self, yaml, transform=None):
# type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None
self._yaml = yaml
self._output_inited = False
self._output_path = None
self._output = self._yaml._output
self._transform = transform
# self._input_inited = False
# self._input = input
# self._input_path = None
# self._transform = yaml.transform
# self._fstream = None
if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
# pathlib.Path() instance, open with the same mode
self._output_path = self._output
self._output = self._output_path.open('w')
# if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
# if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
# # pathlib.Path() instance, open with the same mode
# self._input_path = self._input
# self._input = self._input_path.open('r')
if self._transform is not None:
self._fstream = self._output
if self._yaml.encoding is None:
self._output = StringIO()
else:
self._output = BytesIO()
def teardown_output(self):
# type: () -> None
if self._output_inited:
self._yaml.serializer.close()
else:
return
try:
self._yaml.emitter.dispose()
except AttributeError:
raise
# self.dumper.dispose() # cyaml
try:
delattr(self._yaml, '_serializer')
delattr(self._yaml, '_emitter')
except AttributeError:
raise
if self._transform:
val = self._output.getvalue()
if self._yaml.encoding:
val = val.decode(self._yaml.encoding)
if self._fstream is None:
self._transform(val)
else:
self._fstream.write(self._transform(val))
self._fstream.flush()
self._output = self._fstream # maybe not necessary
if self._output_path is not None:
self._output.close()
def init_output(self, first_data):
# type: (Any) -> None
if self._yaml.top_level_colon_align is True:
tlca = max([len(str(x)) for x in first_data]) # type: Any
else:
tlca = self._yaml.top_level_colon_align
self._yaml.get_serializer_representer_emitter(self._output, tlca)
self._yaml.serializer.open()
self._output_inited = True
def dump(self, data):
# type: (Any) -> None
if not self._output_inited:
self.init_output(data)
try:
self._yaml.representer.represent(data)
except AttributeError:
# nprint(dir(dumper._representer))
raise
# def teardown_input(self):
# pass
#
# def init_input(self):
# # set the constructor and parser on YAML() instance
# self._yaml.get_constructor_parser(stream)
#
# def load(self):
# if not self._input_inited:
# self.init_input()
# try:
# while self._yaml.constructor.check_data():
# yield self._yaml.constructor.get_data()
# finally:
# parser.dispose()
# try:
# self._reader.reset_reader() # type: ignore
# except AttributeError:
# pass
# try:
# self._scanner.reset_scanner() # type: ignore
# except AttributeError:
# pass
def yaml_object(yml):
# type: (Any) -> Any
""" decorator for classes that needs to dump/load objects
The tag for such objects is taken from the class attribute yaml_tag (or the
class name in lowercase in case unavailable)
If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
loading, default routines (dumping a mapping of the attributes) used otherwise.
"""
def yo_deco(cls):
# type: (Any) -> Any
tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
try:
yml.representer.add_representer(cls, cls.to_yaml)
except AttributeError:
def t_y(representer, data):
# type: (Any, Any) -> Any
return representer.represent_yaml_object(
tag, data, cls, flow_style=representer.default_flow_style
)
yml.representer.add_representer(cls, t_y)
try:
yml.constructor.add_constructor(tag, cls.from_yaml)
except AttributeError:
def f_y(constructor, node):
# type: (Any, Any) -> Any
return constructor.construct_yaml_object(node, cls)
yml.constructor.add_constructor(tag, f_y)
return cls
return yo_deco
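# Illustrative sketch of the decorator above with a round-trip YAML() instance;
# the Monster class, tag and document are made-up examples, not part of this
# module's API.
def _example_yaml_object_decorator():
    from io import BytesIO
    yml = YAML()
    @yaml_object(yml)
    class Monster(object):
        yaml_tag = u'!Monster'
        def __init__(self, name, hp):
            self.name = name
            self.hp = hp
    buf = BytesIO()
    yml.dump(Monster('Cave lizard', hp=12), buf)   # emits a '!Monster' tagged mapping
    return yml.load(buf.getvalue())                # reconstructed Monster instance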
########################################################################################
def scan(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
try:
while loader.scanner.check_token():
yield loader.scanner.get_token()
finally:
loader._parser.dispose()
def parse(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
try:
while loader._parser.check_event():
yield loader._parser.get_event()
finally:
loader._parser.dispose()
def compose(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
loader = Loader(stream)
try:
return loader.get_single_node()
finally:
loader.dispose()
def compose_all(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
loader = Loader(stream)
try:
while loader.check_node():
yield loader._composer.get_node()
finally:
loader._parser.dispose()
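# Small sketch of the event- and node-level helpers above; the two-line mapping
# is an illustrative document and the module default (unsafe) Loader is assumed.
def _example_parse_and_compose():
    events = [ev.__class__.__name__ for ev in parse(u"a: 1\nb: 2\n")]
    # e.g. ['StreamStartEvent', 'DocumentStartEvent', 'MappingStartEvent', ...]
    node = compose(u"a: 1\nb: 2\n")
    return events, node.tag                  # 'tag:yaml.org,2002:map' for a mapping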
def load(stream, Loader=None, version=None, preserve_quotes=None):
# type: (StreamTextType, Any, Optional[VersionType], Any) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
if Loader is None:
warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
Loader = UnsafeLoader
loader = Loader(stream, version, preserve_quotes=preserve_quotes)
try:
return loader._constructor.get_single_data()
finally:
loader._parser.dispose()
try:
loader._reader.reset_reader()
except AttributeError:
pass
try:
loader._scanner.reset_scanner()
except AttributeError:
pass
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
# type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any # NOQA
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
if Loader is None:
warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
Loader = UnsafeLoader
loader = Loader(stream, version, preserve_quotes=preserve_quotes)
try:
while loader._constructor.check_data():
yield loader._constructor.get_data()
finally:
loader._parser.dispose()
try:
loader._reader.reset_reader()
except AttributeError:
pass
try:
loader._scanner.reset_scanner()
except AttributeError:
pass
def safe_load(stream, version=None):
# type: (StreamTextType, Optional[VersionType]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags.
"""
return load(stream, SafeLoader, version)
def safe_load_all(stream, version=None):
# type: (StreamTextType, Optional[VersionType]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
"""
return load_all(stream, SafeLoader, version)
def round_trip_load(stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags.
"""
return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
def round_trip_load_all(stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
"""
return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
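# Sketch of the round-trip helpers above: round_trip_load() returns comment- and
# order-preserving containers, so a document can be edited and re-emitted with
# its comments intact (round_trip_dump is defined further below in this module).
# The document text is an illustrative example.
def _example_round_trip_edit():
    doc = u"# servers\nhosts:\n  - alpha   # primary\n  - beta\n"
    data = round_trip_load(doc, preserve_quotes=True)
    data['hosts'].append('gamma')
    return round_trip_dump(data)             # str output, comments preserved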
def emit(
events,
stream=None,
Dumper=Dumper,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
):
# type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
)
try:
for event in events:
dumper.emit(event)
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
enc = None if PY3 else 'utf-8'
def serialize_all(
nodes,
stream=None,
Dumper=Dumper,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
):
# type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = StringIO()
else:
stream = BytesIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
version=version,
tags=tags,
explicit_start=explicit_start,
explicit_end=explicit_end,
)
try:
dumper._serializer.open()
for node in nodes:
dumper.serialize(node)
dumper._serializer.close()
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
# type: (Any, Optional[StreamType], Any, Any) -> Any
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(
documents,
stream=None,
Dumper=Dumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str] # NOQA
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if top_level_colon_align is True:
top_level_colon_align = max([len(str(x)) for x in documents[0]])
if stream is None:
if encoding is None:
stream = StringIO()
else:
stream = BytesIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
top_level_colon_align=top_level_colon_align,
prefix_colon=prefix_colon,
)
try:
dumper._serializer.open()
for data in documents:
try:
dumper._representer.represent(data)
except AttributeError:
# nprint(dir(dumper._representer))
raise
dumper._serializer.close()
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
return None
def dump(
data,
stream=None,
Dumper=Dumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str] # NOQA
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
default_style ∈ None, '', '"', "'", '|', '>'
"""
return dump_all(
[data],
stream,
Dumper=Dumper,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
)
def safe_dump_all(documents, stream=None, **kwds):
# type: (Any, Optional[StreamType], Any) -> Optional[str]
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
# type: (Any, Optional[StreamType], Any) -> Optional[str]
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
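# Sketch of the classic dump helpers above; with stream=None they return the
# produced text (the PY3 default encoding is None, so a str). The data is an
# illustrative example.
def _example_safe_dump_roundtrip():
    data = {'a': 1, 'b': [2, 3]}
    text = safe_dump(data, default_flow_style=False)   # e.g. "a: 1\nb:\n- 2\n- 3\n"
    return safe_load(text) == data                      # True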
def round_trip_dump(
data,
stream=None,
Dumper=RoundTripDumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str] # NOQA
allow_unicode = True if allow_unicode is None else allow_unicode
return dump_all(
[data],
stream,
Dumper=Dumper,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
top_level_colon_align=top_level_colon_align,
prefix_colon=prefix_colon,
)
# Loader/Dumper are no longer composites, to get to the associated
# Resolver()/Representer(), etc., you need to instantiate the class
def add_implicit_resolver(
tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
if Loader is None and Dumper is None:
resolver.add_implicit_resolver(tag, regexp, first)
return
if Loader:
if hasattr(Loader, 'add_implicit_resolver'):
Loader.add_implicit_resolver(tag, regexp, first)
elif issubclass(
Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_implicit_resolver'):
Dumper.add_implicit_resolver(tag, regexp, first)
elif issubclass(
Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
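# Sketch of registering an implicit resolver on the shared module-level Resolver
# (Loader and Dumper left as None). The '!percent' tag, the pattern and the
# sample values are illustrative: plain scalars such as 10% or 7% would then
# resolve to that tag.
def _example_add_implicit_resolver():
    import re
    add_implicit_resolver(u'!percent', re.compile(r'^\d+%$'),
                          first=list(u'0123456789'))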
# this code currently not tested
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
if Loader is None and Dumper is None:
resolver.add_path_resolver(tag, path, kind)
return
if Loader:
if hasattr(Loader, 'add_path_resolver'):
Loader.add_path_resolver(tag, path, kind)
elif issubclass(
Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_path_resolver'):
Dumper.add_path_resolver(tag, path, kind)
elif issubclass(
Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
# type: (Any, Any, Any, Any) -> None
"""
Add an object constructor for the given tag.
    object_constructor is a function that accepts a Loader instance
and a node object and produces the corresponding Python object.
"""
if Loader is None:
constructor.add_constructor(tag, object_constructor)
else:
if hasattr(Loader, 'add_constructor'):
Loader.add_constructor(tag, object_constructor)
return
if issubclass(Loader, BaseLoader):
BaseConstructor.add_constructor(tag, object_constructor)
elif issubclass(Loader, SafeLoader):
SafeConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
Constructor.add_constructor(tag, object_constructor)
elif issubclass(Loader, RoundTripLoader):
RoundTripConstructor.add_constructor(tag, object_constructor)
else:
raise NotImplementedError
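# Sketch of the registration helper above: a custom constructor for a made-up
# '!point' tag, registered on SafeLoader so safe_load() can build Point objects.
def _example_add_constructor():
    class Point(object):                      # illustrative type
        def __init__(self, x, y):
            self.x, self.y = x, y
    def point_constructor(loader, node):
        x, y = loader.construct_sequence(node)
        return Point(x, y)
    add_constructor(u'!point', point_constructor, Loader=SafeLoader)
    return safe_load(u'!point [1, 2]').x      # -> 1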
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
# type: (Any, Any, Any, Any) -> None
"""
Add a multi-constructor for the given tag prefix.
Multi-constructor is called for a node if its tag starts with tag_prefix.
Multi-constructor accepts a Loader instance, a tag suffix,
and a node object and produces the corresponding Python object.
"""
if Loader is None:
constructor.add_multi_constructor(tag_prefix, multi_constructor)
else:
if False and hasattr(Loader, 'add_multi_constructor'):
Loader.add_multi_constructor(tag_prefix, constructor)
return
if issubclass(Loader, BaseLoader):
BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
elif issubclass(Loader, SafeLoader):
SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
elif issubclass(Loader, ruamel.yaml.loader.Loader):
Constructor.add_multi_constructor(tag_prefix, multi_constructor)
elif issubclass(Loader, RoundTripLoader):
RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
else:
raise NotImplementedError
def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
# type: (Any, Any, Any, Any) -> None
"""
Add a representer for the given type.
object_representer is a function accepting a Dumper instance
and an instance of the given data type
and producing the corresponding representation node.
"""
if Dumper is None:
representer.add_representer(data_type, object_representer)
else:
if hasattr(Dumper, 'add_representer'):
Dumper.add_representer(data_type, object_representer)
return
if issubclass(Dumper, BaseDumper):
BaseRepresenter.add_representer(data_type, object_representer)
elif issubclass(Dumper, SafeDumper):
SafeRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
Representer.add_representer(data_type, object_representer)
elif issubclass(Dumper, RoundTripDumper):
RoundTripRepresenter.add_representer(data_type, object_representer)
else:
raise NotImplementedError
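# Companion sketch to the constructor example above: registering a representer
# so the same kind of illustrative Point type can be dumped with a '!point' tag.
def _example_add_representer():
    class Point(object):                      # illustrative type
        def __init__(self, x, y):
            self.x, self.y = x, y
    def point_representer(dumper, data):
        return dumper.represent_sequence(u'!point', [data.x, data.y])
    add_representer(Point, point_representer, Dumper=SafeDumper)
    return safe_dump(Point(1, 2))             # e.g. '!point [1, 2]\n'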
# this code currently not tested
def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
# type: (Any, Any, Any, Any) -> None
"""
Add a representer for the given type.
multi_representer is a function accepting a Dumper instance
and an instance of the given data type or subtype
and producing the corresponding representation node.
"""
if Dumper is None:
representer.add_multi_representer(data_type, multi_representer)
else:
if hasattr(Dumper, 'add_multi_representer'):
Dumper.add_multi_representer(data_type, multi_representer)
return
if issubclass(Dumper, BaseDumper):
BaseRepresenter.add_multi_representer(data_type, multi_representer)
elif issubclass(Dumper, SafeDumper):
SafeRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
Representer.add_multi_representer(data_type, multi_representer)
elif issubclass(Dumper, RoundTripDumper):
RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
else:
raise NotImplementedError
class YAMLObjectMetaclass(type):
"""
The metaclass for YAMLObject.
"""
def __init__(cls, name, bases, kwds):
# type: (Any, Any, Any) -> None
super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore
cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
"""
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_constructor = Constructor
yaml_representer = Representer
yaml_tag = None # type: Any
yaml_flow_style = None # type: Any
@classmethod
def from_yaml(cls, constructor, node):
# type: (Any, Any) -> Any
"""
Convert a representation node to a Python object.
"""
return constructor.construct_yaml_object(node, cls)
@classmethod
def to_yaml(cls, representer, data):
# type: (Any, Any) -> Any
"""
Convert a Python object to a representation node.
"""
return representer.represent_yaml_object(
cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
)
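# Sketch of subclassing YAMLObject: a subclass only declares yaml_tag (plus
# optional to_yaml/from_yaml overrides) and the metaclass registers it on the
# default Constructor/Representer. The Monster class and document are made up.
def _example_yamlobject_subclass():
    class Monster(YAMLObject):
        yaml_tag = u'!Monster'
        def __init__(self, name, hp):
            self.name = name
            self.hp = hp
    monster = load(u'!Monster {name: Dragon, hp: 16}', Loader=Loader)
    return monster.name, monster.hp           # ('Dragon', 16)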
| 35.208469 | 227 | 0.599297 |
| 63db72175ce0927e62d4e08d1e844607490ab0ca | 3,061 | py | Python | fs_patches_of_hybrid_cloud/cherry_for_B038/nova_cascaded/nova/virt/vtep/aws_driver.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | ["Apache-2.0"] | 2 | 2015-06-15T02:16:33.000Z | 2022-02-23T07:10:38.000Z | patches_tool/vcloud_patch/code/nova/virt/vtep/aws_driver.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | ["Apache-2.0"] | 7 | 2016-05-13T06:39:45.000Z | 2016-05-20T02:55:31.000Z | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vtep/aws_driver.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | ["Apache-2.0"] | 4 | 2015-11-02T04:02:50.000Z | 2021-05-13T17:06:00.000Z |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova compute driver support for vtep in vm through openvswitch
"""
from oslo.config import cfg
from nova import exception
from nova import network
from nova.i18n import _, _LI, _LW
from nova.network import model as network_model
from nova.network import neutronv2
from nova.openstack.common import log as logging
from nova.virt import virtapi
from nova.virt import driver
from nova.virt.vtep import network_api
from nova.virt.vtep import driver
from nova.virt.vtep.driver import ProviderPort, VtepDriver
from nova.virt.aws import driver as aws_driver
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VtepAWSDriver(VtepDriver, aws_driver.AwsEc2Driver):
""" The VtepAWSVCloud connection object."""
def __init__(self, virtapi):
super(VtepAWSDriver, self).__init__(virtapi)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
# NOTE(nkapotoxin): Vtep driver must generate provider port, and
# the original driver use it to create vtep-vm, vtep agent help to
# link this port to user vm port, every vtep driver can do spawn
# like this.
# NOTE(nkapotoxin): Generate nwinfo with fixed network, cause vcloud
# driver create vm instance with vdc network id is vif.id in network_info,
# so just set it to network_id
nwinfo = self._allocate_provider_vifs([
CONF.vtepdriver.provider_tunnel_network_name,
CONF.vtepdriver.provider_api_network_name])
aws_driver.AwsEc2Driver.spawn(self, context, instance, image_meta,
injected_files, admin_password,
nwinfo, block_device_info)
# Check provider network port use vm mac, change
# mac and name of nwinfo
instance_mac = None
try:
instance_mac = self.get_instance_macs(instance)
except Exception:
            LOG.error(
                "Failed to get MAC address from AWS for instance: %s" %
                instance,
                exc_info=True)
            raise exception.NovaException(
                "Failed to get MAC address from instance: %s" % instance)
# Generate provider_network port
self._allocate_provider_port(context, instance, network_info,
instance_mac)
| 37.329268 | 82 | 0.684744 |
| 638a4a367fef48b923ccb75a34cd6e6478e2df18 | 20,036 | py | Python | openpype/modules/default_modules/sync_server/sync_server.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | ["MIT"] | null | null | null | openpype/modules/default_modules/sync_server/sync_server.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | ["MIT"] | null | null | null | openpype/modules/default_modules/sync_server/sync_server.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | ["MIT"] | null | null | null |
"""Python 3 only implementation."""
import os
import asyncio
import threading
import concurrent.futures
from concurrent.futures._base import CancelledError
from .providers import lib
from openpype.lib import PypeLogger
from .utils import SyncStatus, ResumableError
log = PypeLogger().get_logger("SyncServer")
async def upload(module, collection, file, representation, provider_name,
remote_site_name, tree=None, preset=None):
"""
Upload single 'file' of a 'representation' to 'provider'.
Source url is taken from 'file' portion, where {root} placeholder
is replaced by 'representation.Context.root'
Provider could be one of implemented in provider.py.
Updates MongoDB, fills in id of file from provider (ie. file_id
from GDrive), 'created_dt' - time of upload
'provider_name' doesn't have to match to 'site_name', single
provider (GDrive) might have multiple sites ('projectA',
'projectB')
Args:
module(SyncServerModule): object to run SyncServerModule API
collection (str): source collection
file (dictionary): of file from representation in Mongo
representation (dictionary): of representation
provider_name (string): gdrive, gdc etc.
site_name (string): site on provider, single provider(gdrive) could
have multiple sites (different accounts, credentials)
tree (dictionary): injected memory structure for performance
preset (dictionary): site config ('credentials_url', 'root'...)
"""
# create ids sequentially, upload file in parallel later
with module.lock:
# this part modifies structure on 'remote_site', only single
# thread can do that at a time, upload/download to prepared
# structure should be run in parallel
remote_handler = lib.factory.get_provider(provider_name,
collection,
remote_site_name,
tree=tree,
presets=preset)
file_path = file.get("path", "")
try:
local_file_path, remote_file_path = resolve_paths(module,
file_path, collection, remote_site_name, remote_handler
)
        except Exception as exp:
            # the resolved paths are required below, so log and re-raise
            # instead of continuing with undefined names
            log.warning("Could not resolve paths for {}: {}".format(file_path, exp))
            raise
target_folder = os.path.dirname(remote_file_path)
folder_id = remote_handler.create_folder(target_folder)
if not folder_id:
err = "Folder {} wasn't created. Check permissions.". \
format(target_folder)
raise NotADirectoryError(err)
loop = asyncio.get_running_loop()
file_id = await loop.run_in_executor(None,
remote_handler.upload_file,
local_file_path,
remote_file_path,
module,
collection,
file,
representation,
remote_site_name,
True
)
return file_id
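# Illustrative sketch of awaiting a single transfer. Every argument is a
# hypothetical stand-in: 'sync_module' is a configured SyncServerModule,
# 'file_doc'/'repre_doc' come from a representation document in MongoDB, and the
# provider/site names depend on the project's site settings.
async def _example_upload_call(sync_module, file_doc, repre_doc):
    file_id = await upload(sync_module, "ProjectA", file_doc, repre_doc,
                           provider_name="gdrive",
                           remote_site_name="studio_gdrive")
    return file_id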
async def download(module, collection, file, representation, provider_name,
remote_site_name, tree=None, preset=None):
"""
Downloads file to local folder denoted in representation.Context.
Args:
module(SyncServerModule): object to run SyncServerModule API
collection (str): source collection
file (dictionary) : info about processed file
representation (dictionary): repr that 'file' belongs to
provider_name (string): 'gdrive' etc
site_name (string): site on provider, single provider(gdrive) could
have multiple sites (different accounts, credentials)
tree (dictionary): injected memory structure for performance
preset (dictionary): site config ('credentials_url', 'root'...)
Returns:
(string) - 'name' of local file
"""
with module.lock:
remote_handler = lib.factory.get_provider(provider_name,
collection,
remote_site_name,
tree=tree,
presets=preset)
file_path = file.get("path", "")
local_file_path, remote_file_path = resolve_paths(
module, file_path, collection, remote_site_name, remote_handler
)
local_folder = os.path.dirname(local_file_path)
os.makedirs(local_folder, exist_ok=True)
local_site = module.get_active_site(collection)
loop = asyncio.get_running_loop()
file_id = await loop.run_in_executor(None,
remote_handler.download_file,
remote_file_path,
local_file_path,
module,
collection,
file,
representation,
local_site,
True
)
return file_id
def resolve_paths(module, file_path, collection,
remote_site_name=None, remote_handler=None):
"""
Returns tuple of local and remote file paths with {root}
placeholders replaced with proper values from Settings or Anatomy
    Pulled out to module level because of Python 2 hosts (GDriveHandler is an issue)
Args:
module(SyncServerModule): object to run SyncServerModule API
file_path(string): path with {root}
collection(string): project name
remote_site_name(string): remote site
remote_handler(AbstractProvider): implementation
Returns:
(string, string) - proper absolute paths, remote path is optional
"""
remote_file_path = ''
if remote_handler:
remote_file_path = remote_handler.resolve_path(file_path)
local_handler = lib.factory.get_provider(
'local_drive', collection, module.get_active_site(collection))
local_file_path = local_handler.resolve_path(file_path)
return local_file_path, remote_file_path
def site_is_working(module, project_name, site_name):
"""
Confirm that 'site_name' is configured correctly for 'project_name'.
Must be here as lib.factory access doesn't work in Python 2 hosts.
Args:
module (SyncServerModule)
project_name(string):
site_name(string):
Returns
(bool)
"""
if _get_configured_sites(module, project_name).get(site_name):
return True
return False
def _get_configured_sites(module, project_name):
"""
Loops through settings and looks for configured sites and checks
its handlers for particular 'project_name'.
Args:
project_setting(dict): dictionary from Settings
only_project_name(string, optional): only interested in
particular project
Returns:
(dict of dict)
{'ProjectA': {'studio':True, 'gdrive':False}}
"""
settings = module.get_sync_project_setting(project_name)
return _get_configured_sites_from_setting(module, project_name, settings)
def _get_configured_sites_from_setting(module, project_name, project_setting):
if not project_setting.get("enabled"):
return {}
initiated_handlers = {}
configured_sites = {}
all_sites = module._get_default_site_configs()
all_sites.update(project_setting.get("sites"))
for site_name, config in all_sites.items():
provider = module.get_provider_for_site(site=site_name)
handler = initiated_handlers.get((provider, site_name))
if not handler:
handler = lib.factory.get_provider(provider,
project_name,
site_name,
presets=config)
initiated_handlers[(provider, site_name)] = \
handler
if handler.is_active():
configured_sites[site_name] = True
return configured_sites
class SyncServerThread(threading.Thread):
"""
Separate thread running synchronization server with asyncio loop.
Stopped when tray is closed.
"""
def __init__(self, module):
super(SyncServerThread, self).__init__()
self.module = module
self.loop = None
self.is_running = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
self.timer = None
def run(self):
self.is_running = True
try:
log.info("Starting Sync Server")
self.loop = asyncio.new_event_loop() # create new loop for thread
asyncio.set_event_loop(self.loop)
self.loop.set_default_executor(self.executor)
asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
asyncio.ensure_future(self.sync_loop(), loop=self.loop)
self.loop.run_forever()
except Exception:
log.warning(
"Sync Server service has failed", exc_info=True
)
finally:
self.loop.close() # optional
async def sync_loop(self):
"""
Runs permanently, each time:
- gets list of collections in DB
- gets list of active remote providers (has configuration,
credentials)
- for each collection it looks for representations that should
be synced
- synchronize found collections
- update representations - fills error messages for exceptions
- waits X seconds and repeat
Returns:
"""
while self.is_running and not self.module.is_paused():
try:
import time
start_time = None
self.module.set_sync_project_settings() # clean cache
for collection, preset in self.module.sync_project_settings.\
items():
if collection not in self.module.get_enabled_projects():
continue
start_time = time.time()
local_site, remote_site = self._working_sites(collection)
if not all([local_site, remote_site]):
continue
sync_repres = self.module.get_sync_representations(
collection,
local_site,
remote_site
)
task_files_to_process = []
files_processed_info = []
# process only unique file paths in one batch
# multiple representation could have same file path
# (textures),
# upload process can find already uploaded file and
# reuse same id
processed_file_path = set()
site_preset = preset.get('sites')[remote_site]
remote_provider = \
self.module.get_provider_for_site(site=remote_site)
handler = lib.factory.get_provider(remote_provider,
collection,
remote_site,
presets=site_preset)
limit = lib.factory.get_provider_batch_limit(
remote_provider)
# first call to get_provider could be expensive, its
# building folder tree structure in memory
# call only if needed, eg. DO_UPLOAD or DO_DOWNLOAD
for sync in sync_repres:
if self.module.\
is_representation_paused(sync['_id']):
continue
if limit <= 0:
continue
files = sync.get("files") or []
if files:
for file in files:
# skip already processed files
file_path = file.get('path', '')
if file_path in processed_file_path:
continue
status = self.module.check_status(
file,
local_site,
remote_site,
preset.get('config'))
if status == SyncStatus.DO_UPLOAD:
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
upload(self.module,
collection,
file,
sync,
remote_provider,
remote_site,
tree,
site_preset))
task_files_to_process.append(task)
                                    # store info for exception handling
files_processed_info.append((file,
sync,
remote_site,
collection
))
processed_file_path.add(file_path)
if status == SyncStatus.DO_DOWNLOAD:
tree = handler.get_tree()
limit -= 1
task = asyncio.create_task(
download(self.module,
collection,
file,
sync,
remote_provider,
remote_site,
tree,
site_preset))
task_files_to_process.append(task)
files_processed_info.append((file,
sync,
local_site,
collection
))
processed_file_path.add(file_path)
log.debug("Sync tasks count {}".
format(len(task_files_to_process)))
files_created = await asyncio.gather(
*task_files_to_process,
return_exceptions=True)
for file_id, info in zip(files_created,
files_processed_info):
file, representation, site, collection = info
error = None
if isinstance(file_id, BaseException):
error = str(file_id)
file_id = None
self.module.update_db(collection,
file_id,
file,
representation,
site,
error)
duration = time.time() - start_time
log.debug("One loop took {:.2f}s".format(duration))
delay = self.module.get_loop_delay(collection)
log.debug("Waiting for {} seconds to new loop".format(delay))
self.timer = asyncio.create_task(self.run_timer(delay))
await asyncio.gather(self.timer)
except ConnectionResetError:
log.warning("ConnectionResetError in sync loop, "
"trying next loop",
exc_info=True)
except CancelledError:
# just stopping server
pass
except ResumableError:
log.warning("ResumableError in sync loop, "
"trying next loop",
exc_info=True)
except Exception:
self.stop()
log.warning("Unhandled except. in sync loop, stopping server",
exc_info=True)
def stop(self):
"""Sets is_running flag to false, 'check_shutdown' shuts server down"""
self.is_running = False
async def check_shutdown(self):
""" Future that is running and checks if server should be running
periodically.
"""
while self.is_running:
await asyncio.sleep(0.5)
tasks = [task for task in asyncio.all_tasks() if
task is not asyncio.current_task()]
list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
results = await asyncio.gather(*tasks, return_exceptions=True)
log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
await self.loop.shutdown_asyncgens()
# to really make sure everything else has time to stop
self.executor.shutdown(wait=True)
await asyncio.sleep(0.07)
self.loop.stop()
async def run_timer(self, delay):
"""Wait for 'delay' seconds to start next loop"""
await asyncio.sleep(delay)
def reset_timer(self):
"""Called when waiting for next loop should be skipped"""
log.debug("Resetting timer")
if self.timer:
self.timer.cancel()
self.timer = None
def _working_sites(self, collection):
if self.module.is_project_paused(collection):
log.debug("Both sites same, skipping")
return None, None
local_site = self.module.get_active_site(collection)
remote_site = self.module.get_remote_site(collection)
if local_site == remote_site:
log.debug("{}-{} sites same, skipping".format(local_site,
remote_site))
return None, None
configured_sites = _get_configured_sites(self.module, collection)
if not all([local_site in configured_sites,
remote_site in configured_sites]):
log.debug("Some of the sites {} - {} is not ".format(local_site,
remote_site) +
"working properly")
return None, None
return local_site, remote_site
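# Illustrative sketch of driving the thread above; 'sync_module' stands in for
# an initialized SyncServerModule instance provided by the surrounding module.
def _example_run_sync_server(sync_module):
    server = SyncServerThread(sync_module)
    server.start()          # run() sets up the asyncio loop and the sync_loop task
    server.reset_timer()    # skip the current delay and start the next loop now
    server.stop()           # flips is_running; check_shutdown() closes the loop
    server.join()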
| 42.90364 | 79 | 0.493861 |
| 6f3bc24116d9466f69af667365af22c4ed18d778 | 15,869 | py | Python | spades/spades.py | XanaduBarchetta/swe681-spades | a00c8e91d8bb96a1459be37751daffe57adc733c | ["MIT"] | 1 | 2020-03-23T00:06:24.000Z | 2020-03-23T00:06:24.000Z | spades/spades.py | XanaduBarchetta/swe681-spades | a00c8e91d8bb96a1459be37751daffe57adc733c | ["MIT"] | 3 | 2020-03-02T22:40:11.000Z | 2020-05-05T19:36:16.000Z | spades/spades.py | XanaduBarchetta/swe681-spades | a00c8e91d8bb96a1459be37751daffe57adc733c | ["MIT"] | 1 | 2020-05-25T21:36:46.000Z | 2020-05-25T21:36:46.000Z |
import re
import logging
from flask import flash, redirect, url_for, request, render_template
from flask_login import current_user, LoginManager, login_required, login_user, logout_user
from spades.dbobjects import User, Game, GameStateEnum, DirectionsEnum, SuitEnum
from spades.exceptions import UserAlreadyExistsException, UserCanNotBidError, BadGameStateError, SpadesNotBrokenError, \
NotFollowingLeadSuitError, CardNotInHandError, NotPlayersTurnError
from spades import app
USERNAME_REGEX = re.compile(r'^\w+$')
PASSWORD_REGEX = re.compile(r'^[-=+!@#$%^&*()\w]+$')
BID_REGEX = re.compile(r'^(\d|1[0-3])$')
CARD_REGEX = re.compile(r'^(0[2-9]|1[0-4])[SHCD]$')
GAME_ID_REGEX = re.compile(r'^[1-9]\d*$')
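# Quick illustration of what the validation patterns above accept; the sample
# values are made up.
def _example_input_validation():
    assert USERNAME_REGEX.match('player_1')
    assert not USERNAME_REGEX.match('bad name!')                  # space and '!' rejected
    assert PASSWORD_REGEX.match('S3cret!+-')
    assert BID_REGEX.match('13') and not BID_REGEX.match('14')    # bids run 0..13
    assert CARD_REGEX.match('10H') and CARD_REGEX.match('02S')    # rank 02-14 plus suit
    assert not CARD_REGEX.match('15H')
    assert GAME_ID_REGEX.match('42') and not GAME_ID_REGEX.match('007')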
logger = logging.getLogger('spades')
hdlr = logging.FileHandler(app.config['LOGFILE'])
logger.addHandler(hdlr)
security = logging.getLogger('audit')
security.setLevel(logging.INFO)
security_formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
shdlr = logging.FileHandler(app.config['AUDITLOGFILE'])
shdlr.setFormatter(security_formatter)
security.addHandler(shdlr)
login_manager = LoginManager()
login_manager.init_app(app)
@app.template_filter('translate_game_state')
def translate_game_state(state: GameStateEnum):
if state == GameStateEnum.IN_PROGRESS:
return "in progress"
return state.value.lower()
@app.template_filter('partner_direction')
def partner_direction(direction: DirectionsEnum):
return DirectionsEnum.get_partner_direction(direction).value
@app.template_filter('translate_suit')
def translate_suit(suit: SuitEnum):
return SuitEnum.get_suit_word(suit).title()
@app.route('/', methods=["GET", "POST"])
def login():
if request.method == 'POST':
username = request.form.get('username', '').strip()
password = request.form.get('password', '')
# Validate input
failed_validation = False
if not isinstance(username, str) or not isinstance(password, str) or len(username) < 1 or len(password) < 1:
flash('You must provide both a username and password.')
security.info("User has not provided both a username and password")
failed_validation = True
if len(username) > 32:
flash("Usernames can not be longer than 32 characters.")
security.info("User %s has inputted a name longer than 32 characters.", username)
failed_validation = True
if not USERNAME_REGEX.match(username):
flash("Usernames may only contain letters, numbers, and underscore.")
security.info(
"User %s has inputted a name which does not contain letters, numbers, and underscore.",
username)
failed_validation = True
# Don't check password length here in case requirements have changed.
# We don't want to lock out legacy users!
if not PASSWORD_REGEX.match(password):
flash("Passwords are limited to letters, numbers, and the following characters: -=+!@#$%^&*()_")
security.info("User %s has inputted invalid password which does not meet -=+!@#$%%^&*()_", username)
failed_validation = True
if failed_validation:
security.info("There has been a failed validation.")
return redirect(url_for('login'))
user = User.get_user(username, password)
if user is None:
flash('The provided credentials do not match any registered user.')
security.info("The provided credentials do not match any registered user for %s.", username)
return redirect(url_for('login'))
else:
user.name = username
login_user(user)
security.info("Successful login for user %s.", username)
return redirect(url_for('home'))
else:
return render_template('login.html')
@app.route('/home')
@login_required
def home():
data = {
'name': current_user.username,
'user_is_in_game': current_user.get_active_game() is not None
}
return render_template('home.html', **data)
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
# handle login failed
@app.errorhandler(401)
def page_not_found(e):
security.info("Page not found reached. Error: %s", e)
return render_template('error.html'), 401
@app.route("/logout", methods=['GET'])
@login_required
def logout():
logger.info(request.method)
logout_user()
return redirect(url_for('login'))
@app.route("/signup", methods=["GET", "POST"])
def signup():
logger.info(request.method)
if request.method == 'POST':
username = request.form.get('username', '').strip()
password = request.form.get('password', '')
# Validate input
failed_validation = False
if not isinstance(username, str) or not isinstance(password, str) or len(username) < 1 or len(password) < 1:
flash('You must provide both a username and password.')
security.info("User has not provided both a username and password")
failed_validation = True
if len(username) > 32:
flash("Usernames can not be longer than 32 characters.")
security.info("Username was longer than 32 characters.")
failed_validation = True
if not USERNAME_REGEX.match(username):
flash("Usernames may only contain letters, numbers, and underscore.")
security.info("Username %s was not etters, numbers, or underscore.", username)
failed_validation = True
if len(password) < app.config['MIN_PASSWORD_LENGTH'] or len(password) > app.config['MAX_PASSWORD_LENGTH']:
flash("Passwords must be no fewer than {min} and no greater than {max} characters.".format(
min=app.config['MIN_PASSWORD_LENGTH'],
max=app.config['MAX_PASSWORD_LENGTH']
))
security.info("Password did not correct criteria.")
failed_validation = True
if not PASSWORD_REGEX.match(password):
flash("Passwords are limited to letters, numbers, and the following characters: -=+!@#$%^&*()_")
security.info("Password did not the following characterss: -=+!@#$%^&*()_.")
failed_validation = True
if failed_validation:
return redirect(url_for('signup'))
try:
User.create_user(username, password)
except UserAlreadyExistsException:
flash("Username already exists. Please choose a different username.")
security.info("Username %s already exists.", username)
return redirect(url_for('signup'))
else:
flash("You may now login with your new username and password.")
security.info("Successful creation of username: %s", username)
return redirect(url_for('login'))
elif request.method == 'GET':
return render_template("signup.html")
@app.route('/joingame', methods=['GET'])
@login_required
def join_game():
if current_user.get_active_game() is None:
# Join a new/filling game
Game.join_game(current_user.user_id)
return redirect(url_for('game_home'))
@app.route('/game', methods=['GET'])
@login_required
def game_home():
"""
Main endpoint for dealing with game interactions
"""
game = current_user.get_active_game()
if game is None:
game = current_user.get_last_ended_game()
if game is None:
flash('If you want to join a game, click the Join button.')
return redirect(url_for('home'))
else:
flash('These are the results from your most recent ended game.')
return redirect(url_for('game_summary', game_id=game.game_id))
response_data = {
'game': game,
'north_playername': User.get_username_by_id(game.player_north),
'south_playername': User.get_username_by_id(game.player_south),
'east_playername': User.get_username_by_id(game.player_east),
'west_playername': User.get_username_by_id(game.player_west),
'player_direction': DirectionsEnum.NORTH, # Default to north, will check below
'hand': None,
'cards': {
'spades': [],
'hearts': [],
'clubs': [],
'diamonds': []
},
'trick': None,
'tricks_taken': None,
'enable_bidding': False,
}
if game.player_is_direction(current_user.user_id, DirectionsEnum.SOUTH):
response_data['player_direction'] = DirectionsEnum.SOUTH
if game.player_is_direction(current_user.user_id, DirectionsEnum.EAST):
response_data['player_direction'] = DirectionsEnum.EAST
if game.player_is_direction(current_user.user_id, DirectionsEnum.WEST):
response_data['player_direction'] = DirectionsEnum.WEST
if game.state == GameStateEnum.FILLING:
# No need to fetch hand or trick data, as game hasn't started yet
return render_template('game.html', **response_data)
elif game.state == GameStateEnum.IN_PROGRESS:
# Fetch current Hand data
hand = game.get_latest_hand()
response_data['hand'] = hand
response_data['ns_score'], response_data['ew_score'] = hand.get_score_from_previous_hand()
cards = hand.get_playable_cards_for_user(current_user.user_id)
cards.sort(key=lambda x: x.card)
for suit, letter in [('spades', 'S'), ('hearts', 'H'), ('clubs', 'C'), ('diamonds', 'D')]:
response_data['cards'][suit] = [card for card in cards if card.card.endswith(letter)]
if None not in [
hand.north_bid,
hand.south_bid,
hand.east_bid,
hand.west_bid
]:
# All bids have been placed. Fetch trick data.
response_data['trick'] = hand.get_latest_trick()
response_data['next_play_direction'] = response_data['trick'].get_next_play_direction()
response_data['tricks_taken'] = {key.value: value for key, value in hand.get_total_tricks_taken().items()}
else:
# Waiting on at least one bid
response_data['enable_bidding'] = game.can_user_place_bid(current_user.user_id, hand)
return render_template('game.html', **response_data)
else:
# Shouldn't arrive at this state. Log it.
flash('An unknown error occurred. Please try again.')
logger.error(
'Game with id [%s] in bad state while user [%s] attempted to display game home.',
game.game_id,
current_user.username
)
return redirect(url_for('home'))
@app.route('/game/bid', methods=['POST'])
@login_required
def game_bid():
"""
Endpoint for placing a bid
"""
game = current_user.get_active_game()
bid = request.form.get('bid', '')
# Validate bid
if not isinstance(bid, str):
# Invalid input for bid, but no need to alert user
return redirect(url_for('game_home'))
bid = bid.strip()
if not BID_REGEX.match(bid):
flash('Your bid must be an integer bid from zero (0) to thirteen (13).')
return redirect(url_for('game_home'))
if game is None:
flash('If you want to join a game, click the Join button.')
return redirect(url_for('home'))
else:
hand = game.get_latest_hand()
# Attempt to place the bid
try:
hand.place_bid(current_user.user_id, int(bid), game)
except UserCanNotBidError:
flash('Bidding is not available at this time for you.')
return redirect(url_for('game_home'))
except BadGameStateError:
            flash('An error occurred while trying to place your bid. Please try again.')
return redirect(url_for('game_home'))
else:
flash(f'Your bid of {bid} has been placed.')
return redirect(url_for('game_home'))
@app.route('/game/play_card', methods=['POST'])
@login_required
def play_card():
"""
Endpoint for playing a card
"""
game = current_user.get_active_game()
card = request.form.get('card', '')
if game is None:
flash('If you want to join a game, click the Join button.')
return redirect(url_for('home'))
else:
# Validate card
if not isinstance(card, str):
# Invalid input for card, but no need to alert user
return redirect(url_for('game_home'))
card = card.strip()
if not CARD_REGEX.match(card):
flash('Invalid card format.')
return redirect(url_for('game_home'))
hand = game.get_latest_hand()
trick = hand.get_latest_trick(with_for_update=True)
# Attempt to play the card
try:
trick.play_card(current_user.user_id, card, game, hand)
except NotPlayersTurnError:
flash('It is not your turn to play a card.')
return redirect(url_for('game_home'))
except CardNotInHandError:
flash('The card \'{0}\' is not in your hand or has already been played.'
' Please play a card from your hand.'.format(card))
return redirect(url_for('game_home'))
except SpadesNotBrokenError:
flash('Spades have not yet been broken. Please choose a different suit.')
return redirect(url_for('game_home'))
except NotFollowingLeadSuitError:
flash('You must follow the lead suit whenever possible. Please choose a card with the lead suit.')
return redirect(url_for('game_home'))
except BadGameStateError:
flash('An error occurred while trying to play your card. Please try again.')
return redirect(url_for('game_home'))
else:
flash(f'You played {card} successfully.')
if trick.winner is not None:
flash(f'{trick.winner.value} won the trick.')
if trick.trick_number == 13 and game.state == GameStateEnum.IN_PROGRESS:
flash('A new hand has been dealt.')
return redirect(url_for('game_home'))
elif game.state == GameStateEnum.COMPLETED:
if game.ns_win:
flash('North/South team won the game.')
else:
flash('East/West team won the game.')
# Redirect to game summary screen
redirect(url_for('game_summary', game_id=game.game_id))
return redirect(url_for('game_home'))
@app.route('/users', methods=['GET'])
@login_required
def user_list():
users = User.query.all()
return render_template('user_list.html', users=users)
@app.route('/game/list', methods=['GET'])
@login_required
def game_list():
games = Game.get_viewable_games()
users = {user.user_id: user.username for user in User.query.all()}
return render_template('game_list.html', games=games, users=users)
@app.route('/game/summary/<game_id>', methods=['GET'])
@login_required
def game_summary(game_id):
if not GAME_ID_REGEX.match(game_id):
flash('Malformed game_id.')
return redirect(url_for('game_list'))
game_id = int(game_id)
game = Game.get_game_by_id(game_id)
if game is None:
flash('No such game exists.')
return redirect(url_for('game_list'))
if game.state not in [GameStateEnum.ABANDONED, GameStateEnum.FORFEITED, GameStateEnum.COMPLETED]:
flash('There is no viewable game with that id.')
return redirect(url_for('game_list'))
hands = game.get_all_hands_and_tricks()
users = {user.user_id: user.username for user in User.query.all()}
return render_template('game_summary.html', game=game, hands=hands, users=users)
| 40.585678
| 120
| 0.64125
|
800ed10d20bdc3a0813e945d38aa43bf70187f5e
| 2,172
|
py
|
Python
|
venv/lib/python3.6/site-packages/gym/vector/tests/test_sync_vector_env.py
|
amousist/cartpole
|
aec01e9c2d28eda6019fe8bb94804a78f2d7fbc0
|
[
"MIT"
] | 3
|
2020-06-02T11:23:57.000Z
|
2021-09-02T12:02:20.000Z
|
gym/vector/tests/test_sync_vector_env.py
|
huangjiancong1/gym_baxter
|
7534d9504b4678a3b09a4e17466f54eaeaf23ccc
|
[
"Apache-2.0"
] | null | null | null |
gym/vector/tests/test_sync_vector_env.py
|
huangjiancong1/gym_baxter
|
7534d9504b4678a3b09a4e17466f54eaeaf23ccc
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import numpy as np
from gym.spaces import Box
from gym.vector.tests.utils import make_env
from gym.vector.sync_vector_env import SyncVectorEnv
def test_create_sync_vector_env():
env_fns = [make_env('CubeCrash-v0', i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
finally:
env.close()
assert env.num_envs == 8
def test_reset_sync_vector_env():
env_fns = [make_env('CubeCrash-v0', i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
def test_step_sync_vector_env():
env_fns = [make_env('CubeCrash-v0', i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
actions = [env.single_action_space.sample() for _ in range(8)]
observations, rewards, dones, _ = env.step(actions)
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
assert isinstance(rewards, np.ndarray)
assert isinstance(rewards[0], (float, np.floating))
assert rewards.ndim == 1
assert rewards.size == 8
assert isinstance(dones, np.ndarray)
assert dones.dtype == np.bool_
assert dones.ndim == 1
assert dones.size == 8
def test_check_observations_sync_vector_env():
# CubeCrash-v0 - observation_space: Box(40, 32, 3)
env_fns = [make_env('CubeCrash-v0', i) for i in range(8)]
# MemorizeDigits-v0 - observation_space: Box(24, 32, 3)
env_fns[1] = make_env('MemorizeDigits-v0', 1)
with pytest.raises(RuntimeError):
env = SyncVectorEnv(env_fns)
env.close()
| 31.478261
| 74
| 0.691529
|
8516a1a8460d25f13a6afd92223eaf9ec661fc57
| 3,750
|
py
|
Python
|
travis_pypi_setup.py
|
omtinez/pddb
|
a24cee0702c8286c5c466c51ca65cf8dbc2c183c
|
[
"MIT"
] | 3
|
2016-01-11T16:04:49.000Z
|
2016-02-27T19:07:22.000Z
|
travis_pypi_setup.py
|
omtinez/pddb
|
a24cee0702c8286c5c466c51ca65cf8dbc2c183c
|
[
"MIT"
] | 2
|
2016-06-11T23:54:45.000Z
|
2021-03-25T21:34:17.000Z
|
travis_pypi_setup.py
|
omtinez/pddb
|
a24cee0702c8286c5c466c51ca65cf8dbc2c183c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except:
from urllib.request import urlopen
GITHUB_REPO = 'omtinez/pddb'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
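# Illustrative sketch only -- _demo_encrypt_roundtrip is a hypothetical helper
# that this script never calls. Assuming the `cryptography` package is
# installed, it generates a throwaway RSA key pair, encrypts a sample secret
# with the PEM-encoded public key via encrypt() above, and decrypts it with
# the private key to confirm the base64 payload round-trips.
def _demo_encrypt_roundtrip(sample_secret=b'not-a-real-password'):
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    private_key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend())
    pem = private_key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo).decode()
    token = encrypt(pem, sample_secret)
    decrypted = private_key.decrypt(base64.b64decode(token), PKCS1v15())
    assert decrypted == sample_secret
    return token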
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| 30.487805
| 79
| 0.7
|
5c399145b39d2027136ab65cfd525cb9fd8ff811
| 10,760
|
py
|
Python
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 5
|
2017-07-20T11:15:10.000Z
|
2020-04-16T15:42:55.000Z
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 8
|
2020-06-18T17:31:19.000Z
|
2022-03-02T08:32:03.000Z
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/params.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 12
|
2017-05-17T09:48:01.000Z
|
2021-08-05T19:01:25.000Z
|
#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions.default import default
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import format_stack_version
from spark import *
import status_params
# a map of the Ambari role to the component name
# for use with /usr/iop/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
'SPARK_CLIENT' : 'spark-client',
'SPARK_THRIFTSERVER' : 'spark-thriftserver'
}
upgrade_direction = default("/commandParams/upgrade_direction", None)
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
iop_full_version = format_stack_version(version)
hadoop_home = "/usr/iop/current/hadoop-client"
spark_conf = format("/usr/iop/current/{component_directory}/conf")
spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
spark_pid_dir = status_params.spark_pid_dir
spark_role_root = "spark-client"
command_role = default("/role", "")
if command_role == "SPARK_CLIENT":
spark_role_root = "spark-client"
elif command_role == "SPARK_JOBHISTORYSERVER":
spark_role_root = "spark-historyserver"
elif command_role == "SPARK_THRIFTSERVER":
spark_role_root = "spark-thriftserver"
spark_home = format("/usr/iop/current/{spark_role_root}")
if not os.path.exists(spark_home):
os.symlink('/usr/iop/current/spark', spark_home)
java_home = config['hostLevelParams']['java_home']
spark_user = status_params.spark_user
hive_user = status_params.hive_user
spark_group = status_params.spark_group
user_group = status_params.user_group
spark_hdfs_user_dir = format("/user/{spark_user}")
spark_hdfs_user_mode = 0755
spark_eventlog_dir_mode = 01777
spark_jar_hdfs_dir = "/iop/apps/" + str(iop_full_version) + "/spark/jars"
spark_jar_hdfs_dir_mode = 0755
spark_jar_file_mode = 0444
spark_jar_src_dir = "/usr/iop/current/spark-historyserver/lib"
spark_jar_src_file = "spark-assembly.jar"
spark_history_server_pid_file = status_params.spark_history_server_pid_file
spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
spark_submit_cmd = format("{spark_home}/bin/spark-submit")
spark_smoke_example = "org.apache.spark.examples.SparkPi"
spark_service_check_cmd = format(
"{spark_submit_cmd} --class {spark_smoke_example} --master yarn-cluster --num-executors 1 --driver-memory 256m --executor-memory 256m --executor-cores 1 {spark_home}/lib/spark-examples*.jar 1")
spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
if len(spark_jobhistoryserver_hosts) > 0:
spark_history_server_host = spark_jobhistoryserver_hosts[0]
else:
spark_history_server_host = "localhost"
if len(spark_thriftserver_hosts) > 0:
spark_thrift_server_host = spark_thriftserver_hosts[0]
else:
spark_thrift_server_host = "localhost"
# spark-defaults params
if has_namenode:
namenode_host = str(namenode_hosts[0])
else:
namenode_host = "localhost"
hadoop_fs_defaultfs = config['configurations']['core-site']['fs.defaultFS']
spark_eventlog_dir_default=hadoop_fs_defaultfs + config['configurations']['spark-defaults']['spark.eventLog.dir']
spark_yarn_jar_default=hadoop_fs_defaultfs + '/iop/apps/' + str(iop_full_version) + '/spark/jars/spark-assembly.jar'
spark_yarn_applicationMaster_waitTries = default(
"/configurations/spark-defaults/spark.yarn.applicationMaster.waitTries", '10')
spark_yarn_submit_file_replication = default("/configurations/spark-defaults/spark.yarn.submit.file.replication", '3')
spark_yarn_preserve_staging_files = default("/configurations/spark-defaults/spark.yarn.preserve.staging.files", "false")
spark_yarn_scheduler_heartbeat_interval = default(
"/configurations/spark-defaults/spark.yarn.scheduler.heartbeat.interval-ms", "5000")
spark_yarn_queue = default("/configurations/spark-defaults/spark.yarn.queue", "default")
spark_yarn_containerLauncherMaxThreads = default(
"/configurations/spark-defaults/spark.yarn.containerLauncherMaxThreads", "25")
spark_yarn_max_executor_failures = default("/configurations/spark-defaults/spark.yarn.max.executor.failures", "3")
spark_yarn_executor_memoryOverhead = default("/configurations/spark-defaults/spark.yarn.executor.memoryOverhead", "384")
spark_yarn_driver_memoryOverhead = default("/configurations/spark-defaults/spark.yarn.driver.memoryOverhead", "384")
spark_history_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")
spark_thriftserver_port = default("/configurations/spark-env/spark_thriftserver_port", "10015")
spark_eventlog_enabled = default("/configurations/spark-defaults/spark.eventLog.enabled", "true")
spark_eventlog_dir = default("/configurations/spark-defaults/spark.eventLog.dir", spark_eventlog_dir_default)
spark_yarn_jar = default("/configurations/spark-defaults/spark.yarn.jar", spark_yarn_jar_default)
spark_thriftserver_ui_port = 4039
# add the properties that cannot be configured thru UI
spark_conf_properties_map = dict(config['configurations']['spark-defaults'])
spark_conf_properties_map["spark.yarn.historyServer.address"] = spark_history_server_host + ":" + str(spark_history_ui_port)
spark_conf_properties_map["spark.yarn.jar"] = spark_yarn_jar
spark_conf_properties_map["spark.eventLog.dir"] = spark_eventlog_dir_default
spark_env_sh = config['configurations']['spark-env']['content']
spark_log4j = config['configurations']['spark-log4j']['content']
#spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
spark_javaopts_properties = config['configurations']['spark-javaopts-properties']['content']
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
is_hive_installed = not len(hive_server_host) == 0
spark_driver_extraJavaOptions = str(config['configurations']['spark-defaults']['spark.driver.extraJavaOptions'])
if spark_driver_extraJavaOptions.find('-Diop.version') == -1:
spark_driver_extraJavaOptions = spark_driver_extraJavaOptions + ' -Diop.version=' + str(iop_full_version)
spark_yarn_am_extraJavaOptions = str(config['configurations']['spark-defaults']['spark.yarn.am.extraJavaOptions'])
if spark_yarn_am_extraJavaOptions.find('-Diop.version') == -1:
spark_yarn_am_extraJavaOptions = spark_yarn_am_extraJavaOptions + ' -Diop.version=' + str(iop_full_version)
spark_javaopts_properties = str(spark_javaopts_properties)
if spark_javaopts_properties.find('-Diop.version') == -1:
spark_javaopts_properties = spark_javaopts_properties+ ' -Diop.version=' + str(iop_full_version)
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = functions.get_kinit_path()
spark_kerberos_keytab = config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
spark_kerberos_principal = config['configurations']['spark-defaults']['spark.history.kerberos.principal']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
if security_enabled:
spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
# for create_hdfs_directory
# To create hdfs directory
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hostname = config["hostname"]
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = functions.get_kinit_path()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
# Hiveserver 2 properties
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_http_endpoint = config['configurations']['hive-site']['hive.server2.thrift.http.path']
hive_ssl = config['configurations']['hive-site']['hive.server2.use.SSL']
if hive_ssl:
hive_ssl_keystore_path = str(config['configurations']['hive-site']['hive.server2.keystore.path'])
hive_ssl_keystore_password = str(config['configurations']['hive-site']['hive.server2.keystore.password'])
else:
hive_ssl_keystore_path = None
hive_ssl_keystore_password = None
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs
)
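# Illustrative sketch only -- _demo_partial_pattern is a hypothetical helper,
# not used by the stack scripts. It repeats the functools.partial pattern used
# for HdfsResource above with a plain function: the shared keyword arguments
# are bound once, so each call site only passes the value that varies.
def _demo_partial_pattern():
  def make_resource(path, user=None, security_enabled=False, keytab=None):
    return {'path': path, 'user': user,
            'security_enabled': security_enabled, 'keytab': keytab}
  # functools is already imported above for the HdfsResource partial.
  MakeResource = functools.partial(
    make_resource,
    user=hdfs_user,
    security_enabled=security_enabled,
    keytab=hdfs_user_keytab
  )
  return [MakeResource('/user/spark'), MakeResource('/iop/apps/spark/jars')]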
| 49.357798
| 201
| 0.803996
|
9a19b80c4568ac222889a915cba73e7c45253f2f
| 4,467
|
py
|
Python
|
test/downloads.py
|
bpiwowar/ir_datasets
|
556c79e03c4538f53e0a7d8d137a0c39aca7e8b8
|
[
"Apache-2.0"
] | null | null | null |
test/downloads.py
|
bpiwowar/ir_datasets
|
556c79e03c4538f53e0a7d8d137a0c39aca7e8b8
|
[
"Apache-2.0"
] | null | null | null |
test/downloads.py
|
bpiwowar/ir_datasets
|
556c79e03c4538f53e0a7d8d137a0c39aca7e8b8
|
[
"Apache-2.0"
] | null | null | null |
import io
import random
import sys
import json
import time
import datetime
from contextlib import contextmanager
import re
import os
import unittest
import argparse
import json
import ir_datasets
@contextmanager
def tmp_environ(**kwargs):
orig_values = {}
for key, value in kwargs.items():
orig_values[key] = os.environ.get(key)
os.environ[key] = value
try:
yield
finally:
for key, value in kwargs.items():
orig_value = orig_values[key]
if orig_value is not None:
os.environ[key] = orig_value
else:
del os.environ[key]
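# Illustrative usage sketch -- _demo_tmp_environ is a hypothetical helper that
# the tests below do not call. It shows that tmp_environ overrides a variable
# only inside the `with` block and restores (or removes) it afterwards.
def _demo_tmp_environ():
    previous = os.environ.get('IR_DATASETS_DL_TRIES')
    with tmp_environ(IR_DATASETS_DL_TRIES='3'):
        assert os.environ['IR_DATASETS_DL_TRIES'] == '3'
    assert os.environ.get('IR_DATASETS_DL_TRIES') == previous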
class TestDownloads(unittest.TestCase):
dlc_filter = None
output_path = None
rand_delay = None # useful for being nice to servers when running tests by adding a random delay between tests
output_data = []
def test_downloads(self):
with open('ir_datasets/etc/downloads.json') as f:
data = json.load(f)
try:
self._test_download_iter(data)
finally:
if self.output_path is not None:
with open(self.output_path, 'wt') as f:
json.dump(self.output_data, f)
def _test_download_iter(self, data, prefix=''):
with tmp_environ(IR_DATASETS_DL_TRIES='10'): # give the test up to 10 attempts to download
if 'url' in data and 'expected_md5' in data:
if self.dlc_filter is None or re.search(self.dlc_filter, prefix) and not data.get('skip_test', False):
with self.subTest(prefix):
if self.rand_delay is not None:
# sleep in range of [0.5, 1.5] * rand_delay seconds
time.sleep(random.uniform(self.rand_delay * 0.5, self.rand_delay * 1.5))
record = {
'name': prefix,
'url': data['url'],
'time': datetime.datetime.now().isoformat(),
'duration': None,
'result': 'IN_PROGRESS',
                            'fail_message': None,
'md5': data['expected_md5'],
'size': 0,
}
self.output_data.append(record)
start = time.time()
try:
download = ir_datasets.util.Download([ir_datasets.util.RequestsDownload(data['url'], **data.get('download_args', {}))], expected_md5=data['expected_md5'], stream=True)
with download.stream() as stream:
inp = stream.read(io.DEFAULT_BUFFER_SIZE)
while len(inp) > 0:
record['size'] += len(inp)
inp = stream.read(io.DEFAULT_BUFFER_SIZE)
record['duration'] = time.time() - start
record['result'] = 'PASS'
except KeyboardInterrupt:
record['duration'] = time.time() - start
record['result'] = 'USER_SKIP'
self.skipTest('Test skipped by user')
self.output_data.append({})
except Exception as ex:
record['duration'] = time.time() - start
record['result'] = 'FAIL' if not data.get('irds_mirror') else 'FAIL_BUT_HAS_MIRROR'
                            record['fail_message'] = str(ex)
raise
elif 'instructions' in data:
pass
else:
for key in data.keys():
self._test_download_iter(data[key], prefix=f'{prefix}/{key}' if prefix else key)
if __name__ == '__main__':
argv = sys.argv
for i, arg in enumerate(argv):
if arg == '--filter':
TestDownloads.dlc_filter = argv[i+1]
argv = argv[:i] + argv[i+2:]
for i, arg in enumerate(argv):
if arg == '--output':
TestDownloads.output_path = argv[i+1]
argv = argv[:i] + argv[i+2:]
for i, arg in enumerate(argv):
if arg == '--randdelay':
TestDownloads.rand_delay = float(argv[i+1])
argv = argv[:i] + argv[i+2:]
unittest.main(argv=argv)
| 40.609091
| 195
| 0.498993
|
a294f633bbe356a5eb694afaaad35534feba07e2
| 3,608
|
py
|
Python
|
tests/test_gmaps_takeout.py
|
PFigs/location-history
|
e858251448bd6dff72a861e6ff844d2aede2d479
|
[
"MIT"
] | 1
|
2020-03-20T12:46:13.000Z
|
2020-03-20T12:46:13.000Z
|
tests/test_gmaps_takeout.py
|
PFigs/location-history
|
e858251448bd6dff72a861e6ff844d2aede2d479
|
[
"MIT"
] | 13
|
2019-04-23T06:02:09.000Z
|
2021-10-19T01:01:40.000Z
|
tests/test_gmaps_takeout.py
|
PFigs/location-history
|
e858251448bd6dff72a861e6ff844d2aede2d479
|
[
"MIT"
] | null | null | null |
import timemap
import datetime
gmaps_sample = "./tests/gmaps_sample.json"
FILE_START = "2018-04-19T21:07:52.746000"
FILE_END = "2018-04-19T20:47:20.433000"
def test_json_browse():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.browse()
def test_accuracy_set():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.lookup()
for day, events in takeout:
for event in events.values():
print(event)
assert event.accuracy > 0
def test_json_lookup():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.browse()
takeout.load()
takeout.lookup()
assert len(takeout) == takeout.report["nb_entries"]
assert len(takeout) > 0
return takeout
def test_json_lookup_with_start():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.browse()
start = datetime.datetime.fromtimestamp(takeout.report["start_timestamp"])
takeout.load()
takeout.lookup(start=start)
assert len(takeout) == takeout.report["nb_entries"]
assert len(takeout) > 0
def test_json_lookup_with_start_end():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.browse()
start = datetime.datetime.fromtimestamp(takeout.report["start_timestamp"])
end = datetime.datetime.fromtimestamp(takeout.report["end_timestamp"])
takeout.load()
takeout.lookup(start=start, end=end)
assert len(takeout) > 0
assert len(takeout) == takeout.report["nb_entries"]
def test_json_lookup_with_end():
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
takeout.browse()
end = datetime.datetime.fromtimestamp(takeout.report["end_timestamp"])
takeout.load()
takeout.lookup(end=end)
assert len(takeout) == takeout.report["nb_entries"]
def test_event_filter_none():
def event_filter(event, latitude, longitude, altitude, radius, **kwargs):
event.distance_3d(latitude, longitude, altitude)
if event.distance > radius:
return False
return True
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
args = dict(latitude=0, longitude=0, altitude=0, radius=10)
takeout.lookup(event_filter=event_filter, event_filter_args=args)
takeout.summary()
assert len(takeout) == 0
assert takeout.report["nb_entries"] == 0
def test_event_filter_some():
def event_filter(event, latitude, longitude, altitude, radius, **kwargs):
event.distance_2d(latitude, longitude)
if event.distance > radius:
return False
return True
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
args = dict(latitude=0, longitude=0, altitude=0, radius=10230370)
takeout.lookup(event_filter=event_filter, event_filter_args=args)
assert len(takeout) > 0
assert takeout.report["nb_entries"] > 0
def test_daily_report():
def event_filter(event, latitude, longitude, altitude, radius, **kwargs):
event.distance_2d(latitude, longitude)
if event.distance > radius:
return False
return True
takeout = timemap.gmaps.Takeout(filepath=gmaps_sample)
args = dict(latitude=0, longitude=0, altitude=0, radius=10230370)
takeout.lookup(event_filter=event_filter, event_filter_args=args)
takeout.report.total_daily(takeout.events)
for item in takeout.report.daily:
item.describe()
takeout.summary()
takeout.report.total_monthly()
for item in takeout.report.montlhy:
item.describe()
takeout.summary(monthly=True)
if __name__ == "__main__":
takeout = test_json_lookup()
| 28.1875
| 78
| 0.705654
|
605fa0c8e662ff8de52200f91e517c5748238d43
| 1,693
|
py
|
Python
|
day_interval/legend.py
|
kohjiaxuan/Stock-Market-Dashboard
|
a517136eff62a482455b68c0e8ebed8361ab1d53
|
[
"MIT"
] | 13
|
2019-06-13T15:50:47.000Z
|
2022-03-11T07:57:11.000Z
|
min_interval/legend.py
|
kohjiaxuan/Stock-Market-Dashboard
|
a517136eff62a482455b68c0e8ebed8361ab1d53
|
[
"MIT"
] | null | null | null |
min_interval/legend.py
|
kohjiaxuan/Stock-Market-Dashboard
|
a517136eff62a482455b68c0e8ebed8361ab1d53
|
[
"MIT"
] | 1
|
2021-08-22T06:12:33.000Z
|
2021-08-22T06:12:33.000Z
|
def legendcalculation(AAPLdailyopen_n, AMZNdailyopen_n, GOOGLdailyopen_n, FBdailyopen_n, MSFTdailyopen_n, NFLXdailyopen_n):
#Determine where to put legend board in relative stock performance graph
stockgoodperformance = 0
#Default location for legends of small graphs is top right, will change to bottom right depending on graph
AAPLlegendloc = 1
AMZNlegendloc = 1
GOOGLlegendloc = 1
FBlegendloc = 1
MSFTlegendloc = 1
NFLXlegendloc = 1
#Find number of stocks closing higher than when they opened
if AAPLdailyopen_n[-1] > AAPLdailyopen_n[0]:
stockgoodperformance += 1
AAPLlegendloc = 4
if AMZNdailyopen_n[-1] > AMZNdailyopen_n[0]:
stockgoodperformance += 1
AMZNlegendloc = 4
if GOOGLdailyopen_n[-1] > GOOGLdailyopen_n[0]:
stockgoodperformance += 1
GOOGLlegendloc = 4
if FBdailyopen_n[-1] > FBdailyopen_n[0]:
stockgoodperformance += 1
FBlegendloc = 4
if MSFTdailyopen_n[-1] > MSFTdailyopen_n[0]:
stockgoodperformance += 1
MSFTlegendloc = 4
if NFLXdailyopen_n[-1] > NFLXdailyopen_n[0]:
stockgoodperformance += 1
NFLXlegendloc = 4
if stockgoodperformance >= 5:
        legendloc = 4 #stocks mostly rose, place legend bottom right
elif stockgoodperformance <= 2:
legendloc = 3 #stocks mostly tumbled, place legend bottom left
else: #Mixed, place legend center-left
# legendloc = 9
legendloc = 6
# Auto detect best location
# legendloc = 0
return {"legendloc": legendloc,
"stockloc": [AAPLlegendloc, AMZNlegendloc, GOOGLlegendloc, FBlegendloc, MSFTlegendloc, NFLXlegendloc]}
| 38.477273
| 123
| 0.684584
|
599dcd434787571b0c074e64684ca519376d48e4
| 60,987
|
py
|
Python
|
core/domain/topic_domain_test.py
|
queencai/oppia
|
c9a36db9c258588b04be9bc26f3d2efef7d21abc
|
[
"Apache-2.0"
] | 1
|
2021-06-26T00:31:08.000Z
|
2021-06-26T00:31:08.000Z
|
core/domain/topic_domain_test.py
|
queencai/oppia
|
c9a36db9c258588b04be9bc26f3d2efef7d21abc
|
[
"Apache-2.0"
] | null | null | null |
core/domain/topic_domain_test.py
|
queencai/oppia
|
c9a36db9c258588b04be9bc26f3d2efef7d21abc
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import topic_domain
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class TopicDomainUnitTests(test_utils.GenericTestBase):
"""Tests for topic domain objects."""
topic_id = 'topic_id'
def setUp(self):
super(TopicDomainUnitTests, self).setUp()
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'Name', 'abbrev', 'description')
self.topic.subtopics = [
topic_domain.Subtopic(
1, 'Title', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-url')]
self.topic.next_subtopic_id = 2
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_a = user_services.UserActionsInfo(self.user_id_a)
self.user_b = user_services.UserActionsInfo(self.user_id_b)
def test_create_default_topic(self):
"""Tests the create_default_topic() function."""
topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'Name', 'abbrev', 'description')
expected_topic_dict = {
'id': self.topic_id,
'name': 'Name',
'abbreviated_name': 'Name',
'url_fragment': 'abbrev',
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'description': 'description',
'canonical_story_references': [],
'additional_story_references': [],
'uncategorized_skill_ids': [],
'subtopics': [],
'next_subtopic_id': 1,
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'subtopic_schema_version': feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION,
'story_reference_schema_version': (
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
'version': 0
}
self.assertEqual(topic.to_dict(), expected_topic_dict)
def test_get_all_skill_ids(self):
self.topic.uncategorized_skill_ids = ['skill_id_2', 'skill_id_3']
self.assertEqual(
self.topic.get_all_skill_ids(),
['skill_id_2', 'skill_id_3', 'skill_id_1'])
def test_get_all_uncategorized_skill_ids(self):
self.topic.uncategorized_skill_ids = ['skill_id_1', 'skill_id_2']
self.assertEqual(
self.topic.get_all_uncategorized_skill_ids(),
['skill_id_1', 'skill_id_2'])
def test_get_all_subtopics(self):
subtopics = self.topic.get_all_subtopics()
self.assertEqual(
subtopics, [{
'skill_ids': ['skill_id_1'],
'id': 1,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': '#FFFFFF',
'title': 'Title',
'url_fragment': 'dummy-subtopic-url'}])
def test_delete_canonical_story(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self.topic.delete_canonical_story('story_id_1')
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(
canonical_story_ids, ['story_id', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_5 is not present in the canonical'
' story references list of the topic.'):
self.topic.delete_canonical_story('story_id_5')
def test_rearrange_canonical_story_fail_with_invalid_from_index_value(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received None'):
self.topic.rearrange_canonical_story(None, 2)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received a'):
self.topic.rearrange_canonical_story('a', 2)
def test_rearrange_canonical_story_fail_with_invalid_to_index_value(self):
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received None'):
self.topic.rearrange_canonical_story(1, None)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received a'):
self.topic.rearrange_canonical_story(1, 'a')
def test_rearrange_canonical_story_fail_with_out_of_bound_indexes(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_canonical_story(10, 0)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_canonical_story(-1, 0)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_canonical_story(0, 10)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_canonical_story(0, -1)
def test_rearrange_canonical_story_fail_with_identical_index_values(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index and to_index values to be '
'different.'):
self.topic.rearrange_canonical_story(1, 1)
def test_rearrange_canonical_story(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_3')
]
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(canonical_story_ids[0], 'story_id_1')
self.assertEqual(canonical_story_ids[1], 'story_id_2')
self.assertEqual(canonical_story_ids[2], 'story_id_3')
self.topic.rearrange_canonical_story(1, 0)
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(canonical_story_ids[0], 'story_id_2')
self.assertEqual(canonical_story_ids[1], 'story_id_1')
self.assertEqual(canonical_story_ids[2], 'story_id_3')
self.topic.rearrange_canonical_story(2, 1)
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(canonical_story_ids[0], 'story_id_2')
self.assertEqual(canonical_story_ids[1], 'story_id_3')
self.assertEqual(canonical_story_ids[2], 'story_id_1')
self.topic.rearrange_canonical_story(2, 0)
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(canonical_story_ids[0], 'story_id_1')
self.assertEqual(canonical_story_ids[1], 'story_id_2')
self.assertEqual(canonical_story_ids[2], 'story_id_3')
def test_rearrange_skill_in_subtopic_fail_with_invalid_from_index(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received None'):
self.topic.rearrange_skill_in_subtopic(1, None, 2)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received a'):
self.topic.rearrange_skill_in_subtopic(1, 'a', 2)
def test_rearrange_skill_in_subtopic_fail_with_invalid_to_index_value(self):
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received None'):
self.topic.rearrange_skill_in_subtopic(1, 1, None)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received a'):
self.topic.rearrange_skill_in_subtopic(1, 1, 'a')
def test_rearrange_skill_in_subtopic_fail_with_out_of_bound_indexes(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_skill_in_subtopic(1, 10, 1)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_skill_in_subtopic(1, -1, 0)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_skill_in_subtopic(1, 0, 10)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_skill_in_subtopic(1, 0, -10)
def test_rearrange_skill_in_subtopic_fail_with_identical_index_values(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index and to_index values to be '
'different.'):
self.topic.rearrange_skill_in_subtopic(1, 1, 1)
def test_rearrange_skill_in_subtopic(self):
self.topic.subtopics = [
topic_domain.Subtopic(
1, 'Title', ['skill_id_1', 'skill_id_2', 'skill_id_3'],
'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-three')]
skill_ids = self.topic.subtopics[0].skill_ids
self.assertEqual(skill_ids[0], 'skill_id_1')
self.assertEqual(skill_ids[1], 'skill_id_2')
self.assertEqual(skill_ids[2], 'skill_id_3')
self.topic.rearrange_skill_in_subtopic(1, 1, 0)
self.assertEqual(skill_ids[0], 'skill_id_2')
self.assertEqual(skill_ids[1], 'skill_id_1')
self.assertEqual(skill_ids[2], 'skill_id_3')
self.topic.rearrange_skill_in_subtopic(1, 2, 1)
self.assertEqual(skill_ids[0], 'skill_id_2')
self.assertEqual(skill_ids[1], 'skill_id_3')
self.assertEqual(skill_ids[2], 'skill_id_1')
self.topic.rearrange_skill_in_subtopic(1, 2, 0)
self.assertEqual(skill_ids[0], 'skill_id_1')
self.assertEqual(skill_ids[1], 'skill_id_2')
self.assertEqual(skill_ids[2], 'skill_id_3')
def test_rearrange_subtopic_fail_with_invalid_from_index(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received None'):
self.topic.rearrange_subtopic(None, 2)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be a number, '
'received a'):
self.topic.rearrange_subtopic('a', 2)
def test_rearrange_subtopic_fail_with_invalid_to_index_value(self):
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received None'):
self.topic.rearrange_subtopic(1, None)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be a number, '
'received a'):
self.topic.rearrange_subtopic(1, 'a')
def test_rearrange_subtopic_fail_with_out_of_bound_indexes(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_subtopic(10, 1)
with self.assertRaisesRegexp(
Exception, 'Expected from_index value to be with-in bounds.'):
self.topic.rearrange_subtopic(-1, 0)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_subtopic(0, 10)
with self.assertRaisesRegexp(
Exception, 'Expected to_index value to be with-in bounds.'):
self.topic.rearrange_subtopic(0, -10)
def test_rearrange_subtopic_fail_with_identical_index_values(self):
with self.assertRaisesRegexp(
Exception, 'Expected from_index and to_index values to be '
'different.'):
self.topic.rearrange_subtopic(1, 1)
def test_rearrange_subtopic(self):
self.topic.subtopics = [
topic_domain.Subtopic(1, 'Title1', [], None, None, 'title-one'),
topic_domain.Subtopic(2, 'Title2', [], None, None, 'title-two'),
topic_domain.Subtopic(3, 'Title3', [], None, None, 'title-three')]
subtopics = self.topic.subtopics
self.assertEqual(subtopics[0].id, 1)
self.assertEqual(subtopics[1].id, 2)
self.assertEqual(subtopics[2].id, 3)
self.topic.rearrange_subtopic(1, 0)
self.assertEqual(subtopics[0].id, 2)
self.assertEqual(subtopics[1].id, 1)
self.assertEqual(subtopics[2].id, 3)
self.topic.rearrange_subtopic(2, 1)
self.assertEqual(subtopics[0].id, 2)
self.assertEqual(subtopics[1].id, 3)
self.assertEqual(subtopics[2].id, 1)
self.topic.rearrange_subtopic(2, 0)
self.assertEqual(subtopics[0].id, 1)
self.assertEqual(subtopics[1].id, 2)
self.assertEqual(subtopics[2].id, 3)
def test_get_all_story_references(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id_2'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_3')
]
all_story_references = self.topic.get_all_story_references()
self.assertEqual(len(all_story_references), 4)
self.assertEqual(all_story_references[0].story_id, 'story_id')
self.assertEqual(all_story_references[1].story_id, 'story_id_1')
self.assertEqual(all_story_references[2].story_id, 'story_id_2')
self.assertEqual(all_story_references[3].story_id, 'story_id_3')
def test_add_canonical_story(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.add_canonical_story('story_id_2')
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(
canonical_story_ids,
['story_id', 'story_id_1', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_2 is already present in the '
'canonical story references list of the topic.'):
self.topic.add_canonical_story('story_id_2')
def test_delete_additional_story(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self.topic.delete_additional_story('story_id_1')
additional_story_ids = self.topic.get_additional_story_ids()
self.assertEqual(
additional_story_ids, ['story_id', 'story_id_2'])
with self.assertRaisesRegexp(
Exception,
'The story_id story_id_5 is not present in the additional'
' story references list of the topic.'):
self.topic.delete_additional_story('story_id_5')
def test_add_additional_story(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.add_additional_story('story_id_2')
additional_story_ids = self.topic.get_additional_story_ids()
self.assertEqual(
additional_story_ids,
['story_id', 'story_id_1', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_2 is already present in the '
'additional story references list of the topic.'):
self.topic.add_additional_story('story_id_2')
def _assert_validation_error(self, expected_error_substring):
"""Checks that the topic passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.topic.validate()
def _assert_strict_validation_error(self, expected_error_substring):
"""Checks that the topic passes prepublish validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.topic.validate(strict=True)
def _assert_valid_topic_id(self, expected_error_substring, topic_id):
"""Checks that the skill passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Topic.require_valid_topic_id(topic_id)
def _assert_valid_abbreviated_name(
self, expected_error_substring, name):
"""Checks that the topic passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Topic.require_valid_abbreviated_name(name)
def _assert_valid_thumbnail_filename_for_topic(
self, expected_error_substring, thumbnail_filename):
"""Checks that topic passes validation for thumbnail filename."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Topic.require_valid_thumbnail_filename(
thumbnail_filename)
def _assert_valid_thumbnail_filename_for_subtopic(
self, expected_error_substring, thumbnail_filename):
"""Checks that subtopic passes validation for thumbnail filename."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Subtopic.require_valid_thumbnail_filename(
thumbnail_filename)
def test_valid_topic_id(self):
self._assert_valid_topic_id('Topic id should be a string', 10)
self._assert_valid_topic_id('Topic id abc is invalid', 'abc')
def test_thumbnail_filename_validation_for_topic(self):
self._assert_valid_thumbnail_filename_for_topic(
'Expected thumbnail filename to be a string, received 10', 10)
self._assert_valid_thumbnail_filename_for_topic(
'Thumbnail filename should not start with a dot.', '.name')
self._assert_valid_thumbnail_filename_for_topic(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file/name')
self._assert_valid_thumbnail_filename_for_topic(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file..name')
self._assert_valid_thumbnail_filename_for_topic(
'Thumbnail filename should include an extension.', 'name')
self._assert_valid_thumbnail_filename_for_topic(
'Expected a filename ending in svg, received name.jpg', 'name.jpg')
def test_subtopic_strict_validation(self):
self.topic.thumbnail_filename = 'filename.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
self.topic.subtopics[0].skill_ids = []
self._assert_strict_validation_error(
'Subtopic with title Title does not have any skills linked')
def test_subtopic_title_validation(self):
self.topic.subtopics[0].title = 1
self._assert_validation_error('Expected subtopic title to be a string')
self.topic.subtopics[0].title = (
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefgh'
'ijklmnopqrstuvwxyz')
self._assert_validation_error(
'Expected subtopic title to be less than 64 characters')
def test_story_id_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference(123, True)
]
self._assert_validation_error('Expected story id to be a string')
def test_story_is_published_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference('story_id', 'published')
]
self._assert_validation_error(
'Expected story_is_published to be a boolean')
def test_subtopic_id_validation(self):
self.topic.subtopics[0].id = 'invalid_id'
self._assert_validation_error('Expected subtopic id to be an int')
def test_thumbnail_filename_validation_for_subtopic(self):
self._assert_valid_thumbnail_filename_for_subtopic(
'Expected thumbnail filename to be a string, received 10', 10)
self._assert_valid_thumbnail_filename_for_subtopic(
'Thumbnail filename should not start with a dot.', '.name')
self._assert_valid_thumbnail_filename_for_subtopic(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file/name')
self._assert_valid_thumbnail_filename_for_subtopic(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file..name')
self._assert_valid_thumbnail_filename_for_subtopic(
'Thumbnail filename should include an extension.', 'name')
self._assert_valid_thumbnail_filename_for_subtopic(
'Expected a filename ending in svg, received name.jpg', 'name.jpg')
def test_topic_thumbnail_filename_in_strict_mode(self):
self.topic.thumbnail_bg_color = None
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected thumbnail filename to be a string, received None.'):
self.topic.validate(strict=True)
def test_topic_thumbnail_bg_validation(self):
self.topic.thumbnail_bg_color = '#FFFFFF'
self._assert_validation_error(
'Topic thumbnail background color #FFFFFF is not supported.')
def test_topic_thumbnail_filename_or_thumbnail_bg_color_is_none(self):
self.topic.thumbnail_bg_color = '#C6DCDA'
self.topic.thumbnail_filename = None
self._assert_validation_error(
'Topic thumbnail image is not provided.')
self.topic.thumbnail_bg_color = None
self.topic.thumbnail_filename = 'test.svg'
self._assert_validation_error(
'Topic thumbnail background color is not specified.')
def test_subtopic_thumbnail_bg_validation(self):
self.topic.subtopics[0].thumbnail_bg_color = '#CACACA'
self._assert_validation_error(
'Subtopic thumbnail background color #CACACA is not supported.')
def test_subtopic_thumbnail_filename_or_thumbnail_bg_color_is_none(self):
self.topic.subtopics[0].thumbnail_bg_color = '#FFFFFF'
self.topic.subtopics[0].thumbnail_filename = None
self._assert_validation_error(
'Subtopic thumbnail image is not provided.')
self.topic.subtopics[0].thumbnail_bg_color = None
self.topic.subtopics[0].thumbnail_filename = 'test.svg'
self._assert_validation_error(
'Subtopic thumbnail background color is not specified.')
def test_subtopic_skill_ids_validation(self):
self.topic.subtopics[0].skill_ids = 'abc'
self._assert_validation_error('Expected skill ids to be a list')
self.topic.subtopics[0].skill_ids = ['skill_id', 'skill_id']
self._assert_validation_error(
'Expected all skill ids to be distinct.')
self.topic.subtopics[0].skill_ids = [1, 2]
self._assert_validation_error('Expected each skill id to be a string')
def test_subtopics_validation(self):
self.topic.subtopics = 'abc'
self._assert_validation_error('Expected subtopics to be a list')
def test_name_validation(self):
self.topic.name = 1
self._assert_validation_error('Name should be a string')
self.topic.name = ''
self._assert_validation_error('Name field should not be empty')
self.topic.name = 'Very long and therefore invalid topic name'
self._assert_validation_error(
'Topic name should be at most 39 characters')
def test_validation_fails_with_invalid_url_fragment(self):
self.topic.url_fragment = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Topic URL Fragment field must be a string. Received 0.'):
self.topic.validate()
def test_validation_fails_with_empty_url_fragment(self):
self.topic.url_fragment = ''
validation_message = 'Topic URL Fragment field should not be empty.'
with self.assertRaisesRegexp(
utils.ValidationError, validation_message):
self.topic.validate()
    def test_validation_fails_with_lengthy_url_fragment(self):
self.topic.url_fragment = 'a' * 25
url_fragment_char_limit = constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT
validation_message = (
'Topic URL Fragment field should not exceed %d characters, '
'received %s.' % (
url_fragment_char_limit, self.topic.url_fragment))
with self.assertRaisesRegexp(
utils.ValidationError, validation_message):
self.topic.validate()
def test_subtopic_schema_version_type_validation(self):
self.topic.subtopic_schema_version = 'invalid_version'
self._assert_validation_error(
'Expected subtopic schema version to be an integer')
def test_story_reference_schema_version_type_validation(self):
self.topic.story_reference_schema_version = 'invalid_version'
self._assert_validation_error(
'Expected story reference schema version to be an integer')
def test_subtopic_schema_version_validation(self):
self.topic.subtopic_schema_version = 0
self._assert_validation_error(
'Expected subtopic schema version to be %s'
% (feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION))
def test_subtopic_type_validation(self):
self.topic.subtopics = ['subtopic']
self._assert_validation_error(
'Expected each subtopic to be a Subtopic object')
def test_description_validation(self):
self.topic.description = 1
self._assert_validation_error('Expected description to be a string')
self.topic.description = (
'Lorem ipsum dolor sit amet, consectetuer '
'adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. '
'Dum sociis natoque penatibus et magnis dis parturient montes, '
'nascetur ridiculus mus. Donec quam felis, ultricies nec, '
'pellentesque eu,'
)
self._assert_validation_error(
'Topic description should be at most 240 characters.')
def test_next_subtopic_id_validation(self):
self.topic.next_subtopic_id = '1'
self._assert_validation_error('Expected next_subtopic_id to be an int')
self.topic.next_subtopic_id = 1
self._assert_validation_error(
'The id for subtopic 1 is greater than or equal to '
'next_subtopic_id 1')
def test_language_code_validation(self):
self.topic.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.topic.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_canonical_story_references_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self._assert_validation_error(
'Expected all canonical story ids to be distinct.')
self.topic.canonical_story_references = 'story_id'
self._assert_validation_error(
'Expected canonical story references to be a list')
def test_additional_story_references_validation(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self._assert_validation_error(
'Expected all additional story ids to be distinct.')
self.topic.additional_story_references = 'story_id'
self._assert_validation_error(
'Expected additional story references to be a list')
def test_additional_canonical_story_intersection_validation(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
]
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self._assert_validation_error(
'Expected additional story ids list and canonical story '
'ids list to be mutually exclusive.')
def test_uncategorized_skill_ids_validation(self):
self.topic.uncategorized_skill_ids = 'uncategorized_skill_id'
self._assert_validation_error(
'Expected uncategorized skill ids to be a list')
def test_add_uncategorized_skill_id(self):
self.topic.subtopics.append(
topic_domain.Subtopic(
'id_2', 'Title2', ['skill_id_2'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-title-two'))
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id_1 already exists in subtopic with id 1'):
self.topic.add_uncategorized_skill_id('skill_id_1')
self.topic.add_uncategorized_skill_id('skill_id_3')
self.assertEqual(self.topic.uncategorized_skill_ids, ['skill_id_3'])
def test_remove_uncategorized_skill_id(self):
self.topic.uncategorized_skill_ids = ['skill_id_5']
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id_3 is not present in the topic'):
self.topic.remove_uncategorized_skill_id('skill_id_3')
self.topic.remove_uncategorized_skill_id('skill_id_5')
self.assertEqual(self.topic.uncategorized_skill_ids, [])
def test_move_skill_id_to_subtopic(self):
self.topic.uncategorized_skill_ids = ['skill_id_1']
self.topic.subtopics[0].skill_ids = ['skill_id_2']
self.topic.move_skill_id_to_subtopic(None, 1, 'skill_id_1')
self.assertEqual(self.topic.uncategorized_skill_ids, [])
self.assertEqual(
self.topic.subtopics[0].skill_ids, ['skill_id_2', 'skill_id_1'])
self.topic.uncategorized_skill_ids = ['skill_id_1']
self.topic.subtopics[0].skill_ids = ['skill_id_2']
with self.assertRaisesRegexp(
Exception,
'Skill id skill_id_3 is not an uncategorized skill id'):
self.topic.move_skill_id_to_subtopic(None, 'id_1', 'skill_id_3')
def test_get_subtopic_index(self):
self.assertIsNone(self.topic.get_subtopic_index(2))
self.assertEqual(self.topic.get_subtopic_index(1), 0)
def test_to_dict(self):
user_ids = [self.user_id_a, self.user_id_b]
topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
expected_dict = {
'topic_id': self.topic_id,
'manager_names': ['A', 'B'],
'topic_is_published': False
}
self.assertEqual(expected_dict, topic_rights.to_dict())
def test_is_manager(self):
user_ids = [self.user_id_a, self.user_id_b]
topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
self.assertTrue(topic_rights.is_manager(self.user_id_a))
self.assertTrue(topic_rights.is_manager(self.user_id_b))
self.assertFalse(topic_rights.is_manager('fakeuser'))
def test_cannot_create_topic_rights_change_class_with_invalid_cmd(self):
with self.assertRaisesRegexp(
Exception, 'Command invalid cmd is not allowed'):
topic_domain.TopicRightsChange({
'cmd': 'invalid cmd'
})
def test_cannot_create_topic_rights_change_class_with_invalid_changelist(
self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
topic_domain.TopicRightsChange({})
def test_create_new_topic_rights_change_class(self):
topic_rights = topic_domain.TopicRightsChange({
'cmd': 'create_new'
})
self.assertEqual(topic_rights.to_dict(), {'cmd': 'create_new'})
def test_update_language_code(self):
self.assertEqual(self.topic.language_code, 'en')
self.topic.update_language_code('bn')
self.assertEqual(self.topic.language_code, 'bn')
def test_update_abbreviated_name(self):
self.assertEqual(self.topic.abbreviated_name, 'Name')
self.topic.update_abbreviated_name('abbrev')
self.assertEqual(self.topic.abbreviated_name, 'abbrev')
def test_update_thumbnail_filename(self):
self.assertEqual(self.topic.thumbnail_filename, None)
self.topic.update_thumbnail_filename('img.svg')
self.assertEqual(self.topic.thumbnail_filename, 'img.svg')
def test_update_thumbnail_bg_color(self):
self.assertEqual(self.topic.thumbnail_bg_color, None)
self.topic.update_thumbnail_bg_color('#C6DCDA')
self.assertEqual(self.topic.thumbnail_bg_color, '#C6DCDA')
def test_cannot_add_uncategorized_skill_with_existing_uncategorized_skill(
self):
self.assertEqual(self.topic.uncategorized_skill_ids, [])
self.topic.uncategorized_skill_ids = ['skill_id1']
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id1 is already an uncategorized skill.'):
self.topic.add_uncategorized_skill_id('skill_id1')
def test_cannot_delete_subtopic_with_invalid_subtopic_id(self):
with self.assertRaisesRegexp(
Exception, 'A subtopic with id invalid_id doesn\'t exist.'):
self.topic.delete_subtopic('invalid_id')
def test_cannot_update_subtopic_title_with_invalid_subtopic_id(self):
with self.assertRaisesRegexp(
Exception, 'The subtopic with id invalid_id does not exist.'):
self.topic.update_subtopic_title('invalid_id', 'new title')
def test_update_subtopic_title(self):
self.assertEqual(len(self.topic.subtopics), 1)
self.assertEqual(self.topic.subtopics[0].title, 'Title')
self.topic.update_subtopic_title(1, 'new title')
self.assertEqual(self.topic.subtopics[0].title, 'new title')
def test_update_subtopic_thumbnail_filename(self):
self.assertEqual(len(self.topic.subtopics), 1)
self.assertEqual(
self.topic.subtopics[0].thumbnail_filename, 'image.svg')
self.topic.update_subtopic_thumbnail_filename(1, 'new_image.svg')
self.assertEqual(
self.topic.subtopics[0].thumbnail_filename, 'new_image.svg')
with self.assertRaisesRegexp(
Exception, 'The subtopic with id invalid_id does not exist.'):
self.topic.update_subtopic_thumbnail_filename(
'invalid_id', 'new title')
def test_update_subtopic_url_fragment(self):
self.assertEqual(len(self.topic.subtopics), 1)
self.assertEqual(
self.topic.subtopics[0].url_fragment, 'dummy-subtopic-url')
self.topic.update_subtopic_url_fragment(1, 'new-subtopic-url')
self.assertEqual(
self.topic.subtopics[0].url_fragment, 'new-subtopic-url')
with self.assertRaisesRegexp(
Exception, 'The subtopic with id invalid_id does not exist.'):
self.topic.update_subtopic_url_fragment('invalid_id', 'new-url')
def test_update_subtopic_thumbnail_bg_color(self):
self.assertEqual(len(self.topic.subtopics), 1)
self.topic.subtopics[0].thumbnail_bg_color = None
self.assertEqual(
self.topic.subtopics[0].thumbnail_bg_color, None)
self.topic.update_subtopic_thumbnail_bg_color(1, '#FFFFFF')
self.assertEqual(
self.topic.subtopics[0].thumbnail_bg_color, '#FFFFFF')
with self.assertRaisesRegexp(
Exception, 'The subtopic with id invalid_id does not exist.'):
self.topic.update_subtopic_thumbnail_bg_color(
'invalid_id', '#FFFFFF')
def test_cannot_remove_skill_id_from_subtopic_with_invalid_subtopic_id(
self):
with self.assertRaisesRegexp(
Exception, 'The subtopic with id invalid_id does not exist.'):
self.topic.remove_skill_id_from_subtopic('invalid_id', 'skill_id1')
def test_cannot_move_skill_id_to_subtopic_with_invalid_subtopic_id(self):
with self.assertRaisesRegexp(
Exception, 'The subtopic with id old_subtopic_id does not exist.'):
self.topic.move_skill_id_to_subtopic(
'old_subtopic_id', 'new_subtopic_id', 'skill_id1')
def test_cannot_move_existing_skill_to_subtopic(self):
self.topic.subtopics = [
topic_domain.Subtopic(
1, 'Title', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-one'),
topic_domain.Subtopic(
2, 'Another title', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-two')]
with self.assertRaisesRegexp(
Exception,
'Skill id skill_id_1 is already present in the target subtopic'):
self.topic.move_skill_id_to_subtopic(1, 2, 'skill_id_1')
class TopicChangeTests(test_utils.GenericTestBase):
def test_topic_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
topic_domain.TopicChange({'invalid': 'data'})
def test_topic_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
topic_domain.TopicChange({'cmd': 'invalid'})
def test_topic_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_value, old_value')):
topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'name',
})
def test_topic_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
topic_domain.TopicChange({
'cmd': 'add_subtopic',
'title': 'title',
'subtopic_id': 'subtopic_id',
'invalid': 'invalid'
})
def test_topic_change_object_with_invalid_topic_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd update_topic_property: '
'invalid is not allowed')):
topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_topic_change_object_with_invalid_subtopic_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd update_subtopic_property: '
'invalid is not allowed')):
topic_domain.TopicChange({
'cmd': 'update_subtopic_property',
'subtopic_id': 'subtopic_id',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_topic_change_object_with_add_subtopic(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'add_subtopic',
'subtopic_id': 'subtopic_id',
'title': 'title'
})
self.assertEqual(topic_change_object.cmd, 'add_subtopic')
self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
self.assertEqual(topic_change_object.title, 'title')
def test_topic_change_object_with_delete_subtopic(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'delete_subtopic',
'subtopic_id': 'subtopic_id'
})
self.assertEqual(topic_change_object.cmd, 'delete_subtopic')
self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
def test_topic_change_object_with_add_uncategorized_skill_id(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': 'new_uncategorized_skill_id'
})
self.assertEqual(topic_change_object.cmd, 'add_uncategorized_skill_id')
self.assertEqual(
topic_change_object.new_uncategorized_skill_id,
'new_uncategorized_skill_id')
def test_topic_change_object_with_remove_uncategorized_skill_id(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': 'uncategorized_skill_id'
})
self.assertEqual(
topic_change_object.cmd, 'remove_uncategorized_skill_id')
self.assertEqual(
topic_change_object.uncategorized_skill_id,
'uncategorized_skill_id')
def test_topic_change_object_with_move_skill_id_to_subtopic(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'move_skill_id_to_subtopic',
'skill_id': 'skill_id',
'old_subtopic_id': 'old_subtopic_id',
'new_subtopic_id': 'new_subtopic_id'
})
self.assertEqual(topic_change_object.cmd, 'move_skill_id_to_subtopic')
self.assertEqual(topic_change_object.skill_id, 'skill_id')
self.assertEqual(topic_change_object.old_subtopic_id, 'old_subtopic_id')
self.assertEqual(topic_change_object.new_subtopic_id, 'new_subtopic_id')
def test_topic_change_object_with_remove_skill_id_from_subtopic(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'remove_skill_id_from_subtopic',
'skill_id': 'skill_id',
'subtopic_id': 'subtopic_id'
})
self.assertEqual(
topic_change_object.cmd, 'remove_skill_id_from_subtopic')
self.assertEqual(topic_change_object.skill_id, 'skill_id')
self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
def test_topic_change_object_with_update_subtopic_property(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'update_subtopic_property',
'subtopic_id': 'subtopic_id',
'property_name': 'title',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(topic_change_object.cmd, 'update_subtopic_property')
self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
self.assertEqual(topic_change_object.property_name, 'title')
self.assertEqual(topic_change_object.new_value, 'new_value')
self.assertEqual(topic_change_object.old_value, 'old_value')
def test_topic_change_object_with_update_topic_property(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'name',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(topic_change_object.cmd, 'update_topic_property')
self.assertEqual(topic_change_object.property_name, 'name')
self.assertEqual(topic_change_object.new_value, 'new_value')
self.assertEqual(topic_change_object.old_value, 'old_value')
def test_topic_change_object_with_create_new(self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'create_new',
'name': 'name',
})
self.assertEqual(topic_change_object.cmd, 'create_new')
self.assertEqual(topic_change_object.name, 'name')
def test_topic_change_object_with_migrate_subtopic_schema_to_latest_version(
self):
topic_change_object = topic_domain.TopicChange({
'cmd': 'migrate_subtopic_schema_to_latest_version',
'from_version': 'from_version',
'to_version': 'to_version',
})
self.assertEqual(
topic_change_object.cmd,
'migrate_subtopic_schema_to_latest_version')
self.assertEqual(topic_change_object.from_version, 'from_version')
self.assertEqual(topic_change_object.to_version, 'to_version')
def test_to_dict(self):
topic_change_dict = {
'cmd': 'create_new',
'name': 'name'
}
topic_change_object = topic_domain.TopicChange(topic_change_dict)
self.assertEqual(topic_change_object.to_dict(), topic_change_dict)
class TopicRightsChangeTests(test_utils.GenericTestBase):
def test_topic_rights_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
topic_domain.TopicRightsChange({'invalid': 'data'})
def test_topic_change_rights_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
topic_domain.TopicRightsChange({'cmd': 'invalid'})
def test_topic_rights_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_role, old_role')):
topic_domain.TopicRightsChange({
'cmd': 'change_role',
'assignee_id': 'assignee_id',
})
def test_topic_rights_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
topic_domain.TopicRightsChange({
'cmd': 'publish_topic',
'invalid': 'invalid'
})
def test_topic_rights_change_object_with_invalid_role(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for old_role in cmd change_role: '
'invalid is not allowed')):
topic_domain.TopicRightsChange({
'cmd': 'change_role',
'assignee_id': 'assignee_id',
'old_role': 'invalid',
'new_role': topic_domain.ROLE_MANAGER
})
def test_topic_rights_change_object_with_create_new(self):
topic_rights_change_object = topic_domain.TopicRightsChange({
'cmd': 'create_new'
})
self.assertEqual(topic_rights_change_object.cmd, 'create_new')
def test_topic_rights_change_object_with_change_role(self):
topic_rights_change_object = topic_domain.TopicRightsChange({
'cmd': 'change_role',
'assignee_id': 'assignee_id',
'old_role': topic_domain.ROLE_NONE,
'new_role': topic_domain.ROLE_MANAGER
})
self.assertEqual(topic_rights_change_object.cmd, 'change_role')
self.assertEqual(topic_rights_change_object.assignee_id, 'assignee_id')
self.assertEqual(
topic_rights_change_object.old_role, topic_domain.ROLE_NONE)
self.assertEqual(
topic_rights_change_object.new_role, topic_domain.ROLE_MANAGER)
def test_topic_rights_change_object_with_publish_topic(self):
topic_rights_change_object = topic_domain.TopicRightsChange({
'cmd': 'publish_topic'
})
self.assertEqual(topic_rights_change_object.cmd, 'publish_topic')
def test_topic_rights_change_object_with_unpublish_topic(self):
topic_rights_change_object = topic_domain.TopicRightsChange({
'cmd': 'unpublish_topic'
})
self.assertEqual(topic_rights_change_object.cmd, 'unpublish_topic')
def test_to_dict(self):
topic_rights_change_dict = {
'cmd': 'change_role',
'assignee_id': 'assignee_id',
'old_role': topic_domain.ROLE_NONE,
'new_role': topic_domain.ROLE_MANAGER
}
topic_rights_change_object = topic_domain.TopicRightsChange(
topic_rights_change_dict)
self.assertEqual(
topic_rights_change_object.to_dict(), topic_rights_change_dict)
class TopicSummaryTests(test_utils.GenericTestBase):
def setUp(self):
super(TopicSummaryTests, self).setUp()
current_time = datetime.datetime.utcnow()
time_in_millisecs = utils.get_time_in_millisecs(current_time)
self.topic_summary_dict = {
'url_fragment': 'url-frag',
'id': 'topic_id',
'name': 'name',
'description': 'topic description',
'language_code': 'en',
'version': 1,
'canonical_story_count': 1,
'additional_story_count': 1,
'uncategorized_skill_count': 1,
'subtopic_count': 1,
'total_skill_count': 1,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': '#C6DCDA',
'topic_model_created_on': time_in_millisecs,
'topic_model_last_updated': time_in_millisecs,
}
self.topic_summary = topic_domain.TopicSummary(
'topic_id', 'name', 'name', 'en', 'topic description',
1, 1, 1, 1, 1, 1, 'image.svg', '#C6DCDA', 'url-frag', current_time,
current_time)
def _assert_validation_error(self, expected_error_substring):
"""Checks that the topic summary passes validation.
Args:
expected_error_substring: str. String that should be a substring
of the expected error message.
"""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.topic_summary.validate()
def test_topic_summary_gets_created(self):
self.assertEqual(
self.topic_summary.to_dict(), self.topic_summary_dict)
def test_validation_passes_with_valid_properties(self):
self.topic_summary.validate()
def test_validation_fails_with_invalid_name(self):
self.topic_summary.name = 0
self._assert_validation_error('Name should be a string.')
def test_thumbnail_filename_validation(self):
self.topic_summary.thumbnail_filename = []
self._assert_validation_error(
'Expected thumbnail filename to be a string')
def test_thumbnail_bg_validation(self):
self.topic_summary.thumbnail_bg_color = '#FFFFFF'
self._assert_validation_error(
'Topic thumbnail background color #FFFFFF is not supported.')
def test_thumbnail_filename_or_thumbnail_bg_color_is_none(self):
self.topic_summary.thumbnail_bg_color = '#C6DCDA'
self.topic_summary.thumbnail_filename = None
self._assert_validation_error(
'Topic thumbnail image is not provided.')
self.topic_summary.thumbnail_bg_color = None
self.topic_summary.thumbnail_filename = 'test.svg'
self._assert_validation_error(
'Topic thumbnail background color is not specified.')
def test_validation_fails_with_empty_name(self):
self.topic_summary.name = ''
self._assert_validation_error('Name field should not be empty')
def test_validation_fails_with_invalid_url_fragment(self):
self.topic_summary.url_fragment = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Topic URL Fragment field must be a string. Received 0.'):
self.topic_summary.validate()
def test_validation_fails_with_empty_url_fragment(self):
self.topic_summary.url_fragment = ''
validation_message = 'Topic URL Fragment field should not be empty.'
with self.assertRaisesRegexp(
utils.ValidationError, validation_message):
self.topic_summary.validate()
    def test_validation_fails_with_lengthy_url_fragment(self):
self.topic_summary.url_fragment = 'a' * 25
url_fragment_char_limit = constants.MAX_CHARS_IN_TOPIC_URL_FRAGMENT
validation_message = (
'Topic URL Fragment field should not exceed %d characters, '
'received %s.' % (
url_fragment_char_limit, self.topic_summary.url_fragment))
with self.assertRaisesRegexp(
utils.ValidationError, validation_message):
self.topic_summary.validate()
def test_validation_fails_with_invalid_description(self):
self.topic_summary.description = 3
self._assert_validation_error(
'Expected description to be a string, received 3')
def test_validation_fails_with_invalid_canonical_name(self):
self.topic_summary.canonical_name = 0
self._assert_validation_error('Canonical name should be a string.')
def test_validation_fails_with_empty_canonical_name(self):
self.topic_summary.canonical_name = ''
self._assert_validation_error(
'Canonical name field should not be empty')
def test_validation_fails_with_invalid_language_code(self):
self.topic_summary.language_code = 0
self._assert_validation_error(
'Expected language code to be a string, received 0')
def test_validation_fails_with_unallowed_language_code(self):
self.topic_summary.language_code = 'invalid'
self._assert_validation_error('Invalid language code: invalid')
def test_validation_fails_with_invalid_canonical_story_count(self):
self.topic_summary.canonical_story_count = '10'
self._assert_validation_error(
'Expected canonical story count to be an integer, received \'10\'')
def test_validation_fails_with_negative_canonical_story_count(self):
self.topic_summary.canonical_story_count = -1
self._assert_validation_error(
'Expected canonical_story_count to be non-negative, '
'received \'-1\'')
def test_validation_fails_with_invalid_additional_story_count(self):
self.topic_summary.additional_story_count = '10'
self._assert_validation_error(
'Expected additional story count to be an integer, received \'10\'')
def test_validation_fails_with_negative_additional_story_count(self):
self.topic_summary.additional_story_count = -1
self._assert_validation_error(
'Expected additional_story_count to be non-negative, '
'received \'-1\'')
def test_validation_fails_with_invalid_uncategorized_skill_count(self):
self.topic_summary.uncategorized_skill_count = '10'
self._assert_validation_error(
'Expected uncategorized skill count to be an integer, '
'received \'10\'')
def test_validation_fails_with_negative_uncategorized_skill_count(self):
self.topic_summary.uncategorized_skill_count = -1
self._assert_validation_error(
'Expected uncategorized_skill_count to be non-negative, '
'received \'-1\'')
def test_validation_fails_with_invalid_total_skill_count(self):
self.topic_summary.total_skill_count = '10'
self._assert_validation_error(
'Expected total skill count to be an integer, received \'10\'')
def test_validation_fails_with_negative_total_skill_count(self):
self.topic_summary.total_skill_count = -1
self._assert_validation_error(
'Expected total_skill_count to be non-negative, received \'-1\'')
def test_validation_fails_with_invalid_total_skill_count_value(self):
self.topic_summary.total_skill_count = 5
self.topic_summary.uncategorized_skill_count = 10
self._assert_validation_error(
'Expected total_skill_count to be greater than or equal to '
'uncategorized_skill_count 10, received \'5\'')
def test_validation_fails_with_invalid_subtopic_count(self):
self.topic_summary.subtopic_count = '10'
self._assert_validation_error(
'Expected subtopic count to be an integer, received \'10\'')
def test_validation_fails_with_negative_subtopic_count(self):
self.topic_summary.subtopic_count = -1
self._assert_validation_error(
'Expected subtopic_count to be non-negative, received \'-1\'')
class TopicRightsTests(test_utils.GenericTestBase):
def setUp(self):
super(TopicRightsTests, self).setUp()
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.topic_summary_dict = {
'topic_id': 'topic_id',
'manager_names': ['A'],
'topic_is_published': False,
}
self.topic_summary = topic_domain.TopicRights(
'topic_id', [self.user_id_a], False)
def test_topic_summary_gets_created(self):
self.assertEqual(
self.topic_summary.to_dict(), self.topic_summary_dict)
def test_is_manager(self):
self.assertTrue(self.topic_summary.is_manager(self.user_id_a))
self.assertFalse(self.topic_summary.is_manager(self.user_id_b))
| 43.593281
| 80
| 0.668913
|
a8d9e919b28c52a1310ce64fb5cc1fac5f1a0f49
| 1,587
|
py
|
Python
|
13/1373_maximum_sum_bst_in_binary_tree/main.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 4
|
2018-03-07T02:56:03.000Z
|
2021-06-15T05:43:31.000Z
|
13/1373_maximum_sum_bst_in_binary_tree/main.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | null | null | null |
13/1373_maximum_sum_bst_in_binary_tree/main.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:05:15.000Z
|
2021-09-02T12:05:15.000Z
|
from typing import Optional
import sys
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxSumBST(self, root: TreeNode) -> int:
"""
https://leetcode.com/problems/maximum-sum-bst-in-binary-tree/discuss/531800/Python-Easy-traversal-with-explanation
https://labuladong.gitbook.io/algo/mu-lu-ye-1/mu-lu-ye-1/hou-xu-bian-li
"""
res = 0
def traverse(root):
"""
return: is_bst(0: no, 1: yes and None, 2: yes), left_min, right_max, sum
"""
nonlocal res
if not root:
return 1, None, None, 0
lb, ll, lh, ls = traverse(root.left)
rb, rl, rh, rs = traverse(root.right)
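            # Valid BST here iff both subtrees are BSTs (1 = empty, 2 = valid) and left max < root.val < right min.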
if ((lb == 2 and lh < root.val) or lb == 1) and ((rb == 2 and rl > root.val) or rb == 1):
s = root.val + ls + rs
res = max(res, s)
return 2, (ll if ll is not None else root.val), (rh if rh is not None else root.val), s
return 0, None, None, None
traverse(root)
return res
def test():
root, n1, n2 = TreeNode(1), TreeNode(4), TreeNode(3)
root.left, root.right = n1, n2
n3, n4, n5, n6 = TreeNode(2), TreeNode(4), TreeNode(2), TreeNode(5)
n7, n8 = TreeNode(4), TreeNode(6)
n1.left, n1.right, n2.left, n2.right = n3, n4, n5, n6
n6.left, n6.right = n7, n8
s = Solution()
print(s.maxSumBST(root))
if __name__ == '__main__':
test()
| 28.854545
| 122
| 0.543163
|
3e415ab8983f32a5a63dd9ad3ee40dd2d55962e8
| 4,259
|
py
|
Python
|
experiment/base.py
|
JJGO/shrinkbench
|
3c35bc2fe319031d93c716b561ed487ab4874b2c
|
[
"MIT"
] | 345
|
2020-02-29T11:49:23.000Z
|
2022-03-31T09:03:33.000Z
|
experiment/base.py
|
JJGO/shrinkbench
|
3c35bc2fe319031d93c716b561ed487ab4874b2c
|
[
"MIT"
] | 24
|
2020-03-13T16:54:13.000Z
|
2021-12-14T15:35:08.000Z
|
experiment/base.py
|
JJGO/shrinkbench
|
3c35bc2fe319031d93c716b561ed487ab4874b2c
|
[
"MIT"
] | 60
|
2020-03-02T20:54:42.000Z
|
2022-03-26T11:38:13.000Z
|
from abc import ABC, abstractmethod
import datetime
import hashlib
import json
import pathlib
import random
import shutil
import signal
import string
import sys
import numpy as np
import torch
from ..util import CSVLogger
from ..util import printc
class Experiment(ABC):
def __init__(self, seed=42):
self._params = {"experiment": self.__class__.__name__, 'params': {}}
self.seed = seed
self.frozen = False
signal.signal(signal.SIGINT, self.SIGINT_handler)
signal.signal(signal.SIGQUIT, self.SIGQUIT_handler)
def add_params(_self, **kwargs):
if not _self.frozen:
_self._params['params'].update({k: v for k, v in kwargs.items() if k not in ('self', '__class__')})
else:
raise RuntimeError("Cannot add params to frozen experiment")
def freeze(self):
self.generate_uid()
self.fix_seed(self.seed)
self.frozen = True
@property
def params(self):
# prevents from trying to modify
return self._params['params']
def serializable_params(self):
return {k: repr(v) for k, v in self._params.items()}
def save_params(self):
path = self.path / 'params.json'
with open(path, 'w') as f:
json.dump(self.serializable_params(), f, indent=4)
def get_path(self):
if hasattr(self, "rootdir"):
parent = pathlib.Path(self.rootdir)
else:
parent = pathlib.Path('results')
if self._params.get('debug', False):
parent /= 'tmp'
parent.mkdir(parents=True, exist_ok=True)
return parent / self.uid
@property
def digest(self):
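        # Deterministic hash of the serialized params, so identical configurations share a digest.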
return hashlib.md5(json.dumps(self.serializable_params(), sort_keys=True).encode('utf-8')).hexdigest()
def __hash__(self):
return hash(self.digest)
def fix_seed(self, seed=42, deterministic=False):
# https://pytorch.org/docs/stable/notes/randomness.html
# Python
random.seed(seed)
# Numpy
np.random.seed(seed)
# PyTorch
torch.manual_seed(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def generate_uid(self):
"""Returns a time sortable UID
        Computes a timestamp and appends a unique identifier.
Returns:
str -- uid
"""
if hasattr(self, "uid"):
return self.uid
N = 4 # length of nonce
time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
nonce = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))
self.uid = f"{time}-{nonce}-{self.digest}"
return self.uid
def build_logging(self, metrics, path=None, csv=True, tensorboard=False):
if path is None:
self.path = self.get_path()
printc(f"Logging results to {self.path}", color='MAGENTA')
self.path.mkdir(exist_ok=True, parents=True)
self.save_params()
self.log_csv = csv
self.log_tb = tensorboard
self.log_epoch_n = 0
if self.log_csv:
self.csvlogger = CSVLogger(self.path / 'logs.csv', metrics)
if self.log_tb:
tb_path = self.path / 'tbevents'
tb_path.mkdir()
from torch.utils.tensorboard import SummaryWriter
self.tblogger = SummaryWriter(log_dir=tb_path)
def log(self, **kwargs):
if self.log_csv:
self.csvlogger.set(**kwargs)
if self.log_tb:
for k, v in kwargs.items():
                self.tblogger.add_scalar(k, v, self.log_epoch_n)
def log_epoch(self, epoch=None):
if epoch is not None:
self.log_epoch_n = epoch
self.log_epoch_n += 1
if self.log_csv:
self.csvlogger.set(epoch=epoch)
self.csvlogger.update()
self.csvlogger.set(epoch=self.log_epoch_n)
def SIGINT_handler(self, signal, frame):
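        # Swallow Ctrl+C so an interactive interrupt does not abort the run.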
pass
def SIGQUIT_handler(self, signal, frame):
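        # On SIGQUIT, discard this run's results directory and exit.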
shutil.rmtree(self.path, ignore_errors=True)
sys.exit(1)
@abstractmethod
def run(self):
pass
def __repr__(self):
return json.dumps(self.params, indent=4)
| 28.583893
| 111
| 0.605776
|
edecb895a683da800d533e13fb953a573483f641
| 8,779
|
py
|
Python
|
toontown/cogdominium/DistCogdoMazeGameAI.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/cogdominium/DistCogdoMazeGameAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/cogdominium/DistCogdoMazeGameAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from direct.directnotify import DirectNotifyGlobal
from toontown.cogdominium.DistCogdoGameAI import DistCogdoGameAI
import CogdoMazeGameGlobals
from direct.distributed.ClockDelta import *
from direct.task import Timer
from toontown.battle import BattleBase
from toontown.building.ElevatorConstants import *
ALL_ABOARD_LAG = 3.7
BASE_TOON_UP = 10
JOKE_TOON_UP = 5
class DistCogdoMazeGameAI(DistCogdoGameAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoMazeGameAI")
delayIntro = BattleBase.ELEVATOR_T + ElevatorData[ELEVATOR_FIELD]['openTime']
def __init__(self, air):
DistCogdoGameAI.__init__(self, air)
self.numSuits = (0,0,0)
self.timer = Timer.Timer()
self.doorRevealed = False
self.toonsInDoor = []
self.bosses = {}
self.fastMinions = {}
self.slowMinions = {}
self.suitTypes = [self.bosses, self.fastMinions, self.slowMinions]
self.numJokes = {}
def announceGenerate(self):
DistCogdoGameAI.announceGenerate(self)
self.setupSuitsAI()
def setupSuitsAI(self):
bossHp = CogdoMazeGameGlobals.SuitData[0]['hp']
fastMiniHp = CogdoMazeGameGlobals.SuitData[1]['hp']
slowMiniHp = CogdoMazeGameGlobals.SuitData[2]['hp']
serialNum = 0
for i in xrange(self.numSuits[0]):
self.bosses[serialNum] = bossHp
serialNum += 1
for i in xrange(self.numSuits[1]):
self.fastMinions[serialNum] = fastMiniHp
serialNum += 1
for i in xrange(self.numSuits[2]):
self.slowMinions[serialNum] = slowMiniHp
serialNum += 1
def setNumSuits(self, num):
self.numSuits = num
def getNumSuits(self):
return self.numSuits
def requestUseGag(self, x, y, h, timestamp):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('toonUsedGag', [avId, x, y, h, globalClockDelta.getRealNetworkTime()])
def requestSuitHitByGag(self, suitType, suitNum):
hitAI = self.hitSuitAI(suitType, suitNum)
if not hitAI:
self.notify.warning('Cannot hit suit!')
return
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('suitHitByGag', [avId, suitType, suitNum])
def requestHitBySuit(self, suitType, suitNum, nettime):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.SuitData[suitType]['toonDamage'] * self.getDifficulty() * 10
av.takeDamage(lostHp)
networkTime = globalClockDelta.getRealNetworkTime()
self.sendUpdate('toonHitBySuit', [avId, suitType, suitNum, networkTime])
if av.getHp() < 1:
self.toonWentSad(avId)
def requestHitByDrop(self):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.DropDamage
av.takeDamage(lostHp)
self.sendUpdate('toonHitByDrop', [avId])
def requestPickUp(self, pickupNum):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
now = globalClockDelta.getRealNetworkTime()
if avId in self.numJokes:
self.numJokes[avId] += 1
else:
self.numJokes[avId] = 1
self.sendUpdate('pickUp', [avId, pickupNum, now])
def requestGag(self, coolerIndex):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('hasGag', [avId, globalClockDelta.getRealNetworkTime()])
def hitSuitAI(self, suitType, suitNum):
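        # Deal one point of damage to the given cog (removing it if defeated); returns 1 on a hit, 0 if no such cog.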
cogKey = None
for cogNum in self.suitTypes[suitType].keys():
if cogNum == suitNum:
cogKey = cogNum
break
        if cogKey is None:
return 0
cogHp = self.suitTypes[suitType][cogKey]
cogHp -= 1
self.suitTypes[suitType][cogKey] = cogHp
if cogHp <= 0:
del self.suitTypes[suitType][cogKey]
return 1
def handleStart(self):
taskMgr.add(self.__checkGameDone, self.taskName('check-game-done'))
taskMgr.add(self.__checkPlayersTask, self.taskName('check-players-task'))
serverDelay = 1.0
self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilTimeout + serverDelay, self.__handleGameOver)
taskMgr.doMethodLater(serverDelay, self.clientCountdown, self.taskName('client_countdown'))
taskMgr.add(self.__timeWarningTask, self.taskName('time-warning-task'))
def clientCountdown(self, task):
self.doAction(CogdoMazeGameGlobals.GameActions.Countdown, 0)
return task.done
def __handleGameOver(self):
self.removeAll()
self.gameDone(failed=True)
def __checkGameDone(self, task):
bossesLeft = self.bosses
if len(bossesLeft) == 0:
self.timer.stop()
self.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
self.__startTimeout()
return task.done
return task.again
def __startTimeout(self):
self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilGameEnds, self.__handleTimeout)
def __handleTimeout(self):
for toon in self.toons:
if not toon in self.toonsInDoor:
self.killToon(toon)
self.removeAll()
self.gameDone()
def __timeWarningTask(self, task):
if self.timer.getT() <= CogdoMazeGameGlobals.SecondsForTimeAlert:
self.doAction(CogdoMazeGameGlobals.GameActions.TimeAlert, 0)
return task.done
return task.again
def killToon(self, avId):
av = self.air.doId2do.get(avId)
if av:
if av.getHp() > 0:
av.takeDamage(av.getHp())
self.toonWentSad(avId)
self.__playerDisconnected(avId)
def __checkPlayersTask(self, task):
for toonId in self.toons:
toon = self.air.doId2do.get(toonId)
if not toon:
self.__playerDisconnected(toonId)
return task.again
def __playerDisconnected(self, avId):
self.sendUpdate('setToonDisconnect', [avId])
self.toons.pop(self.toons.index(avId))
if len(self.toons) == 0:
self.removeAll()
self.gameDone(failed=True)
def doAction(self, action, data):
self.sendUpdate('doAction', [action, data, globalClockDelta.getRealNetworkTime()])
def requestAction(self, action, data):
Globals = CogdoMazeGameGlobals
avId = self.air.getAvatarIdFromSender()
if action == Globals.GameActions.RevealDoor:
if not self.doorRevealed:
self.doAction(action, avId)
self.doorRevealed = True
else:
self.notify.warning('Toon tried to reveal door but it\'s already revealed! Ignoring.')
elif action == Globals.GameActions.EnterDoor:
if not avId in self.toonsInDoor:
self.doAction(action, avId)
self.toonsInDoor.append(avId)
self.toonUpToon(avId)
else:
self.notify.warning('Toon tried to enter into door but already entered! Ignoring.')
return
if len(self.toonsInDoor) >= len(self.toons):
self.__handleAllAboard()
else:
self.notify.warning('Client requested unknown action \'%s\'' %action)
def __handleAllAboard(self):
if len(self.toonsInDoor) != len(self.toons):
self.notify.warning('__handleAllAboard expect all toons aboard!')
return
self.removeAll()
taskMgr.doMethodLater(ALL_ABOARD_LAG, lambda t: self.gameDone(), self.taskName('all-aboard-delay'))
def toonUpToon(self, toonId):
if toonId in self.toonsInDoor:
toon = self.air.doId2do.get(toonId)
if toon:
val = min(BASE_TOON_UP + JOKE_TOON_UP * self.numJokes.get(toonId, 0), toon.getMaxHp())
toon.toonUp(val)
def removeAll(self):
taskMgr.remove(self.taskName('check-game-done'))
taskMgr.remove(self.taskName('check-players-task'))
taskMgr.remove(self.taskName('time-warning-task'))
taskMgr.remove(self.taskName('all-aboard-delay'))
self.timer.stop()
def disable(self):
DistCogdoGameAI.disable(self)
self.removeAll()
from otp.ai.MagicWordGlobal import *
@magicWord(category=CATEGORY_PROGRAMMER)
def endMaze():
if hasattr(simbase.air, 'cogdoGame'):
maze = simbase.air.cogdoGame
maze.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
return 'Finished cogdo maze game!'
| 36.579167
| 111
| 0.630937
|
19f099412e785d9c0198d9f3d5c3f02ff17cdec7
| 5,741
|
py
|
Python
|
patientMatcher/server/controllers.py
|
Clinical-Genomics/patientMatcher
|
d91dee9fbf9667cf9cd06dd057dc2b3f239ed075
|
[
"MIT"
] | 11
|
2019-07-02T11:14:21.000Z
|
2022-03-08T21:43:10.000Z
|
patientMatcher/server/controllers.py
|
Clinical-Genomics/patientMatcher
|
d91dee9fbf9667cf9cd06dd057dc2b3f239ed075
|
[
"MIT"
] | 182
|
2019-01-23T10:13:30.000Z
|
2022-03-25T13:17:08.000Z
|
patientMatcher/server/controllers.py
|
Clinical-Genomics/patientMatcher
|
d91dee9fbf9667cf9cd06dd057dc2b3f239ed075
|
[
"MIT"
] | 6
|
2019-01-09T21:21:43.000Z
|
2022-03-09T20:26:23.000Z
|
# -*- coding: utf-8 -*-
import logging
from flask import jsonify
from jsonschema import ValidationError
from patientMatcher.constants import STATUS_CODES
from patientMatcher.utils.stats import general_metrics
from patientMatcher.utils.delete import delete_by_query
from patientMatcher.utils.patient import patients
from patientMatcher.parse.patient import validate_api, mme_patient
from patientMatcher.auth.auth import authorize
from patientMatcher.match.handler import external_matcher
from patientMatcher.__version__ import __version__
LOG = logging.getLogger(__name__)
def heartbeat(disclaimer):
"""Return a heartbeat as defined here:https://github.com/ga4gh/mme-apis/blob/master/heartbeat-api.md"""
hbeat = {
"heartbeat": {
"production": True,
"version": __version__,
"accept": [
"application/vnd.ga4gh.matchmaker.v1.0+json",
"application/vnd.ga4gh.matchmaker.v1.1+json",
],
},
"disclaimer": disclaimer,
}
return hbeat
def metrics(database):
"""return database metrics"""
db_metrics = general_metrics(database)
return db_metrics
def get_nodes(database):
"""Get all connected nodes as a list of objects with node_id and node_label as elements"""
results = list(database["nodes"].find())
nodes = []
for node in results:
nodes.append({"id": node["_id"], "description": node["label"]})
return nodes
def patient(database, patient_id):
"""Return a mme-like patient from database by providing its ID"""
query_patient = None
query_result = list(patients(database, ids=[patient_id]))
if query_result:
query_patient = query_result[0]
return query_patient
def match_external(database, query_patient, node=None):
"""Trigger an external patient matching for a given patient object"""
# trigger the matching and save the matching id to variable
matching_obj = external_matcher(database, query_patient, node)
# save matching object to database only if there are results or error messages
if matching_obj and (matching_obj.get("has_matches") or matching_obj.get("errors")):
database["matches"].insert_one(matching_obj)
return matching_obj
def check_request(database, request):
"""Check if request is valid, if it is return MME formatted patient
Otherwise return error code.
"""
# check that request is using a valid auth token
if not authorize(database, request):
LOG.info("Request is not authorized")
return 401
try: # make sure request has valid json data
request_json = request.get_json(force=True)
except Exception as err:
LOG.info("Json data in request is not valid:{}".format(err))
return 400
try: # validate json data against MME API
validate_api(json_obj=request_json, is_request=True)
except Exception as err:
LOG.info("Patient data does not conform to API:{}".format(err))
return 422
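    # Convert the incoming patient into internal MME format, normalizing genes to Ensembl ids.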
formatted_patient = mme_patient(json_patient=request_json["patient"], convert_to_ensembl=True)
return formatted_patient
def check_async_request(database, request):
"""Check if an asynchronous request is valid.
    The json data must be valid and the query ID must already be
    present in the async responses database collection."""
data = None
try: # Check if request contains valid data
data = request.json
LOG.info("Request data looks valid. Source is {}".format(data.get("source")))
except:
LOG.error("Request data is not valid. Abort")
return 400
# check if query ID was previously saved into async responses collection
    query_id = data.get("query_id")
    async_response = None
if query_id:
async_response = database["async_responses"].find_one({"query_id": query_id})
LOG.info("Async response is {}".format(async_response))
if query_id is None or async_response is None:
LOG.error("Async request not authorized. Abort")
return 401
resp = data.get("response")
if resp is None:
LOG.error("Async server did not provide any 'response' object")
return 400
try: # validate json response (results)
validate_api(json_obj=resp, is_request=False)
except Exception as err:
LOG.info("Patient data does not conform to API:{}".format(err))
return 422
return data
def validate_response(matches):
"""Validates patient matching results before sending them away in a response"""
try: # validate json data against MME API
validate_api(json_obj=matches, is_request=False)
except ValidationError as err:
LOG.info("Patient data does not conform to API:{}".format(err))
return 422
return matches
def bad_request(error_code):
"""Crete an automatic response based on custom error codes"""
message = STATUS_CODES[error_code]["message"]
resp = jsonify(message)
resp.status_code = error_code
return resp
def delete_patient(database, patient_id):
"""Remove a patient by ID"""
message = ""
# first delete all matches in database for this patient:
query = {"data.patient.id": patient_id}
deleted = delete_by_query(query, database, "matches")
LOG.info("deleted {} matche/s triggered by this patient".format(deleted))
query = {"_id": patient_id}
deleted = delete_by_query(query, database, "patients")
message = {}
if deleted == 1:
message["message"] = "Patient and its matches were successfully deleted from database"
else:
message["message"] = "ERROR. Could not delete a patient with ID {} from database".format(
patient_id
)
return message
| 34.377246
| 107
| 0.690472
|
b31814dbd1160ffe70145b58f7312f8350b17f73
| 1,658
|
py
|
Python
|
dassl/modeling/backbone/cnn_duckieS_baseline.py
|
pmirallesr/Dassl.pytorch
|
ec41f816bb60a9af94c9b055c500f0e2e404cfc6
|
[
"MIT"
] | null | null | null |
dassl/modeling/backbone/cnn_duckieS_baseline.py
|
pmirallesr/Dassl.pytorch
|
ec41f816bb60a9af94c9b055c500f0e2e404cfc6
|
[
"MIT"
] | null | null | null |
dassl/modeling/backbone/cnn_duckieS_baseline.py
|
pmirallesr/Dassl.pytorch
|
ec41f816bb60a9af94c9b055c500f0e2e404cfc6
|
[
"MIT"
] | null | null | null |
"""
Reference
https://github.com/VisionLearningGroup/VisionLearningGroup.github.io/tree/master/M3SDA
"""
import torch.nn as nn
from torch.nn import functional as F
import torch
from .build import BACKBONE_REGISTRY
from .backbone import Backbone
class FeatureExtractor(Backbone):
def __init__(self, action_dim, max_action, in_channels=3):
super(FeatureExtractor, self).__init__()
flat_size = 32 * 2 * 4
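        # With the expected 60x80 input, the four conv layers leave a 32-channel 2x4 feature map.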
self.lr = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 32, 8, stride=2)
self.conv2 = nn.Conv2d(32, 32, 4, stride=2)
self.conv3 = nn.Conv2d(32, 32, 4, stride=2)
self.conv4 = nn.Conv2d(32, 32, 4, stride=1)
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(32)
self.bn3 = nn.BatchNorm2d(32)
self.bn4 = nn.BatchNorm2d(32)
self.dropout = nn.Dropout(.5)
self.fc1 = nn.Linear(flat_size,512)
self._out_features = 512
def _check_input(self, x):
H, W = x.shape[2:]
assert H == 60 and W == 80, \
'Input to network must be 60x80, ' \
'but got {}x{}'.format(H, W)
def forward(self, x):
"""Forward pass x."""
x = self.bn1(self.lr(self.conv1(x)))
x = self.bn2(self.lr(self.conv2(x)))
x = self.bn3(self.lr(self.conv3(x)))
x = self.bn4(self.lr(self.conv4(x)))
x = torch.flatten(x, start_dim=1)
x = self.dropout(x)
x = self.lr(self.fc1(x))
return x
@BACKBONE_REGISTRY.register()
def cnn_duckieS_baseline(in_channels=3, **kwargs):
"""
"""
return FeatureExtractor(2, 1, in_channels)
| 30.145455
| 86
| 0.595296
|
e40a1d1e6faaf0cb7eb18e5e525fd4c86d1a317b
| 3,005
|
py
|
Python
|
smartcameras/test/test_camera.py
|
PedrosWits/smart-cameras
|
68fa9a2c43e0d659cb9bd0f87cc7b2aaaccefb2f
|
[
"MIT"
] | null | null | null |
smartcameras/test/test_camera.py
|
PedrosWits/smart-cameras
|
68fa9a2c43e0d659cb9bd0f87cc7b2aaaccefb2f
|
[
"MIT"
] | null | null | null |
smartcameras/test/test_camera.py
|
PedrosWits/smart-cameras
|
68fa9a2c43e0d659cb9bd0f87cc7b2aaaccefb2f
|
[
"MIT"
] | null | null | null |
import threading
from smartcameras.speedcamera import SpeedCamera
import smartcameras.speedcamera as speedcamera
import time
import json
def test_constructor():
camera = SpeedCamera("Blandford Square", "Newcastle")
assert camera.street == "Blandford Square"
assert camera.city == "Newcastle"
def test_uuids():
camera1 = SpeedCamera("Blandford Square", "Newcastle")
camera2 = SpeedCamera("Blandford Square", "Newcastle")
assert camera1.id != camera2.id
def test_relocate():
camera = SpeedCamera("Blandford Square", "Newcastle")
camera.relocate("St. James Avenue")
assert camera.street == "St. James Avenue"
camera.relocate("Campo Alegre", "Porto")
assert camera.street == "Campo Alegre"
assert camera.city == "Porto"
def test_activity_with_threads():
camera = SpeedCamera("Queens Road", "Manchester")
t = threading.Thread(target=camera.activate, args=(50, 5))
t.start()
time.sleep(5)
assert camera.isActive
#print(camera.toJson())
camera.deactivate()
t.join()
assert not camera.isActive
#print(camera.toJson())
def test_constants():
assert SpeedCamera.TOPIC == "speedcamera"
def test_pub_sub():
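    # End-to-end check: activating a camera publishes an activation event, then vehicle events.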
test_subscription = "TEST_PUB_SUB"
camera = SpeedCamera("Big Ben", "London")
camera.cloudhook.serviceBus.delete_subscription(SpeedCamera.TOPIC, test_subscription)
camera.cloudhook.subscribe(SpeedCamera.TOPIC, test_subscription)
thread = speedcamera.activateInNewThread(camera, 50, 1)
assert camera.isActive
camera_msg = camera.cloudhook.getMessage(SpeedCamera.TOPIC, test_subscription, timeout='5')
assert json.loads(camera_msg.body)['event'] == SpeedCamera.EVENT_ACTIVATION
msg_dict_values = json.loads(camera_msg.body)['camera'].values()
assert camera.id in msg_dict_values
assert camera.city in msg_dict_values
assert camera.street in msg_dict_values
assert camera.rate in msg_dict_values
assert camera.speedLimit in msg_dict_values
# print(camera_msg.body)
vehicle_msg = camera.cloudhook.getMessage(SpeedCamera.TOPIC, test_subscription, timeout='5')
assert json.loads(vehicle_msg.body)['event'] == SpeedCamera.EVENT_VEHICLE
assert 'camera' in vehicle_msg.body
assert 'vehicle' in vehicle_msg.body
msg_camera_dict_values = json.loads(vehicle_msg.body)['camera'].values()
msg_vehicle_dict_keys = json.loads(vehicle_msg.body)['vehicle'].keys()
assert camera.id in msg_camera_dict_values
assert camera.city in msg_camera_dict_values
assert camera.street in msg_camera_dict_values
assert camera.rate in msg_camera_dict_values
assert camera.speedLimit in msg_camera_dict_values
assert "plate" in msg_vehicle_dict_keys
assert "speed" in msg_vehicle_dict_keys
assert "type" in msg_vehicle_dict_keys
# print(vehicle_msg.body)
camera.deactivate()
thread.join()
assert not camera.isActive
camera.cloudhook.serviceBus.delete_subscription(SpeedCamera.TOPIC, test_subscription)
| 35.77381
| 96
| 0.745092
|
2797b6dfc818a3de2bc52aaf5906014401475627
| 793
|
py
|
Python
|
estructuras de control secuenciales/ejercicio10.py
|
svcuellar/algoritmos_programacion
|
0813ee6a2ccb605557a7920bf82440b7388b49e8
|
[
"MIT"
] | null | null | null |
estructuras de control secuenciales/ejercicio10.py
|
svcuellar/algoritmos_programacion
|
0813ee6a2ccb605557a7920bf82440b7388b49e8
|
[
"MIT"
] | null | null | null |
estructuras de control secuenciales/ejercicio10.py
|
svcuellar/algoritmos_programacion
|
0813ee6a2ccb605557a7920bf82440b7388b49e8
|
[
"MIT"
] | null | null | null |
"""
entradas
cantidadchelinesaustriacos-->c-->float
cantidaddragmasgriegos-->dg-->float
cantidadpesetas-->p-->float
salidas
chelines_a_pesetas-->c_p-->float
dragmas_a_francosfrancese-->dg_ff-->float
pesetas_a_dolares-->p_d-->float
pesetas_a_lirasitalianas-->p_li-->float
"""
#inputs
c=float(input("Enter the amount of Austrian schillings "))
dg=float(input("Enter the amount of Greek drachmas "))
p=float(input("Enter the amount of pesetas "))
#black box
c_p=round((c*9.57), 2)
dg_ff=round(((c*0.957)/20.110), 2)
p_d=round((p/122.499), 2)
p_li=round((p/0.092289), 2)
#outputs
print(c, " schillings are equivalent to", c_p, " pesetas")
print(dg, " Greek drachmas are equivalent to", dg_ff, " French francs")
print(p, " pesetas are equivalent to", p_d, " dollars and ", p_li, " Italian liras")
| 28.321429
| 78
| 0.726356
|
04a4f79714710c2afbc31bc7a20fb0872795e8d0
| 466
|
py
|
Python
|
examples/toy.py
|
devanshusomani99/myFM
|
d8e3d93de7c4a3dc19551c07d5f1d71d13f6abc6
|
[
"MIT"
] | 50
|
2019-12-27T01:47:39.000Z
|
2022-03-30T13:48:56.000Z
|
examples/toy.py
|
devanshusomani99/myFM
|
d8e3d93de7c4a3dc19551c07d5f1d71d13f6abc6
|
[
"MIT"
] | 7
|
2021-03-13T00:59:40.000Z
|
2022-02-15T19:29:34.000Z
|
examples/toy.py
|
devanshusomani99/myFM
|
d8e3d93de7c4a3dc19551c07d5f1d71d13f6abc6
|
[
"MIT"
] | 10
|
2020-09-01T16:55:59.000Z
|
2021-06-27T15:18:34.000Z
|
import myfm
from sklearn.feature_extraction import DictVectorizer
import numpy as np
train = [
{"user": "1", "item": "5", "age": 19},
{"user": "2", "item": "43", "age": 33},
{"user": "3", "item": "20", "age": 55},
{"user": "4", "item": "10", "age": 20},
]
v = DictVectorizer()
X = v.fit_transform(train)
y = np.asarray([0, 1, 1, 0])
fm = myfm.MyFMClassifier(rank=4)
fm.fit(X,y)
p = fm.predict_proba(v.transform({"user": "1", "item": "10", "age": 24}))
print(p)
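# Illustrative addition (not in the original toy script): DictVectorizer turns
# each dict into a sparse row, keeping numeric fields such as "age" as a single
# real-valued column and expanding categorical fields into indicator columns
# such as "item=5" or "user=1". The line below only prints those learned column
# names and assumes nothing beyond the fit performed above.
print(v.feature_names_)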
| 25.888889
| 73
| 0.581545
|
a9688873427c693dec97c5c013ed420a36765eff
| 4,706
|
py
|
Python
|
src/pkgcheck/checks/imlate.py
|
floppym/pkgcheck
|
07215eac255a88b3a0c351f35626a1c79d9f2632
|
[
"BSD-3-Clause"
] | 18
|
2015-04-24T23:15:30.000Z
|
2022-02-14T04:14:56.000Z
|
src/pkgcheck/checks/imlate.py
|
floppym/pkgcheck
|
07215eac255a88b3a0c351f35626a1c79d9f2632
|
[
"BSD-3-Clause"
] | 351
|
2015-03-08T07:07:47.000Z
|
2022-03-21T11:57:58.000Z
|
src/pkgcheck/checks/imlate.py
|
floppym/pkgcheck
|
07215eac255a88b3a0c351f35626a1c79d9f2632
|
[
"BSD-3-Clause"
] | 19
|
2015-03-08T01:16:10.000Z
|
2022-03-17T10:37:26.000Z
|
from collections import defaultdict
from pkgcore.restrictions import packages, values
from snakeoil.strings import pluralism
from .. import addons, results, sources
from . import Check
class PotentialStable(results.VersionResult, results.Info):
"""Stable arches with potential stable package candidates."""
def __init__(self, slot, stable, keywords, **kwargs):
super().__init__(**kwargs)
self.slot = slot
self.stable = tuple(stable)
self.keywords = tuple(keywords)
@property
def desc(self):
es = pluralism(self.stable, plural='es')
stable = ', '.join(self.stable)
s = pluralism(self.keywords)
keywords = ', '.join(self.keywords)
return f'slot({self.slot}), stabled arch{es}: [ {stable} ], potential{s}: [ {keywords} ]'
class LaggingStable(results.VersionResult, results.Info):
"""Stable arches for stabilized package that are lagging from a stabling standpoint."""
def __init__(self, slot, stable, keywords, **kwargs):
super().__init__(**kwargs)
self.slot = slot
self.stable = tuple(stable)
self.keywords = tuple(keywords)
@property
def desc(self):
es = pluralism(self.stable, plural='es')
stable = ', '.join(self.stable)
keywords = ', '.join(self.keywords)
return f'slot({self.slot}), stabled arch{es}: [ {stable} ], lagging: [ {keywords} ]'
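    # Example of the rendered description (illustrative, derived from the
    # f-string above, not taken from the original source): for slot='0',
    # stable=('amd64', 'x86') and keywords=('~arm64',) this yields
    #   "slot(0), stabled arches: [ amd64, x86 ], lagging: [ ~arm64 ]"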
class ImlateCheck(Check):
"""Scan for ebuilds that are lagging in stabilization."""
_source = sources.PackageRepoSource
required_addons = (addons.StableArchesAddon,)
known_results = frozenset([PotentialStable, LaggingStable])
@staticmethod
def mangle_argparser(parser):
parser.plugin.add_argument(
"--source-arches", action='csv', metavar='ARCH',
help="comma separated list of arches to compare against for lagging stabilization",
docs="""
Comma separated list of arches to compare against for
lagging stabilization.
The default arches are all stable arches (unless --arches is specified).
""")
def __init__(self, *args, stable_arches_addon=None):
super().__init__(*args)
self.all_arches = frozenset(self.options.arches)
self.stable_arches = frozenset(arch.strip().lstrip("~") for arch in self.options.stable_arches)
self.target_arches = frozenset(f'~{arch}' for arch in self.stable_arches)
source_arches = self.options.source_arches
if source_arches is None:
source_arches = self.options.stable_arches
self.source_arches = frozenset(
arch.lstrip("~") for arch in source_arches)
self.source_filter = packages.PackageRestriction(
"keywords", values.ContainmentMatch2(self.source_arches))
def feed(self, pkgset):
pkg_slotted = defaultdict(list)
for pkg in pkgset:
pkg_slotted[pkg.slot].append(pkg)
fmatch = self.source_filter.match
for slot, pkgs in sorted(pkg_slotted.items()):
slot_keywords = set().union(*(pkg.keywords for pkg in pkgs))
stable_slot_keywords = self.all_arches.intersection(slot_keywords)
potential_slot_stables = {'~' + x for x in stable_slot_keywords}
newer_slot_stables = set()
for pkg in reversed(pkgs):
# only consider pkgs with keywords that contain the targeted arches
if not fmatch(pkg):
newer_slot_stables.update(self.all_arches.intersection(pkg.keywords))
continue
# current pkg stable keywords
stable = {'~' + x for x in self.source_arches.intersection(pkg.keywords)}
lagging = potential_slot_stables.intersection(pkg.keywords)
# skip keywords that have newer stable versions
lagging -= {'~' + x for x in newer_slot_stables}
lagging -= stable
if lagging:
stable_kwds = (x for x in pkg.keywords if not x[0] in ('~', '-'))
yield LaggingStable(
slot, sorted(stable_kwds), sorted(lagging), pkg=pkg)
unstable_keywords = {x for x in pkg.keywords if x[0] == '~'}
potential = self.target_arches.intersection(unstable_keywords)
potential -= lagging | stable
if potential:
stable_kwds = (x for x in pkg.keywords if not x[0] in ('~', '-'))
yield PotentialStable(
slot, sorted(stable_kwds), sorted(potential), pkg=pkg)
break
| 40.568966
| 103
| 0.614535
|
078c49fff098bd3a9d55e6f0698ced7d1acd2856
| 34,677
|
py
|
Python
|
scripts/class_BigARTM.py
|
omega1996/vacancy_clustering
|
28ffb41f4c29044f14dfa7770fdec0eba39e38b9
|
[
"MIT"
] | null | null | null |
scripts/class_BigARTM.py
|
omega1996/vacancy_clustering
|
28ffb41f4c29044f14dfa7770fdec0eba39e38b9
|
[
"MIT"
] | null | null | null |
scripts/class_BigARTM.py
|
omega1996/vacancy_clustering
|
28ffb41f4c29044f14dfa7770fdec0eba39e38b9
|
[
"MIT"
] | null | null | null |
import os
import glob
import artm
import random
import time
import numpy as np
import logging
logging.basicConfig(level=logging.ERROR)  # or logging.DEBUG for more verbose output
import matplotlib.pyplot as plt
plt.ioff()
from tqdm import tqdm
import pandas as pd
from files_and_dirs import *
import pickle
def write_to_pickle(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f)
import winsound
# # To configure logging folder
# import artm
lc = artm.messages.ConfigureLoggingArgs()
# lc.log_dir=r'C:\bigartm_log'
# lc.minloglevel = 3
lib = artm.wrapper.LibArtm(logging_config=lc)
#
# # To change any other logging parameters at runtime (except logging folder)
lc.minloglevel=3 # 0 = INFO, 1 = WARNING, 2 = ERROR, 3 = FATAL
lib.ArtmConfigureLogging(lc)
import pickle
def read_from_pickle(filename):
with open(filename, 'rb') as f:
data_new = pickle.load(f)
return data_new
def write_to_pickle(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f)
class BigARTM():
save_scores = False
num_toptokens = 10
num_document_passes = 1
num_collection_passes = 30
base_dir = ""
def __init__(self, name, num_of_topics):
logging.info('Create BigARTM object. ')
self.name = name
self.num_of_topics = num_of_topics
logging.info(f'Param: {num_of_topics}, {BigARTM.num_toptokens}, {BigARTM.num_document_passes}, {BigARTM.num_collection_passes}.')
self._prepare_folders()
def _prepare_folders(self):
dirs = [f'{base_dir}\\matrixes',
f'{base_dir}\\plots',
f'{base_dir}\\dictionary',
f'{base_dir}\\top_tokens',
f'{base_dir}\\uci',
                f'{base_dir}\\scores',
f'{base_dir}\\uci\\batches'
]
for dir_ in dirs:
#print(dir_)
if not os.path.exists(dir_):
os.makedirs(dir_)
def create_batch_vectorizer(self, data_path, data_format, collection_name, batch_folder):
logging.info(f"Create batch_vectorizer...")
if (len(glob.glob(os.path.join('bow', '.batch'))) < 1):
self.batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
data_format=data_format,
collection_name=collection_name,
target_folder=batch_folder)
else:
self.batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
data_format=data_format)
logging.info(f"Create dictionary...")
self.dictionary = self.batch_vectorizer.dictionary
logging.info(f"Done")
def create_topics_names(self, topics_names=[]):
if (topics_names == []):
logging.info('Create topics_names by default.')
self.topics_names = ['topic_{}'.format(i) for i in range(self.num_of_topics)]
else:
logging.info('Create topics_names by user_topics_names.')
self.topics_names = topics_names
if (len(self.topics_names) != self.num_of_topics):
            logging.error('The number of topics specified by the user does not match the number of topics in the model')
self.topics_names = []
# def _create_plsa_model(self):
# logging.info('Create PLSA model.')
# self.model_plsa = artm.ARTM(topic_names=self.topics_names,
# cache_theta=True,
# scores=[artm.PerplexityScore(name='PerplexityScore', dictionary=self.dictionary)])
# self.model_plsa.initialize(dictionary=self.dictionary)
# def _plsa_add_scores(self):
# logging.info(f"Add scores to PLSA model.")
# self.model_plsa.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
# self.model_plsa.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
# self.model_plsa.scores.add(artm.TopicKernelScore(name='TopicKernelScore', probability_mass_threshold=0.3))
# self.model_plsa.scores.add(artm.TopTokensScore(name='TopTokensScore', num_tokens=self.num_of_topics))
def create_artm_model_empty(self):
logging.info('Create empty ARTM model.')
self.model_artm = artm.ARTM(topic_names=self.topics_names,
cache_theta=True,
scores=[],
regularizers=[])
self.model_artm.initialize(dictionary=self.dictionary)
#self.model_artm.show_progress_bars = True
def save_artm_dictionary(self):
filename = f"{base_dir}\\dictionary\\{self.name}_dictionary.txt"
self.dictionary.save_text(filename)
def create_cooc_dict(self, batches_folder, cooc_file, vocab_file):
logging.info(f"Create cooc_dict.")
self.cooc_dict = self.dictionary
self.cooc_dict.gather(
data_path=batches_folder,
cooc_file_path=cooc_file,
vocab_file_path=vocab_file,
symmetric_cooc_values=True)
def artm_add_scores(self):
logging.info(f"=== SCORES ===")
logging.info(f"Add scores to ARTM model.")
logging.info(f"Add score PerplexityScore.")
self.model_artm.scores.add(artm.PerplexityScore(name='PerplexityScore', dictionary=self.dictionary))
logging.info(f"Add score SparsityPhiScore.")
self.model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
logging.info(f"Add score SparsityThetaScore.")
self.model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
logging.info(f"Add score TopTokensScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='TopTokensScore', num_tokens=BigARTM.num_toptokens))
logging.info(f"Add score TopicKernelScore.")
self.model_artm.scores.add(artm.TopicKernelScore(name='TopicKernelScore', probability_mass_threshold=0.3))
logging.info(f"Add score ItemsProcessedScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='ItemsProcessedScore'))
logging.info(f"Add score ThetaSnippetScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='ThetaSnippetScore'))
logging.info(f"Add score TopicMassPhiScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='TopicMassPhiScore'))
logging.info(f"Add score ClassPrecisionScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='ClassPrecisionScore'))
logging.info(f"Add score BackgroundTokensRatioScore.")
self.model_artm.scores.add(artm.TopTokensScore(name='BackgroundTokensRatioScore'))
#if(self.cooc_dict != None):
logging.info(f"Add score TopTokensCoherenceScore.")
self.coherence_score = artm.TopTokensScore(
name='TopTokensCoherenceScore',
class_id='@default_class',
num_tokens=10,
topic_names=self.topics_names,
dictionary=self.cooc_dict)
self.model_artm.scores.add(self.coherence_score)
def add_regularization_artm(self, SparsePhi=0, SparseTheta=0, DecorrelatorPhi=0):
logging.info(f"=== REGULARIZATION ===")
logging.info(f"Add regularization to ARTM model.")
self.SparsePhi = SparsePhi
logging.info(f"Add regularizer SparsePhi. tau = {SparsePhi}")
self.model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=SparsePhi))
self.SparseTheta = SparseTheta
logging.info(f"Add regularizer SparseTheta. tau = {SparseTheta}")
self.model_artm.regularizers.add(artm.SmoothSparseThetaRegularizer(name='SparseTheta', tau=SparseTheta))
self.DecorrelatorPhi = DecorrelatorPhi
logging.info(f"Add regularizer DecorrelatorPhi. tau = {DecorrelatorPhi}")
self.model_artm.regularizers.add(artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi', tau=DecorrelatorPhi))
# def print_regularizations(self):
# print(f"SparsePhi = {self.model_artm.regularizers['SparsePhi'].tau}")
# print(f"SparseTheta = {self.model_artm.regularizers['SparseTheta'].tau}")
# print(f"DecorrelatorPhi = {self.model_artm.regularizers['DecorrelatorPhi'].tau}")
def reup_regularization_artm(self, SparsePhi=0, SparseTheta=0, DecorrelatorPhi=0):
logging.info(f"reup_regularization")
self.SparsePhi = SparsePhi
self.SparseTheta = SparseTheta
self.DecorrelatorPhi = DecorrelatorPhi
self.model_artm.regularizers['SparsePhi'].tau = SparsePhi
self.model_artm.regularizers['SparseTheta'].tau = SparseTheta
self.model_artm.regularizers['DecorrelatorPhi'].tau = DecorrelatorPhi
def fit_model(self):
logging.info(f"Fit models")
#self.model_plsa.num_document_passes = num_document_passes
self.model_artm.num_document_passes = BigARTM.num_document_passes
#self.model_plsa.fit_offline(batch_vectorizer=self.batch_vectorizer,
# num_collection_passes=num_collection_passes)
self.model_artm.fit_offline(batch_vectorizer=self.batch_vectorizer,
num_collection_passes=BigARTM.num_collection_passes)
def print_measures_artm(self):
print('Sparsity Phi: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['SparsityPhiScore'].last_value))
print('Sparsity Theta: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['SparsityThetaScore'].last_value))
print('Kernel contrast: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['TopicKernelScore'].last_average_contrast))
print('Kernel purity: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['TopicKernelScore'].last_average_purity))
print('Perplexity: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['PerplexityScore'].last_value))
# def print_measures_plsa_artm(self):
# print('Sparsity Phi: \t\t{0:.3f} (PLSA) \tvs. \t{1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['SparsityPhiScore'].last_value,
# self.model_artm.score_tracker['SparsityPhiScore'].last_value))
# print('Sparsity Theta: \t{0:.3f} (PLSA) \tvs. \t{1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['SparsityThetaScore'].last_value,
# self.model_artm.score_tracker['SparsityThetaScore'].last_value))
# print('Kernel contrast: \t{0:.3f} (PLSA) \tvs. \t{1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['TopicKernelScore'].last_average_contrast,
# self.model_artm.score_tracker['TopicKernelScore'].last_average_contrast))
# print('Kernel purity: \t\t{0:.3f} (PLSA) \tvs. \t{1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['TopicKernelScore'].last_average_purity,
# self.model_artm.score_tracker['TopicKernelScore'].last_average_purity))
# print('Perplexity: \t\t{0:.3f} (PLSA) \tvs. \t{1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['PerplexityScore'].last_value,
# self.model_artm.score_tracker['PerplexityScore'].last_value))
def plot_measures_artm(self):
filename = f"{base_dir}\\plots\\{self.name}_measures.png"
f = plt.figure()
ax = f.add_subplot(111)
f.set_size_inches(10, 7)
ls = random.choice(['-', '--', '-.', ':'])
marker=random.choice([".",",","o","v","^","<",">","1","2","3",
"4","8","s","p","P","*","h","H","+","x",
"X","D","d","|","_",0,1,2,3,4,5,6,7,8,9,10,11])
plt.plot(range(self.model_artm.num_phi_updates),
self.model_artm.score_tracker['PerplexityScore'].value, ls=ls, marker=marker, linewidth=2)
sparsity_phi = 'Sparsity Phi: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['SparsityPhiScore'].last_value)
sparsity_theta = 'Sparsity Theta: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['SparsityThetaScore'].last_value)
kernel_contrast = 'Kernel contrast: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['TopicKernelScore'].last_average_contrast)
kernel_purity = 'Kernel purity: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['TopicKernelScore'].last_average_purity)
perplexity = 'Perplexity: {0:.3f} (ARTM)'.format(
self.model_artm.score_tracker['PerplexityScore'].last_value)
measures = [sparsity_phi, sparsity_theta, kernel_contrast, kernel_purity, perplexity]
plt.text(0.2, 0.8,'\n'.join(measures), ha='left', va='center', transform=ax.transAxes, fontsize=14)
#plt.text(150, 0, '\n'.join(measures), fontsize=14)
plt.title(f'Measures. {self.name}')
plt.xlabel('Iterations count')
plt.ylabel('ARTM PerplexityScore.')
plt.grid(True)
plt.savefig(filename)
#plt.show()
# def plot_measures_plsa_artm(self):
# filename = f"{base_dir}\\plots\\measures\\{self.name}_measures.png"
#
# f = plt.figure()
# ax = f.add_subplot(111)
# f.set_size_inches(10, 7)
#
# plt.plot(range(self.model_plsa.num_phi_updates),
# self.model_plsa.score_tracker['PerplexityScore'].value, 'b--',
# range(self.model_artm.num_phi_updates),
# self.model_artm.score_tracker['PerplexityScore'].value, 'r--', linewidth=2)
#
# sparsity_phi = 'Sparsity Phi: {0:.3f} (PLSA) vs. {1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['SparsityPhiScore'].last_value,
# self.model_artm.score_tracker['SparsityPhiScore'].last_value)
# sparsity_theta = 'Sparsity Theta: {0:.3f} (PLSA) vs. {1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['SparsityThetaScore'].last_value,
# self.model_artm.score_tracker['SparsityThetaScore'].last_value)
# kernel_contrast = 'Kernel contrast: {0:.3f} (PLSA) vs. {1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['TopicKernelScore'].last_average_contrast,
# self.model_artm.score_tracker['TopicKernelScore'].last_average_contrast)
# kernel_purity = 'Kernel purity: {0:.3f} (PLSA) vs. {1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['TopicKernelScore'].last_average_purity,
# self.model_artm.score_tracker['TopicKernelScore'].last_average_purity)
# perplexity = 'Perplexity: {0:.3f} (PLSA) vs. {1:.3f} (ARTM)'.format(
# self.model_plsa.score_tracker['PerplexityScore'].last_value,
# self.model_artm.score_tracker['PerplexityScore'].last_value)
#
# measures = [sparsity_phi, sparsity_theta, kernel_contrast, kernel_purity, perplexity]
# plt.text(0.2, 0.8,'\n'.join(measures), ha='left', va='center', transform=ax.transAxes, fontsize=14)
# #plt.text(150, 0, '\n'.join(measures), fontsize=14)
#
# plt.title(f'Measures. topic={self.num_of_topics}')
# plt.xlabel('Iterations count')
# plt.ylabel('PLSA perp. (blue), ARTM perp. (red)')
# plt.grid(True)
# plt.savefig(filename)
# plt.show()
def plot_score_tracker(self, score):
f = plt.figure()
f.set_size_inches(10, 7)
# plt.plot(range(self.model_plsa.num_phi_updates),
# self.model_plsa.score_tracker[score].value, 'b--', linewidth=2)
ls = random.choice(['-', '--', '-.', ':'])
marker=random.choice([".",",","o","v","^","<",">","1","2","3",
"4","8","s","p","P","*","h","H","+","x",
"X","D","d","|","_",0,1,2,3,4,5,6,7,8,9,10,11])
plt.plot(range(self.model_artm.num_phi_updates),
self.model_artm.score_tracker[score].value, marker=marker, ls=ls, linewidth=2)
plt.title(f'{score}. topics={self.num_of_topics}.', size=20)
plt.xlabel(f'Iterations count', size=20)
plt.ylabel(f'{score}',size=20)
plt.legend(fontsize=20)
plt.grid(True)
plt.savefig(f"{base_dir}\\plots\\{score}\\{self.name}.png")
#plt.show()
def save_matrix_phi(self):
phi_matrix = self.model_artm.get_phi()
phi_matrix.head()
phi_matrix.to_csv(f"{base_dir}\\matrixes\\{self.name}_phi.csv")
def save_matrix_theta(self):
theta_matrix = self.model_artm.get_theta()
theta_matrix.head()
theta_matrix.to_csv(f"{base_dir}\\matrixes\\{self.name}_theta.csv")
def save_top_tokens(self, filename=''):
if(filename==''):
filename = f"{base_dir}\\top_tokens\\{self.name}"
res_dict_artm = {}
if(len(self.model_artm.topic_names) != len(self.model_artm.score_tracker['TopTokensScore'].last_tokens)):
print("Присутствуют пустые темы!!!")
for topic_name in self.model_artm.topic_names:
try:
value = self.model_artm.score_tracker['TopTokensScore'].last_tokens[topic_name]
except:
value = []
res_dict_artm[topic_name] = value
write_to_pickle(f"{filename}.pickle", res_dict_artm)
lst = []
for topic_name in self.model_artm.topic_names:
row_lst = []
row_lst.append(topic_name)
try:
row_lst.extend(self.model_artm.score_tracker['TopTokensScore'].last_tokens[topic_name])
except:
row_lst.extend([])
lst.append(row_lst)
df = pd.DataFrame(lst)
df.to_csv(f"{filename}.csv")
def get_values_by_score_tracker(self, score):
scores = []
if score == 'PerplexityScore':
scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
scores.append((self.name, score, "raw", self.model_artm.score_tracker[score].raw))
scores.append((self.name, score, "normalizer", self.model_artm.score_tracker[score].normalizer))
scores.append((self.name, score, "zero_tokens", self.model_artm.score_tracker[score].zero_tokens))
scores.append((self.name, score, "class_id_info", self.model_artm.score_tracker[score].raw))
return scores
if score == 'SparsityPhiScore':
scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
scores.append((self.name, score, "zero_tokens", self.model_artm.score_tracker[score].zero_tokens))
scores.append((self.name, score, "total_tokens", self.model_artm.score_tracker[score].total_tokens))
return scores
if score == 'SparsityThetaScore':
scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
scores.append((self.name, score, "zero_topics", self.model_artm.score_tracker[score].zero_topics))
scores.append((self.name, score, "total_topics", self.model_artm.score_tracker[score].total_topics))
return scores
if score == 'TopTokensScore':
scores.append((self.name, score, "num_tokens", self.model_artm.score_tracker[score].num_tokens))
scores.append((self.name, score, "coherence", self.model_artm.score_tracker[score].coherence))
scores.append((self.name, score, "average_coherence", self.model_artm.score_tracker[score].average_coherence))
scores.append((self.name, score, "tokens", self.model_artm.score_tracker[score].tokens))
scores.append((self.name, score, "weights", self.model_artm.score_tracker[score].weights))
return scores
if score == 'TopicKernelScore':
scores.append((self.name, score, "tokens", self.model_artm.score_tracker[score].tokens))
scores.append((self.name, score, "size", self.model_artm.score_tracker[score].size))
scores.append((self.name, score, "contrast", self.model_artm.score_tracker[score].contrast))
scores.append((self.name, score, "purity", self.model_artm.score_tracker[score].purity))
scores.append((self.name, score, "coherence", self.model_artm.score_tracker[score].coherence))
scores.append((self.name, score, "average_size", self.model_artm.score_tracker[score].average_size))
scores.append((self.name, score, "average_contrast", self.model_artm.score_tracker[score].average_contrast))
scores.append((self.name, score, "average_purity", self.model_artm.score_tracker[score].average_purity))
scores.append((self.name, score, "average_coherence", self.model_artm.score_tracker[score].average_coherence))
return scores
if score == 'ItemsProcessedScore':
#scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
return scores
if score == 'ThetaSnippetScore':
#scores.append((self.name, score, "document_ids", self.model_artm.score_tracker[score].document_ids))
#scores.append((self.name, score, "snippet", self.model_artm.score_tracker[score].snippet))
return scores
if score == 'TopicMassPhiScore':
#scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
#scores.append((self.name, score, "topic_mass", self.model_artm.score_tracker[score].topic_mass))
#scores.append((self.name, score, "topic_ratio", self.model_artm.score_tracker[score].topic_ratio))
return scores
if score == 'ClassPrecisionScore':
#scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
#scores.append((self.name, score, "error", self.model_artm.score_tracker[score].error))
#scores.append((self.name, score, "total", self.model_artm.score_tracker[score].total))
return scores
if score == 'BackgroundTokensRatioScore':
#scores.append((self.name, score, "value", self.model_artm.score_tracker[score].value))
#scores.append((self.name, score, "tokens", self.model_artm.score_tracker[score].tokens))
return scores
if score == 'TopTokensCoherenceScore':
scores.append((self.name, score, "coherence", self.model_artm.score_tracker['TopTokensCoherenceScore'].average_coherence))
return scores
#%%
def plot_score_trackers_for_few_models(base_dir, score, df_some_score):
f = plt.figure()
f.set_size_inches(10, 7)
names_models = []
ls = random.choice(['-', '--', '-.', ':'])
markers=[".",",","o","v","^","<",">","1","2","3",
"4","8","s","p","P","*","h","H","+","x",
"X","D","d","|","_",0,1,2,3,4,5,6,7,8,9,10,11]
for i in range(len(df_some_score)):
names_models.append(df_some_score.iloc[i]['name_model'])
plt.plot(range(len(df_some_score.iloc[i]['values'])),
df_some_score.iloc[i]['values'], linewidth=2, marker=markers[i], ls=ls)
plt.title(f'{score[0]}_{score[1]}', size=20)
plt.xlabel(f'Iterations count', size=20)
plt.ylabel(f'{score[0]}_{score[1]}', size=20)
plt.legend(names_models, fontsize=20)
plt.grid(True)
names_models_str = '_'.join(names_models)
print(names_models_str)
plt.savefig(f"{base_dir}\\plots\\{score[0]}_{score[1]}.png")
#plt.show()
def create_artm_model(name_model, num_topics, tau_P=0, tau_T=0, dcr=0):
model = BigARTM(name_model, num_topics)
model.create_batch_vectorizer(f"{BigARTM.base_dir}\\uci\\", 'bow_uci', 'bow', f"{BigARTM.base_dir}\\batches")
model.create_topics_names()
model.create_artm_model_empty()
model.save_artm_dictionary()
model.create_cooc_dict(f"{BigARTM.base_dir}\\batches", f"{BigARTM.base_dir}\\cooc\\cooc_tf_", f"{BigARTM.base_dir}\\uci\\vocab.bow.txt")
model.artm_add_scores()
model.add_regularization_artm(tau_P, tau_T, dcr)
model.fit_model()
print("save")
model.plot_measures_artm()
model.save_top_tokens()
model.save_matrix_phi()
model.save_matrix_theta()
return model
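# Illustrative usage (not part of the original script): assuming the UCI bag-of-words
# files already exist under BigARTM.base_dir, a single model could be built with e.g.
#   model = create_artm_model("080_-0.1_-0.25_1000_60", 80, tau_P=-0.1, tau_T=-0.25, dcr=1000)
# which mirrors the naming scheme and parameter sweep in the __main__ block below.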
def save_model_scores(scores_dir, model):
print(f"Save scores model: {model.name}")
scores_tuples = []
for score in model.model_artm.scores.data.items():
#print(f"name_score: {score[0]}")
datas = model.get_values_by_score_tracker(score[0])
for data in datas:
#print(data[0], data[1], data[2])
if(data[1] == 'TopicKernelScore' and data[2] == 'tokens'):
pass
else:
scores_tuples.append(data)
write_to_pickle(f"{dir_}\\scores\\{name_model}.pickle", scores_tuples)
# print(scores_tuples)
# df = pd.DataFrame(list(scores_tuples), columns = ['name_model', 'score', 'parameter', 'values'])
# df.head()
# df.to_csv(f"{scores_dir}\\scores\\{name_model}.csv")
# df.to_pickle(f"{scores_dir}\\scores\\{name_model}.pickle")
#%%
if __name__ == "__main__":
name_experiment = "vacancies_06_22k"
#base_dir = "C:\\Users\\Ivan\\PycharmProjects\\all_my_projects_3_6\\bigartm2\\experiments"
base_dir = "C:\\Users\\Ivan\\PycharmProjects\\all_my_projects_3_6\\bigartm vacancies\\results"
dir_ = f"{base_dir}\\{name_experiment}"
if not os.path.exists(dir_):
os.makedirs(dir_)
base_dir = dir_
BigARTM.num_toptokens = 10
BigARTM.num_document_passes = 1
BigARTM.num_collection_passes = 60
BigARTM.base_dir = base_dir
models = []
# num_topics = 60
# name_model = f"{num_topics}_{0}_{0}_{0}"
# model = create_artm_model(name_model, num_topics)
# models.append(model)
#
#
num_topics = [80, 300, 500]
tau_P_list = [0.0, -0.1, -0.25, -0.5]
tau_T_list = [0.0, -0.1, -0.25, -0.5, -1, -2]
dcr_list = [0, 1000, 2500, 5000, 10000]
param_list = []
for num_topics in num_topics:
for tau_P in tau_P_list:
for tau_T in tau_T_list:
for dcr in dcr_list:
tuple_ = num_topics, tau_P, tau_T, dcr
param_list.append(tuple_)
#print(param_list)
#param_list = param_list[::-1]
    # Experiment to determine the number of topics
models = []
start_time = time.time()
for i in tqdm(range(len(param_list))):
param = param_list[i]
num_topics = param[0]
tau_P = param[1]
tau_T = param[2]
dcr = param[3]
lap_time = time.time()
name_model = f"{str(num_topics).zfill(3)}_{tau_P}_{tau_T}_{dcr}_{BigARTM.num_collection_passes}"
print(i, name_model)
#print(f"{dir_}\\scores\\{name_model}.pickle")
if(os.path.exists(f"{dir_}\\scores\\{name_model}.pickle")==False):
model = create_artm_model(name_model, num_topics, tau_P, tau_T, dcr)
save_model_scores(f"{dir_}\\scores\\", model)
#models.append(model)
print(f"--- Lap_time: {(time.time() - lap_time)/60:.2f} Total: {(time.time() - start_time)/60:.2f} ---")
    # # Regularization experiment
# num_topics = 60
#
# name_model = f"{num_topics}_{0}_{0}_{0}"
# model = create_artm_model(name_model, num_topics)
# models.append(model)
#
# tau_P = -0.02; tau_T = -0.03; dcr = 2500
# name_model = f"{num_topics}_{tau_P}_{tau_T}_{dcr}"
# model = create_artm_model(name_model, num_topics, tau_P, tau_T, dcr)
# models.append(model)
#
# tau_P = -0.02; tau_T = -0.5; dcr = 5000
# name_model = f"{num_topics}_{tau_P}_{tau_T}_{dcr}"
# model = create_artm_model(name_model, num_topics, tau_P, tau_T, dcr)
# models.append(model)
#
# tau_P = -0.02; tau_T = -1; dcr = 10000
# name_model = f"{num_topics}_{tau_P}_{tau_T}_{dcr}"
# model = create_artm_model(name_model, num_topics, tau_P, tau_T, dcr)
# models.append(model)
    # # Experiment to determine the influence of regularization
# num_topics = 200; tau_P = 0; tau_T = 0; dcr = 0
#
# start_time = time.time()
#
# for tau_T in np.arange(0, -1.01, -0.1):
# lap_time = time.time()
# name_model = f"{num_topics}_{tau_P}_{tau_T}_{dcr}"
# model = create_artm_model(name_model, num_topics, tau_P, tau_T, dcr)
#
# models.append(model)
# print(f"--- Lap_time: {time.time() - lap_time} Total: {time.time() - start_time} ---")
#%%
# save_model_scores(f"{dir_}\\scores\\", model)
# #%%
#
# print(f"Save scores model: {model.name}")
# scores_tuples = []
# for score in model.model_artm.scores.data.items():
# print(f"name_score: {score[0]}")
#
# datas = model.get_values_by_score_tracker(score[0])
# for data in datas:
# print(data[0], data[1], data[2])
# if(data[1] == 'TopicKernelScore' and data[2] == 'tokens'):
# pass
# else:
# scores_tuples.append(data)
# write_to_pickle(f"{dir_}\\scores\\{name_model}.pickle", scores_tuples)
#
##%%
#
# datas = read_from_pickle(f"{dir_}\\scores\\{name_model}.pickle")
# print(datas)
#%%
# print(scores_tuples)
# df = pd.DataFrame(list(scores_tuples), columns = ['name_model', 'score', 'parameter', 'values'])
# df.head()
# df.to_csv(f"{dir_}\\scores\\{name_model}.csv")
# df.to_pickle(f"{dir_}\\scores\\{name_model}.pickle")
# for d in data:
# print(d)
# print('PerplexityScore', )
#%%
# scores_tuples = []
# for model in models:
# print(model.name)
# for score in model.model_artm.scores.data.items():
# print(score[0])
# scores_tuples.extend(model.get_values_by_score_tracker(score[0]))
# print(len(scores_tuples))
#
# df = pd.DataFrame(list(scores_tuples), columns = ['name_model', 'score', 'parameter', 'values'])
# df.to_csv(base_dir+'\\scores.csv')
# print(df.iloc[0])
#
##%%
    # # combined plots across all models for each score
# scores = [('PerplexityScore','value'),
# ('PerplexityScore','raw'),
# ('PerplexityScore','normalizer'),
# ('PerplexityScore','zero_tokens'),
# ('SparsityPhiScore','value'),
# ('SparsityPhiScore','zero_tokens'),
# ('SparsityPhiScore','total_tokens'),
# ('SparsityThetaScore','value'),
# ('SparsityThetaScore','zero_topics'),
# ('SparsityThetaScore','total_topics'),
# #('TopicKernelScore','tokens'),
# #('TopicKernelScore','size'),
# #('TopicKernelScore','contrast'),
# #('TopicKernelScore','purity'),
# #('TopicKernelScore','coherence'),
# ('TopicKernelScore','average_size'),
# ('TopicKernelScore','average_contrast'),
# ('TopicKernelScore','average_purity'),
# ('TopicKernelScore','average_coherence'),
# ('TopTokensCoherenceScore', 'coherence')]
#
##%%
#
# #df['score']
##%%
# for score in scores:
# df_some_score = df[(df['score']==score[0]) & (df['parameter']==score[1])]
# print(len(df_some_score))
# print(score)
# print(df_some_score.head())
# plot_score_trackers_for_few_models(base_dir, score, df_some_score)
    # # sound signal marking the end of the computation
# duration = 300 # millisecond
# freq = 440 # Hz
# winsound.Beep(freq, duration)
#%%
#from text_to_speach import *
import text_to_speach
text_to_speach.text_to_speach("Задание выполнено!")
#%%
##
### for num_topics in range(10, 201, 10):
### #for tauF in np.arange(-0.01, -0.051, -0.01):
### #for tauF in [-0.02, -0.04]:
### for tauF in [-0.02, -0.05]:
### #for tauT in np.arange(-0.02, -0.1, -0.02):
### #for tauT in [-0.033, -0.066, -0.099]:
### for tauT in [-0.03, -0.06]:
### #for decorelator_phi in np.arange(2500, 2500, 7501):
### #for decorelator_phi in [2500, 5000, 7500]:
### for decorelator_phi in [2500, 10000]:
### name_model = f"{num_topics}_{tauF}_{tauT}_{decorelator_phi}"
###
### model = BigARTM(name_model, base_dir, num_topics, num_toptokens, num_document_passes, num_collection_passes)
### model.create_batch_vectorizer(f"{data_folder}\\uci\\", 'bow_uci', 'bow', f"{base_dir}\\batches")
### model.create_topics_names()
### model.create_artm_model_empty()
### model.save_artm_dictionary()
### model.artm_add_scores()
### model.add_regularization_artm(tauF, tauT, decorelator_phi)
### model.fit_model()
###
### model.plot_measures_artm()
### model.save_top_tokens()
### model.save_matrix_phi()
### model.save_matrix_theta()
### models.append(model)
#
#%%
print(model.name)
topic_name = 'topic_69'
value = model.model_artm.score_tracker['TopTokensScore'].last_tokens[topic_name]
print(len(model.model_artm.score_tracker['TopTokensScore'].last_tokens[topic_name]))
print(model.model_artm.score_tracker['TopTokensScore'].last_tokens)
print(value)
#model.save_top_tokens('')
#print('done')
| 43.95057
| 141
| 0.603166
|
b88c304cd66bcc17985367c296361689baf0ac6e
| 625
|
py
|
Python
|
manage.py
|
mamalmaleki/kolbe_cl
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 1
|
2020-01-02T05:51:11.000Z
|
2020-01-02T05:51:11.000Z
|
manage.py
|
mamalmaleki/kolbe
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 2
|
2021-03-30T12:38:16.000Z
|
2021-09-22T18:30:59.000Z
|
manage.py
|
mamalmaleki/kolbe
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 1
|
2020-12-01T09:47:12.000Z
|
2020-12-01T09:47:12.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kolbe.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.409091
| 73
| 0.6816
|
3e6aca4eb48ce2b1c49ec56e6377c2a259200f6a
| 141
|
py
|
Python
|
glue/python/scaii/_internal_/__init__.py
|
SCAII/SCAII
|
7ab302f788556392850d104d3df6e0b4a556414d
|
[
"BSD-3-Clause"
] | 1
|
2017-11-01T20:09:32.000Z
|
2017-11-01T20:09:32.000Z
|
glue/python/scaii/_internal_/__init__.py
|
SCAII/SCAII
|
7ab302f788556392850d104d3df6e0b4a556414d
|
[
"BSD-3-Clause"
] | 103
|
2017-09-14T17:04:53.000Z
|
2018-08-15T22:52:32.000Z
|
glue/python/scaii/_internal_/__init__.py
|
SCAII/SCAII
|
7ab302f788556392850d104d3df6e0b4a556414d
|
[
"BSD-3-Clause"
] | 6
|
2018-01-31T00:05:14.000Z
|
2020-01-29T07:01:29.000Z
|
"""
Internal tools and glue used to implement the core SCAII
Python features. These shouldn't be used unless you know what
you're doing.
"""
| 23.5
| 61
| 0.758865
|
157d8a91830f2b5059d3c44ceee75fe067e7d6ed
| 861
|
py
|
Python
|
neural_tangents/predict.py
|
romanngg/neural-tangents
|
a575da5d13e2e0f4a9a4debf22a491183c97cdd9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
neural_tangents/predict.py
|
romanngg/neural-tangents
|
a575da5d13e2e0f4a9a4debf22a491183c97cdd9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
neural_tangents/predict.py
|
romanngg/neural-tangents
|
a575da5d13e2e0f4a9a4debf22a491183c97cdd9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public `neural_tangents.predict` API. See `_src/predict.py` for details."""
# flake8: noqa: F401
from ._src.predict import (
gradient_descent,
gradient_descent_mse,
gradient_descent_mse_ensemble,
gp_inference,
max_learning_rate,
ODEState,
Gaussian
)
| 28.7
| 78
| 0.744483
|
4ae9d436ff575a5be6d023ca45053efc3962e1aa
| 14,177
|
py
|
Python
|
piwheels/master/file_juggler.py
|
bk-mtg/piwheels
|
67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6
|
[
"BSD-3-Clause"
] | null | null | null |
piwheels/master/file_juggler.py
|
bk-mtg/piwheels
|
67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6
|
[
"BSD-3-Clause"
] | null | null | null |
piwheels/master/file_juggler.py
|
bk-mtg/piwheels
|
67152dd1cfd5bd03ea90a8f0255103a9ee9c71d6
|
[
"BSD-3-Clause"
] | null | null | null |
# The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the :class:`FileJuggler` task and the :class:`FsClient` RPC class
for interacting with it.
.. autoexception:: TransferError
.. autoexception:: TransferIgnoreChunk
.. autoexception:: TransferDone
.. autoclass:: FileJuggler
:members:
.. autoclass:: FsClient
:members:
"""
import os
from pathlib import Path
from .. import transport, protocols, tasks
from ..states import TransferState, FileState
class TransferError(Exception):
"""
Base class for errors raised during a file transfer.
"""
class TransferIgnoreChunk(TransferError):
"""
Exception raised when a build slave sends CHUNK instead of HELLO as the
first message (see :meth:`FileJuggler.new_transfer`).
"""
class TransferDone(TransferError):
"""
Exception raised when a transfer is complete. It may seem a little odd to
use an exception for this, but it is "exceptional" behaviour to terminate
the file transfer.
"""
class FileJuggler(tasks.Task):
"""
This task handles file transfers from the build slaves. The specifics of
the file transfer protocol are best understood from the implementation of
the :class:`~.states.FileState` class.
However, to detail how a file transfer begins: when a build slave has
successfully completed a build it informs the master via the
:class:`~.slave_driver.SlaveDriver` task. That task replies with a "SEND"
instruction to the slave (including a filename). The slave then initiates
the transfer with a "HELLO" message to this task. Once transfers are
complete the slave sends a "SENT" message to the
:class:`~.slave_driver.SlaveDriver` task which verifies the transfer and
either retries it (when verification fails) or sends back "DONE" indicating
the slave can wipe the source file.
"""
name = 'master.file_juggler'
def __init__(self, config):
super().__init__(config)
self.output_path = Path(config.output_path)
TransferState.output_path = self.output_path
file_queue = self.socket(
transport.ROUTER, protocol=protocols.file_juggler_files)
file_queue.hwm = TransferState.pipeline_size * 50
file_queue.bind(config.file_queue)
fs_queue = self.socket(
transport.REP, protocol=protocols.file_juggler_fs)
fs_queue.hwm = 10
fs_queue.bind(config.fs_queue)
self.stats_queue = self.socket(
transport.PUSH, protocol=reversed(protocols.big_brother))
self.stats_queue.hwm = 10
self.stats_queue.connect(config.stats_queue)
self.register(file_queue, self.handle_file)
self.register(fs_queue, self.handle_fs_request)
self.pending = {} # keyed by slave_id
self.active = {} # keyed by slave address
self.complete = {} # keyed by slave_id
def once(self):
stats = os.statvfs(str(self.output_path))
self.stats_queue.send_msg(
'STATFS', (stats.f_frsize, stats.f_bavail, stats.f_blocks))
def handle_fs_request(self, queue):
"""
Handle incoming messages from :class:`FsClient` instances.
"""
try:
msg, data = queue.recv_msg()
except IOError as e:
self.logger.error(str(e))
queue.send_msg('ERROR', str(e))
else:
try:
handler = {
'EXPECT': self.do_expect,
'VERIFY': self.do_verify,
'REMOVE': self.do_remove,
}[msg]
result = handler(*data)
except Exception as exc:
self.logger.error('error handling fs request: %s', msg)
queue.send_msg('ERROR', str(exc))
else:
queue.send_msg('OK', result)
def do_expect(self, slave_id, file_state):
"""
Message sent by :class:`FsClient` to inform file juggler that a build
slave is about to start a file transfer. The message includes the full
:class:`~.states.FileState`. The state is stored in the ``pending``
map.
:param int slave_id:
The identity of the build slave about to begin the transfer.
:param list file_state:
The details of the file to be transferred including the expected
hash.
"""
file_state = FileState.from_message(file_state)
self.pending[slave_id] = TransferState(slave_id, file_state)
self.logger.info('expecting transfer: %s', file_state.filename)
def do_verify(self, slave_id, package):
"""
Message sent by :class:`FsClient` to request that juggler verify a file
transfer against the expected hash and, if it matches, rename the file
into its final location.
:param int slave_id:
The identity of the build slave that sent the file.
:param str package:
The name of the package that the file is to be committed to, if
valid.
"""
transfer = self.complete.pop(slave_id)
try:
transfer.verify()
except IOError:
transfer.rollback()
self.logger.warning('verification failed: %s',
transfer.file_state.filename)
raise
else:
transfer.commit(package)
self.logger.info('verified: %s', transfer.file_state.filename)
stats = os.statvfs(str(self.output_path))
self.stats_queue.send_msg(
'STATFS', (stats.f_frsize, stats.f_bavail, stats.f_blocks))
def do_remove(self, package, filename):
"""
Message sent by :class:`FsClient` to request that *filename* in
*package* should be removed.
:param str package:
The name of the package from which the specified file is to be
removed.
:param str filename:
The name of the file to remove from *package*.
"""
path = self.output_path / 'simple' / package / filename
try:
path.unlink()
except FileNotFoundError:
self.logger.warning('remove failed (not found): %s', path)
else:
self.logger.info('removed: %s', path)
stats = os.statvfs(str(self.output_path))
self.stats_queue.send_msg(
'STATFS', (stats.f_frsize, stats.f_bavail, stats.f_blocks))
def handle_file(self, queue):
"""
Handle incoming file-transfer messages from build slaves.
The file transfer protocol is in some ways very simple (see the chart
in the :doc:`slaves` chapter for an overview of the message sequence)
and in some ways rather complex (read the ZeroMQ guide chapter on file
transfers for more detail on why multiple messages must be allowed in
flight simultaneously).
The "normal" state for a file transfer is to be requesting and
receiving chunks. Anything else, including redundant re-sends, and
transfer completion is handled as an exceptional case.
"""
address, msg, *args = queue.recv_multipart()
try:
transfer = None
try:
transfer = self.active[address]
except KeyError:
transfer = self.new_transfer(msg, *args)
self.active[address] = transfer
else:
self.current_transfer(transfer, msg, *args)
except TransferDone as exc:
self.logger.info(str(exc))
del self.active[address]
self.complete[transfer.slave_id] = transfer
queue.send_multipart([address, b'DONE'])
return
except TransferIgnoreChunk as exc:
self.logger.debug(str(exc))
return
except TransferError as exc:
self.logger.error(str(exc))
# XXX Delete the transfer object?
if transfer is None:
return
# XXX Remove transfer from slave?
fetch_range = transfer.fetch()
while fetch_range:
queue.send_multipart([
address, b'FETCH',
str(fetch_range.start).encode('ascii'),
str(len(fetch_range)).encode('ascii')
])
fetch_range = transfer.fetch()
def new_transfer(self, msg, *args):
r"""
Called for messages initiating a new file transfer.
The first message must be HELLO along with the id of the slave starting
the transfer. The metadata for the transfer will be looked up in the
``pending`` list (which is written to by :meth:`do_expect`).
:param str msg:
The message sent to start the transfer (must be "HELLO")
:param \*args:
All additional arguments (expected to be an integer slave id).
"""
if msg == b'CHUNK':
raise TransferIgnoreChunk('ignoring redundant CHUNK from prior '
'transfer')
elif msg != b'HELLO':
raise TransferError('invalid start transfer from slave: %s' % msg)
try:
slave_id = int(args[0])
transfer = self.pending.pop(slave_id)
except ValueError:
raise TransferError('invalid slave id: %s' % args[0])
except KeyError:
raise TransferError('unexpected transfer from slave: %d' % slave_id)
return transfer
def current_transfer(self, transfer, msg, *args):
r"""
Called for messages associated with an existing file transfer.
Usually this is "CHUNK" indicating another chunk of data. Rarely, it
can be "HELLO" if the master has fallen silent and dropped tons of
packets.
:param TransferState transfer:
The object representing the state of the transfer.
:param str msg:
The message sent during the transfer.
:param \*args:
All additional arguments; for "CHUNK" the first must be the file
offset and the second the data to write to that offset.
"""
# pylint: disable=no-self-use
if msg == b'CHUNK':
transfer.chunk(int(args[0].decode('ascii')), args[1])
if transfer.done:
raise TransferDone('transfer complete: %s' %
transfer.file_state.filename)
else:
# This only happens if there's either a corrupted package, or we've
# dropped a *lot* of packets, and the slave's timed out waiting for
# another FETCH. In either case reset the amount of "credit" on the
# transfer so it can start fetching again
transfer.reset_credit()
# XXX Should check slave ID reported in HELLO matches the slave
# retrieved from the cache
if msg != b'HELLO':
raise TransferError(
'invalid chunk header from slave: %s' % msg)
class FsClient:
"""
RPC client class for talking to :class:`FileJuggler`.
"""
def __init__(self, config, logger=None):
self.ctx = transport.Context()
self.fs_queue = self.ctx.socket(
transport.REQ, protocol=reversed(protocols.file_juggler_fs),
logger=logger)
self.fs_queue.hwm = 10
self.fs_queue.connect(config.fs_queue)
def close(self):
self.fs_queue.close()
def _execute(self, msg, data=protocols.NoData):
# If sending blocks this either means we're shutting down, or
# something's gone horribly wrong (either way, raising EAGAIN is fine)
self.fs_queue.send_msg(msg, data, flags=transport.NOBLOCK)
status, result = self.fs_queue.recv_msg()
if status == 'OK':
return result
else:
raise IOError(result)
def expect(self, slave_id, file_state):
"""
See :meth:`FileJuggler.do_expect`.
"""
self._execute('EXPECT', [slave_id, file_state.as_message()])
def verify(self, slave_id, package):
"""
See :meth:`FileJuggler.do_verify`.
"""
try:
self._execute('VERIFY', [slave_id, package])
except IOError:
return False
else:
return True
def remove(self, package, filename):
"""
See :meth:`FileJuggler.do_remove`.
"""
self._execute('REMOVE', [package, filename])
| 38.110215
| 80
| 0.623686
|
24c1a9a53bb708ad139c3c139e77c8abb3595bfb
| 28,349
|
py
|
Python
|
src/WriteZeroDirectoryFiles/WriteBoundaryConditions.py
|
darrinl2t/OpenFOAMCaseGenerator
|
72c3072814e9447b12c40c3423593c8d9a8c9cb4
|
[
"MIT"
] | null | null | null |
src/WriteZeroDirectoryFiles/WriteBoundaryConditions.py
|
darrinl2t/OpenFOAMCaseGenerator
|
72c3072814e9447b12c40c3423593c8d9a8c9cb4
|
[
"MIT"
] | null | null | null |
src/WriteZeroDirectoryFiles/WriteBoundaryConditions.py
|
darrinl2t/OpenFOAMCaseGenerator
|
72c3072814e9447b12c40c3423593c8d9a8c9cb4
|
[
"MIT"
] | null | null | null |
from input import GlobalVariables as Parameters
from math import pow, sqrt
import copy
class WriteBoundaryConditions:
def __init__(self, properties, file_manager):
# assign private variables
self.properties = properties
self.file_manager = file_manager
# list of all variables managed by this class. Each variable is mapped to a list of properties that contains
# the following information
#
        # first index: type of boundary field (scalar, vector or tensor)
        # second index: dimension of the boundary field
self.variables = {
'U': ['volVectorField', '[0 1 -1 0 0 0 0]'],
'p': ['volScalarField', '[0 2 -2 0 0 0 0]'],
'T': ['volScalarField', '[0 0 0 1 0 0 0]'],
'k': ['volScalarField', '[0 2 -2 0 0 0 0]'],
'omega': ['volScalarField', '[0 0 -1 0 0 0 0]'],
'epsilon': ['volScalarField', '[0 2 -3 0 0 0 0]'],
'nuTilda': ['volScalarField', '[0 2 -1 0 0 0 0]'],
'nut': ['volScalarField', '[0 2 -1 0 0 0 0]'],
'alphat': ['volScalarField', '[1 -1 -1 0 0 0 0]'],
'kt': ['volScalarField', '[0 2 -2 0 0 0 0]'],
'kl': ['volScalarField', '[0 2 -2 0 0 0 0]'],
'ReThetat': ['volScalarField', '[0 0 0 0 0 0 0]'],
'gammaInt': ['volScalarField', '[0 0 0 0 0 0 0]'],
'R': ['volSymmTensorField', '[0 2 -2 0 0 0 0]'],
}
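        # Reading one entry as an example (illustrative note, not in the original
        # source): 'U': ['volVectorField', '[0 1 -1 0 0 0 0]'] declares velocity
        # as a vector field whose OpenFOAM dimension set is m^1 * s^-1, i.e. m/s
        # (the seven slots are kg, m, s, K, mol, A, cd).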
if self.properties['flow_properties']['flow_type'] == Parameters.compressible:
self.variables['p'] = ['volScalarField', '[1 -1 -2 0 0 0 0]']
        # list of all wall function types used for RANS turbulence modelling. Not all wall modelling approaches have
        # wall functions applied; for those, appropriate Dirichlet or Neumann boundary conditions are set instead and
        # the corresponding entries are left blank in the list below.
self.RANS_wall_functions = {
'k': ['kLowReWallFunction', 'kqRWallFunction'],
'kl': ['', 'kqRWallFunction'],
'kt': ['', 'kqRWallFunction'],
'omega': ['omegaWallFunction', 'omegaWallFunction'],
'epsilon': ['', 'epsilonWallFunction'],
'nut': ['nutLowReWallFunction', 'nutkWallFunction'],
'alphat': ['compressible::alphatJayatillekeWallFunction', 'compressible::alphatJayatillekeWallFunction'],
'R': ['kLowReWallFunction', 'kqRWallFunction'],
}
# calculate freestream conditions
# see https://www.cfd-online.com/Wiki/Turbulence_free-stream_boundary_conditions as a reference
self.velocity_magnitude = self.properties['flow_properties']['dimensional_properties']['velocity_magnitude']
self.temperature = self.properties['flow_properties']['dimensional_properties']['T']
self.turbulence_intensity = self.properties['turbulence_properties']['freestream_turbulent_intensity']
self.freestream_k = self.__calculate_freestream_k()
self.freestream_omega = self.__calculate_freestream_omega()
self.freestream_epsilon = self.__calculate_freestream_epsilon()
self.freestream_nuTilda = self.__calculate_freestream_nuTilda()
self.freestream_ReThetat = self.__calculate_ReThetaT()
def __calculate_turbulent_length_scale_for_internal_flows(self):
return 0.07 * self.properties['dimensionless_coefficients']['reference_length']
def __calculate_turbulent_length_scale_for_external_flows(self):
Re = self.properties['flow_properties']['non_dimensional_properties']['reynolds_number']
L = self.properties['dimensionless_coefficients']['reference_length']
delta = 0.37 * L / pow(Re, 0.2)
return 0.4 * delta
def __calculate_turbulent_to_laminar_viscosity_ratio(self):
TI = self.turbulence_intensity
if TI < 0.01:
return 1
elif 0.01 <= TI < 0.05:
return 1 + 9 * (TI - 0.01) / 0.04
elif 0.05 <= TI < 0.1:
return 10 + 90 * (TI - 0.05) / 0.05
elif TI >= 0.1:
return 100
def __calculate_freestream_k(self):
TI = self.turbulence_intensity
UMag = self.velocity_magnitude
return 1.5 * pow(UMag * TI, 2)
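        # Worked example (illustrative, not from the original source): for a
        # freestream velocity of 10 m/s and a turbulence intensity of 5 %
        # (TI = 0.05), k = 1.5 * (10 * 0.05)^2 = 0.375 m^2/s^2.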
def __calculate_freestream_omega(self):
turbulence_at_inlet = self.properties['turbulence_properties']['turbulent_quantities_at_inlet']
turbulent_length_scale = self.__calculate_turbulent_length_scale_for_internal_flows()
turbulent_to_laminar_viscosity_ratio = self.properties['turbulence_properties']['turbulent_to_laminar_ratio']
turbulent_to_laminar_viscosity_ratio_calculated = self.__calculate_turbulent_to_laminar_viscosity_ratio()
nu = self.properties['flow_properties']['dimensional_properties']['nu']
k = self.freestream_k
if turbulence_at_inlet == Parameters.INTERNAL:
return pow(Parameters.C_MU, -0.25) * pow(k, 0.5) / turbulent_length_scale
elif turbulence_at_inlet == Parameters.EXTERNAL:
return pow(Parameters.C_MU, -0.25) * pow(k, 0.5) / turbulent_length_scale
elif turbulence_at_inlet == Parameters.RATIO:
return (k / nu) / turbulent_to_laminar_viscosity_ratio
elif turbulence_at_inlet == Parameters.RATIO_AUTO:
return (k / nu) / turbulent_to_laminar_viscosity_ratio_calculated
def __calculate_freestream_epsilon(self):
turbulence_at_inlet = self.properties['turbulence_properties']['turbulent_quantities_at_inlet']
turbulent_length_scale = self.__calculate_turbulent_length_scale_for_internal_flows()
turbulent_to_laminar_viscosity_ratio = self.properties['turbulence_properties']['turbulent_to_laminar_ratio']
turbulent_to_laminar_viscosity_ratio_calculated = self.__calculate_turbulent_to_laminar_viscosity_ratio()
nu = self.properties['flow_properties']['dimensional_properties']['nu']
k = self.freestream_k
if turbulence_at_inlet == Parameters.INTERNAL:
return pow(Parameters.C_MU, 0.75) * pow(k, 1.5) / turbulent_length_scale
elif turbulence_at_inlet == Parameters.EXTERNAL:
return pow(Parameters.C_MU, 0.75) * pow(k, 1.5) / turbulent_length_scale
elif turbulence_at_inlet == Parameters.RATIO:
return (Parameters.C_MU * pow(k, 2) / nu) / turbulent_to_laminar_viscosity_ratio
elif turbulence_at_inlet == Parameters.RATIO_AUTO:
return (Parameters.C_MU * pow(k, 2) / nu) / turbulent_to_laminar_viscosity_ratio_calculated
def __calculate_freestream_nuTilda(self):
turbulence_at_inlet = self.properties['turbulence_properties']['turbulent_quantities_at_inlet']
nu = self.properties['flow_properties']['dimensional_properties']['nu']
turbulent_length_scale = self.__calculate_turbulent_length_scale_for_internal_flows()
k = self.freestream_k
TI = self.turbulence_intensity
UMag = self.velocity_magnitude
if turbulence_at_inlet == Parameters.INTERNAL or turbulence_at_inlet == Parameters.EXTERNAL:
return (sqrt(1.5) * UMag * TI * turbulent_length_scale)
else:
return 5 * nu
def __calculate_ReThetaT(self):
TI = self.turbulence_intensity
if TI <= 0.013:
return 1173.51 - 589.428 * TI * 100 + 0.2196 / pow(TI * 100, 2)
elif TI > 0.013:
return 331.5 / pow((TI * 100 - 0.5658), 0.671)
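        # Worked example (illustrative): for TI = 0.01 the first branch applies,
        # giving ReThetat = 1173.51 - 589.428 * 1 + 0.2196 / 1 ≈ 584.3.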
def write_all_boundary_conditions(self):
# open files
file_id = self.__open_boundary_condition_files()
# write headers
self.__write_headers_to_file(file_id)
# write dimensions
self.__write_dimensions_to_file(file_id)
# construct initial conditions
bc_freestream_conditions, bc_zero_initial_conditions = self.__construct_initial_conditions()
# write initial field
self.__write_initial_conditions_to_file(file_id, bc_freestream_conditions, bc_zero_initial_conditions)
# write boundary conditions here
self.__write_boundary_condition_entries_to_file(file_id, bc_freestream_conditions)
# close all boundary condition files
self.__close_boundary_condition_files(file_id)
def __open_boundary_condition_files(self):
file_id = {}
for var in self.variables:
file_id[var] = self.file_manager.create_file('0', var)
return file_id
def __close_boundary_condition_files(self, file_id):
for var in self.variables:
self.file_manager.close_file(file_id[var])
def __write_headers_to_file(self, file_id):
for var, bc_props in self.variables.items():
self.file_manager.write_header(file_id[var], bc_props[Parameters.BC_TYPE], '0', var)
def __write_dimensions_to_file(self, file_id):
for var, bc_props in self.variables.items():
self.file_manager.write(file_id[var], '\ndimensions ' + bc_props[Parameters.BC_DIMENSIONS] + ';\n\n')
def __write_initial_conditions_to_file(self, file_id, bc_freestream_conditions, bc_zero_initial_conditions):
initial_conditions_type = self.properties['flow_properties']['initial_conditions']
custom_initial_conditions_flag = self.properties['flow_properties']['custom_initial_conditions']
custom_initial_conditions_setup = self.properties['flow_properties']['custom_initial_conditions_setup']
for var in self.variables:
if custom_initial_conditions_flag:
if var in custom_initial_conditions_setup:
path_to_script = custom_initial_conditions_setup[var]
self.__write_custom_initial_conditions(file_id[var], path_to_script)
if (custom_initial_conditions_flag is False) or (var not in custom_initial_conditions_setup):
if initial_conditions_type == Parameters.BOUNDARY_CONDITIONED_BASED:
self.file_manager.write(file_id[var], 'internalField ' + bc_freestream_conditions[var] + ';\n\n')
elif initial_conditions_type == Parameters.ZERO_VELOCITY:
self.file_manager.write(file_id[var],
'internalField ' + bc_zero_initial_conditions[var] + ';\n\n')
def __construct_initial_conditions(self):
U = self.properties['flow_properties']['dimensional_properties']['velocity_vector']
uiui = (2.0 / 3.0) * self.freestream_k
if self.properties['flow_properties']['flow_type'] == Parameters.incompressible:
p_initial = '0'
elif self.properties['flow_properties']['flow_type'] == Parameters.compressible:
p_initial = str(self.properties['flow_properties']['dimensional_properties']['p'])
bc_freestream_conditions = {
'U': 'uniform (' + str(U[0]) + ' ' + str(U[1]) + ' ' + str(U[2]) + ')',
'p': 'uniform ' + p_initial,
'T': 'uniform ' + str(self.temperature),
'k': 'uniform ' + str(self.freestream_k),
'kt': 'uniform ' + str(self.freestream_k),
'kl': 'uniform 0',
'nut': 'uniform 0',
'alphat': 'uniform 0',
'epsilon': 'uniform ' + str(self.freestream_epsilon),
'omega': 'uniform ' + str(self.freestream_omega),
'nuTilda': 'uniform ' + str(self.freestream_nuTilda),
'ReThetat': 'uniform ' + str(self.freestream_ReThetat),
'gammaInt': 'uniform 1',
'R': 'uniform (' + str(uiui) + ' 0 0 ' + str(uiui) + ' 0 ' + str(uiui) + ')'
}
bc_zero_initial_conditions = copy.deepcopy(bc_freestream_conditions)
bc_zero_initial_conditions['U'] = 'uniform (0 0 0)'
bc_zero_initial_conditions['nuTilda'] = 'uniform 0'
bc_zero_initial_conditions['R'] = 'uniform (0 0 0 0 0 0)'
return bc_freestream_conditions, bc_zero_initial_conditions
def __write_boundary_condition_entries_to_file(self, file_id, bc_freestream_conditions):
for var in self.variables:
self.file_manager.write(file_id[var], 'boundaryField\n{\n')
for name, bc_type in self.properties['boundary_properties']['boundary_conditions'].items():
for var in self.variables:
# write boundary condition's name
self.file_manager.write(file_id[var], ' ' + name + '\n {\n')
# write inlet boundary conditions
if bc_type == Parameters.INLET or bc_type == Parameters.DFSEM_INLET:
self.__inlet_boundary_condition(file_id, var, name, bc_type, bc_freestream_conditions)
# write standard outlet boundary conditions
if bc_type == Parameters.OUTLET:
self.__outlet_boundary_condition(bc_freestream_conditions, file_id, var)
# write backflow outlet boundary conditions
if bc_type == Parameters.BACKFLOW_OUTLET:
self.__backflow_boundary_condition(bc_freestream_conditions, file_id, var)
# write advective outlet boundary conditions
if bc_type == Parameters.ADVECTIVE_OUTLET:
self.__advective_boundary_condition(file_id, var)
# wall boundary condition
if bc_type == Parameters.WALL:
self._wall_boundary_condition(file_id, var, bc_freestream_conditions)
# freestream boundary condition
if bc_type == Parameters.FREESTREAM:
self.__freestream_boundary_condition(file_id, var, bc_freestream_conditions)
# symmetry boundary condition
if bc_type == Parameters.SYMMETRY:
self.__symmetry_boundary_condition(file_id, var)
# cyclic boundary conditions
if bc_type == Parameters.CYCLIC:
self.__cyclic_boundary_condition(file_id, var)
# empty boundary conditions
if bc_type == Parameters.EMPTY:
self.__empty_boundary_condition(file_id, var)
# close boundary condition writing
self.file_manager.write(file_id[var], ' }\n')
for var in self.variables:
self.file_manager.write(file_id[var], '}')
def __inlet_boundary_condition(self, file_id, var, name, bc_type, bc_freestream_conditions):
custom_inlet = self.properties['boundary_properties']['custom_inlet_boundary_conditions']
custom_inlet_setup = self.properties['boundary_properties']['custom_inlet_boundary_conditions_setup']
if custom_inlet:
if var in custom_inlet_setup:
path_to_script = custom_inlet_setup[var]
self.__write_custom_inlet_profile(file_id[var], 8, path_to_script)
if (custom_inlet is False) or (var not in custom_inlet_setup):
if var == 'U':
if bc_type == Parameters.INLET:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
elif bc_type == Parameters.DFSEM_INLET:
self.__write_dfsem_inlet(file_id[var], name, bc_freestream_conditions[var])
elif var == 'p' and self.properties['flow_properties']['flow_type'] == Parameters.incompressible:
self.__neumann(file_id[var])
elif var == 'nut' or var == 'alphat':
self.__zero_calculated(file_id[var])
else:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
def __outlet_boundary_condition(self, bc_freestream_conditions, file_id, var):
if var == 'p' and self.properties['flow_properties']['flow_type'] == Parameters.incompressible:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
elif var == 'nut' or var == 'alphat':
self.__zero_calculated(file_id[var])
else:
self.__neumann(file_id[var])
def __backflow_boundary_condition(self, bc_freestream_conditions, file_id, var):
if var == 'p' and self.properties['flow_properties']['flow_type'] == Parameters.incompressible:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
elif var == 'nut' or var == 'alphat':
self.__zero_calculated(file_id[var])
else:
self.__inlet_outlet(file_id[var], bc_freestream_conditions)
def __advective_boundary_condition(self, file_id, var):
self.__advective(file_id[var])
def _wall_boundary_condition(self, file_id, var, bc_freestream_conditions):
wall_modelling = self.properties['turbulence_properties']['wall_modelling']
rans_model = self.properties['turbulence_properties']['RANS_model']
write_wall_function_high_re = lambda v: self.__wall_function(file_id[v], bc_freestream_conditions[v],
self.RANS_wall_functions[v][Parameters.HIGH_RE])
write_wall_function_low_re = lambda v: self.__wall_function(file_id[v], bc_freestream_conditions[v],
self.RANS_wall_functions[v][Parameters.LOW_RE])
if var == 'U':
self.__no_slip_wall(file_id[var])
elif var == 'k':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
write_wall_function_low_re(var)
elif var == 'omega':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
if rans_model == Parameters.kkLOmega:
self.__neumann(file_id[var])
else:
write_wall_function_low_re(var)
elif var == 'epsilon':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
self.__neumann(file_id[var])
elif var == 'nuTilda':
if wall_modelling == Parameters.HIGH_RE:
self.__neumann(file_id[var])
elif wall_modelling == Parameters.LOW_RE:
nuTilda = self.properties['flow_properties']['dimensional_properties']['nu'] / 2
self.__dirichlet(file_id[var], 'uniform ' + str(nuTilda))
elif var == 'nut':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
write_wall_function_low_re(var)
elif var == 'alphat':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
write_wall_function_low_re(var)
elif var == 'kt':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
elif var == 'kl':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
self.__dirichlet(file_id[var], bc_freestream_conditions[var])
elif var == 'R':
if wall_modelling == Parameters.HIGH_RE:
write_wall_function_high_re(var)
elif wall_modelling == Parameters.LOW_RE:
self.__dirichlet(file_id[var], 'uniform (0 0 0 0 0 0)')
else:
self.__neumann(file_id[var])
def __freestream_boundary_condition(self, file_id, var, bc_freestream_conditions):
if var == 'U':
self.__freestream_velocity(file_id[var], bc_freestream_conditions[var])
elif var == 'p':
self.__freestream_pressure(file_id[var], bc_freestream_conditions[var])
elif var == 'nut' or var == 'alphat':
self.__zero_calculated(file_id[var])
else:
self.__freestream(file_id[var], bc_freestream_conditions[var])
def __symmetry_boundary_condition(self, file_id, var):
if var == 'nut' or var == 'alphat':
self.__zero_calculated(file_id[var])
else:
self.__neumann(file_id[var])
def __cyclic_boundary_condition(self, file_id, var):
self.__periodic(file_id[var])
def __empty_boundary_condition(self, file_id, var):
self.__empty(file_id[var])
def __dirichlet(self, file_id, initial_field):
file_id.write(' type fixedValue;\n')
file_id.write(' value ' + initial_field + ';\n')
def __neumann(self, file_id):
file_id.write(' type zeroGradient;\n')
def __no_slip_wall(self, file_id):
file_id.write(' type noSlip;\n')
def __advective(self, file_id):
file_id.write(' type advective;\n')
file_id.write(' phi phi;\n')
def __inlet_outlet(self, file_id, internal_field):
file_id.write(' type inletOutlet;\n')
file_id.write(' inletValue ' + internal_field + ';\n')
def __periodic(self, file_id):
file_id.write(' type cyclic;\n')
def __empty(self, file_id):
file_id.write(' type empty;\n')
def __wall_function(self, file_id, initial_field, wall_function_type):
file_id.write(' type ' + wall_function_type + ';\n')
file_id.write(' value ' + initial_field + ';\n')
def __zero_calculated(self, file_id):
file_id.write(' type calculated;\n')
file_id.write(' value uniform 0;\n')
def __freestream_velocity(self, file_id, initial_field):
file_id.write(' type freestreamVelocity;\n')
file_id.write(' freestreamValue ' + initial_field + ';\n')
def __freestream_pressure(self, file_id, initial_field):
file_id.write(' type freestreamPressure;\n')
file_id.write(' freestreamValue ' + initial_field + ';\n')
def __freestream(self, file_id, initial_field):
file_id.write(' type freestream;\n')
file_id.write(' freestreamValue ' + initial_field + ';\n')
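    # Writes a turbulentDFSEMInlet (divergence-free synthetic eddy method) entry for
    # scale-resolving inflow; R, U and L may each be overridden by user-supplied #codeStream snippets.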
def __write_dfsem_inlet(self, file_id, bc_name, initial_field):
custom_DFSEM_conditions = self.properties['boundary_properties']['custom_DFSEM_conditions']
custom_DFSEM_conditions_setup = self.properties['boundary_properties']['custom_DFSEM_conditions_setup']
length_scale = self.properties['dimensionless_coefficients']['reference_length']
R = self.properties['boundary_properties']['reynolds_stresses']
L = self.properties['boundary_properties']['turbulent_length_scale']
nCellsPerEddy = self.properties['boundary_properties']['number_of_cells_per_eddy']
init_reynolds_stresses = 'uniform (' + str(R[0]) + ' ' + str(R[1]) + ' ' + str(R[2]) + ' ' + str(R[3]) + ' '\
+ str(R[4]) + ' ' + str(R[5]) + ')'
init_turbulent_length_scale = 'uniform ' + str(L)
file_id.write(' type turbulentDFSEMInlet;\n')
file_id.write(' delta ' + str(length_scale) + ';\n')
if custom_DFSEM_conditions:
if 'R' in custom_DFSEM_conditions_setup:
file_id.write(' R\n {\n')
path_to_script = custom_DFSEM_conditions_setup['R']
self.__write_custom_inlet_profile(file_id, 12, path_to_script)
file_id.write(' }\n')
if 'U' in custom_DFSEM_conditions_setup:
file_id.write(' U\n {\n')
path_to_script = custom_DFSEM_conditions_setup['U']
self.__write_custom_inlet_profile(file_id, 12, path_to_script)
file_id.write(' }\n')
if 'L' in custom_DFSEM_conditions_setup:
file_id.write(' L\n {\n')
path_to_script = custom_DFSEM_conditions_setup['L']
self.__write_custom_inlet_profile(file_id, 12, path_to_script)
file_id.write(' }\n')
if (custom_DFSEM_conditions is False) or ('R' not in custom_DFSEM_conditions_setup):
file_id.write(' R ' + init_reynolds_stresses + ';\n')
if (custom_DFSEM_conditions is False) or ('U' not in custom_DFSEM_conditions_setup):
file_id.write(' U ' + initial_field + ';\n')
if (custom_DFSEM_conditions is False) or ('L' not in custom_DFSEM_conditions_setup):
if self.properties['boundary_properties']['set_turbulent_length_scale_at_inlet']:
file_id.write(' L ' + init_turbulent_length_scale + ';\n')
else:
file_id.write(' L uniform 0;\n')
file_id.write(' nCellsPerEddy ' + str(nCellsPerEddy) + ';\n')
file_id.write(' value uniform (0 0 0);\n')
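    # Embeds a user-supplied C++ snippet via OpenFOAM's #codeStream mechanism so that the
    # internal field is evaluated at run time rather than written as a fixed value.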
def __write_custom_initial_conditions(self, file_id, path_to_script):
file_id.write('internalField #codeStream\n')
file_id.write('{\n')
file_id.write(' codeInclude\n')
file_id.write(' #{\n')
file_id.write(' #include "fvCFD.H"\n')
file_id.write(' #};\n')
file_id.write('\n')
file_id.write(' codeOptions\n')
file_id.write(' #{\n')
file_id.write(' -I$(LIB_SRC)/finiteVolume/lnInclude \\\n')
file_id.write(' -I$(LIB_SRC)/meshTools/lnInclude\n')
file_id.write(' #};\n')
file_id.write('\n')
file_id.write(' codeLibs\n')
file_id.write(' #{\n')
file_id.write(' -lmeshTools \\\n')
file_id.write(' -lfiniteVolume\n')
file_id.write(' #};\n')
file_id.write('\n')
file_id.write(' code\n')
file_id.write(' #{\n')
custom_initial_condition_script = open(path_to_script, 'r')
all_lines = custom_initial_condition_script.readlines()
spaces = ' '
for line in all_lines:
file_id.write(spaces + line)
file_id.write('\n')
file_id.write(' #};\n')
file_id.write('};\n')
file_id.write('\n')
def __write_custom_inlet_profile(self, file_id, leading_spaces, path_to_script):
spaces = ' ' * leading_spaces
file_id.write(spaces + '#codeStream\n')
file_id.write(spaces + '{\n')
file_id.write(spaces + ' codeInclude\n')
file_id.write(spaces + ' #{\n')
file_id.write(spaces + ' #include "fvCFD.H"\n')
file_id.write(spaces + ' #};\n')
file_id.write('\n')
file_id.write(spaces + ' codeOptions\n')
file_id.write(spaces + ' #{\n')
file_id.write(spaces + ' -I$(LIB_SRC)/finiteVolume/lnInclude \\\n')
file_id.write(spaces + ' -I$(LIB_SRC)/meshTools/lnInclude\n')
file_id.write(spaces + ' #};\n')
file_id.write('\n')
file_id.write(spaces + ' codeLibs\n')
file_id.write(spaces + ' #{\n')
file_id.write(spaces + ' -lmeshTools \\\n')
file_id.write(spaces + ' -lfiniteVolume\n')
file_id.write(spaces + ' #};\n')
file_id.write('\n')
file_id.write(spaces + ' code\n')
file_id.write(spaces + ' #{\n')
custom_inlet_script = open(path_to_script, 'r')
all_lines = custom_inlet_script.readlines()
code_spaces = ' ' * 8
for line in all_lines:
file_id.write(spaces + code_spaces + line)
file_id.write('\n')
file_id.write(spaces + ' #};\n')
file_id.write(spaces + '};\n')
| 49.735088
| 119
| 0.612261
|
32001ccd2612680a3352e2e3fe739cd7be3814db
| 3,759
|
py
|
Python
|
lispy_v2.py
|
R3DDY97/Lisp_py
|
a530f2a744c8f74fa324742791a06d332826dd02
|
[
"MIT"
] | null | null | null |
lispy_v2.py
|
R3DDY97/Lisp_py
|
a530f2a744c8f74fa324742791a06d332826dd02
|
[
"MIT"
] | 1
|
2018-03-12T03:53:29.000Z
|
2018-03-12T03:53:29.000Z
|
lispy_v2.py
|
R3DDY97/Lisp_py
|
a530f2a744c8f74fa324742791a06d332826dd02
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from functools import reduce
from lisp_globals import lisp_env
from lisp_parsing import lisp_parser
# various sub environments
env, OP_ENV, MATH_ENV = lisp_env()
LAMBDAs = []
COMBINED_ENV = {}
def eval_comparision(operation, arguments, env):
arg_list = [lisp_evaluator(argument, env) for argument in arguments]
if operation in ["<", ">", "<=", ">=",] and len(arg_list) > 1:
procedure = OP_ENV.get(operation)
bool_dict = {True: "#t", False: "#f"}
else:
return None
pre_index, post_index = 0, 1
    while post_index < len(arg_list):
        status = procedure(arg_list[pre_index], arg_list[post_index])
        if not status:
            break
        pre_index, post_index = post_index, post_index + 1
    return bool_dict[status]
def eval_math(operation, arguments, env):
    if operation not in OP_ENV and operation not in MATH_ENV:
return None
arg_list = [lisp_evaluator(argument, env) for argument in arguments]
procedure = OP_ENV.get(operation, False) or MATH_ENV.get(operation, False)
if operation in OP_ENV:
return reduce(procedure, arg_list)
if len(arg_list) == 1:
return procedure(arg_list[0])
arg1, arg2 = arg_list
return procedure(arg1, arg2)
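# Evaluate a parsed expression: symbols are looked up in the environment, numbers are
# self-evaluating, the special forms lambda/define/if are handled explicitly, and anything
# else is treated as a procedure application (falling back to comparison/math evaluation).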
def lisp_evaluator(parsed, env):
if isinstance(parsed, str):
return env.get(parsed, None)
if isinstance(parsed, (int, float)):
return parsed
operation, *arguments = parsed
if operation == 'lambda':
parameters, body = arguments
return Procedure(parameters, body, env)
if operation == 'define':
variable, exp = arguments
print(arguments)
if variable in env:
print("\ncant define variable.. its standard.. Dont mess with standards\n ")
return None
env[variable] = lisp_evaluator(exp, env)
LAMBDAs.append(variable)
return "OK"
if operation == 'if':
condition, value, alt = arguments
if lisp_evaluator(condition, env):
return lisp_evaluator(value, env)
return lisp_evaluator(alt, env)
try:
if operation in env:
proc = lisp_evaluator(operation, env)
arg_list = [lisp_evaluator(argument, env) for argument in arguments]
return proc(*arg_list)
except TypeError:
return eval_comparision(operation, arguments, env) or eval_math(operation, arguments, env)
return None
def Procedure(parameters, body, env):
def callFunction(arg_list):
return lisp_evaluator(body, localEnv(parameters, arg_list, env))
return callFunction
def localEnv(parameters, arg_list, env):
lambda_local = dict(zip(parameters, [arg_list]))
COMBINED_ENV.update(lambda_local)
COMBINED_ENV.update(env)
return COMBINED_ENV
def lisp_interpreter(lisp_str):
lisp_str = lisp_str.strip()
if not lisp_str:
return None
lisp_parsed = lisp_parser(lisp_str)
if lisp_parsed and lisp_parsed[0]:
parsed, _ = lisp_parsed
return lisp_evaluator(parsed, env)
return None
def main():
try:
os.system("clear||cls")
while True:
lisp_input = input("Lisp repl> ")
output = lisp_interpreter(lisp_input)
bools = {False:"#t", True:"#t"}
if output in bools:
print(bools[output])
if output:
print(output)
# else:
# print("\nsyntax error\n")
except KeyboardInterrupt:
print("\n\tExiting Lisp interpreter..\n")
# except:
# print("\nSyntax error\n")
# os.sys.exit()
if __name__ == '__main__':
main()
| 29.598425
| 98
| 0.631019
|
6be2cfac8a7e8b36f79b82b524b59f63d41a33e8
| 752
|
py
|
Python
|
build/fbcode_builder/getdeps/errors.py
|
majra20/LogDevice
|
dea0df7991120d567354d7a29d832b0e10be7477
|
[
"BSD-3-Clause"
] | 2
|
2019-11-17T00:59:09.000Z
|
2019-11-17T00:59:16.000Z
|
build/fbcode_builder/getdeps/errors.py
|
majra20/LogDevice
|
dea0df7991120d567354d7a29d832b0e10be7477
|
[
"BSD-3-Clause"
] | null | null | null |
build/fbcode_builder/getdeps/errors.py
|
majra20/LogDevice
|
dea0df7991120d567354d7a29d832b0e10be7477
|
[
"BSD-3-Clause"
] | 2
|
2021-04-07T08:09:28.000Z
|
2021-06-06T12:26:22.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
class TransientFailure(Exception):
""" Raising this error causes getdeps to return with an error code
that Sandcastle will consider to be a retryable transient
infrastructure error """
pass
class ManifestNotFound(Exception):
def __init__(self, manifest_name):
super(Exception, self).__init__("Unable to find manifest '%s'" % manifest_name)
| 34.181818
| 87
| 0.759309
|
be9a9bac1f2a658fb9d35029ffc7feba5558752a
| 1,733
|
py
|
Python
|
electrum_gui/common/transaction/models.py
|
liuzjalex/electrum
|
98f7c8bfdef071cd859d54f1f72c39688cde41cf
|
[
"MIT"
] | null | null | null |
electrum_gui/common/transaction/models.py
|
liuzjalex/electrum
|
98f7c8bfdef071cd859d54f1f72c39688cde41cf
|
[
"MIT"
] | null | null | null |
electrum_gui/common/transaction/models.py
|
liuzjalex/electrum
|
98f7c8bfdef071cd859d54f1f72c39688cde41cf
|
[
"MIT"
] | null | null | null |
import peewee
from electrum_gui.common.basic.orm.models import AutoDateTimeField, BaseModel
from electrum_gui.common.transaction.data import TxActionStatus
class TxAction(BaseModel):
id = peewee.IntegerField(primary_key=True)
txid = peewee.CharField()
status = peewee.IntegerField(choices=TxActionStatus.to_choices())
chain_code = peewee.CharField()
coin_code = peewee.CharField()
value = peewee.DecimalField(max_digits=32, decimal_places=0)
decimals = peewee.IntegerField()
symbol = peewee.CharField()
from_address = peewee.CharField(index=True)
to_address = peewee.CharField(index=True)
fee_limit = peewee.DecimalField(max_digits=32, decimal_places=0)
fee_used = peewee.DecimalField(max_digits=32, decimal_places=0, default=0)
fee_price_per_unit = peewee.DecimalField(max_digits=32, decimal_places=0, default=1)
raw_tx = peewee.TextField()
block_number = peewee.IntegerField(null=True)
block_hash = peewee.CharField(null=True)
block_time = peewee.IntegerField(null=True)
index = peewee.IntegerField(default=0, help_text="action index of the transaction")
nonce = peewee.IntegerField(default=-1, help_text="a special field of the nonce model, likes eth")
created_time = AutoDateTimeField()
modified_time = AutoDateTimeField()
def __str__(self):
value_in_decimals = self.value / pow(10, self.decimals)
return (
f"id: {self.id}, coin_code: {self.coin_code}, "
f"action: <from {self.from_address} to {self.to_address} for {value_in_decimals} {self.symbol}>, "
f"status: {self.status}, txid: {self.txid}"
)
class Meta:
indexes = ((("txid", "coin_code", "index"), True),)
| 43.325
| 110
| 0.71206
|
e358f09359c480e620fdc3e69b73041e7c6eefc0
| 274
|
py
|
Python
|
tests/artificial/transf_Quantization/trend_MovingAverage/cycle_7/ar_/test_artificial_32_Quantization_MovingAverage_7__0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/artificial/transf_Quantization/trend_MovingAverage/cycle_7/ar_/test_artificial_32_Quantization_MovingAverage_7__0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/artificial/transf_Quantization/trend_MovingAverage/cycle_7/ar_/test_artificial_32_Quantization_MovingAverage_7__0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
| 39.142857
| 169
| 0.737226
|
41059823c13cc4581dd16aabe87bbe9fcebbf08e
| 1,791
|
py
|
Python
|
raksha/openstack/common/excutils.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 8
|
2015-03-19T20:22:44.000Z
|
2021-04-11T06:00:52.000Z
|
raksha/openstack/common/excutils.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 1
|
2015-07-21T23:05:23.000Z
|
2016-03-16T08:11:54.000Z
|
raksha/openstack/common/excutils.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 5
|
2015-10-09T17:42:24.000Z
|
2021-03-11T18:33:00.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import contextlib
import logging
import sys
import traceback
from raksha.openstack.common.gettextutils import _
@contextlib.contextmanager
def save_and_reraise_exception():
"""Save current exception, run some code and then re-raise.
In some cases the exception context can be cleared, resulting in None
being attempted to be re-raised after an exception handler is run. This
can happen when eventlet switches greenthreads or when running an
exception handler, code raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, run handler code, and
then re-raise the original exception. If another exception occurs, the
saved exception is logged and the new exception is re-raised.
"""
type_, value, tb = sys.exc_info()
try:
yield
except Exception:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(type_, value, tb))
raise
raise type_, value, tb
| 34.442308
| 78
| 0.724176
|
62e7e282c7fcdab63aa69a497267bc30a72492ec
| 3,198
|
py
|
Python
|
tests/util/test_struct_stream.py
|
Jsewill/Olive-blockchain
|
ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5
|
[
"Apache-2.0"
] | 10
|
2021-08-01T17:15:15.000Z
|
2021-09-16T08:04:46.000Z
|
tests/util/test_struct_stream.py
|
Jsewill/Olive-blockchain
|
ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5
|
[
"Apache-2.0"
] | 8
|
2021-08-06T08:11:13.000Z
|
2021-11-03T20:49:37.000Z
|
tests/util/test_struct_stream.py
|
Jsewill/Olive-blockchain
|
ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5
|
[
"Apache-2.0"
] | 7
|
2021-08-07T06:45:36.000Z
|
2022-03-15T08:43:24.000Z
|
import pytest
import io
from olive.util.ints import int8, uint8, int16, uint16, int32, uint32, int64, uint64, uint128, int512
class TestStructStream:
def _test_impl(self, cls, upper_boundary, lower_boundary):
with pytest.raises(ValueError):
t = cls(upper_boundary + 1)
with pytest.raises(ValueError):
t = cls(lower_boundary - 1)
t = cls(upper_boundary)
assert t == upper_boundary
t = cls(lower_boundary)
assert t == lower_boundary
t = cls(0)
assert t == 0
def test_int512(self):
# int512 is special. it uses 65 bytes to allow positive and negative
# "uint512"
self._test_impl(
int512,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, # noqa: E501
-0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, # noqa: E501
)
def test_uint128(self):
self._test_impl(uint128, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 0)
def test_uint64(self):
self._test_impl(uint64, 0xFFFFFFFFFFFFFFFF, 0)
def test_int64(self):
self._test_impl(int64, 0x7FFFFFFFFFFFFFFF, -0x8000000000000000)
def test_uint32(self):
self._test_impl(uint32, 0xFFFFFFFF, 0)
def test_int32(self):
self._test_impl(int32, 0x7FFFFFFF, -0x80000000)
def test_uint16(self):
self._test_impl(uint16, 0xFFFF, 0)
def test_int16(self):
self._test_impl(int16, 0x7FFF, -0x8000)
def test_uint8(self):
self._test_impl(uint8, 0xFF, 0)
def test_int8(self):
self._test_impl(int8, 0x7F, -0x80)
def test_roundtrip(self):
def roundtrip(v):
s = io.BytesIO()
v.stream(s)
s.seek(0)
cls = type(v)
v2 = cls.parse(s)
assert v2 == v
# int512 is special. it uses 65 bytes to allow positive and negative
# "uint512"
roundtrip(
int512(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF # noqa: E501
)
)
roundtrip(
int512(
-0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF # noqa: E501
)
)
roundtrip(uint128(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))
roundtrip(uint128(0))
roundtrip(uint64(0xFFFFFFFFFFFFFFFF))
roundtrip(uint64(0))
roundtrip(int64(0x7FFFFFFFFFFFFFFF))
roundtrip(int64(-0x8000000000000000))
roundtrip(uint32(0xFFFFFFFF))
roundtrip(uint32(0))
roundtrip(int32(0x7FFFFFFF))
roundtrip(int32(-0x80000000))
roundtrip(uint16(0xFFFF))
roundtrip(uint16(0))
roundtrip(int16(0x7FFF))
roundtrip(int16(-0x8000))
roundtrip(uint8(0xFF))
roundtrip(uint8(0))
roundtrip(int8(0x7F))
roundtrip(int8(-0x80))
| 29.33945
| 161
| 0.64853
|
9baec4f4a2d491647d9f065985c09a62d2b84b58
| 790
|
py
|
Python
|
.history/my_classes/FirstClassFunctions/reducing_functions_20210707140101.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/reducing_functions_20210707140101.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/reducing_functions_20210707140101.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ...,, aN-1
max(a, b) _> maximum of a and b
result =a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, an-1)
# max value in a0, a1, a2, ..., an-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop # result = 5
"""
l = [5, 8, 6, 10, 9]
max_value = lambda a, b: a if a > b else b
def max_sequence(sequence):
result = sequence[0]
for e in sequence[1:]:
result = max_value(result, e)
return result
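
# The same fold expressed with the standard library (illustrative sketch):
#     from functools import reduce
#     reduce(max_value, l)  # -> 10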
| 23.235294
| 96
| 0.653165
|
843dfb82dceb37d9d3153a754cddadf2b7cd500a
| 2,482
|
py
|
Python
|
code/dm.py
|
marcusbotacin/ELF.Classifier
|
d017e82d24d047145ef92cb2437c6a03d845a90e
|
[
"MIT"
] | 2
|
2019-10-19T05:10:38.000Z
|
2020-08-28T19:23:40.000Z
|
code/dm.py
|
marcusbotacin/ELF.Classifier
|
d017e82d24d047145ef92cb2437c6a03d845a90e
|
[
"MIT"
] | null | null | null |
code/dm.py
|
marcusbotacin/ELF.Classifier
|
d017e82d24d047145ef92cb2437c6a03d845a90e
|
[
"MIT"
] | null | null | null |
# FORSETI - Feature extractor and classificator for ELF binaries
# Author: Lucas Galante
# Advisor: Marcus Botacin, Andre Gregio, Paulo de Geus
# 2019, UFPR, UNICAMP
class DataMetrics:
def __init__(self):
self.fp = 0.0 # False postive
self.fn = 0.0 # False negative
self.tp = 0.0 # True positive
self.tn = 0.0 # True negative
self.accuracy = None # Accuracy
self.precision = None # Precision
self.recall = None # Recall
self.f1 = None # F1
def incrementFp(self,amount=1.0):
self.fp += amount
def incrementFn(self,amount=1.0):
self.fn += amount
def incrementTp(self,amount=1.0):
self.tp += amount
def incrementTn(self,amount=1.0):
self.tn += amount
def getFp(self):
return self.fp
def getFn(self):
return self.fn
def getTp(self):
return self.tp
def getTn(self):
return self.tn
def getAccuracy(self):
return self.accuracy
def getPrecision(self):
return self.precision
def getRecall(self):
return self.recall
def getF1(self):
return self.f1
def calculateAccuracy(self):
# =(tp + tn)/(tp + tn + fp + fn)
try:
self.accuracy = (self.getTp() + self.getTn())/(self.getTp() + self.getTn() + self.getFp() + self.getFn())
except:
# print('Error DM: Accuracy calculation.')
pass
return
def calculatePrecision(self):
# =(tp)/(tp + fp)
try:
self.precision = (self.getTp())/(self.getTp() + self.getFp())
except:
# print('Error DM: Precision calculation.')
pass
return
def calculateRecall(self):
# =(tp)/(tp + fn)
try:
self.recall = (self.getTp())/(self.getTp() + self.getFn())
except:
# print('Error DM: Recall calculation.')
pass
return
def calculateF1(self):
# =2*(precision*recall)/(precision+recall)
try:
self.f1 = 2*(self.getPrecision()*self.getRecall())/(self.getPrecision()+self.getRecall())
except:
# print('Error DM: F1 calculation.')
pass
return
def calculateAll(self):
self.calculateAccuracy()
self.calculatePrecision()
self.calculateRecall()
self.calculateF1()
return
| 26.126316
| 117
| 0.542707
|
8265bcf1dfb47be0e1e2b1dd94e522cfa97b3602
| 1,772
|
py
|
Python
|
src/urls.py
|
chunky2808/Enlighten-Me-
|
357f83d79a8d6b9821a117d1ec694400aeecef9a
|
[
"MIT"
] | null | null | null |
src/urls.py
|
chunky2808/Enlighten-Me-
|
357f83d79a8d6b9821a117d1ec694400aeecef9a
|
[
"MIT"
] | null | null | null |
src/urls.py
|
chunky2808/Enlighten-Me-
|
357f83d79a8d6b9821a117d1ec694400aeecef9a
|
[
"MIT"
] | null | null | null |
"""src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.auth import views as auth_views
from Enlight import views
from accounts_info import views as accounts_info_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^signup/$',accounts_info_views.signup,name = 'signup'),
url(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
url(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
url(r'^forum/(?P<pk>\d+)/topics/(?P<topic_pk>\d+)/reply/$', views.reply_topic, name='reply_topic'),
url(r'^forum/(?P<pk>\d+)/$', views.topic_list, name='topic'),
url(r'^forum/(?P<pk>\d+)/topics/(?P<topic_pk>\d+)/$', views.topic_posts, name='topic_posts'),
url(r'^forum/(?P<pk>\d+)/topics/(?P<topic_pk>\d+)/delete/$', views.delete, name='delete'),
url(r'^forum/(?P<pk>\d+)/new/$', views.new_topic, name='new_topic'),
url(r'^discuss/$',views.forum_list,name='home'),
url(r'^$',views.news,name='news')
]
| 45.435897
| 103
| 0.683409
|
8e50619eee6e688d659ba04a44c0081882be1974
| 3,482
|
py
|
Python
|
tests/sentry/api/serializers/test_incident_activity.py
|
evanspjz/sentry
|
032afdc4dd46cc54a99315bfa13304c44c4e546f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/serializers/test_incident_activity.py
|
evanspjz/sentry
|
032afdc4dd46cc54a99315bfa13304c44c4e546f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/serializers/test_incident_activity.py
|
evanspjz/sentry
|
032afdc4dd46cc54a99315bfa13304c44c4e546f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import timedelta
from uuid import uuid4
import six
from django.utils import timezone
from freezegun import freeze_time
from sentry.api.serializers import serialize
from sentry.incidents.models import IncidentActivityType
from sentry.incidents.logic import create_incident_activity, create_initial_event_stats_snapshot
from sentry.testutils import SnubaTestCase, TestCase
class IncidentActivitySerializerTest(TestCase, SnubaTestCase):
def test_simple(self):
activity = create_incident_activity(
incident=self.create_incident(),
activity_type=IncidentActivityType.COMMENT,
user=self.user,
comment="hello",
)
result = serialize(activity)
assert result["id"] == six.text_type(activity.id)
assert result["incidentIdentifier"] == six.text_type(activity.incident.identifier)
assert result["user"] == serialize(activity.user)
assert result["type"] == activity.type
assert result["value"] is None
assert result["previousValue"] is None
assert result["comment"] == activity.comment
assert result["dateCreated"] == activity.date_added
def test_no_user(self):
activity = create_incident_activity(
incident=self.create_incident(),
activity_type=IncidentActivityType.COMMENT,
user=None,
comment="hello",
)
result = serialize(activity)
assert result["id"] == six.text_type(activity.id)
assert result["incidentIdentifier"] == six.text_type(activity.incident.identifier)
assert result["user"] is None
assert result["type"] == activity.type
assert result["value"] is None
assert result["previousValue"] is None
assert result["comment"] == activity.comment
assert result["dateCreated"] == activity.date_added
@freeze_time()
def test_event_stats(self):
for _ in range(2):
self.store_event(
data={
"event_id": uuid4().hex,
"fingerprint": ["group1"],
"timestamp": (timezone.now() - timedelta(seconds=1)).isoformat()[:19],
},
project_id=self.project.id,
)
incident = self.create_incident(
date_started=timezone.now() - timedelta(hours=2), projects=[self.project], query=""
)
snapshot = create_initial_event_stats_snapshot(incident)
activity = create_incident_activity(
incident=incident,
activity_type=IncidentActivityType.COMMENT,
user=self.user,
comment="hello",
event_stats_snapshot=snapshot,
)
result = serialize(activity)
assert result["id"] == six.text_type(activity.id)
assert result["incidentIdentifier"] == six.text_type(activity.incident.identifier)
assert result["user"] == serialize(activity.user)
assert result["type"] == activity.type
assert result["value"] is None
assert result["previousValue"] is None
assert result["comment"] == activity.comment
event_stats = result["eventStats"]["data"]
assert [stat[1] for stat in event_stats[:-1]] == [[]] * len(event_stats[:-1])
assert event_stats[-1][1] == [{"count": 2}]
assert result["dateCreated"] == activity.date_added
| 38.688889
| 96
| 0.637277
|
6850da51174a702451eefe00fab31d0cbac33aa0
| 3,185
|
py
|
Python
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/quota_quotas_summary.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/quota_quotas_summary.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/quota_quotas_summary.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.quota_quotas_summary_summary import QuotaQuotasSummarySummary # noqa: F401,E501
class QuotaQuotasSummary(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'summary': 'QuotaQuotasSummarySummary'
}
attribute_map = {
'summary': 'summary'
}
def __init__(self, summary=None): # noqa: E501
"""QuotaQuotasSummary - a model defined in Swagger""" # noqa: E501
self._summary = None
self.discriminator = None
if summary is not None:
self.summary = summary
@property
def summary(self):
"""Gets the summary of this QuotaQuotasSummary. # noqa: E501
# noqa: E501
:return: The summary of this QuotaQuotasSummary. # noqa: E501
:rtype: QuotaQuotasSummarySummary
"""
return self._summary
@summary.setter
def summary(self, summary):
"""Sets the summary of this QuotaQuotasSummary.
# noqa: E501
:param summary: The summary of this QuotaQuotasSummary. # noqa: E501
:type: QuotaQuotasSummarySummary
"""
self._summary = summary
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QuotaQuotasSummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.222222
| 106
| 0.574882
|
d9d5c150ccd8b454b076647c18923e313b6c9ed4
| 9,166
|
py
|
Python
|
.waf3-1.5.18-402a8e0721eb718ff717906f130db0f4/wafadmin/Tools/vala.py
|
TirolJPN/ngweight
|
37b60bd8fdfdf0ba0d0fb74069831cbdfbdd3e7c
|
[
"MIT"
] | null | null | null |
.waf3-1.5.18-402a8e0721eb718ff717906f130db0f4/wafadmin/Tools/vala.py
|
TirolJPN/ngweight
|
37b60bd8fdfdf0ba0d0fb74069831cbdfbdd3e7c
|
[
"MIT"
] | null | null | null |
.waf3-1.5.18-402a8e0721eb718ff717906f130db0f4/wafadmin/Tools/vala.py
|
TirolJPN/ngweight
|
37b60bd8fdfdf0ba0d0fb74069831cbdfbdd3e7c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# encoding: utf-8
import os.path,shutil
import Task,Runner,Utils,Logs,Build,Node,Options
from TaskGen import extension,after,before
EXT_VALA=['.vala','.gs']
class valac_task(Task.Task):
vars=("VALAC","VALAC_VERSION","VALAFLAGS")
before=("cc","cxx")
def run(self):
env=self.env
inputs=[a.srcpath(env)for a in self.inputs]
valac=env['VALAC']
vala_flags=env.get_flat('VALAFLAGS')
top_src=self.generator.bld.srcnode.abspath()
top_bld=self.generator.bld.srcnode.abspath(env)
if env['VALAC_VERSION']>(0,1,6):
cmd=[valac,'-C','--quiet',vala_flags]
else:
cmd=[valac,'-C',vala_flags]
if self.threading:
cmd.append('--thread')
if self.profile:
cmd.append('--profile=%s'%self.profile)
if self.target_glib:
cmd.append('--target-glib=%s'%self.target_glib)
features=self.generator.features
if'cshlib'in features or'cstaticlib'in features:
output_dir=self.outputs[0].bld_dir(env)
cmd.append('--library '+self.target)
if env['VALAC_VERSION']>=(0,7,0):
for x in self.outputs:
if x.name.endswith('.h'):
cmd.append('--header '+x.bldpath(self.env))
cmd.append('--basedir '+top_src)
cmd.append('-d '+top_bld)
if env['VALAC_VERSION']>(0,7,2)and hasattr(self,'gir'):
cmd.append('--gir=%s.gir'%self.gir)
else:
output_dir=self.outputs[0].bld_dir(env)
cmd.append('-d %s'%output_dir)
for vapi_dir in self.vapi_dirs:
cmd.append('--vapidir=%s'%vapi_dir)
for package in self.packages:
cmd.append('--pkg %s'%package)
for package in self.packages_private:
cmd.append('--pkg %s'%package)
cmd.append(" ".join(inputs))
result=self.generator.bld.exec_command(" ".join(cmd))
if not'cprogram'in features:
if self.packages:
filename=os.path.join(self.generator.path.abspath(env),"%s.deps"%self.target)
deps=open(filename,'w')
for package in self.packages:
deps.write(package+'\n')
deps.close()
self._fix_output("../%s.vapi"%self.target)
self._fix_output("%s.vapi"%self.target)
self._fix_output("%s.gidl"%self.target)
self._fix_output("%s.gir"%self.target)
if hasattr(self,'gir'):
self._fix_output("%s.gir"%self.gir)
first=None
for node in self.outputs:
if not first:
first=node
else:
if first.parent.id!=node.parent.id:
if env['VALAC_VERSION']<(0,7,0):
shutil.move(first.parent.abspath(self.env)+os.sep+node.name,node.abspath(self.env))
return result
def install(self):
bld=self.generator.bld
features=self.generator.features
if self.attr("install_path")and("cshlib"in features or"cstaticlib"in features):
headers_list=[o for o in self.outputs if o.suffix()==".h"]
vapi_list=[o for o in self.outputs if(o.suffix()in(".vapi",".deps"))]
gir_list=[o for o in self.outputs if o.suffix()==".gir"]
for header in headers_list:
top_src=self.generator.bld.srcnode
package=self.env['PACKAGE']
try:
api_version=Utils.g_module.API_VERSION
except AttributeError:
version=Utils.g_module.VERSION.split(".")
if version[0]=="0":
api_version="0."+version[1]
else:
api_version=version[0]+".0"
install_path='${INCLUDEDIR}/%s-%s/%s'%(package,api_version,header.relpath_gen(top_src))
bld.install_as(install_path,header,self.env)
bld.install_files('${DATAROOTDIR}/vala/vapi',vapi_list,self.env)
bld.install_files('${DATAROOTDIR}/gir-1.0',gir_list,self.env)
def _fix_output(self,output):
top_bld=self.generator.bld.srcnode.abspath(self.env)
try:
src=os.path.join(top_bld,output)
dst=self.generator.path.abspath(self.env)
shutil.move(src,dst)
except:
pass
def vala_file(self,node):
valatask=getattr(self,"valatask",None)
if not valatask:
valatask=self.create_task('valac')
self.valatask=valatask
self.includes=Utils.to_list(getattr(self,'includes',[]))
self.uselib=self.to_list(self.uselib)
valatask.packages=[]
valatask.packages_private=Utils.to_list(getattr(self,'packages_private',[]))
valatask.vapi_dirs=[]
valatask.target=self.target
valatask.threading=False
valatask.install_path=self.install_path
valatask.profile=getattr(self,'profile','gobject')
valatask.target_glib=None
packages=Utils.to_list(getattr(self,'packages',[]))
vapi_dirs=Utils.to_list(getattr(self,'vapi_dirs',[]))
includes=[]
if hasattr(self,'uselib_local'):
local_packages=Utils.to_list(self.uselib_local)
seen=[]
while len(local_packages)>0:
package=local_packages.pop()
if package in seen:
continue
seen.append(package)
package_obj=self.name_to_obj(package)
if not package_obj:
raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')"%(package,self.name))
package_name=package_obj.target
package_node=package_obj.path
package_dir=package_node.relpath_gen(self.path)
for task in package_obj.tasks:
for output in task.outputs:
if output.name==package_name+".vapi":
valatask.set_run_after(task)
if package_name not in packages:
packages.append(package_name)
if package_dir not in vapi_dirs:
vapi_dirs.append(package_dir)
if package_dir not in includes:
includes.append(package_dir)
if hasattr(package_obj,'uselib_local'):
lst=self.to_list(package_obj.uselib_local)
lst.reverse()
local_packages=[pkg for pkg in lst if pkg not in seen]+local_packages
valatask.packages=packages
for vapi_dir in vapi_dirs:
try:
valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env))
except AttributeError:
Logs.warn("Unable to locate Vala API directory: '%s'"%vapi_dir)
self.includes.append(node.bld.srcnode.abspath())
self.includes.append(node.bld.srcnode.abspath(self.env))
for include in includes:
try:
self.includes.append(self.path.find_dir(include).abspath())
self.includes.append(self.path.find_dir(include).abspath(self.env))
except AttributeError:
Logs.warn("Unable to locate include directory: '%s'"%include)
if valatask.profile=='gobject':
if hasattr(self,'target_glib'):
Logs.warn('target_glib on vala tasks is deprecated --vala-target-glib=MAJOR.MINOR from the vala tool options')
if getattr(Options.options,'vala_target_glib',None):
valatask.target_glib=Options.options.vala_target_glib
if not'GOBJECT'in self.uselib:
self.uselib.append('GOBJECT')
if hasattr(self,'threading'):
if valatask.profile=='gobject':
valatask.threading=self.threading
if not'GTHREAD'in self.uselib:
self.uselib.append('GTHREAD')
else:
Logs.warn("Profile %s does not have threading support"%valatask.profile)
if hasattr(self,'gir'):
valatask.gir=self.gir
env=valatask.env
output_nodes=[]
c_node=node.change_ext('.c')
output_nodes.append(c_node)
self.allnodes.append(c_node)
if env['VALAC_VERSION']<(0,7,0):
output_nodes.append(node.change_ext('.h'))
else:
if not'cprogram'in self.features:
output_nodes.append(self.path.find_or_declare('%s.h'%self.target))
if not'cprogram'in self.features:
output_nodes.append(self.path.find_or_declare('%s.vapi'%self.target))
if env['VALAC_VERSION']>(0,7,2):
if hasattr(self,'gir'):
output_nodes.append(self.path.find_or_declare('%s.gir'%self.gir))
elif env['VALAC_VERSION']>(0,3,5):
output_nodes.append(self.path.find_or_declare('%s.gir'%self.target))
elif env['VALAC_VERSION']>(0,1,7):
output_nodes.append(self.path.find_or_declare('%s.gidl'%self.target))
if valatask.packages:
output_nodes.append(self.path.find_or_declare('%s.deps'%self.target))
valatask.inputs.append(node)
valatask.outputs.extend(output_nodes)
def detect(conf):
min_version=(0,1,6)
min_version_str="%d.%d.%d"%min_version
valac=conf.find_program('valac',var='VALAC',mandatory=True)
if not conf.env["HAVE_GOBJECT"]:
pkg_args={'package':'gobject-2.0','uselib_store':'GOBJECT','args':'--cflags --libs'}
if getattr(Options.options,'vala_target_glib',None):
pkg_args['atleast_version']=Options.options.vala_target_glib
conf.check_cfg(**pkg_args)
if not conf.env["HAVE_GTHREAD"]:
pkg_args={'package':'gthread-2.0','uselib_store':'GTHREAD','args':'--cflags --libs'}
if getattr(Options.options,'vala_target_glib',None):
pkg_args['atleast_version']=Options.options.vala_target_glib
conf.check_cfg(**pkg_args)
try:
output=Utils.cmd_output(valac+" --version",silent=True)
version=output.split(' ',1)[-1].strip().split(".")[0:3]
version=[int(x)for x in version]
valac_version=tuple(version)
except Exception:
valac_version=(0,0,0)
conf.check_message('program version','valac >= '+min_version_str,valac_version>=min_version,"%d.%d.%d"%valac_version)
conf.check_tool('gnu_dirs')
if valac_version<min_version:
conf.fatal("valac version too old to be used with this tool")
return
conf.env['VALAC_VERSION']=valac_version
conf.env['VALAFLAGS']=''
def set_options(opt):
valaopts=opt.add_option_group('Vala Compiler Options')
valaopts.add_option('--vala-target-glib',default=None,dest='vala_target_glib',metavar='MAJOR.MINOR',help='Target version of glib for Vala GObject code generation')
extension(EXT_VALA)(vala_file)
| 38.838983
| 164
| 0.717216
|
f78652dacb37ccc1e0b5cd748255a5b9ad7dff13
| 1,313
|
py
|
Python
|
v1.0/dc/utils/mail.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | null | null | null |
v1.0/dc/utils/mail.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | null | null | null |
v1.0/dc/utils/mail.py
|
iaiting/Flask-and-pywebview-followup-application-gui
|
b665334403b4a8471b5f28054ee2dc7adda7d9fc
|
[
"MIT"
] | 1
|
2019-12-25T11:57:45.000Z
|
2019-12-25T11:57:45.000Z
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Mail:
"""Sending an email with smtp library"""
def __init__(self, smtpaddr, smtpport):
self.smtpaddr = smtpaddr
self.smtpport = smtpport
def check_mail_inputs(self, fromaddr, frompassword, toaddr, subject, body):
"""All must be type string"""
inputs_mail = [fromaddr, frompassword, toaddr, subject, body]
for i in inputs_mail:
if not isinstance(i, str):
raise Exception("Parameter must be string!")
def send_mail(self, fromaddr, frompassword, toaddr, subject, body):
"""Send and email using standard smtp module"""
self.check_mail_inputs(fromaddr, frompassword, toaddr, subject, body)
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(self.smtpaddr, self.smtpport)
server.ehlo()
server.starttls()
server.ehlo()
server.login(fromaddr, frompassword)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
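
# Example usage (illustrative; the host, port and credentials below are placeholders):
#     Mail("smtp.example.com", 587).send_mail("sender@example.com", "app-password",
#                                             "recipient@example.com", "subject", "body text")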
| 32.02439
| 80
| 0.592536
|
bf47cef9bc02b08fe3a0b0e8280cb7452deb3cad
| 4,217
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/plugins/action/ios_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
venv/lib/python2.7/site-packages/ansible/plugins/action/ios_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
venv/lib/python2.7/site-packages/ansible/plugins/action/ios_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ios import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit('src').scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| 36.991228
| 85
| 0.640977
|
966b25e5478c1d8e71883bab0908f407a72036dd
| 6,231
|
py
|
Python
|
sonarqube/community/projects.py
|
0x646e78/python-sonarqube-api
|
c641ab4dd180b4184f2663bd28277aa796b36417
|
[
"MIT"
] | null | null | null |
sonarqube/community/projects.py
|
0x646e78/python-sonarqube-api
|
c641ab4dd180b4184f2663bd28277aa796b36417
|
[
"MIT"
] | null | null | null |
sonarqube/community/projects.py
|
0x646e78/python-sonarqube-api
|
c641ab4dd180b4184f2663bd28277aa796b36417
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_PROJECTS_BULK_DELETE_ENDPOINT,
API_PROJECTS_SEARCH_ENDPOINT,
API_PROJECTS_CREATE_ENDPOINT,
API_PROJECTS_DELETE_ENDPOINT,
API_PROJECTS_UPDATE_VISIBILITY_ENDPOINT,
API_PROJECTS_UPDATE_KEY_ENDPOINT
)
class SonarQubeProjects(RestClient):
"""
SonarQube projects Operations
"""
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeProjects, self).__init__(**kwargs)
def __getitem__(self, key):
result = list(self.search_projects(projects=key))
for project in result:
if project['key'] == key:
return project
def search_projects(self, analyzedBefore=None, onProvisionedOnly=False, projects=None, q=None, qualifiers="TRK"):
"""
Search for projects or views to administrate them.
:param analyzedBefore: Filter the projects for which last analysis is older than the given date (exclusive).
Either a date (server timezone) or datetime can be provided.
:param onProvisionedOnly: Filter the projects that are provisioned.
Possible values are for: True or False. default value is False.
:param projects: Comma-separated list of project keys
:param q:
Limit search to:
* component names that contain the supplied string
* component keys that contain the supplied string
:param qualifiers: Comma-separated list of component qualifiers. Filter the results with the specified
qualifiers. Possible values are for:
* TRK
* VW
* APP
default value is TRK.
:return:
"""
params = {
'onProvisionedOnly': onProvisionedOnly and 'true' or 'false',
'qualifiers': qualifiers.upper()
}
page_num = 1
page_size = 1
total = 2
if analyzedBefore:
params.update({'analyzedBefore': analyzedBefore})
if projects:
params.update({'projects': projects})
if q:
params.update({'q': q})
while page_num * page_size < total:
resp = self.get(API_PROJECTS_SEARCH_ENDPOINT, params=params)
response = resp.json()
page_num = response['paging']['pageIndex']
page_size = response['paging']['pageSize']
total = response['paging']['total']
params['p'] = page_num + 1
for component in response['components']:
yield component
def create_project(self, project, name, visibility=None):
"""
Create a project.
:param project: Key of the project
:param name: Name of the project. If name is longer than 500, it is abbreviated.
:param visibility: Whether the created project should be visible to everyone, or only specific user/groups.
If no visibility is specified, the default project visibility of the organization will be used.
Possible values are for:
* private
* public
:return: request response
"""
params = {
'name': name,
'project': project
}
if visibility:
params.update({'visibility': visibility})
return self.post(API_PROJECTS_CREATE_ENDPOINT, params=params)
def delete_project(self, project):
"""
Delete a project.
:param project: Project key
:return:
"""
params = {
'project': project
}
self.post(API_PROJECTS_DELETE_ENDPOINT, params=params)
def bulk_delete_projects(self, analyzedBefore=None, onProvisionedOnly=False, projects=None,
q=None, qualifiers="TRK"):
"""
Delete one or several projects.
At least one parameter is required among analyzedBefore, projects, projectIds (deprecated since 6.4) and q
:param analyzedBefore: Filter the projects for which last analysis is older than the given date (exclusive).
Either a date (server timezone) or datetime can be provided.
:param onProvisionedOnly: Filter the projects that are provisioned.
Possible values are for: True or False. default value is False.
:param projects: Comma-separated list of project keys
:param q:
Limit to:
* component names that contain the supplied string
* component keys that contain the supplied string
:param qualifiers: Comma-separated list of component qualifiers. Filter the results with the specified
qualifiers. Possible values are for:
* TRK
* VW
* APP
default value is TRK.
:return:
"""
params = {
'onProvisionedOnly': onProvisionedOnly and 'true' or 'false',
'qualifiers': qualifiers.upper()
}
if analyzedBefore:
params.update({'analyzedBefore': analyzedBefore})
if projects:
params.update({'projects': projects})
if q:
params.update({'q': q})
self.post(API_PROJECTS_BULK_DELETE_ENDPOINT, params=params)
def update_project_key(self, previous_project_key, new_project_key):
"""
Update a project or module key and all its sub-components keys.
:param previous_project_key: Project or module key
:param new_project_key: New component key
:return:
"""
params = {
'from': previous_project_key,
'to': new_project_key
}
self.post(API_PROJECTS_UPDATE_KEY_ENDPOINT, params=params)
def update_project_visibility(self, project, visibility):
"""
Updates visibility of a project.
:param project: Project key
:param visibility: New visibility
:return:
"""
params = {
'project': project,
'visibility': visibility
}
self.post(API_PROJECTS_UPDATE_VISIBILITY_ENDPOINT, params=params)
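# Hedged usage sketch (not part of the original module). The client entry
# point and constructor arguments below are assumptions about the wider
# python-sonarqube-api package, not something defined in this file:
#
#   >>> from sonarqube import SonarQubeClient
#   >>> sonar = SonarQubeClient(sonarqube_url="http://localhost:9000", token="<admin-token>")
#   >>> sonar.projects.create_project(project="my-app", name="My App", visibility="private")
#   >>> for component in sonar.projects.search_projects(q="my-app"):
#   ...     print(component["key"], component["name"])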
| 33.320856
| 117
| 0.611619
|
ccbe08906af0cbc69931c089fa9cfc5d589890ee
| 20,915
|
py
|
Python
|
lib/python2.7/site-packages/sklearn/mixture/tests/test_gmm.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
lib/python2.7/site-packages/sklearn/mixture/tests/test_gmm.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
lib/python2.7/site-packages/sklearn/mixture/tests/test_gmm.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed a ``LinAlgError`` exception,
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
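# Hedged sketch (not part of the original test module): the GMMTester mixin
# above ultimately exercises the deprecated mixture.GMM estimator roughly
# like this (data values and shapes are illustrative only):
#
#   >>> import numpy as np
#   >>> from sklearn import mixture
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(100, 2)
#   >>> X[:50] += 3
#   >>> g = mixture.GMM(n_components=2, covariance_type='full', n_iter=10)
#   >>> labels = g.fit(X).predict(X)   # emits a DeprecationWarning, as tested above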
| 39.020522
| 79
| 0.641214
|
13d850947b4f2b1599f20ecc7aece744ede5ca27
| 554
|
py
|
Python
|
config.py
|
dislazy/DailyNotes
|
e1c689ac54413772911a74f1ece14781507a27d6
|
[
"MIT"
] | 437
|
2020-01-19T01:28:31.000Z
|
2022-03-30T05:25:40.000Z
|
config.py
|
dislazy/DailyNotes
|
e1c689ac54413772911a74f1ece14781507a27d6
|
[
"MIT"
] | 51
|
2020-01-19T09:20:27.000Z
|
2022-03-27T14:27:10.000Z
|
config.py
|
dislazy/DailyNotes
|
e1c689ac54413772911a74f1ece14781507a27d6
|
[
"MIT"
] | 28
|
2020-05-08T10:42:24.000Z
|
2022-03-05T19:25:29.000Z
|
import os
import datetime
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
JWT_SECRET_KEY = os.environ.get('API_SECRET_KEY')
DB_ENCRYPTION_KEY = os.environ.get('DB_ENCRYPTION_KEY')
PREVENT_SIGNUPS = os.environ.get('PREVENT_SIGNUPS', False)
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI') or 'sqlite:///' + os.path.join(basedir + '/config', 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_ACCESS_TOKEN_EXPIRES = datetime.timedelta(days=7)
EXPORT_FILE = os.path.join(basedir, 'config', 'export.zip')
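# Hedged usage sketch (not part of the original file), assuming the Flask app
# factory used elsewhere in DailyNotes loads this class with from_object:
#
#   >>> from flask import Flask
#   >>> app = Flask(__name__)
#   >>> app.config.from_object(Config)
#   >>> app.config['JWT_ACCESS_TOKEN_EXPIRES']
#   datetime.timedelta(days=7)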
| 39.571429
| 120
| 0.758123
|
fd1820a6c3e0bb9d2ec9110e01f326f556afb996
| 1,742
|
py
|
Python
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_DatasetService_ExportData_async]
from google.cloud import aiplatform_v1
async def sample_export_data():
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
export_config = aiplatform_v1.ExportDataConfig()
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1.ExportDataRequest(
name="name_value",
export_config=export_config,
)
# Make the request
operation = client.export_data(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ExportData_async]
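# Hedged usage note (not part of the generated snippet): sample_export_data is
# a coroutine, so a caller would drive it with an event loop, e.g.
#
#   >>> import asyncio
#   >>> asyncio.run(sample_export_data())  # requires valid GCP credentials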
| 32.259259
| 85
| 0.761768
|
20b2b60bd8999bb9949eb2b07294d5a76a2497c6
| 1,426
|
py
|
Python
|
examples/github_test.py
|
tomejorge/SeleniumBase
|
e3e50bbd80594c52131b0d88ca3e2c2f7692e340
|
[
"MIT"
] | 1
|
2021-05-12T14:27:31.000Z
|
2021-05-12T14:27:31.000Z
|
examples/github_test.py
|
tomejorge/SeleniumBase
|
e3e50bbd80594c52131b0d88ca3e2c2f7692e340
|
[
"MIT"
] | null | null | null |
examples/github_test.py
|
tomejorge/SeleniumBase
|
e3e50bbd80594c52131b0d88ca3e2c2f7692e340
|
[
"MIT"
] | null | null | null |
from seleniumbase import BaseCase
class GitHubTests(BaseCase):
def test_github(self):
# Selenium can trigger GitHub's anti-automation system:
# "You have triggered an abuse detection mechanism."
# "Please wait a few minutes before you try again."
# To avoid this automation blocker, two steps are being taken:
# 1. self.slow_click() is being used to slow down Selenium actions.
# 2. The browser's User Agent is modified to avoid Selenium-detection
# when running in headless mode on Chrome or Edge (Chromium).
if self.headless and (
self.browser == "chrome" or self.browser == "edge"
):
self.get_new_driver(
agent="""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) """
"""AppleWebKit/537.36 (KHTML, like Gecko) """
"""Chrome/90.0.4430.85 Safari/537.36"""
)
self.open("https://github.com/search?q=SeleniumBase")
self.slow_click('a[href="/seleniumbase/SeleniumBase"]')
self.click_if_visible('[data-action="click:signup-prompt#dismiss"]')
self.assert_element("div.repository-content")
self.assert_text("SeleniumBase", "h1")
self.slow_click('a[title="seleniumbase"]')
self.slow_click('a[title="fixtures"]')
self.slow_click('a[title="base_case.py"]')
self.assert_text("Code", "nav a.selected")
| 47.533333
| 77
| 0.624825
|
6124de1cd5e502d5124a0d1668378ef5fc168ad7
| 7,972
|
py
|
Python
|
build.py
|
riccardobl/Effekseer
|
40dea55d7d2fafa365e4365adfe58a1059a5a980
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
build.py
|
riccardobl/Effekseer
|
40dea55d7d2fafa365e4365adfe58a1059a5a980
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
build.py
|
riccardobl/Effekseer
|
40dea55d7d2fafa365e4365adfe58a1059a5a980
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import shutil
import urllib.request
import platform
from distutils.spawn import find_executable
import distutils
from distutils import dir_util
def call(cmd, env=None):
""" call command line.
"""
print(cmd)
p = subprocess.Popen(cmd, shell=True, env=env)
ret = p.wait()
if ret != 0:
print("Failed {}".format(cmd))
raise Exception
def get_files(path):
""" get files.
"""
def getlistdir(path, l):
dirs = os.listdir(path)
for d in dirs:
newpath = os.path.join(path, d)
if os.path.isdir(newpath):
getlistdir(newpath, l)
else:
l.append(newpath)
ret = []
getlistdir(path, ret)
return ret
def copytreeWithExt(src, dst, exts):
files = get_files(src)
for _from in files:
root, ext = os.path.splitext(_from)
if not ext in exts:
continue
_to = dst + _from[len(src):]
print(_from + '\t:\t' + _to)
if not os.path.exists(os.path.dirname(_to)):
os.makedirs(os.path.dirname(_to))
shutil.copy(_from, _to)
def isWin():
return platform.system() == 'Windows'
def isMac():
return platform.system() == 'Darwin'
def wget(address):
urllib.request.urlretrieve(address, os.path.basename(address))
def rm(path):
if os.path.exists(path):
os.remove(path)
def rmdir(path):
if os.path.exists(path):
print("rmdir : " + path)
shutil.rmtree(path)
else:
print("rmdir : not found " + path)
def cd(path):
os.chdir(path)
def cdToScript():
cd(os.path.dirname(os.path.abspath(__file__)))
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def copy(src, dst):
print("copying from {0} to {1}".format(src, dst))
shutil.copy(src, dst)
def copytree(src, dst, change=False, ignoreList=None):
print("copying tree from {0} to {1}".format(src, dst))
if change and os.path.exists(dst):
rmdir(dst)
if not os.path.exists(dst):
shutil.copytree(src, dst, ignore=ignoreList)
class CurrentDir:
def __init__(self, path):
self.prev = os.getcwd()
self.path = path
def __enter__(self):
cd(self.path)
#print("cd: " + os.getcwd())
return self
def __exit__(self, type, value, traceback):
cd(self.prev)
#print("cd: " + os.getcwd())
env = os.environ.copy()
env["PKG_CONFIG_PATH"] = os.getenv(
'PKG_CONFIG_PATH', '/Library/Frameworks/Mono.framework/Versions/Current/lib/pkgconfig')
env["AS"] = os.getenv('AS', 'as -arch i386')
env["CC"] = os.getenv('CC', 'clang -arch i386 -mmacosx-version-min=10.6')
env["MONO_SDK_PATH"] = os.getenv(
'MONO_SDK_PATH', '/Library/Frameworks/Mono.framework/Versions/Current')
env["PACKAGEING_FOR_MAC"] = os.getenv('PACKAGEING_FOR_MAC', '0')
env["PACKAGEING_FOR_LINUX"] = os.getenv('PACKAGEING_FOR_LINUX', '0')
env["IGNORE_BUILD"] = os.getenv('IGNORE_BUILD', '0')
is_x86 = 'x86' in sys.argv
is_from_ci = 'from_ci' in sys.argv
if env['IGNORE_BUILD'] == '0':
os.makedirs('build', exist_ok=True)
if isWin():
#import winreg
#reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
#key = winreg.OpenKey(reg, r"SOFTWARE\Microsoft\MSBuild\ToolsVersions\12.0")
#msbuild_path = winreg.QueryValueEx(key, 'MSBuildToolsPath')[0] + 'MSBuild.exe'
candidates = [
r"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\MSBuild\Current\Bin\MSBuild.exe",
r"C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin\MSBuild.exe",
r"C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe",
]
msbuild_path = None
for candidate in candidates:
if os.path.exists(candidate):
msbuild_path = candidate
break
if msbuild_path is None:
raise Exception("MSBuild is not found")
elif isMac():
msbuild_path = 'msbuild'
with CurrentDir('build'):
if isWin() or isMac():
# for auto restore of .csproj
wget(r'https://dist.nuget.org/win-x86-commandline/v5.1.0/nuget.exe')
if isWin():
suffix = ''
if is_from_ci:
suffix += ' -D FROM_CI=ON'
if is_x86:
call('cmake .. -A Win32 -DBUILD_VIEWER=ON' + suffix)
else:
# run tests on x64
call('cmake .. -A x64 -DBUILD_VIEWER=ON -D BUILD_TEST=ON' + suffix)
elif isMac():
call('cmake .. -G "Xcode" -DBUILD_VIEWER=ON')
elif find_executable('ninja'):
call('cmake .. -G Ninja -DBUILD_VIEWER=ON')
else:
call('cmake .. -G "Unix Makefiles" -DBUILD_VIEWER=ON')
call('cmake --build . --config Release')
if isWin():
call('build\\nuget.exe restore Dev/Editor/Effekseer.sln')
if isMac():
call('dotnet build Dev/Editor/Effekseer/Effekseer.Std.csproj')
call('dotnet publish Dev/Editor/Effekseer/Effekseer.Std.csproj -c Release --self-contained -r osx.10.11-x64')
call('cp -r Dev/release/osx.10.11-x64/publish/* Dev/release/')
elif isWin():
if is_x86:
call('"' + msbuild_path + '"' +
' Dev/Editor/EffekseerCore/EffekseerCore.csproj /t:build /p:Configuration=Release /p:Platform=x86')
call('"' + msbuild_path + '"' +
' Dev/Editor/Effekseer/Effekseer.csproj /t:build /p:Configuration=Release /p:Platform=x86')
else:
call('"' + msbuild_path + '"' +
' Dev/Editor/EffekseerCore/EffekseerCore.csproj /t:build /p:Configuration=Release /p:Platform=x64')
call('"' + msbuild_path + '"' +
' Dev/Editor/Effekseer/Effekseer.csproj /t:build /p:Configuration=Release /p:Platform=x64')
else:
call('dotnet build Dev/Editor/Effekseer/Effekseer.Std.csproj')
call('dotnet publish Dev/Editor/Effekseer/Effekseer.Std.csproj -c Release --self-contained -r linux-x64')
call('cp -r Dev/release/linux-x64/publish/* Dev/release/')
if env['PACKAGEING_FOR_MAC'] == '1' and isMac():
cd('Dev')
mkdir('Mac/Effekseer.app/Contents/Resources/')
distutils.dir_util.copy_tree('release/', 'Mac/Effekseer.app/Contents/Resources/')
call('chmod +x Mac/Effekseer.app/Contents/MacOS/script.sh')
call('chmod +x Mac/Effekseer.app/Contents/Resources/tools/fbxToEffekseerCurveConverter')
call('chmod +x Mac/Effekseer.app/Contents/Resources/tools/fbxToEffekseerModelConverter')
os.makedirs('Mac/Package', exist_ok=True)
distutils.dir_util.copy_tree('Mac/Effekseer.app', 'Mac/Package/Effekseer.app')
call('ln -s /Applications Applications > /dev/null 2>&1')
call('mv Applications Mac/Package/')
call('hdiutil create Effekseer.dmg -volname "Effekseer" -srcfolder "Mac/Package"')
cd('../')
os.makedirs('EffekseerToolMac', exist_ok=True)
shutil.copy('Dev/Effekseer.dmg', 'EffekseerToolMac/')
shutil.copy('docs/Help_Ja.html', 'EffekseerToolMac/')
shutil.copy('docs/Help_En.html', 'EffekseerToolMac/')
shutil.copy('LICENSE_TOOL', 'EffekseerToolMac/LICENSE_TOOL')
shutil.copy('readme_tool_mac.txt', 'EffekseerToolMac/readme.txt')
os.makedirs('EffekseerToolMac/Sample/', exist_ok=True)
distutils.dir_util.copy_tree('Release/Sample', 'EffekseerToolMac/Sample')
distutils.dir_util.copy_tree(
'ResourceData/samples', 'EffekseerToolMac/Sample')
shutil.copy('docs/readme_sample.txt', 'EffekseerToolMac/Sample/readme.txt')
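# Hedged sketch (not part of the original script): copytreeWithExt defined
# above filters by file extension, so copying only C/C++ sources from a
# hypothetical source tree would look like
#
#   >>> copytreeWithExt('Dev/Cpp', 'dist/src', ['.h', '.hpp', '.cpp'])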
| 32.942149
| 118
| 0.606247
|
076aeaa8b7f98f961f7b8a98231677cc7b9f7b36
| 18,612
|
py
|
Python
|
electrum/lnaddr.py
|
ben-abraham/electrum-ravencoin
|
597d27b722be1f8bea4e33e69deadb3dc8348340
|
[
"MIT"
] | 28
|
2021-04-03T21:30:17.000Z
|
2022-02-22T08:13:14.000Z
|
electrum/lnaddr.py
|
ben-abraham/electrum-ravencoin
|
597d27b722be1f8bea4e33e69deadb3dc8348340
|
[
"MIT"
] | 37
|
2021-05-26T14:08:10.000Z
|
2022-02-09T23:25:07.000Z
|
electrum/lnaddr.py
|
ben-abraham/electrum-ravencoin
|
597d27b722be1f8bea4e33e69deadb3dc8348340
|
[
"MIT"
] | 13
|
2021-04-09T20:22:32.000Z
|
2021-12-17T17:18:51.000Z
|
#! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Type
import random
import bitstring
from electrum.transaction import RavenValue
from electrum.util import Satoshis
from .ravencoin import hash160_to_b58_address, b58_address_to_hash160, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import segwit_addr
from . import constants
from .constants import AbstractNet
from . import ecc
from .ravencoin import COIN
if TYPE_CHECKING:
from .lnutil import LnFeatures
class LnInvoiceException(Exception): pass
class LnDecodeException(LnInvoiceException): pass
class LnEncodeException(LnInvoiceException): pass
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
""" Given an amount in bitcoin, shorten it
"""
# Convert to pico initially
amount = int(amount * 10**12)
units = ['p', 'n', 'u', 'm']
for unit in units:
if amount % 1000 == 0:
amount //= 1000
else:
break
else:
unit = ''
return str(amount) + unit
def unshorten_amount(amount) -> Decimal:
""" Given a shortened amount, convert it into a decimal
"""
# BOLT #11:
# The following `multiplier` letters are defined:
#
#* `m` (milli): multiply by 0.001
#* `u` (micro): multiply by 0.000001
#* `n` (nano): multiply by 0.000000001
#* `p` (pico): multiply by 0.000000000001
units = {
'p': 10**12,
'n': 10**9,
'u': 10**6,
'm': 10**3,
}
unit = str(amount)[-1]
# BOLT #11:
# A reader SHOULD fail if `amount` contains a non-digit, or is followed by
# anything except a `multiplier` in the table above.
if not re.fullmatch("\\d+[pnum]?", str(amount)):
raise LnDecodeException("Invalid amount '{}'".format(amount))
if unit in units.keys():
return Decimal(amount[:-1]) / units[unit]
else:
return Decimal(amount)
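# Hedged round-trip example (not part of the original module) for the
# multiplier letters described above: 0.001 BTC is 1 milli-bitcoin.
#
#   >>> shorten_amount(Decimal('0.001'))
#   '1m'
#   >>> unshorten_amount('1m')
#   Decimal('0.001')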
_INT_TO_BINSTR = {a: '0' * (5-len(bin(a)[2:])) + bin(a)[2:] for a in range(32)}
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
b = ''.join(_INT_TO_BINSTR[a] for a in arr)
return bitstring.BitArray(bin=b)
def bitarray_to_u5(barr):
assert barr.len % 5 == 0
ret = []
s = bitstring.ConstBitStream(barr)
while s.pos != s.len:
ret.append(s.read(5).uint)
return ret
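# Hedged example (not part of the original module): the two shims above are
# inverses for 5-bit values in range(32).
#
#   >>> bitarray_to_u5(u5_to_bitarray([3, 1, 12]))
#   [3, 1, 12]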
def encode_fallback(fallback: str, net: Type[AbstractNet]):
""" Encode all supported fallback addresses.
"""
wver, wprog_ints = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback)
if wver is not None:
wprog = bytes(wprog_ints)
else:
addrtype, addr = b58_address_to_hash160(fallback)
if addrtype == net.ADDRTYPE_P2PKH:
wver = 17
elif addrtype == net.ADDRTYPE_P2SH:
wver = 18
else:
raise LnEncodeException(f"Unknown address type {addrtype} for {net}")
wprog = addr
return tagged('f', bitstring.pack("uint:5", wver) + wprog)
def parse_fallback(fallback, net: Type[AbstractNet]):
wver = fallback[0:5].uint
if wver == 17:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2PKH)
elif wver == 18:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2SH)
elif wver <= 16:
witprog = fallback[5:] # cut witver
witprog = witprog[:len(witprog) // 8 * 8] # can only be full bytes
witprog = witprog.tobytes()
addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, wver, witprog)
else:
return None
return addr
base58_prefix_map = {
constants.RavencoinMainnet.SEGWIT_HRP : (constants.RavencoinMainnet.ADDRTYPE_P2PKH, constants.RavencoinMainnet.ADDRTYPE_P2SH),
constants.RavencoinTestnet.SEGWIT_HRP : (constants.RavencoinTestnet.ADDRTYPE_P2PKH, constants.RavencoinTestnet.ADDRTYPE_P2SH)
}
BOLT11_HRP_INV_DICT = {net.BOLT11_HRP: net for net in constants.NETS_LIST}
# Tagged field containing BitArray
def tagged(char, l):
# Tagged fields need to be zero-padded to 5 bits.
while l.len % 5 != 0:
l.append('0b0')
return bitstring.pack("uint:5, uint:5, uint:5",
CHARSET.find(char),
(l.len / 5) / 32, (l.len / 5) % 32) + l
# Tagged field containing bytes
def tagged_bytes(char, l):
return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
# Adds a byte if necessary.
b = barr.tobytes()
if barr.len % 8 != 0:
return b[:-1]
return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
if addr.amount:
amount = addr.net.BOLT11_HRP + shorten_amount(addr.amount)
else:
amount = addr.net.BOLT11_HRP if addr.net else ''
hrp = 'ln' + amount
# Start with the timestamp
data = bitstring.pack('uint:35', addr.date)
tags_set = set()
# Payment hash
data += tagged_bytes('p', addr.paymenthash)
tags_set.add('p')
if addr.payment_secret is not None:
data += tagged_bytes('s', addr.payment_secret)
tags_set.add('s')
for k, v in addr.tags:
# BOLT #11:
#
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
if k in ('d', 'h', 'n', 'x', 'p', 's'):
if k in tags_set:
raise LnEncodeException("Duplicate '{}' tag".format(k))
if k == 'r':
route = bitstring.BitArray()
for step in v:
pubkey, channel, feebase, feerate, cltv = step
route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
data += tagged('r', route)
elif k == 't':
pubkey, feebase, feerate, cltv = v
route = bitstring.BitArray(pubkey) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv)
data += tagged('t', route)
elif k == 'f':
data += encode_fallback(v, addr.net)
elif k == 'd':
# truncate to max length: 1023 * 5 bits -> at most 639 whole bytes
data += tagged_bytes('d', v.encode()[0:639])
elif k == 'x':
expirybits = bitstring.pack('intbe:64', v)
expirybits = trim_to_min_length(expirybits)
data += tagged('x', expirybits)
elif k == 'h':
data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
elif k == 'n':
data += tagged_bytes('n', v)
elif k == 'c':
finalcltvbits = bitstring.pack('intbe:64', v)
finalcltvbits = trim_to_min_length(finalcltvbits)
data += tagged('c', finalcltvbits)
elif k == '9':
if v == 0:
continue
feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
feature_bits = trim_to_min_length(feature_bits)
data += tagged('9', feature_bits)
else:
# FIXME: Support unknown tags?
raise LnEncodeException("Unknown tag {}".format(k))
tags_set.add(k)
# BOLT #11:
#
# A writer MUST include either a `d` or `h` field, and MUST NOT include
# both.
if 'd' in tags_set and 'h' in tags_set:
raise ValueError("Cannot include both 'd' and 'h'")
if not 'd' in tags_set and not 'h' in tags_set:
raise ValueError("Must include either 'd' or 'h'")
# We actually sign the hrp, then data (padded to 8 bits with zeroes).
msg = hrp.encode("ascii") + data.tobytes()
privkey = ecc.ECPrivkey(privkey)
sig = privkey.sign_message(msg, is_compressed=False, algo=lambda x:sha256(x).digest())
recovery_flag = bytes([sig[0] - 27])
sig = bytes(sig[1:]) + recovery_flag
data += sig
return bech32_encode(segwit_addr.Encoding.BECH32, hrp, bitarray_to_u5(data))
class LnAddr(object):
def __init__(self, *, paymenthash: bytes = None, amount=None, net: Type[AbstractNet] = None, tags=None, date=None,
payment_secret: bytes = None):
self.date = int(time.time()) if not date else int(date)
self.tags = [] if not tags else tags
self.unknown_tags = []
self.paymenthash = paymenthash
self.payment_secret = payment_secret
self.signature = None
self.pubkey = None
self.net = constants.net if net is None else net # type: Type[AbstractNet]
self._amount = amount # type: Optional[Decimal] # in bitcoins
self._min_final_cltv_expiry = 18
@property
def amount(self) -> Optional[Decimal]:
return self._amount
@amount.setter
def amount(self, value):
if not (isinstance(value, Decimal) or value is None):
raise LnInvoiceException(f"amount must be Decimal or None, not {value!r}")
if value is None:
self._amount = None
return
assert isinstance(value, Decimal)
if value.is_nan() or not (0 <= value <= TOTAL_COIN_SUPPLY_LIMIT_IN_BTC):
raise LnInvoiceException(f"amount is out-of-bounds: {value!r} BTC")
if value * 10**12 % 10:
# max resolution is millisatoshi
raise LnInvoiceException(f"Cannot encode {value!r}: too many decimal places")
self._amount = value
def get_amount_sat(self) -> Optional[RavenValue]:
# note that this has msat resolution potentially
if self.amount is None:
return None
return RavenValue(Satoshis(self.amount * COIN))
def get_routing_info(self, tag):
# note: tag will be 't' for trampoline
r_tags = list(filter(lambda x: x[0] == tag, self.tags))
# strip the tag type, it's implicitly 'r' now
r_tags = list(map(lambda x: x[1], r_tags))
# if there are multiple hints, we will use the first one that works,
# from a random permutation
random.shuffle(r_tags)
return r_tags
def get_amount_msat(self) -> Optional[int]:
if self.amount is None:
return None
return int(self.amount * COIN * 1000)
def get_features(self) -> 'LnFeatures':
from .lnutil import LnFeatures
return LnFeatures(self.get_tag('9') or 0)
def __str__(self):
return "LnAddr[{}, amount={}{} tags=[{}]]".format(
hexlify(self.pubkey.serialize()).decode('utf-8') if self.pubkey else None,
self.amount, self.net.BOLT11_HRP,
", ".join([k + '=' + str(v) for k, v in self.tags])
)
def get_min_final_cltv_expiry(self) -> int:
return self._min_final_cltv_expiry
def get_tag(self, tag):
for k, v in self.tags:
if k == tag:
return v
return None
def get_description(self) -> str:
return self.get_tag('d') or ''
def get_expiry(self) -> int:
exp = self.get_tag('x')
if exp is None:
exp = 3600
return int(exp)
def is_expired(self) -> bool:
now = time.time()
# BOLT-11 does not specify what expiration of '0' means.
# we treat it as 0 seconds here (instead of never)
return now > self.get_expiry() + self.date
class SerializableKey:
def __init__(self, pubkey):
self.pubkey = pubkey
def serialize(self):
return self.pubkey.get_public_key_bytes(True)
def lndecode(invoice: str, *, verbose=False, net=None) -> LnAddr:
if net is None:
net = constants.net
decoded_bech32 = bech32_decode(invoice, ignore_long_length=True)
hrp = decoded_bech32.hrp
data = decoded_bech32.data
if decoded_bech32.encoding is None:
raise LnDecodeException("Bad bech32 checksum")
if decoded_bech32.encoding != segwit_addr.Encoding.BECH32:
raise LnDecodeException("Bad bech32 encoding: must be using vanilla BECH32")
# BOLT #11:
#
# A reader MUST fail if it does not understand the `prefix`.
if not hrp.startswith('ln'):
raise LnDecodeException("Does not start with ln")
if not hrp[2:].startswith(net.BOLT11_HRP):
raise LnDecodeException(f"Wrong Lightning invoice HRP {hrp[2:]}, should be {net.BOLT11_HRP}")
data = u5_to_bitarray(data)
# Final signature 65 bytes, split it off.
if len(data) < 65*8:
raise LnDecodeException("Too short to contain signature")
sigdecoded = data[-65*8:].tobytes()
data = bitstring.ConstBitStream(data[:-65*8])
addr = LnAddr()
addr.pubkey = None
m = re.search("[^\\d]+", hrp[2:])
if m:
addr.net = BOLT11_HRP_INV_DICT[m.group(0)]
amountstr = hrp[2+m.end():]
# BOLT #11:
#
# A reader SHOULD indicate if amount is unspecified, otherwise it MUST
# multiply `amount` by the `multiplier` value (if any) to derive the
# amount required for payment.
if amountstr != '':
addr.amount = unshorten_amount(amountstr)
addr.date = data.read(35).uint
while data.pos != data.len:
tag, tagdata, data = pull_tagged(data)
# BOLT #11:
#
# A reader MUST skip over unknown fields, an `f` field with unknown
# `version`, or a `p`, `h`, or `n` field which does not have
# `data_length` 52, 52, or 53 respectively.
data_length = len(tagdata) / 5
if tag == 'r':
# BOLT #11:
#
# * `r` (3): `data_length` variable. One or more entries
# containing extra routing information for a private route;
# there may be more than one `r` field, too.
# * `pubkey` (264 bits)
# * `short_channel_id` (64 bits)
# * `feebase` (32 bits, big-endian)
# * `feerate` (32 bits, big-endian)
# * `cltv_expiry_delta` (16 bits, big-endian)
route=[]
s = bitstring.ConstBitStream(tagdata)
while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
route.append((s.read(264).tobytes(),
s.read(64).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe))
addr.tags.append(('r',route))
elif tag == 't':
s = bitstring.ConstBitStream(tagdata)
e = (s.read(264).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe)
addr.tags.append(('t', e))
elif tag == 'f':
fallback = parse_fallback(tagdata, addr.net)
if fallback:
addr.tags.append(('f', fallback))
else:
# Incorrect version.
addr.unknown_tags.append((tag, tagdata))
continue
elif tag == 'd':
addr.tags.append(('d', trim_to_bytes(tagdata).decode('utf-8')))
elif tag == 'h':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.tags.append(('h', trim_to_bytes(tagdata)))
elif tag == 'x':
addr.tags.append(('x', tagdata.uint))
elif tag == 'p':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.paymenthash = trim_to_bytes(tagdata)
elif tag == 's':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.payment_secret = trim_to_bytes(tagdata)
elif tag == 'n':
if data_length != 53:
addr.unknown_tags.append((tag, tagdata))
continue
pubkeybytes = trim_to_bytes(tagdata)
addr.pubkey = pubkeybytes
elif tag == 'c':
addr._min_final_cltv_expiry = tagdata.uint
elif tag == '9':
features = tagdata.uint
addr.tags.append(('9', features))
from .lnutil import validate_features
validate_features(features)
else:
addr.unknown_tags.append((tag, tagdata))
if verbose:
print('hex of signature data (32 byte r, 32 byte s): {}'
.format(hexlify(sigdecoded[0:64])))
print('recovery flag: {}'.format(sigdecoded[64]))
print('hex of data for signing: {}'
.format(hexlify(hrp.encode("ascii") + data.tobytes())))
print('SHA256 of above: {}'.format(sha256(hrp.encode("ascii") + data.tobytes()).hexdigest()))
# BOLT #11:
#
# A reader MUST check that the `signature` is valid (see the `n` tagged
# field specified below).
addr.signature = sigdecoded[:65]
hrp_hash = sha256(hrp.encode("ascii") + data.tobytes()).digest()
if addr.pubkey: # Specified by `n`
# BOLT #11:
#
# A reader MUST use the `n` field to validate the signature instead of
# performing signature recovery if a valid `n` field is provided.
ecc.ECPubkey(addr.pubkey).verify_message_hash(sigdecoded[:64], hrp_hash)
pubkey_copy = addr.pubkey
class WrappedBytesKey:
serialize = lambda: pubkey_copy
addr.pubkey = WrappedBytesKey
else: # Recover pubkey from signature.
addr.pubkey = SerializableKey(ecc.ECPubkey.from_sig_string(sigdecoded[:64], sigdecoded[64], hrp_hash))
return addr
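# Hedged end-to-end sketch (not part of the original module): a minimal
# encode/decode round trip. The private key and preimage below are dummies
# chosen only for illustration.
#
#   >>> privkey = bytes([1] * 32)
#   >>> addr = LnAddr(paymenthash=sha256(b'preimage').digest(),
#   ...               amount=Decimal('0.001'),
#   ...               tags=[('d', 'test payment'), ('x', 3600)])
#   >>> invoice = lnencode(addr, privkey)
#   >>> lndecode(invoice).get_description()
#   'test payment'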
| 35.38403
| 197
| 0.596604
|
e651671a325f45ea0ef7cc753fced2fb0cf64551
| 3,664
|
py
|
Python
|
tests/test_std.py
|
originalcoding/settingslib
|
f452778cb7a3d2c4cc292dc2df9e0f7733ab56c9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_std.py
|
originalcoding/settingslib
|
f452778cb7a3d2c4cc292dc2df9e0f7733ab56c9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_std.py
|
originalcoding/settingslib
|
f452778cb7a3d2c4cc292dc2df9e0f7733ab56c9
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from settingslib import Settings
def test_setting():
settings = Settings()
settings['foo'] = 'bar'
assert not settings
assert settings == {}
assert not hasattr(settings, 'foo')
settings['BAR'] = 'baz'
assert settings
assert settings == {'BAR': 'baz'}
assert settings.BAR == 'baz'
settings.BAR = 'bar'
assert settings == {'BAR': 'bar'}
assert settings['BAR'] == 'bar'
def test_getting(tmpdir, monkeypatch):
settings1 = Settings(A=1, B=2)
assert settings1.A == 1
pytest.raises(AttributeError, lambda: settings1.a)
assert settings1.get('a') == None
assert settings1.get('a', 42) == 42
assert settings1['A'] == 1
pytest.raises(KeyError, lambda: settings1['a'])
module_name, module_dir = 'somemodule', tmpdir.mkdir('test_getting')
module = module_dir.join(module_name + '.py')
module.write('obj = {\'foo\': \'bar\'}')
monkeypatch.syspath_prepend(str(module_dir))
setting_name, setting_value = 'OBJ_FROM_OTHER_MODULE', module_name + ':obj'
settings2 = Settings({setting_name: setting_value})
assert settings2.get(setting_name, imp=True) == {'foo': 'bar'}
assert settings2.get(setting_name, 'smth', imp=True) == {'foo': 'bar'}
assert settings2.get(setting_name) == setting_value
assert settings2.get('bbb', imp=True) is None
assert settings2.get('aaa', 123, imp=True) == 123
def test_deleting():
settings = Settings(A=1, B=2, C=3)
delattr(settings, 'A')
assert settings == {'B': 2, 'C': 3}
pytest.raises(KeyError, lambda: settings['A'])
pytest.raises(AttributeError, lambda: settings.A)
del settings['C']
assert settings == {'B': 2}
pytest.raises(AttributeError, lambda: settings.C)
pytest.raises(KeyError, lambda: settings['C'])
def test_del():
del settings['123']
pytest.raises(KeyError, test_del)
pytest.raises(AttributeError, lambda: delattr(settings, '123'))
assert settings == {'B': 2}
assert settings.__dict__ == {'B': 2}
def test_comparing_for_equality():
settings1 = Settings()
settings2 = Settings({})
settings3 = Settings({}, {})
assert settings1 == settings2
assert settings2 == settings3
assert settings1 != {'a': 1}
settings4 = Settings(A=1, B=2)
settings5 = Settings({'b': 1}, {'A': 1}, B=2)
assert settings4 == {'A': 1, 'B': 2}
assert settings4 == settings5
def test_updating(check_settings):
expected_data = {'A': 1, 'B': 2, 'C': 3, 'D': 4}
settings = Settings()
settings.update({'A': 1, 'foo': 'bar'}, B=2, C=3)
settings.update(D=4, bar='baz')
settings = check_settings(settings, expected_data)
settings.update()  # update() with no arguments is allowed, just like on a plain dict
def test_copying():
settings1 = Settings(A=1)
settings2 = settings1.copy()
assert not settings1 is settings2
assert settings1 == settings2
assert settings2.A == 1
settings2['A'] = 2
assert settings1.A != 2
assert settings2.A == 2
def test_adding(check_settings):
expected_data = {'A': 26, 'B': 2, 'C': 24}
settings1 = Settings(A=1, B=2)
settings2 = Settings(C=24, A=26)
combined_settings = settings1 + settings2
assert combined_settings != settings1
assert combined_settings != settings2
combined_settings = check_settings(combined_settings, expected_data)
expected_data['A'] = 1
combined_settings += dict(A=1)
combined_settings = check_settings(combined_settings, expected_data)
expected_data['A'] = -3
combined_settings = combined_settings + dict(A=-3)
check_settings(combined_settings, expected_data)
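# Hedged summary sketch (not part of the original tests): the behaviour the
# tests above pin down, in one place - only UPPERCASE keys are kept and
# exposed as attributes.
#
#   >>> s = Settings(DEBUG=True)
#   >>> s.update({'ignored': 1}, TIMEOUT=30)
#   >>> (s.DEBUG, s.TIMEOUT, 'ignored' in s)
#   (True, 30, False)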
| 26.941176
| 79
| 0.645469
|
5e428493ca0430fe4ba000cb8af8b8bd8fd7231c
| 95,049
|
py
|
Python
|
test/functional/p2p_segwit.py
|
diazcoin/diaz
|
0fbc2b2fc5259871e86b47b7b5f1023c37416b61
|
[
"MIT"
] | 3
|
2019-08-12T15:58:48.000Z
|
2019-09-04T07:27:44.000Z
|
test/functional/p2p_segwit.py
|
diazcoin/diaz
|
0fbc2b2fc5259871e86b47b7b5f1023c37416b61
|
[
"MIT"
] | null | null | null |
test/functional/p2p_segwit.py
|
diazcoin/diaz
|
0fbc2b2fc5259871e86b47b7b5f1023c37416b61
|
[
"MIT"
] | 1
|
2019-09-07T02:45:39.000Z
|
2019-09-07T02:45:39.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import DiazTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
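# Illustrative sketch: for a hypothetical 20-byte pubkeyhash, the returned script is the
# standard P2PKH template OP_DUP OP_HASH160 <pubkeyhash> OP_EQUALVERIFY OP_CHECKSIG,
# e.g. get_p2pkh_script(hash160(pubkey)) for some pubkey used later in the legacy-signing tests.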
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
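# Illustrative sketch: after signing, the input's witness stack is
#   [<signature || hashtype-byte>, <witness script>]
# which is the expected form for spending a P2WSH-wrapped pay-to-pubkey program.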
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
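# Worked example (hypothetical sizes): with base_size = 1000 and total_size = 1400,
# the witness portion is 400 bytes and
#   vsize = int((3 * 1000 + 1400 + 3) / 4) = 1100
# i.e. witness bytes are discounted to a quarter of their serialized size.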
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx))
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_message(msg_block(block) if with_witness else msg_no_witness_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
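# Illustrative usage (hypothetical arguments):
#   test_transaction_acceptance(node, p2p, tx, with_witness=True, accepted=False)
# submits tx with its witness over p2p and asserts it is kept out of the mempool;
# passing reason='...' additionally checks that the string shows up in the node's debug log.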
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(DiazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
        # This test exercises SegWit both pre- and post-activation, so pin the activation
        # height with -segwitheight rather than relying on the default deployment schedule.
self.extra_args = [
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-whitelist=127.0.0.1", "-acceptnonstdtxn=1", "-segwitheight=-1"]
]
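        # In short: node0 relays non-standard transactions and activates segwit at
        # SEGWIT_HEIGHT, node1 enforces standardness with the same activation height,
        # and node2 never activates segwit (-segwitheight=-1).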
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
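    # Illustrative sketch: each method below decorated with @subtest, when called as
    # self.test_foo(), logs the subtest name, asserts the expected segwit activation
    # state before and after, and checks that self.utxo is non-empty so the next
    # subtest always has an output to spend.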
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_no_witness_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
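        # (2 is the plain MSG_BLOCK inv type; OR-ing in MSG_WITNESS_FLAG, which is assumed
        # to match upstream Bitcoin Core's 1 << 30, requests the witness-serialized block.)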
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
        # Test that announcing a block via inv results in a getdata, and that
        # announcing a version 4 or random versionbits block via a header also results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
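            # Worked example of the BIP141 weight formula: a block with a 300,000-byte
            # stripped serialization and a 400,000-byte full serialization weighs
            # 3 * 300000 + 400000 = 1,300,000 weight units (vsize = 325,000).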
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
                # Check that default_witness_commitment matches the expected commitment script.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
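                # (Per BIP141 the witness merkle root is computed over wtxids with the
                # coinbase's wtxid pinned to 0, then wrapped by get_witness_script() into
                # the OP_RETURN commitment output that default_witness_commitment mirrors.)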
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == 1
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
        # Delivering this transaction with witness should fail (no matter who
        # it's from).
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older diazd's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let diazd fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(block.serialize().hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(block_2.serialize().hex())
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
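        # Size check (assuming the usual script serialization): each 520-byte push costs
        # 523 bytes (OP_PUSHDATA2 + 2-byte length + data), so 19 * 523 = 9937 bytes, plus
        # 63 OP_DROPs and OP_TRUE gives 10001 bytes, one over the 10,000-byte program limit.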
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert len(witness_program) == MAX_PROGRAM_LENGTH
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
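        # The segwit v0 signature hash (BIP143) commits to the value being spent, which is
        # why signing with a too-large or too-small input value below must be rejected.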
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
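                # Covers all six combinations: ALL/NONE/SINGLE, each with and without ANYONECANPAY.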
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
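        # The standardness limits exercised below mirror Bitcoin Core policy constants
        # (assumed values: 100 witness stack items, 80-byte stack elements, 3600-byte witnessScript).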
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
self.sync_blocks()
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
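        # Worked numbers, assuming MAX_SIGOP_COST == 80000: sigops_per_script == 293, so
        # outputs == 80000 // 293 + 2 == 275 and extra_sigops_available == 80000 % 293 == 11.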
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of tx that puts witness flag to 3 always
def serialize_with_bogus_witness(tx):
flags = 3
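            # BIP144 defines only flag bit 0x1 (witness data); the extra 0x2 bit set here is what
            # triggers the "Superfluous witness record" / "Unknown transaction optional data"
            # rejections asserted below.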
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
| 46.050872
| 187
| 0.654347
|
ee93d706862ea7c36e3eaf26368b840664981a39
| 16,340
|
py
|
Python
|
relate/urls.py
|
hanjianwei/relate
|
971e27a1bdd69236dc6dc294024b50584435a18d
|
[
"Unlicense"
] | null | null | null |
relate/urls.py
|
hanjianwei/relate
|
971e27a1bdd69236dc6dc294024b50584435a18d
|
[
"Unlicense"
] | 6
|
2015-08-18T00:13:40.000Z
|
2018-01-31T05:55:13.000Z
|
relate/urls.py
|
davis68/relate
|
eb40c8c17d4a724a60de3caa3334521a833bad5c
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.conf.urls import include, url
from django.contrib import admin
from course.constants import COURSE_ID_REGEX, FLOW_ID_REGEX, STATICPAGE_PATH_REGEX
import course.auth
import course.views
import course.im
import course.sandbox
import course.grades
import course.grading
import course.calendar
import course.versioning
import course.flow
import course.analytics
import course.exam
import course.api
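# The course-scoped patterns below are assembled by concatenating raw regex fragments
# (COURSE_ID_REGEX, FLOW_ID_REGEX, STATICPAGE_PATH_REGEX), so every view inside a course
# shares the same named capture groups.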
urlpatterns = [
url(r"^login/$",
course.auth.sign_in_choice,
name="relate-sign_in_choice"),
url(r"^login/user-password/$",
course.auth.sign_in_by_user_pw,
name="relate-sign_in_by_user_pw"),
url(r"^login/sign-up/$",
course.auth.sign_up,
name="relate-sign_up"),
url(r"^login/reset-password/$",
course.auth.reset_password,
name="relate-reset_password"),
url(r"^login/reset-password/(?P<field>instid)/$",
course.auth.reset_password,
name="relate-reset_password"),
url(r"^login/reset-password/stage-2"
"/(?P<user_id>[0-9]+)"
"/(?P<sign_in_key>[a-zA-Z0-9]+)",
course.auth.reset_password_stage2,
name="relate-reset_password_stage2"),
url(r"^login/by-email/$",
course.auth.sign_in_by_email,
name="relate-sign_in_by_email"),
url(r"^login/token"
"/(?P<user_id>[0-9]+)"
"/(?P<sign_in_key>[a-zA-Z0-9]+)"
"/$",
course.auth.sign_in_stage2_with_token,
name="relate-sign_in_stage2_with_token"),
url(r"^logout/$",
course.auth.sign_out,
name="relate-logout"),
url(r"^logout-confirmation/$",
course.auth.sign_out_confirmation,
name="relate-logout-confirmation"),
url(r"^profile/$",
course.auth.user_profile,
name="relate-user_profile"),
url(
r"^course"
"/" + COURSE_ID_REGEX
+ "/auth-tokens/$",
course.auth.manage_authentication_tokens,
name="relate-manage_authentication_tokens"),
url(r"^generate-ssh-key/$",
course.views.generate_ssh_keypair,
name="relate-generate_ssh_keypair"),
url(r"^monitor-task"
"/(?P<task_id>[-0-9a-f]+)"
"$",
course.views.monitor_task,
name="relate-monitor_task"),
# {{{ troubleshooting
url(r"^user/impersonate/$",
course.auth.impersonate,
name="relate-impersonate"),
url(r"^user/stop_impersonating/$",
course.auth.stop_impersonating,
name="relate-stop_impersonating"),
url(r"^time/set-fake-time/$",
course.views.set_fake_time,
name="relate-set_fake_time"),
url(r"^time/set-pretend-facilities/$",
course.views.set_pretend_facilities,
name="relate-set_pretend_facilities"),
# }}}
# {{{ course
url(r"^$", course.views.home, name="relate-home"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/$",
course.views.course_page,
name="relate-course_page"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/edit/$",
course.views.edit_course,
name="relate-edit_course"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/page"
"/" + STATICPAGE_PATH_REGEX
+ "/$",
course.views.static_page,
name="relate-content_page"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/instant-message/$",
course.im.send_instant_message,
name="relate-send_instant_message"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/sandbox/markup/$",
course.sandbox.view_markup_sandbox,
name="relate-view_markup_sandbox"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/sandbox/page/$",
course.sandbox.view_page_sandbox,
name="relate-view_page_sandbox"),
url("^purge-pageview-data/$",
course.flow.purge_page_view_data,
name="relate-purge_page_view_data"),
# }}}
# {{{ grading
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/my/$",
course.grades.view_participant_grades,
name="relate-view_participant_grades"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/participant"
"/(?P<participation_id>[0-9]+)"
"/$",
course.grades.view_participant_grades,
name="relate-view_participant_grades"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/participants/$",
course.grades.view_participant_list,
name="relate-view_participant_list"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/opportunities/$",
course.grades.view_grading_opportunity_list,
name="relate-view_grading_opportunity_list"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/overview/$",
course.grades.view_gradebook,
name="relate-view_gradebook"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/overview/csv/$",
course.grades.export_gradebook_csv,
name="relate-export_gradebook_csv"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/by-opportunity"
"/(?P<opp_id>[0-9]+)"
"/$",
course.grades.view_grades_by_opportunity,
name="relate-view_grades_by_opportunity"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/single-grade"
"/(?P<participation_id>[0-9]+)"
"/(?P<opportunity_id>[0-9]+)"
"/$",
course.grades.view_single_grade,
name="relate-view_single_grade"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/reopen-session"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<opportunity_id>[0-9]+)"
"/$",
course.grades.view_reopen_session,
name="relate-view_reopen_session"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading"
"/csv-import"
"/$",
course.grades.import_grades,
name="relate-import_grades"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading"
"/flow-page"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/$",
course.grading.grade_flow_page,
name="relate-grade_flow_page"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/prev-grades"
"/flow-page"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/$",
course.grading.get_prev_grades_dropdown_content,
name="relate-get_prev_grades_dropdown_content"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/statistics"
"/" + FLOW_ID_REGEX
+ "/$",
course.grading.show_grader_statistics,
name="relate-show_grader_statistics"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grading/download-submissions"
"/" + FLOW_ID_REGEX
+ "/$",
course.grades.download_all_submissions,
name="relate-download_all_submissions"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/edit-grading-opportunity"
"/(?P<opportunity_id>[-0-9]+)"
"/$",
course.grades.edit_grading_opportunity,
name="relate-edit_grading_opportunity"),
# }}}
# {{{ enrollment
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/enroll/$",
course.enrollment.enroll_view,
name="relate-enroll"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/preapprove"
"/$",
course.enrollment.create_preapprovals,
name="relate-create_preapprovals"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/query-participations"
"/$",
course.enrollment.query_participations,
name="relate-query_participations"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/edit-participation"
"/(?P<participation_id>[-0-9]+)"
"/$",
course.enrollment.edit_participation,
name="relate-edit_participation"),
# }}}
# {{{ media
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/media/(?P<commit_sha>[a-f0-9]+)"
"/(?P<media_path>.*)$",
course.views.get_media,
name="relate-get_media"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/file-version/(?P<commit_sha>[a-f0-9]+)"
"/(?P<path>.*)$",
course.views.get_repo_file,
name="relate-get_repo_file"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/f"
"/(?P<path>.*)$",
course.views.get_current_repo_file,
name="relate-get_current_repo_file"),
# }}}
# {{{ calendar
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/create-recurring-events/$",
course.calendar.create_recurring_events,
name="relate-create_recurring_events"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/renumber-events/$",
course.calendar.renumber_events,
name="relate-renumber_events"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/calendar/$",
course.calendar.view_calendar,
name="relate-view_calendar"),
# }}}
# {{{ versioning
url(r"^new-course/$",
course.versioning.set_up_new_course,
name="relate-set_up_new_course"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/update/$",
course.versioning.update_course,
name="relate-update_course"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/git"
"/(?P<git_path>.*)"
"$",
course.versioning.git_endpoint,
name="relate-git_endpoint"),
# }}}
# {{{ flow-related
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow"
"/" + FLOW_ID_REGEX
+ "/start"
"/$",
course.flow.view_start_flow,
name="relate-view_start_flow"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[-0-9]+)"
"/resume"
"/$",
course.flow.view_resume_flow,
name="relate-view_resume_flow"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/$",
course.flow.view_flow_page,
name="relate-view_flow_page"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/prev_answers"
"/flow-page"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/$",
course.flow.get_prev_answer_visits_dropdown_content,
name="relate-get_prev_answer_visits_dropdown_content"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[-0-9]+)"
"/update-expiration-mode"
"/$",
course.flow.update_expiration_mode,
name="relate-update_expiration_mode"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[-0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/update-bookmark-state"
"/$",
course.flow.update_page_bookmark_state,
name="relate-update_page_bookmark_state"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[0-9]+)"
"/finish"
"/$",
course.flow.finish_flow_session_view,
name="relate-finish_flow_session_view"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/flow-page-interaction-email"
"/$",
course.flow.send_email_about_flow_page,
name="relate-flow_page_interaction_email"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-session"
"/(?P<flow_session_id>[0-9]+)"
"/(?P<page_ordinal>[0-9]+)"
"/unsubmit/$",
course.flow.view_unsubmit_flow_page,
name="relate-unsubmit_flow_page"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/test-flow"
"/$",
course.views.test_flow,
name="relate-test_flow"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/instant-flow"
"/$",
course.views.manage_instant_flow_requests,
name="relate-manage_instant_flow_requests"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/regrade-flows"
"/$",
course.flow.regrade_flows_view,
name="relate-regrade_flows_view"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grant-exception"
"/$",
course.views.grant_exception,
name="relate-grant_exception"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grant-exception"
"/(?P<participation_id>[0-9]+)"
"/" + FLOW_ID_REGEX
+ "/$",
course.views.grant_exception_stage_2,
name="relate-grant_exception_stage_2"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/grant-exception"
"/(?P<participation_id>[0-9]+)"
"/" + FLOW_ID_REGEX
+ "/(?P<session_id>[0-9]+)"
"/$",
course.views.grant_exception_stage_3,
name="relate-grant_exception_stage_3"),
# }}}
# {{{ analytics
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-analytics"
"/$",
course.analytics.flow_list,
name="relate-flow_list"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-analytics"
"/" + FLOW_ID_REGEX
+ "/$",
course.analytics.flow_analytics,
name="relate-flow_analytics"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/flow-analytics"
"/" + FLOW_ID_REGEX
+ "/page"
"/(?P<group_id>[-_a-zA-Z0-9]+)"
"/(?P<page_id>[-_a-zA-Z0-9]+)"
"/$",
course.analytics.page_analytics,
name="relate-page_analytics"),
# }}}
# {{{ exams
url(r"^issue-exam-ticket"
"/$",
course.exam.issue_exam_ticket,
name="relate-issue_exam_ticket"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/batch-issue-exam-tickets"
"/$",
course.exam.batch_issue_exam_tickets,
name="relate-batch_issue_exam_tickets"),
url(r"^exam-check-in/$",
course.exam.check_in_for_exam,
name="relate-check_in_for_exam"),
url(r"^list-available-exams/$",
course.exam.list_available_exams,
name="relate-list_available_exams"),
# }}}
# {{{ django-select2
url(r"^select2/", include("django_select2.urls")),
#}}}
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/api/v1/get-flow-sessions$",
course.api.get_flow_sessions,
name="relate-course_get_flow_session"),
url(r"^course"
"/" + COURSE_ID_REGEX
+ "/api/v1/get-flow-session-content$",
course.api.get_flow_session_content,
name="relate-course_get_flow_session_content"),
url(r"^admin/", admin.site.urls),
url("^social-auth/", include("social_django.urls", namespace="social")),
url(r"^saml2/", include("djangosaml2.urls")),
]
# vim: fdm=marker
| 28.767606
| 82
| 0.57448
|
adb243b22e881df2460835b8d3590499df402e2c
| 1,974
|
py
|
Python
|
SoftLayer/CLI/hardware/bandwidth.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/hardware/bandwidth.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
SoftLayer/CLI/hardware/bandwidth.py
|
ko101/softlayer-python
|
f4cc9fa2eb01d97c0e890907ef6735390f1a5b10
|
[
"MIT"
] | null | null | null |
"""GBandwidth data over date range. Bandwidth is listed in GB"""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import helpers
from SoftLayer.CLI.virt.bandwidth import create_bandwidth_table
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@click.option('--start_date', '-s', type=click.STRING, required=True,
              help="Start Date YYYY-MM-DD or YYYY-MM-DDTHH:mm:ss")
@click.option('--end_date', '-e', type=click.STRING, required=True,
              help="End Date YYYY-MM-DD or YYYY-MM-DDTHH:mm:ss")
@click.option('--summary_period', '-p', type=click.INT, default=3600, show_default=True,
help="300, 600, 1800, 3600, 43200 or 86400 seconds")
@click.option('--quite_summary', '-q', is_flag=True, default=False, show_default=True,
help="Only show the summary table")
@environment.pass_env
def cli(env, identifier, start_date, end_date, summary_period, quite_summary):
"""Bandwidth data over date range. Bandwidth is listed in GB
    Using just a date might give you times that are off by 1 hour; use T00:01 to get just that specific day's data
Timezones can also be included with the YYYY-MM-DDTHH:mm:ss.00000-HH:mm format.
Due to some rounding and date alignment details, results here might be slightly different than
results in the control portal.
Example::
slcli hw bandwidth 1234 -s 2019-05-01T00:01 -e 2019-05-02T00:00:01.00000-12:00
"""
hardware = SoftLayer.HardwareManager(env.client)
hardware_id = helpers.resolve_id(hardware.resolve_ids, identifier, 'hardware')
data = hardware.get_bandwidth_data(hardware_id, start_date, end_date, None, summary_period)
title = "Bandwidth Report: %s - %s" % (start_date, end_date)
table, sum_table = create_bandwidth_table(data, summary_period, title)
env.fout(sum_table)
if not quite_summary:
env.fout(table)
| 42.913043
| 102
| 0.718845
|
81abd0e80275384fb494f25f89d0580808288748
| 3,386
|
py
|
Python
|
method4_unclassified_cookies.py
|
dibollinger/CookieBlock-Violation-Detection
|
044c183bc52e8aaf3febe9caa3591be7a13bccac
|
[
"MIT"
] | 4
|
2021-06-08T20:29:25.000Z
|
2022-03-29T14:51:02.000Z
|
method4_unclassified_cookies.py
|
dibollinger/CookieBlock-Other-Scripts
|
6a1b028e5d56c0068d2725f7f1b43fa8e2815875
|
[
"MIT"
] | null | null | null |
method4_unclassified_cookies.py
|
dibollinger/CookieBlock-Other-Scripts
|
6a1b028e5d56c0068d2725f7f1b43fa8e2815875
|
[
"MIT"
] | 1
|
2021-10-19T09:47:02.000Z
|
2021-10-19T09:47:02.000Z
|
# Copyright (C) 2021 Dino Bollinger, ETH Zürich, Information Security Group
# Released under the MIT License
"""
Using a database of collected cookie + label data, determine potential GDPR violations by
identifying uncategorized cookies, which usually cannot be rejected and have no description.
----------------------------------
Required arguments:
<db_path> Path to database to analyze.
Optional arguments:
    --out_path <out_path>: Directory to store the results.
Usage:
method4_unclassified_cookies.py <db_path> [--out_path <out_path>]
"""
from docopt import docopt
import os
import sqlite3
import re
import logging
from utils import (setupLogger, CONSENTDATA_QUERY, write_json,
write_vdomains, get_violation_details_consent_table)
logger = logging.getLogger("vd")
unclass_pattern = re.compile("(unclassified|uncategorized|Unclassified Cookies|no clasificados)", re.IGNORECASE)
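# Matches category names beginning with e.g. "unclassified", "Uncategorized" or
# "no clasificados" (case-insensitive); used with .match(), so only the start of the
# category string is checked.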
def main():
"""
Detect potential violations by extracting all cookies that are unclassified.
@return: exit code, 0 for success
"""
argv = None
cargs = docopt(__doc__, argv=argv)
setupLogger(".")
logger.info("Running method 04: Unclassified Cookies")
database_path = cargs["<db_path>"]
if not os.path.exists(database_path):
logger.error("Database file does not exist.")
return 1
logger.info(f"Database used: {database_path}")
# enable dictionary access by column name
conn = sqlite3.connect(database_path)
conn.row_factory = sqlite3.Row
# variables to collection violation details
total_domains = set()
violation_details = dict()
violation_domains = set()
violation_count = 0
total_count = 0
logger.info("Extracting info from database...")
with conn:
cur = conn.cursor()
cur.execute(CONSENTDATA_QUERY)
for row in cur:
if row["cat_id"] == 4 or unclass_pattern.match(row["cat_name"]):
#logger.debug(f"Potential Violation: {row['consent_name']};{row['consent_domain']};{row['cat_name']}")
vdomain = row["site_url"]
violation_domains.add(vdomain)
violation_count += 1
if vdomain not in violation_details:
violation_details[vdomain] = list()
violation_details[vdomain].append(get_violation_details_consent_table(row))
total_domains.add(row["site_url"])
total_count += 1
conn.close()
logger.info(f"Total number of cookies: {total_count}")
logger.info(f"Number of unclassified cookies: {violation_count}")
logger.info(f"Number of sites in total: {len(total_domains)}")
logger.info(f"Number of sites with unclassified cookies: {len(violation_domains)}")
v_per_cmp = [0, 0, 0]
for url, violating_cookies in violation_details.items():
for c in violating_cookies:
assert(c["cmp_type"] >= 0)
v_per_cmp[c["cmp_type"]] += 1
logger.info(f"Potential Violations per CMP Type: {v_per_cmp}")
if cargs["--out_path"]:
out_path = cargs["--out_path"]
else:
out_path = "./violation_stats/"
write_json(violation_details, "method4_cookies.json", out_path)
write_vdomains(violation_domains, "method4_domains.txt", out_path)
return 0
if __name__ == '__main__':
exit(main())
| 33.196078
| 118
| 0.664501
|
80363b03bd62462e21d05eefdf268e931b50736d
| 2,326
|
py
|
Python
|
pymc/backends/__init__.py
|
percevalve/pymc
|
05aa247957553f608d9690ff9f61240aa35b71f8
|
[
"Apache-2.0"
] | 5,476
|
2015-02-24T13:55:15.000Z
|
2021-09-30T01:10:06.000Z
|
pymc/backends/__init__.py
|
Sayam753/pymc3
|
e03f5bf6a85ab350bf2ae3029dade7b9fc12dd07
|
[
"Apache-2.0"
] | 4,076
|
2015-02-23T20:19:19.000Z
|
2021-09-30T07:35:45.000Z
|
pymc/backends/__init__.py
|
Sayam753/pymc3
|
e03f5bf6a85ab350bf2ae3029dade7b9fc12dd07
|
[
"Apache-2.0"
] | 1,540
|
2015-02-25T00:58:51.000Z
|
2021-09-23T08:59:33.000Z
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Storage backends for traces
The NDArray (pymc.backends.NDArray) backend holds the entire trace in memory.
Selecting values from a backend
-------------------------------
After a backend is finished sampling, it returns a MultiTrace object.
Values can be accessed in a few ways. The easiest way is to index the
backend object with a variable or variable name.
>>> trace['x'] # or trace.x or trace[x]
The call will return the sampling values of `x`, with the values for
all chains concatenated. (For a single call to `sample`, the number of
chains will correspond to the `cores` argument.)
To discard the first N values of each chain, slicing syntax can be
used.
>>> trace['x', 1000:]
The `get_values` method offers more control over which values are
returned. The call below will discard the first 1000 iterations
from each chain and keep the values for each chain as separate arrays.
>>> trace.get_values('x', burn=1000, combine=False)
The `chains` parameter of `get_values` can be used to limit the chains
that are retrieved.
>>> trace.get_values('x', burn=1000, chains=[0, 2])
MultiTrace objects also support slicing. For example, the following
call would return a new trace object without the first 1000 sampling
iterations for all traces and variables.
>>> sliced_trace = trace[1000:]
The backend for the new trace is always NDArray, regardless of the
type of original trace.
Loading a saved backend
-----------------------
Saved backends can be loaded using `arviz.from_netcdf`
"""
from pymc.backends.arviz import predictions_to_inference_data, to_inference_data
from pymc.backends.ndarray import (
NDArray,
load_trace,
point_list_to_multitrace,
save_trace,
)
| 33.228571
| 80
| 0.734308
|
c35812c0b9db94444ddfc7c241118853858a397b
| 4,519
|
py
|
Python
|
regression/gp/sparse_gp_torch.py
|
hanyas/regression
|
da0f5072f3f7ff8785b02ec693737a8043653411
|
[
"MIT"
] | 3
|
2020-04-29T16:45:55.000Z
|
2021-01-22T15:08:53.000Z
|
regression/gp/sparse_gp_torch.py
|
hanyas/regression
|
da0f5072f3f7ff8785b02ec693737a8043653411
|
[
"MIT"
] | null | null | null |
regression/gp/sparse_gp_torch.py
|
hanyas/regression
|
da0f5072f3f7ff8785b02ec693737a8043653411
|
[
"MIT"
] | 1
|
2020-07-23T17:44:12.000Z
|
2020-07-23T17:44:12.000Z
|
import numpy as np
import torch
from torch.optim import Adam
import gpytorch
from gpytorch.means import ZeroMean
from gpytorch.kernels import ScaleKernel, RBFKernel, InducingPointKernel
from gpytorch.distributions import MultivariateNormal
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.settings import max_preconditioner_size
from gpytorch.settings import max_root_decomposition_size
from gpytorch.settings import fast_pred_var
from sklearn.preprocessing import StandardScaler
from regression.gp.utils import transform, inverse_transform
from regression.gp.utils import ensure_args_torch_floats
from regression.gp.utils import ensure_res_numpy_floats
class SparseGPRegressor(gpytorch.models.ExactGP):
@ensure_args_torch_floats
def __init__(self, input, inducing_size, device='cpu',
input_transform=None, target_transform=None):
if device == 'gpu' and torch.cuda.is_available():
self.device = torch.device('cuda:0')
else:
self.device = torch.device('cpu')
if input.ndim == 1:
self.input_size = 1
else:
self.input_size = input.shape[-1]
self.inducing_size = inducing_size
_likelihood = GaussianLikelihood()
super(SparseGPRegressor, self).__init__(train_inputs=None,
train_targets=None,
likelihood=_likelihood)
self.mean_module = ZeroMean()
self.base_covar_module = ScaleKernel(RBFKernel())
inducing_idx = np.random.choice(len(input), inducing_size, replace=False)
self.covar_module = InducingPointKernel(self.base_covar_module,
inducing_points=input[inducing_idx, ...],
likelihood=_likelihood)
self.input_trans = input_transform
self.target_trans = target_transform
@property
def model(self):
return self
def forward(self, input):
mean = self.mean_module(input)
covar = self.covar_module(input)
return MultivariateNormal(mean, covar)
@ensure_args_torch_floats
@ensure_res_numpy_floats
def predict(self, input):
self.device = torch.device('cpu')
self.model.eval().to(self.device)
self.likelihood.eval().to(self.device)
input = transform(input.reshape((-1, self.input_size)), self.input_trans)
with max_preconditioner_size(10), torch.no_grad():
with max_root_decomposition_size(30), fast_pred_var():
output = self.likelihood(self.model(input)).mean
output = inverse_transform(output, self.target_trans).squeeze()
return output
def init_preprocess(self, target, input):
self.target_trans = StandardScaler()
self.input_trans = StandardScaler()
self.target_trans.fit(target[:, None])
self.input_trans.fit(input)
@ensure_args_torch_floats
def fit(self, target, input, nb_iter=100, lr=1e-1,
verbose=True, preprocess=True):
if input.ndim == 1:
input = input.reshape(-1, self.input_size)
if preprocess:
if self.input_trans is None and self.target_trans is None:
self.init_preprocess(target, input)
target = transform(target[:, None], self.target_trans).squeeze()
input = transform(input, self.input_trans)
# update inducing points
inducing_idx = np.random.choice(len(input), self.inducing_size, replace=False)
self.model.covar_module.inducing_points.data = input[inducing_idx, ...]
target = target.to(self.device)
input = input.to(self.device)
self.model.set_train_data(input, target, strict=False)
self.model.train().to(self.device)
self.likelihood.train().to(self.device)
optimizer = Adam([{'params': self.parameters()}], lr=lr)
mll = ExactMarginalLogLikelihood(self.likelihood, self.model)
for i in range(nb_iter):
optimizer.zero_grad()
_output = self.model(input)
loss = - mll(_output, target)
loss.backward()
if verbose:
print('Iter %d/%d - Loss: %.3f' % (i + 1, nb_iter, loss.item()))
optimizer.step()
if torch.cuda.is_available():
torch.cuda.empty_cache()
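# Minimal usage sketch (illustrative only; `x_train`, `y_train`, `x_test` are assumed to be
# numpy arrays of shape (N, D) and (N,)):
#
#     gp = SparseGPRegressor(x_train, inducing_size=50)
#     gp.fit(y_train, x_train, nb_iter=100, lr=1e-1, verbose=False)
#     y_pred = gp.predict(x_test)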
| 35.031008
| 90
| 0.643505
|
854633d33e25feabc6efb32507ec530c39592716
| 7,620
|
py
|
Python
|
tests/test_wraith.py
|
AndreasHeger/CGATReport
|
681e198cd13ef533f1c496645a1754b2e0829232
|
[
"MIT"
] | 9
|
2015-02-14T16:53:58.000Z
|
2022-01-03T20:22:42.000Z
|
tests/test_wraith.py
|
AndreasHeger/CGATReport
|
681e198cd13ef533f1c496645a1754b2e0829232
|
[
"MIT"
] | 26
|
2015-01-29T15:39:02.000Z
|
2018-02-14T09:04:21.000Z
|
tests/test_wraith.py
|
AndreasHeger/CGATReport
|
681e198cd13ef533f1c496645a1754b2e0829232
|
[
"MIT"
] | 4
|
2015-11-25T17:11:11.000Z
|
2022-01-03T20:22:45.000Z
|
"""Use wraith to compare current version against published docs.
"""
import unittest
import os
import copy
import re
import yaml
import shlex
import subprocess
import contextlib
from distutils.version import LooseVersion
import http.server
import socketserver
import threading
REFERENCE_URL = "https://www.cgat.org/downloads/public/CGATReport/documentation"
WRAITH_WORKDIR = os.path.abspath("wraith")
TEST_PORT=9100
TEST_HOST="localhost"
spider_config_template = """
browser: "phantomjs"
domains:
test: http://{test_host}:{test_port}
spider_skips:
- !ruby/regexp /static$/
- !ruby/regexp /%23/
- !ruby/regexp /.eps$/
- !ruby/regexp /.svg$/
- !ruby/regexp /.xlsx$/
- !ruby/regexp /notebook/
- !ruby/regexp /code/
directory: 'shots'
imports: "{wraith_data_config}"
phantomjs_options: '--ignore-ssl-errors=true --ssl-protocol=tlsv1'
"""
capture_config_template = """
browser: "phantomjs"
domains:
test: http://{test_host}:{test_port}
current: {reference_url}
spider_skips:
- !ruby/regexp /static$/
- !ruby/regexp /%23/
imports: "{wraith_data_config}"
screen_widths:
- 1280
directory: 'shots'
fuzz: '20%'
threshold: 5
gallery:
thumb_width: 200
thumb_height: 200
mode: diffs_only
phantomjs_options: '--ignore-ssl-errors=true --ssl-protocol=tlsv1'
"""
@contextlib.contextmanager
def changedir(path):
save_dir = os.path.abspath(os.getcwd())
os.chdir(path)
try:
yield
finally:
os.chdir(save_dir)
def run_server():
run("python -m http.server {} >& server.log".format(TEST_PORT))
@contextlib.contextmanager
def start_server(workdir):
handler = http.server.SimpleHTTPRequestHandler
with changedir(workdir):
# thread = threading.Thread(target=run_server)
# thread.start()
print("yielding")
yield
print("back from yield")
def run(statement,
return_stdout=False,
return_popen=False,
**kwargs):
'''execute a command line statement.
By default this method returns the code returned by the executed
command. If *return_stdout* is True, the contents of stdout are
returned as a file object. If *return_popen*, the Popen object is
returned.
``kwargs`` are passed on to subprocess.call,
subprocess.check_output or subprocess.Popen.
Raises
------
OSError
If process failed or was terminated.
'''
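    # e.g. run("make html", return_stdout=True) -> decoded stdout of the command;
    #      run("make html") -> integer return code.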
# remove new lines
statement = " ".join(re.sub("\t+", " ", statement).split("\n")).strip()
print(statement)
if "<(" in statement:
shell = os.environ.get('SHELL', "/bin/bash")
if "bash" not in shell:
raise ValueError(
"require bash for advanced shell syntax: <()")
        # pipes.quote is deprecated (and `pipes` was never imported here);
        # shlex.quote is the Python 3 replacement.
        statement = "%s -c %s" % (shell, shlex.quote(statement))
if return_stdout:
return subprocess.check_output(statement, shell=True, **kwargs).decode("utf-8")
elif return_popen:
return subprocess.Popen(statement, shell=True, **kwargs)
else:
retcode = subprocess.call(statement, shell=True, **kwargs)
if retcode < 0:
raise OSError("process was terminated by signal %i" % -retcode)
return retcode
def check_version(cmd, regex, min_version):
    version_txt = run(cmd, return_stdout=True)
version = re.search(regex, version_txt).groups()[0]
if LooseVersion(version) < LooseVersion(min_version):
raise ValueError("version check failed: {} < {}, '{}'".format(
version, min_version, cmd))
return version
class TestWraith(unittest.TestCase):
def setUp(self):
source_dir = os.path.join(
os.path.dirname(os.path.dirname(
os.path.abspath(__file__))),
"doc", "_build", "html")
        # check if npm is installed
npm_version = check_version("npm --version", "(\S+)", "3.10")
# check if phantomjs is installed
phantomjs_version = check_version("npm list -g | grep phantom",
"phantomjs@(\S+)",
"2.1")
ruby_version = check_version("ruby --version",
"ruby (\S+)",
"2.1")
wraith_version = check_version(
"gem list | grep wraith",
"wraith \((\S+)\)",
"4.0.1")
# get gem info
        gem_data = yaml.safe_load(run("gem environment", return_stdout=True))
gem_paths = []
for record in gem_data["RubyGems Environment"]:
for key, value in record.items():
if key == "GEM PATHS":
gem_paths.extend(value)
break
if not gem_paths:
raise ValueError("could not find GEM PATHS in gem environment")
filenames = [os.path.join(path,
"gems/wraith-{}/lib/wraith/spider.rb".format(wraith_version))
for path in gem_paths]
if sum([os.path.exists(fn) for fn in filenames]) == 0:
raise ValueError("could not find file spider.rb to patch in {}".format(filenames))
for fn in filenames:
if not os.path.exists(fn):
continue
with open(fn) as inf:
data = inf.read()
if "path.downcase" in data:
with open(fn, "w") as outf:
outf.write(re.sub("path.downcase", "path", data))
# crawl new docs to collect documents to test
config_dir = os.path.abspath(os.path.join(WRAITH_WORKDIR, "config"))
wraith_spider_config = os.path.join(config_dir, "wraith_spider.yml")
wraith_capture_config = os.path.join(config_dir, "wraith_capture.yml")
wraith_data_config = os.path.join(config_dir, "wraith_data.yml")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
if not os.path.exists(wraith_spider_config):
# do not crawl with reference, as crawler follows external links
spider_config = spider_config_template.format(
wraith_data_config=os.path.basename(wraith_data_config),
test_host=TEST_HOST,
test_port=TEST_PORT)
with open(wraith_spider_config, "w") as outf:
outf.write(spider_config)
if not os.path.exists(wraith_data_config):
with start_server(source_dir) as server:
run("cd {} && wraith spider {}".format(WRAITH_WORKDIR, wraith_spider_config))
if not os.path.exists(wraith_capture_config):
# do not crawl with reference, as crawler follows external links
capture_config = capture_config_template.format(
wraith_data_config=os.path.basename(wraith_data_config),
reference_url=REFERENCE_URL,
test_host=TEST_HOST,
test_port=TEST_PORT)
with open(wraith_capture_config, "w") as outf:
outf.write(capture_config)
self.wraith_capture_config = wraith_capture_config
self.source_dir = source_dir
def test_against_reference(self):
with start_server(self.source_dir) as server:
run("cd {} && wraith capture {}".format(WRAITH_WORKDIR,
self.wraith_capture_config))
if __name__ == "__main__":
unittest.main()
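# Wraith workflow exercised above (sketch): `wraith spider <config>` crawls the locally served
# docs to collect paths, then `wraith capture <config>` screenshots each path on both domains
# and writes a diff gallery under `shots/`.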
| 31.229508
| 106
| 0.59895
|
80403d1f4d366ad53d0f77a57e58ecdd5e3facf4
| 3,109
|
py
|
Python
|
py_editor/oozaar/filect.py
|
surajsinghbisht054/py-editor
|
b9554e5589351384f42030eebe85a755fe674a4a
|
[
"Apache-2.0"
] | 3
|
2020-12-05T06:37:36.000Z
|
2021-06-08T18:41:27.000Z
|
py_editor/oozaar/filect.py
|
surajsinghbisht054/py-editor
|
b9554e5589351384f42030eebe85a755fe674a4a
|
[
"Apache-2.0"
] | null | null | null |
py_editor/oozaar/filect.py
|
surajsinghbisht054/py-editor
|
b9554e5589351384f42030eebe85a755fe674a4a
|
[
"Apache-2.0"
] | 1
|
2021-11-05T08:20:32.000Z
|
2021-11-05T08:20:32.000Z
|
#!/usr/bin/python
# ---------------- READ ME ---------------------------------------------
# This Script is Created Only For Practise And Educational Purpose Only
# This Script Is Created For http://bitforestinfo.blogspot.com
# This Script is Written By
#
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
__author__='''
######################################################
By S.S.B Group
######################################################
Suraj Singh
Admin
S.S.B Group
surajsinghbisht054@gmail.com
http://bitforestinfo.blogspot.com/
Note: We Feel Proud To Be Indian
######################################################
'''
# For Dialog Box
from Tkinter import *
import tkFileDialog
ExtType=[
('All Files ', '*.*'),
('.py (With Console)', '*.py'),
('.pyw (Without Console) ', '*.pyw')
]
def Save_as_txt(txtbox=None,mode='a', filepath=None, rt=None):
path=tkFileDialog.asksaveasfile(title='Save As', defaultextension='*.py',filetypes=ExtType)
storeobj=txtbox.get('1.0', END)
print path
if filepath:
filepath.set(path.name)
rt.title(path.name)
filetmp=open(path.name,mode)
filetmp.write(storeobj)
filetmp.close()
def open_txt(txtbox=None, rt=None,filepath=None):
path=tkFileDialog.askopenfilename(title="Open Script")
if path:
storefile=open(path,'r')
storechar=storefile.read()
rt.title(str('pyEditor - '+path))
filepath.set(path)
txtbox.insert('1.0', storechar)
storefile.close()
def save_txt(txtbox=None, rt=None, filepath=None):
if filepath.get()=="Unitled.py":
Save_as_txt(txtbox=txtbox,mode='w',filepath=filepath,rt=rt)
else:
storeobjchar=txtbox.get('1.0',END)
storefile=open(filepath.get(),'w')
storefile.write(storeobjchar)
storefile.close()
def close_txt(txtbox=None, rt=None,filepath=None):
save_txt(txtbox=txtbox, rt=rt, filepath=filepath)
txtbox.delete('1.0',END)
rt.title('Py-Editor')
def run_script(txtbox=None, rt=None,filepath=None):
save_txt(txtbox=txtbox, rt=rt, filepath=filepath)
from os import system as cm
cm('python %s'%filepath.get())
def compile_script(txtbox=None, rt=None,filepath=None):
save_txt(txtbox=txtbox, rt=rt, filepath=filepath)
from os import system as cm
cm('python -m compileall %s'%filepath.get())
def run_compile_script(txtbox=None, rt=None,filepath=None):
compile_script(txtbox=txtbox, rt=rt, filepath=filepath)
run_script(txtbox=txtbox, rt=rt, filepath=filepath)
"""root=Tk()
root.title('Dialog BOx')
tt=Text(root)
filepath=StringVar()
filepath.set('Unitled.py')
tt.pack(expand='yes')
Button(root, text='Click Me', command=lambda:close_txt(txtbox=tt, rt=root, filepath=filepath)).pack()
root.mainloop()
"""
| 33.793478
| 102
| 0.560952
|
fc15bd61c62cec1b2c7f35b3b54ff10cccd3e109
| 332
|
py
|
Python
|
fast_api_server/constants/constants.py
|
parithy86/Python-api-frameworks
|
a19ca4a8f2ef92e6fc314fed5afe7350a1d771e8
|
[
"Apache-2.0"
] | null | null | null |
fast_api_server/constants/constants.py
|
parithy86/Python-api-frameworks
|
a19ca4a8f2ef92e6fc314fed5afe7350a1d771e8
|
[
"Apache-2.0"
] | null | null | null |
fast_api_server/constants/constants.py
|
parithy86/Python-api-frameworks
|
a19ca4a8f2ef92e6fc314fed5afe7350a1d771e8
|
[
"Apache-2.0"
] | null | null | null |
class Constants:
RESPONSE_CODES_SUCCESS = [200, 201]
RESPONSE_CODES_FAILURE = [400, 401, 404, 500]
RESPONSE_500 = 500
URL = "https://api.coingecko.com/api/v3/coins/{}?localization=false&tickers=false&market_data=false&community_data=false&developer_data=false&sparkline=false"
CONTENT_TYPE = "application/json"
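# Illustrative sketch (not part of the original module; "bitcoin" is just an
# assumed coin id): the templated URL above is meant to be filled in with
# str.format, e.g.
#   Constants.URL.format("bitcoin")
#   # -> "https://api.coingecko.com/api/v3/coins/bitcoin?localization=false&..."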
| 41.5
| 162
| 0.746988
|
305244808a33ad4a2bfe88f35dd2e63e8624b6f8
| 20,779
|
py
|
Python
|
python/tvm/autotvm/tuner/xgboost_cost_model.py
|
yongfeng-nv/incubator-tvm
|
a6cb4b8d3778db5341f991db9adf76ff735b72ea
|
[
"Apache-2.0"
] | 5
|
2020-06-19T03:22:24.000Z
|
2021-03-17T22:16:48.000Z
|
python/tvm/autotvm/tuner/xgboost_cost_model.py
|
yongfeng-nv/incubator-tvm
|
a6cb4b8d3778db5341f991db9adf76ff735b72ea
|
[
"Apache-2.0"
] | 2
|
2020-07-08T12:34:59.000Z
|
2020-07-11T15:54:47.000Z
|
python/tvm/autotvm/tuner/xgboost_cost_model.py
|
yongfeng-nv/incubator-tvm
|
a6cb4b8d3778db5341f991db9adf76ff735b72ea
|
[
"Apache-2.0"
] | 3
|
2020-12-10T23:21:18.000Z
|
2020-12-11T01:04:50.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""XGBoost as cost model"""
import multiprocessing
import logging
import time
import numpy as np
try:
import xgboost as xgb
except ImportError:
xgb = None
from .. import feature
from ..util import get_rank
from .metric import max_curve, recall_curve, cover_curve
from .model_based_tuner import CostModel, FeatureCache
logger = logging.getLogger('autotvm')
class XGBoostCostModel(CostModel):
"""XGBoost as cost model
Parameters
----------
task: Task
The tuning task
feature_type: str, optional
        If it is 'itervar', use features extracted from IterVar (loop variable).
        If it is 'knob', use the flattened ConfigEntity directly.
        If it is 'curve', use the sampled curve feature (relation feature).
Note on choosing feature type:
For single task tuning, 'itervar' and 'knob' are good.
'itervar' is more accurate but 'knob' is much faster.
        There are some constraints on 'itervar'; if you run into
        problems with feature extraction when using 'itervar',
        you can switch to 'knob'.
For cross-shape tuning (e.g. many convolutions with different shapes),
        'itervar' and 'curve' have better transferability,
'knob' is faster.
For cross-device or cross-operator tuning, you can use 'curve' only.
loss_type: str
        If it is 'reg', use regression loss to train the cost model.
        The cost model predicts the normalized flops.
        If it is 'rank', use pairwise rank loss to train the cost model.
        The cost model predicts a relative rank score.
num_threads: int, optional
The number of threads.
log_interval: int, optional
        If it is not None, the cost model will print a training log every `log_interval` iterations.
upper_model: XGBoostCostModel, optional
The upper model used in transfer learning
"""
def __init__(self, task, feature_type, loss_type, num_threads=None, log_interval=25,
upper_model=None):
super(XGBoostCostModel, self).__init__()
if xgb is None:
raise RuntimeError("XGBoost is required for XGBoostCostModel. "
"Please install its python package first. "
"Help: (https://xgboost.readthedocs.io/en/latest/) ")
self.task = task
self.target = task.target
self.space = task.config_space
self.fea_type = feature_type
self.loss_type = loss_type
self.num_threads = num_threads
self.log_interval = log_interval
if loss_type == 'reg':
self.xgb_params = {
'max_depth': 3,
'gamma': 0.0001,
'min_child_weight': 1,
'subsample': 1.0,
'eta': 0.3,
'lambda': 1.00,
'alpha': 0,
'objective': 'reg:linear',
}
elif loss_type == 'rank':
self.xgb_params = {
'max_depth': 3,
'gamma': 0.0001,
'min_child_weight': 1,
'subsample': 1.0,
'eta': 0.3,
'lambda': 1.00,
'alpha': 0,
'objective': 'rank:pairwise',
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params['silent'] = 1
if num_threads:
self.xgb_params['nthread'] = num_threads
self.bst = None
if feature_type == 'itervar':
self.feature_extract_func = _extract_itervar_feature_index
elif feature_type == 'knob':
self.feature_extract_func = _extract_knob_feature_index
elif feature_type == 'curve':
self.feature_extract_func = _extract_curve_feature_index
else:
raise RuntimeError("Invalid feature type " + feature_type)
        if upper_model:  # share the same feature cache with the upper model
self.feature_cache = upper_model.feature_cache
else:
self.feature_cache = FeatureCache()
self.upper_model = upper_model
self.feature_extra_ct = 0
self.pool = None
self.base_model = None
self._sample_size = 0
self._reset_pool(self.space, self.target, self.task)
def _reset_pool(self, space, target, task):
"""reset processing pool for feature extraction"""
        if self.upper_model:  # the base model reuses the upper model's pool
self.upper_model._reset_pool(space, target, task)
return
self._close_pool()
# use global variable to pass common arguments
global _extract_space, _extract_target, _extract_task
_extract_space = space
_extract_target = target
_extract_task = task
self.pool = multiprocessing.Pool(self.num_threads)
def _close_pool(self):
if self.pool:
self.pool.terminate()
self.pool.join()
self.pool = None
def _get_pool(self):
if self.upper_model:
return self.upper_model._get_pool()
return self.pool
def _base_model_discount(self):
return 1.0 / (2 ** (self._sample_size / 64.0))
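    # Worked example of the discount above (a reading of the formula, not an
    # upstream comment): with 0 observed samples the base model's margin keeps
    # weight 1.0, with 64 samples 0.5, with 128 samples 0.25; fit() below drops
    # the base model entirely once this falls under 0.05.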
def fit(self, xs, ys, plan_size):
tic = time.time()
self._reset_pool(self.space, self.target, self.task)
x_train = self._get_feature(xs)
y_train = np.array(ys)
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-8)
valid_index = y_train > 1e-6
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
self._sample_size = len(x_train)
if self.base_model:
discount = self._base_model_discount()
if discount < 0.05: # discard base model
self.base_model.upper_model = None
self.base_model = None
else:
dtrain.set_base_margin(discount * self.base_model.predict(xs, output_margin=True))
self.bst = xgb.train(self.xgb_params, dtrain,
num_boost_round=8000,
callbacks=[custom_callback(
stopping_rounds=20,
metric='tr-a-recall@%d' % plan_size,
evals=[(dtrain, 'tr')],
maximize=True,
fevals=[
xgb_average_recalln_curve_score(plan_size),
],
verbose_eval=self.log_interval)])
logger.debug("XGB train: %.2f\tobs: %d\terror: %d\tn_cache: %d",
time.time() - tic, len(xs),
len(xs) - np.sum(valid_index),
self.feature_cache.size(self.fea_type))
def fit_log(self, records, plan_size):
tic = time.time()
        # filter data, only pick entries from the same task
data = []
for inp, res in records:
if inp.task.name == self.task.name:
data.append((inp, res))
logger.debug("XGB load %d entries from history log file", len(data))
# extract feature
self._reset_pool(self.space, self.target, self.task)
pool = self._get_pool()
if self.fea_type == 'itervar':
feature_extract_func = _extract_itervar_feature_log
elif self.fea_type == 'knob':
feature_extract_func = _extract_knob_feature_log
elif self.fea_type == 'curve':
feature_extract_func = _extract_curve_feature_log
else:
raise RuntimeError("Invalid feature type: " + self.fea_type)
res = pool.map(feature_extract_func, data)
        # filter out features with different shapes
fea_len = len(self._get_feature([0])[0])
xs, ys = [], []
for x, y in res:
if len(x) == fea_len:
xs.append(x)
ys.append(y)
        if len(xs) < 500:  # not enough samples
return False
xs, ys = np.array(xs), np.array(ys)
x_train = xs
y_train = ys
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-8)
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
plan_size *= 2
self.bst = xgb.train(self.xgb_params, dtrain,
num_boost_round=400,
callbacks=[custom_callback(
stopping_rounds=100,
metric='tr-a-recall@%d' % plan_size,
evals=[(dtrain, 'tr')],
maximize=True,
fevals=[
xgb_average_recalln_curve_score(plan_size),
],
verbose_eval=self.log_interval)])
logger.debug("XGB train: %.2f\tobs: %d", time.time() - tic, len(xs))
return True
def predict(self, xs, output_margin=False):
feas = self._get_feature(xs)
dtest = xgb.DMatrix(feas)
if self.base_model:
dtest.set_base_margin(self._base_model_discount() *
self.base_model.predict(xs, output_margin=True))
return self.bst.predict(dtest, output_margin=output_margin)
def load_basemodel(self, base_model):
self.base_model = base_model
self.base_model._close_pool()
self.base_model.upper_model = self
def spawn_base_model(self):
return XGBoostCostModel(self.task, self.fea_type, self.loss_type,
self.num_threads, self.log_interval, self)
def _get_feature(self, indexes):
"""get features for indexes, run extraction if we do not have cache for them"""
# free feature cache
if self.feature_cache.size(self.fea_type) >= 100000:
self.feature_cache.clear(self.fea_type)
fea_cache = self.feature_cache.get(self.fea_type)
indexes = np.array(indexes)
need_extract = [x for x in indexes if x not in fea_cache]
if need_extract:
pool = self._get_pool()
feas = pool.map(self.feature_extract_func, need_extract)
for i, fea in zip(need_extract, feas):
fea_cache[i] = fea
feature_len = None
for idx in indexes:
if fea_cache[idx] is not None:
feature_len = fea_cache[idx].shape[-1]
break
ret = np.empty((len(indexes), feature_len), dtype=np.float32)
for i, ii in enumerate(indexes):
t = fea_cache[ii]
ret[i, :] = t if t is not None else 0
return ret
def __del__(self):
self._close_pool()
_extract_space = None
_extract_target = None
_extract_task = None
def _extract_itervar_feature_index(index):
"""extract iteration var feature for an index in extract_space"""
try:
config = _extract_space.get(index)
with _extract_target:
sch, args = _extract_task.instantiate(config)
fea = feature.get_itervar_feature_flatten(sch, args, take_log=True)
fea = np.concatenate((fea, list(config.get_other_option().values())))
return fea
except Exception: # pylint: disable=broad-except
return None
def _extract_itervar_feature_log(arg):
"""extract iteration var feature for log items"""
try:
inp, res = arg
config = inp.config
with inp.target:
sch, args = inp.task.instantiate(config)
fea = feature.get_itervar_feature_flatten(sch, args, take_log=True)
x = np.concatenate((fea, list(config.get_other_option().values())))
if res.error_no == 0:
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
except Exception: # pylint: disable=broad-except
return None
def _extract_knob_feature_index(index):
"""extract knob feature for an index in extract_space"""
try:
config = _extract_space.get(index)
return config.get_flatten_feature()
except Exception: # pylint: disable=broad-except
return None
def _extract_knob_feature_log(arg):
"""extract knob feature for log items"""
try:
inp, res = arg
config = inp.config
x = config.get_flatten_feature()
if res.error_no == 0:
with inp.target: # necessary, for calculating flops of this task
inp.task.instantiate(config)
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
except Exception: # pylint: disable=broad-except
return None
def _extract_curve_feature_index(index):
"""extract sampled curve feature for an index in extract_space"""
try:
config = _extract_space.get(index)
with _extract_target:
sch, args = _extract_task.instantiate(config)
fea = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=20)
fea = np.concatenate((fea, list(config.get_other_option().values())))
return np.array(fea)
except Exception: # pylint: disable=broad-except
return None
def _extract_curve_feature_log(arg):
"""extract sampled curve feature for log items"""
try:
inp, res = arg
config = inp.config
with inp.target:
sch, args = inp.task.instantiate(config)
fea = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=20)
x = np.concatenate((fea, list(config.get_other_option().values())))
if res.error_no == 0:
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
except Exception: # pylint: disable=broad-except
return None
def custom_callback(stopping_rounds, metric, fevals, evals=(), log_file=None,
maximize=False, verbose_eval=True):
"""callback function for xgboost to support multiple custom evaluation functions"""
# pylint: disable=import-outside-toplevel
from xgboost.core import EarlyStopException
from xgboost.callback import _fmt_metric
from xgboost.training import aggcv
state = {}
metric_shortname = metric.split("-")[1]
def init(env):
"""internal function"""
bst = env.model
state['maximize_score'] = maximize
state['best_iteration'] = 0
if maximize:
state['best_score'] = float('-inf')
else:
state['best_score'] = float('inf')
if bst is not None:
if bst.attr('best_score') is not None:
state['best_score'] = float(bst.attr('best_score'))
state['best_iteration'] = int(bst.attr('best_iteration'))
state['best_msg'] = bst.attr('best_msg')
else:
bst.set_attr(best_iteration=str(state['best_iteration']))
bst.set_attr(best_score=str(state['best_score']))
else:
assert env.cvfolds is not None
def callback(env):
"""internal function"""
if not state:
init(env)
bst = env.model
i = env.iteration
cvfolds = env.cvfolds
res_dict = {}
##### evaluation #####
if cvfolds is not None:
for feval in fevals:
tmp = aggcv([f.eval(i, feval) for f in cvfolds])
for k, mean, std in tmp:
res_dict[k] = [mean, std]
else:
for feval in fevals:
bst_eval = bst.eval_set(evals, i, feval)
res = [x.split(':') for x in bst_eval.split()]
for kv in res[1:]:
res_dict[kv[0]] = [float(kv[1])]
eval_res = []
keys = list(res_dict.keys())
keys.sort(key=lambda x: x if metric_shortname not in x else "a" + x)
for key in keys:
v = res_dict[key]
eval_res.append([key] + v)
##### print eval result #####
infos = ["XGB iter: %3d" % i]
for item in eval_res:
if 'null' in item[0]:
continue
infos.append("%s: %.6f" % (item[0], item[1]))
if not isinstance(verbose_eval, bool) and verbose_eval and i % verbose_eval == 0:
logger.debug("\t".join(infos))
if log_file:
with open(log_file, "a") as fout:
fout.write("\t".join(infos) + '\n')
##### choose score and do early stopping #####
score = None
for item in eval_res:
if item[0] == metric:
score = item[1]
break
assert score is not None
best_score = state['best_score']
best_iteration = state['best_iteration']
maximize_score = state['maximize_score']
if (maximize_score and score > best_score) or \
(not maximize_score and score < best_score):
msg = '[%d] %s' % (
env.iteration,
'\t'.join([_fmt_metric(x) for x in eval_res]))
state['best_msg'] = msg
state['best_score'] = score
state['best_iteration'] = env.iteration
# save the property to attributes, so they will occur in checkpoint.
if env.model is not None:
env.model.set_attr(best_score=str(state['best_score']),
best_iteration=str(state['best_iteration']),
best_msg=state['best_msg'])
elif env.iteration - best_iteration >= stopping_rounds:
best_msg = state['best_msg']
if verbose_eval and env.rank == 0:
logger.debug("XGB stopped. Best iteration: %s ", best_msg)
raise EarlyStopException(best_iteration)
return callback
# feval wrapper for xgboost
def xgb_max_curve_score(N):
"""evaluate max curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
scores = labels[trials]
curve = max_curve(scores)
return "Smax@%d" % N, curve[N] / np.max(labels)
return feval
def xgb_recalln_curve_score(N):
"""evaluate recall-n curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return "recall@%d" % N, curve[N]
return feval
def xgb_average_recalln_curve_score(N):
"""evaluate average recall-n curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return "a-recall@%d" % N, np.sum(curve[:N]) / N
return feval
def xgb_recallk_curve_score(N, topk):
"""evaluate recall-k curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks, topk)
return "recall@%d" % topk, curve[N]
return feval
def xgb_cover_curve_score(N):
"""evaluate cover curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = cover_curve(ranks)
return "cover@%d" % N, curve[N]
return feval
def xgb_null_score(_):
"""empty score function for xgb"""
def feval(__, ___):
return "null", 0
return feval
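# Sketch of how the feval wrappers above plug into training (this mirrors
# XGBoostCostModel.fit(); plan_size=64 is an assumed example value):
#   callback = custom_callback(stopping_rounds=20,
#                              metric='tr-a-recall@64',
#                              evals=[(dtrain, 'tr')],
#                              maximize=True,
#                              fevals=[xgb_average_recalln_curve_score(64)])
#   bst = xgb.train(params, dtrain, num_boost_round=8000, callbacks=[callback])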
| 35.519658
| 98
| 0.575196
|
deb934126cd5c982b03ed3c3118681074db2edc6
| 1,497
|
py
|
Python
|
tensorflow/python/data/experimental/ops/matching_files.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/data/experimental/ops/matching_files.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/data/experimental/ops/matching_files.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for matching input filenames."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class MatchingFilesDataset(dataset_ops.DatasetSource):
"""A `Dataset` that list the files according to the input patterns."""
def __init__(self, patterns):
self._patterns = ops.convert_to_tensor(
patterns, dtype=dtypes.string, name="patterns")
variant_tensor = ged_ops.matching_files_dataset(self._patterns)
super(MatchingFilesDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
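# Usage sketch (illustrative only; the glob pattern is an assumed example):
#   dataset = MatchingFilesDataset(["/data/*.tfrecord"])
#   # each element is a scalar tf.string filename matching the pattern(s),
#   # per the element_spec defined above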
| 41.583333
| 80
| 0.739479
|
01c07aacec6fb4e5e65380bd14f5ec4aeaf603b3
| 1,871
|
py
|
Python
|
personal/Sebastiano/useful_py/transit_boost.py
|
edervishaj/spotify-recsys-challenge
|
4077201ac7e4ed9da433bd10a92c183614182437
|
[
"Apache-2.0"
] | 3
|
2018-10-12T20:19:57.000Z
|
2019-12-11T01:11:38.000Z
|
personal/Sebastiano/useful_py/transit_boost.py
|
kiminh/spotify-recsys-challenge
|
5e7844a77ce3c26658400f161d2d74d682f30e69
|
[
"Apache-2.0"
] | null | null | null |
personal/Sebastiano/useful_py/transit_boost.py
|
kiminh/spotify-recsys-challenge
|
5e7844a77ce3c26658400f161d2d74d682f30e69
|
[
"Apache-2.0"
] | 4
|
2018-10-27T20:30:18.000Z
|
2020-10-14T07:43:27.000Z
|
import scipy.sparse as sps
import numpy as np
from tqdm import tqdm
from utils.pre_processing import norm_max_row
def transitivity_boost(sim):
sim_col = sps.csc_matrix(sim)
print("Similarity shape: " + str(sim.shape))
for row in tqdm(range(sim.shape[0])):
data_row = sim.data[sim.indptr[row]:sim.indptr[row + 1]]
t_max = np.argwhere(data_row < 0.05).ravel()
row_indices = sim.indices[sim.indptr[row]:sim.indptr[row + 1]]
for ind in t_max:
col = row_indices[ind]
col_indices = sim_col.indices[sim_col.indptr[col]:sim_col.indptr[col + 1]]
data_col = sim_col.data[sim_col.indptr[col]:sim_col.indptr[col + 1]]
data_com_col = data_col[np.where(np.isin(col_indices, row_indices))[0]]
data_com_row = data_row[np.where(np.isin(row_indices, col_indices))[0]]
            data_row[ind] = np.max(data_com_row + data_com_col) / 2  # TODO: take the mean instead of the max
sim.data[sim.indptr[row]:sim.indptr[row + 1]] = data_row
return sim
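# Summary of the loop above (a reading of the code, not an original comment):
# entries of a row whose similarity is below 0.05 are re-scored with the
# maximum of the averaged similarities that the row and the corresponding
# column share with common neighbours, i.e. a two-hop "transitivity" boost
# applied only to weak links.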
if __name__ == '__main__':
print("[ Loading Similarity ]")
sim = sps.csr_matrix(norm_max_row(sps.load_npz("../../scripts/rp3beta_similarity_online.npz").tocsr()))
boosted = transitivity_boost(sim)
sps.save_npz("../../scripts/boosted_rp3beta_similarity.npz", boosted)
# for row in tqdm(range(sim.shape[0])):
# i = sim[row].toarray().ravel()
# data = sim.data[sim.indptr[row]:sim.indptr[row+1]]
# indices = sim.indices[sim.indptr[row]:sim.indptr[row+1]]
# for col in (range(len(data))):
# if data[col] < t_max :
# j = sim_col[:, indices[col]].toarray().ravel()
# data[col] = np.max((i+j)/2)
# sim.data[sim.indptr[row]:sim.indptr[row + 1]] = data
# sps.save_npz("../../scripts/boosted_rp3beta_similarity.npz", sim)
| 38.979167
| 107
| 0.627472
|
c0c28c3ec7b5f196028be4343282287ce283f218
| 3,191
|
py
|
Python
|
tests/unit/test_scope_group.py
|
ignatenkobrain/ns1-python
|
f814fd809d08356c48ba686c041bd057ee6cbb75
|
[
"MIT"
] | null | null | null |
tests/unit/test_scope_group.py
|
ignatenkobrain/ns1-python
|
f814fd809d08356c48ba686c041bd057ee6cbb75
|
[
"MIT"
] | null | null | null |
tests/unit/test_scope_group.py
|
ignatenkobrain/ns1-python
|
f814fd809d08356c48ba686c041bd057ee6cbb75
|
[
"MIT"
] | null | null | null |
import pytest
import ns1.rest.ipam
try: # Python 3.3 +
import unittest.mock as mock
except ImportError:
import mock
@pytest.fixture
def scope_group_config(config):
config.loadFromDict({
'endpoint': 'api.nsone.net',
'default_key': 'test1',
'keys': {
'test1': {
'key': 'key-1',
'desc': 'test key number 1',
'writeLock': True
}
}
})
return config
def test_rest_scope_group_list(scope_group_config):
z = ns1.rest.ipam.Scopegroups(scope_group_config)
z._make_request = mock.MagicMock()
z.list()
z._make_request.assert_called_once_with('GET',
'dhcp/scopegroup',
callback=None,
errback=None)
@pytest.mark.parametrize('scope_group_id, url', [('1', 'dhcp/scopegroup/1')])
def test_rest_scope_group_retrieve(scope_group_config, scope_group_id, url):
z = ns1.rest.ipam.Scopegroups(scope_group_config)
z._make_request = mock.MagicMock()
z.retrieve(scope_group_id)
z._make_request.assert_called_once_with('GET',
url,
callback=None,
errback=None)
@pytest.mark.parametrize('scope_group_name, url',
[('test_scope_group', 'dhcp/scopegroup')])
def test_rest_scope_group_create(scope_group_config, scope_group_name, url):
z = ns1.rest.ipam.Scopegroups(scope_group_config)
z._make_request = mock.MagicMock()
z.create(name=scope_group_name)
z._make_request.assert_called_once_with('PUT',
url,
callback=None,
errback=None,
body={"name": scope_group_name})
@pytest.mark.parametrize('scope_group_id, scope_group_name, url',
[('1', 'awesome scope_group', 'dhcp/scopegroup/1')])
def test_rest_scope_group_update(scope_group_config,
scope_group_id, scope_group_name, url):
z = ns1.rest.ipam.Scopegroups(scope_group_config)
z._make_request = mock.MagicMock()
z.update(scope_group_id, name=scope_group_name)
z._make_request.assert_called_once_with('POST',
url,
callback=None,
errback=None,
body={"name": scope_group_name})
@pytest.mark.parametrize('scope_group_id, url', [('1', 'dhcp/scopegroup/1')])
def test_rest_scope_group_delete(scope_group_config, scope_group_id, url):
z = ns1.rest.ipam.Scopegroups(scope_group_config)
z._make_request = mock.MagicMock()
z.delete(scope_group_id)
z._make_request.assert_called_once_with('DELETE',
url,
callback=None,
errback=None)
| 37.988095
| 77
| 0.529615
|
46ecd6f97cf1501ab3dcc6a17abd94c3af8b1acd
| 876
|
py
|
Python
|
constants.py
|
korsejong/emotion-recognition-neural-networks
|
ee2c58c2809ad201c7cf2ca6ac1d998197baf246
|
[
"MIT"
] | 872
|
2016-07-02T03:45:01.000Z
|
2022-03-29T17:24:41.000Z
|
constants.py
|
vishnutejnk/emotion-recognition-neural-networks
|
8da63cae751f087b3a09684d0396f0f9b1071b95
|
[
"MIT"
] | 66
|
2016-09-02T16:13:26.000Z
|
2022-03-11T23:20:25.000Z
|
constants.py
|
vishnutejnk/emotion-recognition-neural-networks
|
8da63cae751f087b3a09684d0396f0f9b1071b95
|
[
"MIT"
] | 377
|
2016-07-02T07:36:34.000Z
|
2022-02-17T06:22:26.000Z
|
# __ __
# /\ \__ /\ \__
# ___ ___ ___ ____\ \ ,_\ __ ___\ \ ,_\ ____
# /'___\ / __`\ /' _ `\ /',__\\ \ \/ /'__`\ /' _ `\ \ \/ /',__\
# /\ \__//\ \L\ \/\ \/\ \/\__, `\\ \ \_/\ \L\.\_/\ \/\ \ \ \_/\__, `\
# \ \____\ \____/\ \_\ \_\/\____/ \ \__\ \__/.\_\ \_\ \_\ \__\/\____/
# \/____/\/___/ \/_/\/_/\/___/ \/__/\/__/\/_/\/_/\/_/\/__/\/___/ .txt
#
#
CASC_PATH = './haarcascade_files/haarcascade_frontalface_default.xml'
SIZE_FACE = 48
EMOTIONS = ['angry', 'disgusted', 'fearful',
'happy', 'sad', 'surprised', 'neutral']
SAVE_DIRECTORY = './data/'
SAVE_MODEL_FILENAME = 'Gudi_model_100_epochs_20000_faces'
DATASET_CSV_FILENAME = 'fer2013.csv'
SAVE_DATASET_IMAGES_FILENAME = 'data_images.npy'
SAVE_DATASET_LABELS_FILENAME = 'data_labels.npy'
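# Illustrative sketch (numpy is an assumed dependency here, not imported by
# this module): the .npy file names above suggest loading the preprocessed
# dataset roughly like
#   import numpy as np
#   images = np.load(SAVE_DIRECTORY + SAVE_DATASET_IMAGES_FILENAME)
#   labels = np.load(SAVE_DIRECTORY + SAVE_DATASET_LABELS_FILENAME)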
| 43.8
| 74
| 0.477169
|
977e16aa1ec4ea1df06adf82f07a575de35ccbc2
| 4,767
|
py
|
Python
|
pfwra/news/migrations/0001_initial.py
|
johnkellehernz/pfwra
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
[
"MIT"
] | null | null | null |
pfwra/news/migrations/0001_initial.py
|
johnkellehernz/pfwra
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
[
"MIT"
] | null | null | null |
pfwra/news/migrations/0001_initial.py
|
johnkellehernz/pfwra
|
5b8c718bb2f1aaa34e9a718e07baf270294f7ba6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.11 on 2021-03-26 08:03
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.contrib.routable_page.models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('wagtailimages', '0023_add_choose_permissions'),
('wagtailcore', '0060_fix_workflow_unique_constraint'),
('common', '0002_people_standardpage_suburb'),
]
operations = [
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('body', wagtail.core.fields.StreamField([('heading_block', wagtail.core.blocks.StructBlock([('heading_text', wagtail.core.blocks.CharBlock(form_classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False))])), ('paragraph_block', wagtail.core.blocks.RichTextBlock(icon='fa-paragraph', template='common/blocks/paragraph_block.html')), ('image_block', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False))])), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock()), ('attribute_name', wagtail.core.blocks.CharBlock(blank=True, label='e.g. Mary Berry', required=False))])), ('embed_block', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-s15', template='blocks/embed_block.html'))], blank=True, verbose_name='Page body')),
('date_published', models.DateField(blank=True, null=True, verbose_name='Date article published')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='common.People')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('suburb', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='common.Suburb')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='news.BlogPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='news_blogpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='blogpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='news.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.CreateModel(
name='BlogIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subtitle', models.CharField(blank=True, max_length=254, null=True, verbose_name='Title in Te reo Māori')),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('image', models.ForeignKey(blank=True, help_text='Landscape mode only; horizontal width between 1000px and 3000px.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=(wagtail.contrib.routable_page.models.RoutablePageMixin, 'wagtailcore.page'),
),
]
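# Applying this auto-generated migration follows the standard Django workflow
# (no project-specific tooling is assumed): `python manage.py migrate news`.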
| 66.208333
| 1,140
| 0.665618
|
7ab89b489893df4ce0840e4cbf668526f27eee24
| 2,144
|
py
|
Python
|
libs/external/emscripten/tools/settings_template_readonly.py
|
qq2588258/floweers
|
c7f117f29dee21473821b89ff9b18058f7ebdadf
|
[
"MIT"
] | 58
|
2015-01-05T04:40:48.000Z
|
2021-12-17T06:01:28.000Z
|
tools/settings_template_readonly.py
|
aashish24/emscripten
|
0708b232eef021cc7e1302b02091e6a84ceeb1aa
|
[
"MIT"
] | null | null | null |
tools/settings_template_readonly.py
|
aashish24/emscripten
|
0708b232eef021cc7e1302b02091e6a84ceeb1aa
|
[
"MIT"
] | 46
|
2015-01-03T06:20:54.000Z
|
2020-04-18T13:32:52.000Z
|
# This file will be edited (the {{{ }}} things), and then ~/.emscripten created with the result, if ~/.emscripten doesn't exist.
# Note: If you put paths relative to the home directory, do not forget os.path.expanduser
import os
# this helps projects using emscripten find it
EMSCRIPTEN_ROOT = os.path.expanduser(os.getenv('EMSCRIPTEN') or '{{{ EMSCRIPTEN_ROOT }}}') # directory
LLVM_ROOT = os.path.expanduser(os.getenv('LLVM') or '{{{ LLVM_ROOT }}}') # directory
PYTHON = os.path.expanduser(os.getenv('PYTHON') or '{{{ PYTHON }}}') # executable
# See below for notes on which JS engine(s) you need
NODE_JS = os.path.expanduser(os.getenv('NODE') or '{{{ NODE }}}') # executable
SPIDERMONKEY_ENGINE = [os.path.expanduser(os.getenv('SPIDERMONKEY') or 'js')] # executable
V8_ENGINE = os.path.expanduser(os.getenv('V8') or 'd8') # executable
JAVA = 'java' # executable
TEMP_DIR = '/tmp' # You will need to modify this on Windows
#CLOSURE_COMPILER = '..' # define this to not use the bundled version
########################################################################################################
# Pick the JS engine to use for running the compiler. This engine must exist, or
# nothing can be compiled.
#
# Recommendation: If you already have node installed, use that. Otherwise, build v8 or
# spidermonkey from source. Any of these three is fine, as long as it's
# a recent version (especially for v8 and spidermonkey).
COMPILER_ENGINE = NODE_JS
#COMPILER_ENGINE = V8_ENGINE
#COMPILER_ENGINE = SPIDERMONKEY_ENGINE
# All JS engines to use when running the automatic tests. Not all the engines in this list
# must exist (if they don't, they will be skipped in the test runner).
#
# Recommendation: If you already have node installed, use that. If you can, also build
# spidermonkey from source as well to get more test coverage (node can't
# run all the tests due to node issue 1669). v8 is currently not recommended
# here because of v8 issue 1822.
JS_ENGINES = [NODE_JS] # add this if you have spidermonkey installed too, SPIDERMONKEY_ENGINE]
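# Spelled-out version of the hint on the line above (only do this if a
# spidermonkey build is actually available):
#   JS_ENGINES = [NODE_JS, SPIDERMONKEY_ENGINE]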
| 44.666667
| 128
| 0.673507
|
926e0484fce36b1499ac203edd6dacd31aac1e38
| 52,978
|
py
|
Python
|
mockredis/client.py
|
guyhughes/mockredis
|
ba41d90a097f26e1f1ab36603d6a94a2202691f1
|
[
"Apache-2.0"
] | null | null | null |
mockredis/client.py
|
guyhughes/mockredis
|
ba41d90a097f26e1f1ab36603d6a94a2202691f1
|
[
"Apache-2.0"
] | null | null | null |
mockredis/client.py
|
guyhughes/mockredis
|
ba41d90a097f26e1f1ab36603d6a94a2202691f1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from collections import defaultdict
from copy import deepcopy
from itertools import chain
from datetime import datetime, timedelta
from hashlib import sha1
from operator import add
from random import choice, sample
import re
import sys
import time
import fnmatch
from mockredis.clock import SystemClock
from mockredis.lock import MockRedisLock
from mockredis.exceptions import RedisError, ResponseError, WatchError
from mockredis.pipeline import MockRedisPipeline
from mockredis.script import Script
from mockredis.sortedset import SortedSet
if sys.version_info >= (3, 0):
long = int
xrange = range
basestring = str
from functools import reduce
class MockRedis(object):
"""
A Mock for a redis-py Redis object
Expire functionality must be explicitly
    invoked using do_expire(). Automatic
expiry is NOT supported.
"""
def __init__(self,
strict=False,
clock=None,
load_lua_dependencies=True,
blocking_timeout=1000,
blocking_sleep_interval=0.01,
**kwargs):
"""
Initialize as either StrictRedis or Redis.
Defaults to non-strict.
"""
self.strict = strict
self.clock = SystemClock() if clock is None else clock
self.load_lua_dependencies = load_lua_dependencies
self.blocking_timeout = blocking_timeout
self.blocking_sleep_interval = blocking_sleep_interval
# The 'Redis' store
self.redis = defaultdict(dict)
self.redis_config = defaultdict(dict)
self.timeouts = defaultdict(dict)
# The 'PubSub' store
self.pubsub = defaultdict(list)
# Dictionary from script to sha ''Script''
self.shas = dict()
@classmethod
def from_url(cls, url, db=None, **kwargs):
return cls(**kwargs)
# Connection Functions #
def echo(self, msg):
return self._encode(msg)
def ping(self):
return b'PONG'
# Transactions Functions #
def lock(self, key, timeout=0, sleep=0):
"""Emulate lock."""
return MockRedisLock(self, key, timeout, sleep)
def pipeline(self, transaction=True, shard_hint=None):
"""Emulate a redis-python pipeline."""
return MockRedisPipeline(self, transaction, shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
Copied directly from redis-py.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while 1:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
def watch(self, *argv, **kwargs):
"""
Mock does not support command buffering so watch
is a no-op
"""
pass
def unwatch(self):
"""
Mock does not support command buffering so unwatch
is a no-op
"""
pass
def multi(self, *argv, **kwargs):
"""
Mock does not support command buffering so multi
is a no-op
"""
pass
def execute(self):
"""Emulate the execute method. All piped commands are executed immediately
in this mock, so this is a no-op."""
pass
# Keys Functions #
def type(self, key):
key = self._encode(key)
if key not in self.redis:
return b'none'
type_ = type(self.redis[key])
if type_ is dict:
return b'hash'
elif type_ is str:
return b'string'
elif type_ is set:
return b'set'
elif type_ is list:
return b'list'
elif type_ is SortedSet:
return b'zset'
raise TypeError("unhandled type {}".format(type_))
def keys(self, pattern='*'):
"""Emulate keys."""
# making sure the pattern is unicode/str.
try:
pattern = pattern.decode('utf-8')
# This throws an AttributeError in python 3, or an
# UnicodeEncodeError in python 2
except (AttributeError, UnicodeEncodeError):
pass
# Make regex out of glob styled pattern.
regex = fnmatch.translate(pattern)
regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex))
# Find every key that matches the pattern
return [key for key in self.redis.keys() if regex.match(key.decode('utf-8'))]
def delete(self, *keys):
"""Emulate delete."""
key_counter = 0
for key in map(self._encode, keys):
if key in self.redis:
del self.redis[key]
key_counter += 1
if key in self.timeouts:
del self.timeouts[key]
return key_counter
def __delitem__(self, name):
if self.delete(name) == 0:
# redispy doesn't correctly raise KeyError here, so we don't either
pass
def exists(self, key):
"""Emulate exists."""
return self._encode(key) in self.redis
__contains__ = exists
def _expire(self, key, delta):
if key not in self.redis:
return False
self.timeouts[key] = self.clock.now() + delta
return True
def expire(self, key, delta):
"""Emulate expire"""
delta = delta if isinstance(delta, timedelta) else timedelta(seconds=delta)
return self._expire(self._encode(key), delta)
def pexpire(self, key, milliseconds):
"""Emulate pexpire"""
return self._expire(self._encode(key), timedelta(milliseconds=milliseconds))
def expireat(self, key, when):
"""Emulate expireat"""
expire_time = datetime.fromtimestamp(when)
key = self._encode(key)
if key in self.redis:
self.timeouts[key] = expire_time
return True
return False
def ttl(self, key):
"""
Emulate ttl
Even though the official redis commands documentation at http://redis.io/commands/ttl
states "Return value: Integer reply: TTL in seconds, -2 when key does not exist or -1
when key does not have a timeout." the redis-py lib returns None for both these cases.
The lib behavior has been emulated here.
:param key: key for which ttl is requested.
:returns: the number of seconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior).
"""
value = self.pttl(key)
if value is None or value < 0:
return value
return value // 1000
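    # Worked example of the ttl/pttl relationship above: if pttl(key) reports
    # 1500 (milliseconds), ttl(key) returns 1500 // 1000 == 1 (second); the
    # None and negative sentinel values are passed through unchanged.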
def pttl(self, key):
"""
Emulate pttl
:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior).
"""
"""
Returns time to live in milliseconds if output_ms is True, else returns seconds.
"""
key = self._encode(key)
if key not in self.redis:
# as of redis 2.8, -2 is returned if the key does not exist
return long(-2) if self.strict else None
if key not in self.timeouts:
# as of redis 2.8, -1 is returned if the key is persistent
# redis-py returns None; command docs say -1
return long(-1) if self.strict else None
time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
return long(max(-1, time_to_live))
def do_expire(self):
"""
Expire objects assuming now == time
"""
# Deep copy to avoid RuntimeError: dictionary changed size during iteration
_timeouts = deepcopy(self.timeouts)
for key, value in _timeouts.items():
if value - self.clock.now() < timedelta(0):
del self.timeouts[key]
# removing the expired key
if key in self.redis:
self.redis.pop(key, None)
def flushdb(self):
self.redis.clear()
self.pubsub.clear()
self.timeouts.clear()
def rename(self, old_key, new_key):
return self._rename(old_key, new_key)
def renamenx(self, old_key, new_key):
return 1 if self._rename(old_key, new_key, True) else 0
def _rename(self, old_key, new_key, nx=False):
old_key = self._encode(old_key)
new_key = self._encode(new_key)
if old_key in self.redis and (not nx or new_key not in self.redis):
self.redis[new_key] = self.redis.pop(old_key)
return True
return False
def dbsize(self):
return len(self.redis.keys())
# String Functions #
def get(self, key):
key = self._encode(key)
return self.redis.get(key)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value is not None:
return value
raise KeyError(name)
def mget(self, keys, *args):
args = self._list_or_args(keys, args)
return [self.get(arg) for arg in args]
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
"""
Set the ``value`` for the ``key`` in the context of the provided kwargs.
As per the behavior of the redis-py lib:
If nx and xx are both set, the function does nothing and None is returned.
If px and ex are both set, the preference is given to px.
If the key is not set for some reason, the lib function returns None.
"""
key = self._encode(key)
value = self._encode(value)
if nx and xx:
return None
mode = "nx" if nx else "xx" if xx else None
if self._should_set(key, mode):
expire = None
if ex is not None:
expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex)
if px is not None:
expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px)
if expire is not None and expire.total_seconds() <= 0:
raise ResponseError("invalid expire time in SETEX")
result = self._set(key, value)
if expire:
self._expire(key, expire)
return result
__setitem__ = set
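    # Hypothetical example of the emulated set() above (key/value names are
    # assumed): r.set('k', 'v', ex=10) stores b'v' and registers a 10 second
    # timeout that only takes effect when do_expire() is called; a following
    # r.set('k', 'v2', nx=True) returns None because the key already exists.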
def getset(self, key, value):
old_value = self.get(key)
self.set(key, value)
return old_value
def _set(self, key, value):
self.redis[key] = self._encode(value)
# removing the timeout
if key in self.timeouts:
self.timeouts.pop(key, None)
return True
def _should_set(self, key, mode):
"""
Determine if it is okay to set a key.
        If the mode is None, returns True; otherwise, returns True or False based on
the value of ``key`` and the ``mode`` (nx | xx).
"""
if mode is None or mode not in ["nx", "xx"]:
return True
if mode == "nx":
if key in self.redis:
# nx means set only if key is absent
# false if the key already exists
return False
elif key not in self.redis:
# at this point mode can only be xx
# xx means set only if the key already exists
            # false if it is absent
return False
# for all other cases, return true
return True
def setex(self, key, time, value):
"""
Set the value of ``key`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if not self.strict:
            # when not in strict mode, swap the value and time argument order
time, value = value, time
return self.set(key, value, ex=time)
def psetex(self, key, time, value):
"""
Set the value of ``key`` to ``value`` that expires in ``time``
milliseconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
return self.set(key, value, px=time)
def setnx(self, key, value):
"""Set the value of ``key`` to ``value`` if key doesn't exist"""
return self.set(key, value, nx=True)
def mset(self, *args, **kwargs):
"""
Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
mapping = args[0]
else:
mapping = kwargs
for key, value in mapping.items():
self.set(key, value)
return True
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single dict arg')
mapping = args[0]
else:
mapping = kwargs
for key in mapping.keys():
if self._encode(key) in self.redis:
return False
for key, value in mapping.items():
self.set(key, value)
return True
def decr(self, key, amount=1):
key = self._encode(key)
previous_value = long(self.redis.get(key, '0'))
self.redis[key] = self._encode(previous_value - amount)
return long(self.redis[key])
decrby = decr
def incr(self, key, amount=1):
"""Emulate incr."""
key = self._encode(key)
previous_value = long(self.redis.get(key, '0'))
self.redis[key] = self._encode(previous_value + amount)
return long(self.redis[key])
incrby = incr
def setbit(self, key, offset, value):
"""
Set the bit at ``offset`` in ``key`` to ``value``.
"""
key = self._encode(key)
index, bits, mask = self._get_bits_and_offset(key, offset)
if index >= len(bits):
bits.extend(b"\x00" * (index + 1 - len(bits)))
prev_val = 1 if (bits[index] & mask) else 0
if value:
bits[index] |= mask
else:
bits[index] &= ~mask
self.redis[key] = bytes(bits)
return prev_val
def getbit(self, key, offset):
"""
Returns the bit value at ``offset`` in ``key``.
"""
key = self._encode(key)
index, bits, mask = self._get_bits_and_offset(key, offset)
if index >= len(bits):
return 0
return 1 if (bits[index] & mask) else 0
def _get_bits_and_offset(self, key, offset):
bits = bytearray(self.redis.get(key, b""))
index, position = divmod(offset, 8)
mask = 128 >> position
return index, bits, mask
# Hash Functions #
def hexists(self, hashkey, attribute):
"""Emulate hexists."""
redis_hash = self._get_hash(hashkey, 'HEXISTS')
return self._encode(attribute) in redis_hash
def hget(self, hashkey, attribute):
"""Emulate hget."""
redis_hash = self._get_hash(hashkey, 'HGET')
return redis_hash.get(self._encode(attribute))
def hgetall(self, hashkey):
"""Emulate hgetall."""
redis_hash = self._get_hash(hashkey, 'HGETALL')
return dict(redis_hash)
def hdel(self, hashkey, *keys):
"""Emulate hdel"""
redis_hash = self._get_hash(hashkey, 'HDEL')
count = 0
for key in keys:
attribute = self._encode(key)
if attribute in redis_hash:
count += 1
del redis_hash[attribute]
if not redis_hash:
self.delete(hashkey)
return count
def hlen(self, hashkey):
"""Emulate hlen."""
redis_hash = self._get_hash(hashkey, 'HLEN')
return len(redis_hash)
def hmset(self, hashkey, value):
"""Emulate hmset."""
redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
for key, value in value.items():
attribute = self._encode(key)
redis_hash[attribute] = self._encode(value)
return True
def hmget(self, hashkey, keys, *args):
"""Emulate hmget."""
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes]
def hset(self, hashkey, attribute, value):
"""Emulate hset."""
redis_hash = self._get_hash(hashkey, 'HSET', create=True)
attribute = self._encode(attribute)
attribute_present = attribute in redis_hash
redis_hash[attribute] = self._encode(value)
return long(0) if attribute_present else long(1)
def hsetnx(self, hashkey, attribute, value):
"""Emulate hsetnx."""
redis_hash = self._get_hash(hashkey, 'HSETNX', create=True)
attribute = self._encode(attribute)
if attribute in redis_hash:
return long(0)
else:
redis_hash[attribute] = self._encode(value)
return long(1)
def hincrby(self, hashkey, attribute, increment=1):
"""Emulate hincrby."""
return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
def hincrbyfloat(self, hashkey, attribute, increment=1.0):
"""Emulate hincrbyfloat."""
return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
def _hincrby(self, hashkey, attribute, command, type_, increment):
"""Shared hincrby and hincrbyfloat routine"""
redis_hash = self._get_hash(hashkey, command, create=True)
attribute = self._encode(attribute)
previous_value = type_(redis_hash.get(attribute, '0'))
redis_hash[attribute] = self._encode(previous_value + increment)
return type_(redis_hash[attribute])
def hkeys(self, hashkey):
"""Emulate hkeys."""
redis_hash = self._get_hash(hashkey, 'HKEYS')
return redis_hash.keys()
def hvals(self, hashkey):
"""Emulate hvals."""
redis_hash = self._get_hash(hashkey, 'HVALS')
return redis_hash.values()
# List Functions #
def lrange(self, key, start, stop):
"""Emulate lrange."""
redis_list = self._get_list(key, 'LRANGE')
start, stop = self._translate_range(len(redis_list), start, stop)
return redis_list[start:stop + 1]
def lindex(self, key, index):
"""Emulate lindex."""
redis_list = self._get_list(key, 'LINDEX')
if self._encode(key) not in self.redis:
return None
try:
return redis_list[index]
except (IndexError):
# Redis returns nil if the index doesn't exist
return None
def llen(self, key):
"""Emulate llen."""
redis_list = self._get_list(key, 'LLEN')
# Redis returns 0 if list doesn't exist
return len(redis_list)
def _blocking_pop(self, pop_func, keys, timeout):
"""Emulate blocking pop functionality"""
if not isinstance(timeout, (int, long)):
raise RuntimeError('timeout is not an integer or out of range')
if timeout is None or timeout == 0:
timeout = self.blocking_timeout
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
elapsed_time = 0
start = time.time()
while elapsed_time < timeout:
key, val = self._pop_first_available(pop_func, keys)
if val:
return key, val
# small delay to avoid high cpu utilization
time.sleep(self.blocking_sleep_interval)
elapsed_time = time.time() - start
return None
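    # Note on the emulation above: blpop/brpop poll the lists every
    # blocking_sleep_interval seconds (0.01 by default) instead of truly
    # blocking, and give up after `timeout` seconds (or blocking_timeout when
    # timeout is 0 or None).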
def _pop_first_available(self, pop_func, keys):
for key in keys:
val = pop_func(key)
if val:
return self._encode(key), val
return None, None
def blpop(self, keys, timeout=0):
"""Emulate blpop"""
return self._blocking_pop(self.lpop, keys, timeout)
def brpop(self, keys, timeout=0):
"""Emulate brpop"""
return self._blocking_pop(self.rpop, keys, timeout)
def lpop(self, key):
"""Emulate lpop."""
redis_list = self._get_list(key, 'LPOP')
if self._encode(key) not in self.redis:
return None
try:
value = redis_list.pop(0)
if len(redis_list) == 0:
self.delete(key)
return value
except (IndexError):
# Redis returns nil if popping from an empty list
return None
def lpush(self, key, *args):
"""Emulate lpush."""
redis_list = self._get_list(key, 'LPUSH', create=True)
        # Creates the list at this key if it doesn't exist, and prepends args to it
args_reversed = [self._encode(arg) for arg in args]
args_reversed.reverse()
updated_list = args_reversed + redis_list
self.redis[self._encode(key)] = updated_list
# Return the length of the list after the push operation
return len(updated_list)
def rpop(self, key):
"""Emulate lpop."""
redis_list = self._get_list(key, 'RPOP')
if self._encode(key) not in self.redis:
return None
try:
value = redis_list.pop()
if len(redis_list) == 0:
self.delete(key)
return value
except (IndexError):
# Redis returns nil if popping from an empty list
return None
def rpush(self, key, *args):
"""Emulate rpush."""
redis_list = self._get_list(key, 'RPUSH', create=True)
# Creates the list at this key if it doesn't exist, and appends args to it
redis_list.extend(map(self._encode, args))
# Return the length of the list after the push operation
return len(redis_list)
def lrem(self, key, value, count=0):
"""Emulate lrem."""
value = self._encode(value)
redis_list = self._get_list(key, 'LREM')
removed_count = 0
if self._encode(key) in self.redis:
if count == 0:
                # Remove all occurrences
while redis_list.count(value):
redis_list.remove(value)
removed_count += 1
elif count > 0:
counter = 0
                # remove the first 'count' occurrences
while redis_list.count(value):
redis_list.remove(value)
counter += 1
removed_count += 1
if counter >= count:
break
elif count < 0:
                # remove the last 'count' occurrences
counter = -count
new_list = []
for v in reversed(redis_list):
if v == value and counter > 0:
counter -= 1
removed_count += 1
else:
new_list.append(v)
redis_list[:] = list(reversed(new_list))
if removed_count > 0 and len(redis_list) == 0:
self.delete(key)
return removed_count
def ltrim(self, key, start, stop):
"""Emulate ltrim."""
redis_list = self._get_list(key, 'LTRIM')
if redis_list:
start, stop = self._translate_range(len(redis_list), start, stop)
self.redis[self._encode(key)] = redis_list[start:stop + 1]
return True
def rpoplpush(self, source, destination):
"""Emulate rpoplpush"""
transfer_item = self.rpop(source)
if transfer_item is not None:
self.lpush(destination, transfer_item)
return transfer_item
def brpoplpush(self, source, destination, timeout=0):
"""Emulate brpoplpush"""
transfer_item = self.brpop(source, timeout)
if transfer_item is None:
return None
key, val = transfer_item
self.lpush(destination, val)
return val
def lset(self, key, index, value):
"""Emulate lset."""
redis_list = self._get_list(key, 'LSET')
if redis_list is None:
raise ResponseError("no such key")
try:
redis_list[index] = self._encode(value)
except IndexError:
raise ResponseError("index out of range")
def sort(self, name,
start=None,
num=None,
by=None,
get=None,
desc=False,
alpha=False,
store=None,
groups=False):
# check valid parameter combos
if [start, num] != [None, None] and None in [start, num]:
raise ValueError('start and num must both be specified together')
# check up-front if there's anything to actually do
items = num != 0 and self.get(name)
if not items:
if store:
return 0
else:
return []
by = self._encode(by) if by is not None else by
# always organize the items as tuples of the value from the list and the sort key
if by and b'*' in by:
items = [(i, self.get(by.replace(b'*', self._encode(i)))) for i in items]
elif by in [None, b'nosort']:
items = [(i, i) for i in items]
else:
raise ValueError('invalid value for "by": %s' % by)
if by != b'nosort':
# if sorting, do alpha sort or float (default) and take desc flag into account
sort_type = alpha and str or float
items.sort(key=lambda x: sort_type(x[1]), reverse=bool(desc))
# results is a list of lists to support different styles of get and also groups
results = []
if get:
if isinstance(get, basestring):
# always deal with get specifiers as a list
get = [get]
for g in map(self._encode, get):
if g == b'#':
results.append([self.get(i) for i in items])
else:
results.append([self.get(g.replace(b'*', self._encode(i[0]))) for i in items])
else:
# if not using GET then returning just the item itself
results.append([i[0] for i in items])
# results to either list of tuples or list of values
if len(results) > 1:
results = list(zip(*results))
elif results:
results = results[0]
# apply the 'start' and 'num' to the results
if not start:
start = 0
if not num:
if start:
results = results[start:]
else:
end = start + num
results = results[start:end]
# if more than one GET then flatten if groups not wanted
if get and len(get) > 1:
if not groups:
results = list(chain(*results))
# either store value and return length of results or just return results
if store:
self.redis[self._encode(store)] = results
return len(results)
else:
return results
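    # Hypothetical example of the emulated sort() above (key name assumed):
    #   r.rpush('ids', 2, 1)                   # stored as [b'2', b'1']
    #   r.sort('ids')                          # -> [b'1', b'2'] (numeric sort)
    #   r.sort('ids', alpha=True, desc=True)   # -> [b'2', b'1'] (string sort, descending)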
# SCAN COMMANDS #
def _common_scan(self, values_function, cursor='0', match=None, count=10, key=None):
"""
Common scanning skeleton.
:param key: optional function used to identify what 'match' is applied to
"""
if count is None:
count = 10
cursor = int(cursor)
count = int(count)
if not count:
raise ValueError('if specified, count must be > 0: %s' % count)
values = values_function()
if cursor + count >= len(values):
# we reached the end, back to zero
result_cursor = 0
else:
result_cursor = cursor + count
values = values[cursor:cursor+count]
if match is not None:
regex = re.compile(b'^' + re.escape(self._encode(match)).replace(b'\\*', b'.*') + b'$')
if not key:
key = lambda v: v
values = [v for v in values if regex.match(key(v))]
return [result_cursor, values]
def scan(self, cursor='0', match=None, count=10):
"""Emulate scan."""
def value_function():
return sorted(self.redis.keys()) # sorted list for consistent order
return self._common_scan(value_function, cursor=cursor, match=match, count=count)
def scan_iter(self, match=None, count=10):
"""Emulate scan_iter."""
cursor = '0'
while cursor != 0:
cursor, data = self.scan(cursor=cursor, match=match, count=count)
for item in data:
yield item
def sscan(self, name, cursor='0', match=None, count=10):
"""Emulate sscan."""
def value_function():
members = list(self.smembers(name))
members.sort() # sort for consistent order
return members
return self._common_scan(value_function, cursor=cursor, match=match, count=count)
def sscan_iter(self, name, match=None, count=10):
"""Emulate sscan_iter."""
cursor = '0'
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor,
match=match, count=count)
for item in data:
yield item
def zscan(self, name, cursor='0', match=None, count=10):
"""Emulate zscan."""
def value_function():
values = self.zrange(name, 0, -1, withscores=True)
values.sort(key=lambda x: x[1]) # sort for consistent order
return values
return self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0]) # noqa
def zscan_iter(self, name, match=None, count=10):
"""Emulate zscan_iter."""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count)
for item in data:
yield item
def hscan(self, name, cursor='0', match=None, count=10):
"""Emulate hscan."""
def value_function():
values = self.hgetall(name)
values = list(values.items()) # list of tuples for sorting and matching
values.sort(key=lambda x: x[0]) # sort for consistent order
return values
scanned = self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0]) # noqa
scanned[1] = dict(scanned[1]) # from list of tuples back to dict
return scanned
def hscan_iter(self, name, match=None, count=10):
"""Emulate hscan_iter."""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
match=match, count=count)
for item in data.items():
yield item
# SET COMMANDS #
def sadd(self, key, *values):
"""Emulate sadd."""
if len(values) == 0:
raise ResponseError("wrong number of arguments for 'sadd' command")
redis_set = self._get_set(key, 'SADD', create=True)
before_count = len(redis_set)
redis_set.update(map(self._encode, values))
after_count = len(redis_set)
return after_count - before_count
def scard(self, key):
"""Emulate scard."""
redis_set = self._get_set(key, 'SADD')
return len(redis_set)
def sdiff(self, keys, *args):
"""Emulate sdiff."""
func = lambda left, right: left.difference(right)
return self._apply_to_sets(func, "SDIFF", keys, *args)
def sdiffstore(self, dest, keys, *args):
"""Emulate sdiffstore."""
result = self.sdiff(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sinter(self, keys, *args):
"""Emulate sinter."""
func = lambda left, right: left.intersection(right)
return self._apply_to_sets(func, "SINTER", keys, *args)
def sinterstore(self, dest, keys, *args):
"""Emulate sinterstore."""
result = self.sinter(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sismember(self, name, value):
"""Emulate sismember."""
redis_set = self._get_set(name, 'SISMEMBER')
if not redis_set:
return 0
result = self._encode(value) in redis_set
return 1 if result else 0
def smembers(self, name):
"""Emulate smembers."""
return self._get_set(name, 'SMEMBERS').copy()
def smove(self, src, dst, value):
"""Emulate smove."""
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True
def spop(self, name):
"""Emulate spop."""
redis_set = self._get_set(name, 'SPOP')
if not redis_set:
return None
member = choice(list(redis_set))
redis_set.remove(member)
if len(redis_set) == 0:
self.delete(name)
return member
def srandmember(self, name, number=None):
"""Emulate srandmember."""
redis_set = self._get_set(name, 'SRANDMEMBER')
if not redis_set:
return None if number is None else []
if number is None:
return choice(list(redis_set))
elif number > 0:
return sample(list(redis_set), min(number, len(redis_set)))
else:
return [choice(list(redis_set)) for _ in xrange(abs(number))]
def srem(self, key, *values):
"""Emulate srem."""
redis_set = self._get_set(key, 'SREM')
if not redis_set:
return 0
before_count = len(redis_set)
for value in values:
redis_set.discard(self._encode(value))
after_count = len(redis_set)
if before_count > 0 and len(redis_set) == 0:
self.delete(key)
return before_count - after_count
def sunion(self, keys, *args):
"""Emulate sunion."""
func = lambda left, right: left.union(right)
return self._apply_to_sets(func, "SUNION", keys, *args)
def sunionstore(self, dest, keys, *args):
"""Emulate sunionstore."""
result = self.sunion(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
# SORTED SET COMMANDS #
def zadd(self, name, *args, **kwargs):
zset = self._get_zset(name, "ZADD", create=True)
pieces = []
# args
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
for i in xrange(len(args) // 2):
# interpretation of args order depends on whether Redis
# or StrictRedis is used
score = args[2 * i + (0 if self.strict else 1)]
member = args[2 * i + (1 if self.strict else 0)]
pieces.append((member, score))
# kwargs
pieces.extend(kwargs.items())
insert_count = lambda member, score: 1 if zset.insert(self._encode(member), float(score)) else 0 # noqa
return sum((insert_count(member, score) for member, score in pieces))
def zcard(self, name):
zset = self._get_zset(name, "ZCARD")
return len(zset) if zset is not None else 0
def zcount(self, name, min, max):
zset = self._get_zset(name, "ZCOUNT")
if not zset:
return 0
return len(zset.scorerange(float(min), float(max)))
def zincrby(self, name, value, amount=1):
zset = self._get_zset(name, "ZINCRBY", create=True)
value = self._encode(value)
score = zset.score(value) or 0.0
score += float(amount)
zset[value] = score
return score
def zinterstore(self, dest, keys, aggregate=None):
aggregate_func = self._aggregate_func(aggregate)
members = {}
for key in keys:
zset = self._get_zset(key, "ZINTERSTORE")
if not zset:
return 0
for score, member in zset:
members.setdefault(member, []).append(score)
intersection = SortedSet()
for member, scores in members.items():
if len(scores) != len(keys):
continue
intersection[member] = reduce(aggregate_func, scores)
# always override existing keys
self.redis[self._encode(dest)] = intersection
return len(intersection)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
zset = self._get_zset(name, "ZRANGE")
if not zset:
return []
start, end = self._translate_range(len(zset), start, end)
func = self._range_func(withscores, score_cast_func)
return [func(item) for item in zset.range(start, end, desc)]
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
if (start is None) ^ (num is None):
raise RedisError('`start` and `num` must both be specified')
zset = self._get_zset(name, "ZRANGEBYSCORE")
if not zset:
return []
func = self._range_func(withscores, score_cast_func)
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
scorerange = zset.scorerange(min, max, start_inclusive=include_start, end_inclusive=include_end) # noqa
if start is not None and num is not None:
start, num = self._translate_limit(len(scorerange), int(start), int(num))
scorerange = scorerange[start:start + num]
return [func(item) for item in scorerange]
def zrank(self, name, value):
zset = self._get_zset(name, "ZRANK")
return zset.rank(self._encode(value)) if zset else None
def zrem(self, name, *values):
zset = self._get_zset(name, "ZREM")
if not zset:
return 0
count_removals = lambda value: 1 if zset.remove(self._encode(value)) else 0
removal_count = sum((count_removals(value) for value in values))
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zremrangebyrank(self, name, start, end):
zset = self._get_zset(name, "ZREMRANGEBYRANK")
if not zset:
return 0
start, end = self._translate_range(len(zset), start, end)
count_removals = lambda score, member: 1 if zset.remove(member) else 0
removal_count = sum((count_removals(score, member) for score, member in zset.range(start, end))) # noqa
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zremrangebyscore(self, name, min, max):
zset = self._get_zset(name, "ZREMRANGEBYSCORE")
if not zset:
return 0
count_removals = lambda score, member: 1 if zset.remove(member) else 0
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
removal_count = sum((count_removals(score, member)
for score, member in zset.scorerange(min, max,
start_inclusive=include_start,
end_inclusive=include_end)))
if removal_count > 0 and len(zset) == 0:
self.delete(name)
return removal_count
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
return self.zrange(name, start, end,
desc=True, withscores=withscores, score_cast_func=score_cast_func)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
if (start is None) ^ (num is None):
raise RedisError('`start` and `num` must both be specified')
zset = self._get_zset(name, "ZREVRANGEBYSCORE")
if not zset:
return []
func = self._range_func(withscores, score_cast_func)
include_start, min = self._score_inclusive(min)
include_end, max = self._score_inclusive(max)
scorerange = [x for x in reversed(zset.scorerange(float(min), float(max),
start_inclusive=include_start,
end_inclusive=include_end))]
if start is not None and num is not None:
start, num = self._translate_limit(len(scorerange), int(start), int(num))
scorerange = scorerange[start:start + num]
return [func(item) for item in scorerange]
def zrevrank(self, name, value):
zset = self._get_zset(name, "ZREVRANK")
if zset is None:
return None
rank = zset.rank(self._encode(value))
if rank is None:
return None
return len(zset) - rank - 1
def zscore(self, name, value):
zset = self._get_zset(name, "ZSCORE")
return zset.score(self._encode(value)) if zset is not None else None
def zunionstore(self, dest, keys, aggregate=None):
union = SortedSet()
aggregate_func = self._aggregate_func(aggregate)
for key in keys:
zset = self._get_zset(key, "ZUNIONSTORE")
if not zset:
continue
for score, member in zset:
if member in union:
union[member] = aggregate_func(union[member], score)
else:
union[member] = score
# always override existing keys
self.redis[self._encode(dest)] = union
return len(union)
# Script Commands #
def eval(self, script, numkeys, *keys_and_args):
"""Emulate eval"""
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args)
def script_exists(self, *args):
"""Emulates script_exists"""
return [arg in self.shas for arg in args]
def script_flush(self):
"""Emulate script_flush"""
self.shas.clear()
def script_kill(self):
"""Emulate script_kill"""
"""XXX: To be implemented, should not be called before that."""
raise NotImplementedError("Not yet implemented.")
def script_load(self, script):
"""Emulate script_load"""
sha_digest = sha1(script.encode("utf-8")).hexdigest()
self.shas[sha_digest] = script
return sha_digest
def register_script(self, script):
"""Emulate register_script"""
return Script(self, script, self.load_lua_dependencies)
def call(self, command, *args):
"""
Sends call to the function, whose name is specified by command.
Used by Script invocations and normalizes calls using standard
Redis arguments to use the expected redis-py arguments.
"""
command = self._normalize_command_name(command)
args = self._normalize_command_args(command, *args)
redis_function = getattr(self, command)
value = redis_function(*args)
return self._normalize_command_response(command, value)
def _normalize_command_name(self, command):
"""
Modifies the command string to match the redis client method name.
"""
command = command.lower()
if command == 'del':
return 'delete'
return command
def _normalize_command_args(self, command, *args):
"""
Modifies the command arguments to match the
strictness of the redis client.
"""
if command == 'zadd' and not self.strict and len(args) >= 3:
# Reorder score and name
zadd_args = [x for tup in zip(args[2::2], args[1::2]) for x in tup]
return [args[0]] + zadd_args
if command in ('zrangebyscore', 'zrevrangebyscore'):
# expected format is: <command> name min max start num with_scores score_cast_func
if len(args) <= 3:
# just plain min/max
return args
start, num = None, None
withscores = False
for i, arg in enumerate(args[3:], 3):
# keywords are case-insensitive
lower_arg = self._encode(arg).lower()
# handle "limit"
if lower_arg == b"limit" and i + 2 < len(args):
start, num = args[i + 1], args[i + 2]
# handle "withscores"
if lower_arg == b"withscores":
withscores = True
# do not expect to set score_cast_func
return args[:3] + (start, num, withscores)
return args
def _normalize_command_response(self, command, response):
if command in ('zrange', 'zrevrange', 'zrangebyscore', 'zrevrangebyscore'):
if response and isinstance(response[0], tuple):
return [value for tpl in response for value in tpl]
return response
# Config Set/Get commands #
def config_set(self, name, value):
"""
Set a configuration parameter.
"""
self.redis_config[name] = value
def config_get(self, pattern='*'):
"""
Get one or more configuration parameters.
"""
result = {}
for name, value in self.redis_config.items():
if fnmatch.fnmatch(name, pattern):
try:
result[name] = int(value)
except ValueError:
result[name] = value
return result
# PubSub commands #
def publish(self, channel, message):
self.pubsub[channel].append(message)
# Internal #
def _get_list(self, key, operation, create=False):
"""
Get (and maybe create) a list by name.
"""
return self._get_by_type(key, operation, create, b'list', [])
def _get_set(self, key, operation, create=False):
"""
Get (and maybe create) a set by name.
"""
return self._get_by_type(key, operation, create, b'set', set())
def _get_hash(self, name, operation, create=False):
"""
Get (and maybe create) a hash by name.
"""
return self._get_by_type(name, operation, create, b'hash', {})
def _get_zset(self, name, operation, create=False):
"""
Get (and maybe create) a sorted set by name.
"""
return self._get_by_type(name, operation, create, b'zset', SortedSet(), return_default=False) # noqa
def _get_by_type(self, key, operation, create, type_, default, return_default=True):
"""
Get (and maybe create) a redis data structure by name and type.
"""
key = self._encode(key)
if self.type(key) in [type_, b'none']:
if create:
return self.redis.setdefault(key, default)
else:
return self.redis.get(key, default if return_default else None)
raise TypeError("{} requires a {}".format(operation, type_))
def _translate_range(self, len_, start, end):
"""
Translate range to valid bounds.
"""
if start < 0:
start += len_
start = max(0, min(start, len_))
if end < 0:
end += len_
end = max(-1, min(end, len_ - 1))
return start, end
def _translate_limit(self, len_, start, num):
"""
Translate limit to valid bounds.
"""
if start > len_ or num <= 0:
return 0, 0
return min(start, len_), num
def _range_func(self, withscores, score_cast_func):
"""
Return a suitable function from (score, member)
"""
if withscores:
return lambda score_member: (score_member[1], score_cast_func(self._encode(score_member[0]))) # noqa
else:
return lambda score_member: score_member[1]
def _aggregate_func(self, aggregate):
"""
Return a suitable aggregate score function.
"""
funcs = {"sum": add, "min": min, "max": max}
func_name = aggregate.lower() if aggregate else 'sum'
try:
return funcs[func_name]
except KeyError:
raise TypeError("Unsupported aggregate: {}".format(aggregate))
def _apply_to_sets(self, func, operation, keys, *args):
"""Helper function for sdiff, sinter, and sunion"""
keys = self._list_or_args(keys, args)
if not keys:
raise TypeError("{} takes at least two arguments".format(operation.lower()))
left = self._get_set(keys[0], operation) or set()
for key in keys[1:]:
right = self._get_set(key, operation) or set()
left = func(left, right)
return left
def _list_or_args(self, keys, args):
"""
Shamelessly copied from redis-py.
"""
# returns a single list combining keys and args
try:
iter(keys)
# a string can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, basestring):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
def _score_inclusive(self, score):
if isinstance(score, basestring) and score[0] == '(':
return False, float(score[1:])
return True, float(score)
def _encode(self, value):
"Return a bytestring representation of the value. Taken from redis-py connection.py"
if isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = str(value).encode('utf-8')
elif isinstance(value, float):
value = repr(value).encode('utf-8')
elif not isinstance(value, basestring):
value = str(value).encode('utf-8')
else:
value = value.encode('utf-8', 'strict')
return value
def get_total_milliseconds(td):
return int((td.days * 24 * 60 * 60 + td.seconds) * 1000 + td.microseconds / 1000.0)
def mock_redis_client(**kwargs):
"""
Mock common.util.redis_client so we
can return a MockRedis object
instead of a Redis object.
"""
return MockRedis()
mock_redis_client.from_url = mock_redis_client
def mock_strict_redis_client(**kwargs):
"""
Mock common.util.redis_client so we
can return a MockRedis object
instead of a StrictRedis object.
"""
return MockRedis(strict=True)
mock_strict_redis_client.from_url = mock_strict_redis_client
| 33.445707
| 120
| 0.57254
|
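The MockRedis emulator above ends with two factory helpers meant to stand in wherever application code builds a Redis client. A minimal sketch of using one directly, assuming the module is installed and importable as `mockredis` (package and export names are assumptions, not stated in this dump):

from mockredis import mock_strict_redis_client  # assumed import path

# The factory ignores the connection kwargs a real redis.StrictRedis would take,
# so it can be swapped in wherever one is normally constructed.
client = mock_strict_redis_client(host='localhost', port=6379)

client.sadd('tags', 'a', 'b', 'a')               # SADD as emulated above
assert client.smembers('tags') == {b'a', b'b'}   # values are stored as bytes

client.zadd('scores', 1.0, 'alice', 2.0, 'bob')  # strict order: score then member
assert client.zscore('scores', 'alice') == 1.0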
5bf9e7622dc3ba0349225a86ae06909c4b62c563
| 15,254
|
py
|
Python
|
python-3.6.0/Tools/pybench/Lookups.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 1,463
|
2017-09-30T02:46:56.000Z
|
2022-03-30T15:11:05.000Z
|
python-3.6.0/Tools/pybench/Lookups.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 702
|
2016-12-02T23:47:21.000Z
|
2022-03-31T08:14:00.000Z
|
python-3.6.0/Tools/pybench/Lookups.py
|
emacslisp/python
|
5b89ddcc504108f0dfa1081e338e6475cf6ccd2f
|
[
"Apache-2.0"
] | 208
|
2018-01-17T05:55:55.000Z
|
2022-03-29T18:27:47.000Z
|
from pybench import Test
class SpecialClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
for i in range(self.rounds):
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
c.__a = 2
c.__b = 3
c.__c = 4
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
x = c.__a
x = c.__b
x = c.__c
def calibrate(self):
class c:
pass
for i in range(self.rounds):
pass
class NormalClassAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
for i in range(self.rounds):
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
c.a = 2
c.b = 3
c.c = 4
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
x = c.a
x = c.b
x = c.c
def calibrate(self):
class c:
pass
for i in range(self.rounds):
pass
class SpecialInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in range(self.rounds):
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
o.__a__ = 2
o.__b__ = 3
o.__c__ = 4
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
x = o.__a__
x = o.__b__
x = o.__c__
def calibrate(self):
class c:
pass
o = c()
for i in range(self.rounds):
pass
class NormalInstanceAttribute(Test):
version = 2.0
operations = 5*(12 + 12)
rounds = 100000
def test(self):
class c:
pass
o = c()
for i in range(self.rounds):
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
o.a = 2
o.b = 3
o.c = 4
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
x = o.a
x = o.b
x = o.c
def calibrate(self):
class c:
pass
o = c()
for i in range(self.rounds):
pass
class BuiltinMethodLookup(Test):
version = 2.0
operations = 5*(3*5 + 3*5)
rounds = 70000
def test(self):
l = []
d = {}
for i in range(self.rounds):
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
l.append
l.append
l.append
l.append
l.append
l.insert
l.insert
l.insert
l.insert
l.insert
l.sort
l.sort
l.sort
l.sort
l.sort
# d.has_key
# d.has_key
# d.has_key
# d.has_key
# d.has_key
d.items
d.items
d.items
d.items
d.items
d.get
d.get
d.get
d.get
d.get
def calibrate(self):
l = []
d = {}
for i in range(self.rounds):
pass
| 16.124736
| 37
| 0.255277
|
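The four benchmark classes above all follow the same pybench pattern: `operations` states how many lookups one pass of `test()` performs, `rounds` how many passes are timed, and `calibrate()` runs the bare loop so pybench can subtract its overhead. A stripped-down sketch of that shape (the class name and counts below are illustrative, not part of the suite):

from pybench import Test

class TinyAttributeLookup(Test):
    version = 2.0
    operations = 3          # three lookups per loop iteration in test()
    rounds = 100000

    def test(self):
        class c:
            pass
        c.a = 1
        for i in range(self.rounds):
            x = c.a
            x = c.a
            x = c.a

    def calibrate(self):
        # identical loop without the lookups, used to measure loop overhead
        class c:
            pass
        c.a = 1
        for i in range(self.rounds):
            pass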
f4afb54921b303447edc329c55c37cfcc5f42e6e
| 4,533
|
py
|
Python
|
TrainingExtensions/torch/src/python/aimet_torch/transformer_utils.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/torch/src/python/aimet_torch/transformer_utils.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/torch/src/python/aimet_torch/transformer_utils.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utils associated with transformer quantization handling """
from typing import Dict
import torch
from aimet_torch.qc_quantize_op import QcQuantizeWrapper
# current implementation sets mask to -6 by default.
# user can register for override on mask add op in an attention head.
MASK_OVERRIDE_VALUE = -6
# default attention types supported
# {attention block type name : mask_op_name}
SUPPORTED_ATTENTION_MASK_OVERRIDE_DICT = {'BertSelfAttention': 'mask_add', #BERT
'MultiHeadSelfAttention': 'mask_add', #DistilBERT
'Attention': 'mask_add', # GPT-2
'RobertaSelfAttention': 'mask_add'} #RoBERTa
def register_attention_mask_override(attention_type_name: str = None,
mask_op_name: str = None):
"""
Registers attention type and op within it to be clamped
:param attention_type_name: Attention type name, as string
:param mask_op_name: Mask op identifier within attention head, as string
:return:
"""
if attention_type_name is not None and mask_op_name is not None:
SUPPORTED_ATTENTION_MASK_OVERRIDE_DICT[attention_type_name] = mask_op_name
def get_supported_attention_types() -> Dict:
"""
returns dictionary of supported attention types with corresponding mask op name
:return:
"""
return SUPPORTED_ATTENTION_MASK_OVERRIDE_DICT
def get_attention_with_mask_add_quantizer_dict(model: torch.nn.Module) -> Dict:
"""
get attention head with associated mask add modules with their names
:param model: model
:return: dictionary of attention module to Tuple(mask add module, name)
"""
attention_with_mask_adds_dict = {}
supported_attention_mask_override_dict = get_supported_attention_types()
for module in model.modules():
# pylint: disable=protected-access
module_name = type(module)._get_name(module)
# find transformer attention head that is supported
if module_name in supported_attention_mask_override_dict:
for name, sub_module in module.named_modules():
# Within attention unit find mask add op (input op to SoftMax)
if name == supported_attention_mask_override_dict[module_name]:
# Override the quantizer that was added by default, to tf mode
if isinstance(sub_module, QcQuantizeWrapper) and sub_module.output_quantizer.enabled:
attention_with_mask_adds_dict[module] = (sub_module, name)
return attention_with_mask_adds_dict
| 43.171429
| 105
| 0.691154
|
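The helpers above keep a module-level dictionary of attention types, so adding support for a custom attention block is a single registration call made before quantization. A minimal sketch, assuming the module is importable as `aimet_torch.transformer_utils`; the attention class name below is a placeholder, and only `mask_add` matches the defaults shown above:

from aimet_torch import transformer_utils  # assumed import path

# Hypothetical custom attention block; the class name is illustrative.
transformer_utils.register_attention_mask_override(
    attention_type_name='MyCustomSelfAttention',
    mask_op_name='mask_add',
)

# The override dictionary now contains the custom entry alongside the defaults.
assert 'MyCustomSelfAttention' in transformer_utils.get_supported_attention_types()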
1d2a9996638f4f6d402c1938d9d00b79903364aa
| 5,762
|
py
|
Python
|
train.py
|
minhnhatvt/glamor-net
|
c12e1b97aa7354df126795f19402303a00166ec3
|
[
"MIT"
] | 20
|
2020-11-18T08:13:22.000Z
|
2022-03-22T11:34:20.000Z
|
train.py
|
minhnhatvt/glamor-net
|
c12e1b97aa7354df126795f19402303a00166ec3
|
[
"MIT"
] | 1
|
2021-11-17T09:40:56.000Z
|
2021-11-17T09:43:10.000Z
|
train.py
|
minhnhatvt/glamor-net
|
c12e1b97aa7354df126795f19402303a00166ec3
|
[
"MIT"
] | 4
|
2020-12-03T06:16:06.000Z
|
2021-12-22T07:41:50.000Z
|
import tensorflow as tf
from tensorflow.keras.utils import Progbar
from config import config
import numpy as np
import data_utils
import time
from data_utils import get_train_dataset
def train(model, optimizer, train_dataset, val_dataset=None, epochs=5, load_checkpoint=False):
# Define the metrics
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)
val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('val_accuracy')
batches_per_epoch = tf.data.experimental.cardinality(train_dataset).numpy()
# init values
best_val = 0
iter_count = 0
val_interval = config.val_interval # epoch
save_interval = config.save_interval # epoch
# setup checkpoints manager
checkpoint = tf.train.Checkpoint(step=tf.Variable(0), optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint, directory="./checkpoints", max_to_keep=5
)
if load_checkpoint:
status = checkpoint.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing....")
else:
print("Initializing...")
iter_count = checkpoint.step.numpy()
for epoch in range(epochs):
# Reset metrics every epoch
train_loss.reset_states()
train_accuracy.reset_states()
total_lambda_fc = []
print("Epoch {}".format(int(iter_count / batches_per_epoch)+1))
pb_i = Progbar(batches_per_epoch, width=30, stateful_metrics = ['acc'])
# one train step per loop
for x_context, x_face, y in train_dataset:
checkpoint.step.assign_add(1)
iter_count += 1
curr_epoch = int(iter_count / batches_per_epoch)
with tf.GradientTape() as tape:
y_pred = model(x_face, x_context, training=True)
loss = tf.keras.losses.SparseCategoricalCrossentropy()(y, y_pred)
train_loss(loss) # update metric
train_acc = train_accuracy(y, y_pred) # update metric
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients([
(grad, var) for (grad, var) in zip(gradients, model.trainable_variables) if grad is not None])
pb_i.add(1, [('acc', train_acc.numpy())])
save_path = manager.save()
if (curr_epoch) % save_interval == 0:
model.save_weights('weights_checkpoint/epoch_' + str(curr_epoch) + '/Model')
print('End of Epoch: {}, Iter: {}, Loss: {:.4}, Train Acc: {:.4} '.format(curr_epoch, iter_count,
train_loss.result(),
train_accuracy.result()))
if val_dataset is not None:
if (curr_epoch) % val_interval == 0: # validate
val_loss.reset_states()
val_accuracy.reset_states()
for x_context, x_face, y in val_dataset:
y_pred = model(x_face, x_context, training=False)
loss = tf.keras.losses.SparseCategoricalCrossentropy()(y, y_pred)
val_loss(loss) # update metric
val_accuracy(y, y_pred) # update metric
print('Val loss: {:.4}, Val Accuracy: {:.4}'.format(val_loss.result(), val_accuracy.result()))
print('===================================================')
if (val_accuracy.result() > best_val):
model.save_weights("weights_checkpoint/best_val/Model")
print("====Best validation model saved!====")
best_val = val_accuracy.result()
print()
print("Training done!")
print("Best validation accuracy {:.4}".format(best_val))
return model
def get_optimizer(train_dataset):
batches_per_epoch = tf.data.experimental.cardinality(train_dataset).numpy()
lr_init = config.lr
lr_decay = config.lr_decay
decay_steps = np.array(config.lr_steps) * batches_per_epoch
lrs = np.arange(decay_steps.shape[0] + 1)
lrs = lr_init * (lr_decay ** lrs)
lr_minbound = config.lr_minbound if config.lr_minbound else -np.inf
lrs = np.clip(lrs, a_min = lr_minbound, a_max = 1)
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
list(decay_steps), list(lrs))
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=config.momentum)
return optimizer
def eval(model, eval_dataset):
print("Evaluating model..")
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')
test_accuracy.reset_states()
num_class = config.num_classes
start = time.time()
batches_per_epoch = tf.data.experimental.cardinality(eval_dataset).numpy()
pb = Progbar(batches_per_epoch, width=30)
for x_context, x_face, y in eval_dataset:
scores = model(x_face, x_context, training=False)
test_accuracy(y, scores) # update the metric
y_pred = tf.argmax(scores, axis=1)
pb.add(1)
end = time.time()
print("Evaluating time: %d seconds" % ((end - start)))
val_acc = test_accuracy.result().numpy()
print("Evaluate accuracy: {:.4}".format(test_accuracy.result()))
if __name__ == '__main__':
print(get_optimizer(get_train_dataset()))
| 42.681481
| 115
| 0.613155
|
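`get_optimizer` above turns the configured epoch boundaries into step boundaries and decays the learning rate by a constant factor at each one. A small worked sketch of that arithmetic with invented config values (lr 0.1, decay 0.1, steps at epochs 30 and 60, 100 batches per epoch):

import numpy as np

lr_init, lr_decay, lr_steps = 0.1, 0.1, [30, 60]   # invented values for illustration
batches_per_epoch = 100

decay_steps = np.array(lr_steps) * batches_per_epoch               # [3000, 6000]
lrs = lr_init * (lr_decay ** np.arange(decay_steps.shape[0] + 1))  # [0.1, 0.01, 0.001]
# These two arrays become the boundaries and values handed to
# tf.keras.optimizers.schedules.PiecewiseConstantDecay: 0.1 until the first
# boundary, 0.01 until the second, 0.001 afterwards.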
2df6ec7f3669e00a3d8c57a34b9a45f9e9574097
| 38
|
py
|
Python
|
web/web/views.py
|
OPI-py/django_blog
|
43cb0079499a6397246fd01dc50212fd8d432431
|
[
"BSD-2-Clause"
] | null | null | null |
web/web/views.py
|
OPI-py/django_blog
|
43cb0079499a6397246fd01dc50212fd8d432431
|
[
"BSD-2-Clause"
] | null | null | null |
web/web/views.py
|
OPI-py/django_blog
|
43cb0079499a6397246fd01dc50212fd8d432431
|
[
"BSD-2-Clause"
] | null | null | null |
from django.http import HttpResponse
| 12.666667
| 36
| 0.842105
|
5f309afa1892b93616cd9a3782456dd49e55b6bc
| 1,129
|
py
|
Python
|
im/kibot/data/extract/check_realtime_feed.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
im/kibot/data/extract/check_realtime_feed.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
im/kibot/data/extract/check_realtime_feed.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
# Make an API call every 10 seconds to get the history of symbol `MSFT`
> check_realtime_feed.py -u $KIBOT_USERNAME -p $KIBOT_PASSWORD
"""
import logging
import time
import requests
import im.kibot.base.command as vkbcom
import im.kibot.data.config as vkdcon
_LOG = logging.getLogger(__name__)
# #############################################################################
# TODO(*): -> CheckRealtimeFeedCommand
class CheckReadtimeFeedCommand(vkbcom.KibotCommand):
def __init__(self) -> None:
super().__init__(
docstring=__doc__, requires_auth=True, requires_api_login=True
)
def customize_run(self) -> int:
# Download file.
while True:
response = requests.get(
url=vkdcon.API_ENDPOINT,
params=dict(
action="history", symbol="MSFT", interval="1", period="2"
),
)
print(f"received {len(response.text.split())} data points.")
time.sleep(10)
return 0
if __name__ == "__main__":
CheckReadtimeFeedCommand().run()
| 24.021277
| 79
| 0.575731
|
e15d61d9ac289e73a5dc78e34b16519ab993db63
| 7,029
|
py
|
Python
|
tests/test_basic.py
|
neuroailab/tnn
|
0d5e5dc6ab3669309e8c00c23da2928a04bc8d02
|
[
"MIT"
] | 88
|
2018-03-14T15:56:54.000Z
|
2022-03-22T17:19:39.000Z
|
tests/test_basic.py
|
neuroailab/tnn
|
0d5e5dc6ab3669309e8c00c23da2928a04bc8d02
|
[
"MIT"
] | null | null | null |
tests/test_basic.py
|
neuroailab/tnn
|
0d5e5dc6ab3669309e8c00c23da2928a04bc8d02
|
[
"MIT"
] | 19
|
2018-07-05T00:17:26.000Z
|
2021-11-15T06:22:17.000Z
|
from __future__ import absolute_import, division, print_function
import time
import tqdm
import numpy as np
import tensorflow as tf
from tnn import main
from tests import setup
BATCH_SIZE = 256
def test_mnist_fc():
test_mnist(kind='fc')
def test_mnist_conv():
test_mnist(kind='conv')
def test_mnist(kind='conv'):
data = {'images': np.random.standard_normal([BATCH_SIZE, 28*28]).astype(np.float32),
'labels': np.random.randint(10, size=BATCH_SIZE).astype(np.int32)}
if kind == 'conv':
data['images'] = np.reshape(data['images'], [-1, 28, 28, 1])
# initialize the benchmark model
with tf.variable_scope('benchmark'):
if kind == 'conv':
bench_targets = setup.mnist_conv(**data)
elif kind == 'fc':
bench_targets = setup.mnist_fc(**data)
else:
raise ValueError
bench_vars = {v.name[len('benchmark')+1:]:v for v in tf.global_variables()
if v.name.startswith('benchmark')}
bench_targets.update(bench_vars)
for name, var in bench_vars.items():
bench_targets['grad_' + name] = tf.gradients(bench_targets['loss'], var)
# initialize the tconvnet model
with tf.variable_scope('tconvnet'):
G = main.graph_from_json('json/mnist_{}.json'.format(kind))
main.init_nodes(G, batch_size=BATCH_SIZE)
input_seq = tf.constant(data['images'])
main.unroll(G, input_seq=input_seq)
tnn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=G.node['fc2']['outputs'][-1],
labels=tf.constant(data['labels']))
tnn_targets = {n: G.node[n]['outputs'][-1] for n in G}
tnn_targets['loss'] = tf.reduce_mean(tnn_loss)
tnn_vars = {v.name[len('tconvnet')+1:]:v for v in tf.global_variables()
if v.name.startswith('tconvnet') and 'memory_decay' not in v.name}
tnn_targets.update(tnn_vars)
for name, var in tnn_vars.items():
tnn_targets['grad_' + name] = tf.gradients(tnn_targets['loss'], var)
run(bench_targets, tnn_targets, nsteps=100)
def test_alexnet():
ims = np.random.standard_normal([BATCH_SIZE, 224, 224, 3])
labels = np.random.randint(1000, size=[BATCH_SIZE])
data = {'images': tf.constant(ims.astype(np.float32)),
'labels': tf.constant(labels.astype(np.int32))}
# initialize the benchmark model
with tf.variable_scope('benchmark'):
bench_targets = setup.alexnet(data['images'], data['labels'], 'benchmark', train=False)
bench_targets = {'loss': bench_targets['loss']}
# initialize the tconvnet model
with tf.variable_scope('tconvnet'):
G = main.graph_from_json('json/alexnet.json')
main.init_nodes(G, batch_size=BATCH_SIZE)
main.unroll(G, input_seq=data['images'])
tnn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=G.node['fc8']['outputs'][-1], labels=data['labels'])
tnn_targets = {'loss': tf.reduce_mean(tnn_loss)}
run(bench_targets, tnn_targets, nsteps=10, n_initial=10)
def run(bench_targets, tnn_targets, nsteps=100, n_initial=2, n_stable=50, check_close=True):
assert np.array_equal(sorted(tnn_targets.keys()), sorted(bench_targets.keys()))
opt = tf.train.MomentumOptimizer(learning_rate=.01, momentum=.9)
bench_targets['optimizer'] = opt.minimize(bench_targets['loss'])
opt = tf.train.MomentumOptimizer(learning_rate=.01, momentum=.9)
tnn_targets['optimizer'] = opt.minimize(tnn_targets['loss'])
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
sess.run(init)
for step in tqdm.trange(nsteps):
# check if the outputs are identical
if step < n_initial:
bench_res = sess.run(bench_targets)
tnn_res = sess.run(tnn_targets)
for name in bench_res:
if name != 'optimizer':
if check_close:
assert np.allclose(bench_res[name], tnn_res[name], atol=1e-2)
else:
assert np.array_equal(bench_res[name], tnn_res[name])
elif step > n_stable: # after that, the loss should be stable
_, bench_loss = sess.run([bench_targets['optimizer'], bench_targets['loss']])
_, tnn_loss = sess.run([tnn_targets['optimizer'], tnn_targets['loss']])
assert np.allclose(bench_loss, tnn_loss, atol=.1, rtol=.1)
else:
bench_loss = sess.run(bench_targets['loss'])
tnn_loss = sess.run(tnn_targets['loss'])
sess.run([bench_targets['optimizer'], tnn_targets['optimizer']])
sess.close()
def train_tnn_alexnet():
imagenet = setup.get_imagenet()
images_plc = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3])
labels_plc = tf.placeholder(tf.int64, shape=[BATCH_SIZE])
with tf.variable_scope('tconvnet'):
G = main.graph_from_json('json/alexnet.json')
main.init_nodes(G, batch_size=BATCH_SIZE)
main.unroll(G, input_seq=images_plc)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=G.node['fc8']['outputs'][-1], labels=labels_plc)
loss = tf.reduce_mean(loss)
optimizer = tf.train.MomentumOptimizer(learning_rate=.01, momentum=.9).minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
losses = []
for step in range(1000):
start = time.time()
images_batch, labels_batch = imagenet.next()
lo, _ = sess.run([loss, optimizer],
feed_dict={images_plc: images_batch, labels_plc: labels_batch})
end = time.time()
losses.append(lo)
print(step, '{:.4f}'.format(lo), '{:.3f}'.format(end - start))
assert np.mean(losses[-20:]) < 6.8
def memory_usage():
ims = np.random.standard_normal([BATCH_SIZE, 224, 224, 3])
labels = np.random.randint(1000, size=[BATCH_SIZE])
data = {'images': tf.constant(ims.astype(np.float32)),
'labels': tf.constant(labels.astype(np.int32))}
# initialize the benchmark model
# with tf.variable_scope('benchmark'):
# bench_targets = setup.alexnet(data['images'], data['labels'], 'benchmark', train=False)
# loss = bench_targets['loss']
with tf.variable_scope('tconvnet'):
G = main.graph_from_json('json/alexnet.json')
main.init_nodes(G, batch_size=BATCH_SIZE)
main.unroll(G, input_seq=data['images'])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=G.node['fc8']['outputs'][-1], labels=data['labels'])
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
sess.run(init)
sess.run(loss)
import pdb; pdb.set_trace()
if __name__ == '__main__':
test_mnist_fc()
tf.reset_default_graph()
test_mnist_conv()
tf.reset_default_graph()
test_alexnet()
| 39.05
| 125
| 0.646749
|
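Every test above builds the TNN side the same way: load the graph topology from JSON, initialise its nodes for a batch size, unroll it over the input, then read the last-timestep output of the readout layer. Condensed from the calls used in the tests (the JSON path and shapes are the ones the MNIST test uses; treat this as a sketch, not standalone documentation of the tnn API):

import tensorflow as tf
from tnn import main

G = main.graph_from_json('json/mnist_conv.json')        # graph topology from JSON
main.init_nodes(G, batch_size=256)                      # allocate per-node state
images = tf.zeros([256, 28, 28, 1], dtype=tf.float32)   # stand-in input batch
main.unroll(G, input_seq=images)                        # build the unrolled TF graph
logits = G.node['fc2']['outputs'][-1]                   # final-timestep readout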
bca1599745cad89ecf409b4623350eb07e2406d4
| 4,028
|
py
|
Python
|
reg/permutation_imp.py
|
sushmaakoju/regression-api
|
d94376e7c7d30f0ee5e0b3a7815371f9cc32c6cd
|
[
"MIT"
] | null | null | null |
reg/permutation_imp.py
|
sushmaakoju/regression-api
|
d94376e7c7d30f0ee5e0b3a7815371f9cc32c6cd
|
[
"MIT"
] | null | null | null |
reg/permutation_imp.py
|
sushmaakoju/regression-api
|
d94376e7c7d30f0ee5e0b3a7815371f9cc32c6cd
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import matplotlib
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.dates as mdates
from mpl_toolkits import mplot3d
from sklearn.inspection import permutation_importance
import os
__all__ =["plot_benchmark_permutation_importance", "plot_lasso_permutation_importance",
"plot_svr_permutation_importance", "plot_rfr_permutation_importance",
"plot_br_permutation_importance"]
def plot_benchmark_permutation_importance(trainlist, plots_obj, features):
benchmark_dt, X_train, y_train = trainlist
result = permutation_importance(benchmark_dt, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(benchmark_dt.feature_importances_)
tree_indices = np.arange(0, len(benchmark_dt.feature_importances_)) + 0.5
plots_obj.plot_perm_importances(features[perm_sorted_idx].tolist(),
result.importances[perm_sorted_idx].T, ['Decision Tree Regression', 'dtr'])
def plot_lasso_permutation_importance(trainlist, plots_obj, features):
gridsearch_lasso_cv, X_train, y_train = trainlist
result_lasso = permutation_importance(gridsearch_lasso_cv, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result_lasso.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(gridsearch_lasso_cv.best_estimator_.coef_)
tree_indices = np.arange(0, len(gridsearch_lasso_cv.best_estimator_.coef_)) + 0.5
print(perm_sorted_idx,tree_importance_sorted_idx )
plots_obj.plot_perm_importances(features[perm_sorted_idx].tolist(),
result_lasso.importances[perm_sorted_idx].T, ['Lasso Regression', 'lr'])
def plot_svr_permutation_importance(trainlist, plots_obj, features):
gridsearch_svr_feat, X_train, y_train = trainlist
print(features, type(X_train))
result_svr = permutation_importance(gridsearch_svr_feat, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result_svr.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(gridsearch_svr_feat.best_estimator_.coef_[0])
tree_indices = np.arange(0, len(gridsearch_svr_feat.best_estimator_.coef_[0])) + 0.5
print(perm_sorted_idx,tree_importance_sorted_idx )
plots_obj.plot_perm_importances(features[perm_sorted_idx].tolist(),
result_svr.importances[perm_sorted_idx].T,['Support vector Regression', 'svr'])
def plot_rfr_permutation_importance(trainlist, plots_obj, features):
rfr_gridsearch_feat, X_train, y_train = trainlist
result_rfr = permutation_importance(rfr_gridsearch_feat, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result_rfr.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(rfr_gridsearch_feat.best_estimator_.feature_importances_)
tree_indices = np.arange(0, len(rfr_gridsearch_feat.best_estimator_.feature_importances_)) + 0.5
print(perm_sorted_idx,tree_importance_sorted_idx )
plots_obj.plot_perm_importances(features[perm_sorted_idx].tolist(), result_rfr.importances[perm_sorted_idx].T,
['Random Forest Regression', 'rfr'])
def plot_br_permutation_importance(trainlist, plots_obj, features):
bay_feat, X_train, y_train = trainlist
result_br = permutation_importance(bay_feat, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result_br.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(bay_feat.coef_)
tree_indices = np.arange(0, len(bay_feat.coef_)) + 0.5
print(perm_sorted_idx,tree_importance_sorted_idx )
plots_obj.plot_perm_importances(features[perm_sorted_idx].tolist(),
result_br.importances[perm_sorted_idx].T, ['Bayesian Ridge Regression', 'br'])
| 47.952381
| 114
| 0.758689
|
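Each plotting helper above repeats the same core step: a fitted model plus its training data go into `permutation_importance` with 10 repeats, then the importances are reordered by their mean before being handed to the plotting object. That step in isolation, on invented toy data and without the project's plotting wrapper:

import numpy as np
from sklearn.inspection import permutation_importance
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(42)
X = rng.normal(size=(200, 3))
y = 3 * X[:, 0] + rng.normal(scale=0.1, size=200)     # only feature 0 matters

model = DecisionTreeRegressor(random_state=42).fit(X, y)
result = permutation_importance(model, X, y, n_repeats=10, random_state=42)

perm_sorted_idx = result.importances_mean.argsort()   # least to most important
print(perm_sorted_idx)                                # feature 0 should come last
print(result.importances[perm_sorted_idx].T.shape)    # (n_repeats, n_features), as plotted above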