| Column | Type | Range / Values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4–1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |

(⌀ = column contains null values)
hexsha: 8142469152248a85d9c74dde7399667a9998d711 | size: 15,301 | ext: py | lang: Python
max_stars: zerver/openapi/markdown_extension.py @ bhargavgajera10/zulip (head 165decee849c5437742094776ce43ccfdcf05764, licenses ["Apache-2.0"], count 1, events 2020-10-02T07:39:04.000Z to 2020-10-02T07:39:04.000Z)
max_issues: zerver/openapi/markdown_extension.py @ bhargavgajera10/zulip (head 165decee849c5437742094776ce43ccfdcf05764, licenses ["Apache-2.0"], count null, events null to null)
max_forks: zerver/openapi/markdown_extension.py @ bhargavgajera10/zulip (head 165decee849c5437742094776ce43ccfdcf05764, licenses ["Apache-2.0"], count null, events null to null)
content:
import re
import json
import inspect
from django.conf import settings
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List, Tuple, Pattern
import markdown
import zerver.openapi.python_examples
from zerver.openapi.openapi import get_openapi_fixture, openapi_spec, get_openapi_description
MACRO_REGEXP = re.compile(
r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
PYTHON_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
MACRO_REGEXP_DESC = re.compile(r'\{generate_api_description(\(\s*(.+?)\s*\))}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
DEFAULT_AUTH_EMAIL = "BOT_EMAIL_ADDRESS"
DEFAULT_AUTH_API_KEY = "BOT_API_KEY"
DEFAULT_EXAMPLE = {
"integer": 1,
"string": "demo",
"boolean": False,
}
def parse_language_and_options(input_str: Optional[str]) -> Tuple[str, Dict[str, Any]]:
if not input_str:
return ("", {})
language_and_options = re.match(r"(?P<language>\w+)(,\s*(?P<options>[\"\'\w\d\[\],= ]+))?", input_str)
assert(language_and_options is not None)
kwargs_pattern = re.compile(r"(?P<key>\w+)\s*=\s*(?P<value>[\'\"\w\d]+|\[[\'\",\w\d ]+\])")
language = language_and_options.group("language")
assert(language is not None)
if language_and_options.group("options"):
_options = kwargs_pattern.finditer(language_and_options.group("options"))
options = {}
for m in _options:
options[m.group("key")] = json.loads(m.group("value").replace("'", '"'))
return (language, options)
return (language, {})
def extract_code_example(source: List[str], snippet: List[str],
example_regex: Pattern[str]) -> List[str]:
start = -1
end = -1
for line in source:
match = example_regex.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
source = source[end + 1:]
return extract_code_example(source, snippet, example_regex)
def render_python_code_example(function: str, admin_config: Optional[bool]=False,
**kwargs: Any) -> List[str]:
method = zerver.openapi.python_examples.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_code_example(function_source_lines, [], PYTHON_EXAMPLE_REGEX)
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('print(result)')
code_example.append('\n')
code_example.append('```')
return code_example
def curl_method_arguments(endpoint: str, method: str,
api_url: str) -> List[str]:
# We also include the -sS verbosity arguments here.
method = method.upper()
url = "{}/v1{}".format(api_url, endpoint)
valid_methods = ["GET", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
if method == "GET":
# Then we need to make sure that each -d option translates to becoming
# a GET parameter (in the URL) and not a POST parameter (in the body).
# TODO: remove the -X part by updating the linting rule. It's redundant.
return ["-sSX", "GET", "-G", url]
elif method in valid_methods:
return ["-sSX", method, url]
else:
msg = "The request method {} is not one of {}".format(method,
valid_methods)
raise ValueError(msg)
def get_openapi_param_example_value_as_string(endpoint: str, method: str, param: Dict[str, Any],
curl_argument: bool=False) -> str:
if "type" in param["schema"]:
param_type = param["schema"]["type"]
else:
# Hack: Ideally, we'd extract a common function for handling
# oneOf values in types and do something with the resulting
# union type. But for this logic's purpose, it's good enough
# to just check the first parameter.
param_type = param["schema"]["oneOf"][0]["type"]
param_name = param["name"]
if param_type in ["object", "array"]:
example_value = param.get("example", None)
if not example_value:
msg = """All array and object type request parameters must have
concrete examples. The openAPI documentation for {}/{} is missing an example
value for the {} parameter. Without this we cannot automatically generate a
cURL example.""".format(endpoint, method, param_name)
raise ValueError(msg)
ordered_ex_val_str = json.dumps(example_value, sort_keys=True)
if curl_argument:
return " --data-urlencode {}='{}'".format(param_name, ordered_ex_val_str)
return ordered_ex_val_str # nocoverage
else:
example_value = param.get("example", DEFAULT_EXAMPLE[param_type])
if isinstance(example_value, bool):
example_value = str(example_value).lower()
if param["schema"].get("format", "") == "json":
example_value = json.dumps(example_value)
if curl_argument:
return " -d '{}={}'".format(param_name, example_value)
return example_value
def generate_curl_example(endpoint: str, method: str,
api_url: str,
auth_email: str=DEFAULT_AUTH_EMAIL,
auth_api_key: str=DEFAULT_AUTH_API_KEY,
exclude: Optional[List[str]]=None,
include: Optional[List[str]]=None) -> List[str]:
if exclude is not None and include is not None:
raise AssertionError("exclude and include cannot be set at the same time.")
lines = ["```curl"]
operation = endpoint + ":" + method.lower()
operation_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
global_security = openapi_spec.spec()['security']
operation_params = operation_entry.get("parameters", [])
operation_request_body = operation_entry.get("requestBody", None)
operation_security = operation_entry.get("security", None)
if settings.RUNNING_OPENAPI_CURL_TEST: # nocoverage
from zerver.openapi.curl_param_value_generators import patch_openapi_example_values
operation_params, operation_request_body = patch_openapi_example_values(operation, operation_params,
operation_request_body)
format_dict = {}
for param in operation_params:
if param["in"] != "path":
continue
example_value = get_openapi_param_example_value_as_string(endpoint, method, param)
format_dict[param["name"]] = example_value
example_endpoint = endpoint.format_map(format_dict)
curl_first_line_parts = ["curl"] + curl_method_arguments(example_endpoint, method,
api_url)
lines.append(" ".join(curl_first_line_parts))
insecure_operations = ['/dev_fetch_api_key:post']
if operation_security is None:
if global_security == [{'basicAuth': []}]:
authentication_required = True
else:
raise AssertionError("Unhandled global securityScheme."
+ " Please update the code to handle this scheme.")
elif operation_security == []:
if operation in insecure_operations:
authentication_required = False
else:
raise AssertionError("Unknown operation without a securityScheme. "
+ "Please update insecure_operations.")
else:
raise AssertionError("Unhandled securityScheme. Please update the code to handle this scheme.")
if authentication_required:
lines.append(" -u %s:%s" % (auth_email, auth_api_key))
for param in operation_params:
if param["in"] == "path":
continue
param_name = param["name"]
if include is not None and param_name not in include:
continue
if exclude is not None and param_name in exclude:
continue
example_value = get_openapi_param_example_value_as_string(endpoint, method, param,
curl_argument=True)
lines.append(example_value)
if "requestBody" in operation_entry:
properties = operation_entry["requestBody"]["content"]["multipart/form-data"]["schema"]["properties"]
for key, property in properties.items():
lines.append(' -F "{}=@{}"'.format(key, property["example"]))
for i in range(1, len(lines)-1):
lines[i] = lines[i] + " \\"
lines.append("```")
return lines
def render_curl_example(function: str, api_url: str,
exclude: Optional[List[str]]=None,
include: Optional[List[str]]=None) -> List[str]:
""" A simple wrapper around generate_curl_example. """
parts = function.split(":")
endpoint = parts[0]
method = parts[1]
kwargs: Dict[str, Any] = dict()
if len(parts) > 2:
kwargs["auth_email"] = parts[2]
if len(parts) > 3:
kwargs["auth_api_key"] = parts[3]
kwargs["api_url"] = api_url
kwargs["exclude"] = exclude
kwargs["include"] = include
return generate_curl_example(endpoint, method, **kwargs)
SUPPORTED_LANGUAGES: Dict[str, Any] = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
},
'curl': {
'render': render_curl_example
}
}
class APIMarkdownExtension(Extension):
def __init__(self, api_url: Optional[str]) -> None:
self.config = {
'api_url': [
api_url,
'API URL to use when rendering curl examples'
]
}
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
md.preprocessors.add(
'generate_api_description', APIDescriptionPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super().__init__(md)
self.api_url = config['api_url']
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language, options = parse_language_and_options(match.group(2))
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if self.api_url is None:
raise AssertionError("Cannot render curl API examples without API URL set.")
options['api_url'] = self.api_url
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function, **options)
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('``` json')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
class APIDescriptionPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super().__init__(md)
self.api_url = config['api_url']
def run(self, lines: List[str]) -> List[str]:
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP_DESC.search(line)
if match:
function = match.group(2)
text = self.render_description(function)
# The line that contains the directive to include the macro
# may be preceded or followed by text or tags, in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = MACRO_REGEXP_DESC.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_description(self, function: str) -> List[str]:
description: List[str] = []
path, method = function.rsplit(':', 1)
description_dict = get_openapi_description(path, method)
description_dict = description_dict.replace('{{api_url}}', self.api_url)
description.extend(description_dict.splitlines())
return description
def makeExtension(*args: Any, **kwargs: str) -> APIMarkdownExtension:
return APIMarkdownExtension(*args, **kwargs)
avg_line_length: 39.846354 | max_line_length: 109 | alphanum_fraction: 0.595974

hexsha: 711806672e45024b8ec0958f9ddd554fa1193b0f | size: 2,451 | ext: py | lang: Python
max_stars: vega/metrics/pytorch/r2score.py @ jie311/vega (head 1bba6100ead802697e691403b951e6652a99ccae, licenses ["MIT"], count 724, events 2020-06-22T12:05:30.000Z to 2022-03-31T07:10:54.000Z)
max_issues: vega/metrics/pytorch/r2score.py @ jie311/vega (head 1bba6100ead802697e691403b951e6652a99ccae, licenses ["MIT"], count 147, events 2020-06-30T13:34:46.000Z to 2022-03-29T11:30:17.000Z)
max_forks: vega/metrics/pytorch/r2score.py @ jie311/vega (head 1bba6100ead802697e691403b951e6652a99ccae, licenses ["MIT"], count 160, events 2020-06-29T18:27:58.000Z to 2022-03-23T08:42:21.000Z)
content:
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Metric of Regression task."""
import numpy as np
from scipy import stats
import torch
from torch.nn import functional as F
from vega.metrics.pytorch.metrics import MetricBase
from vega.common import ClassFactory, ClassType


@ClassFactory.register(ClassType.METRIC, alias='r2score')
class R2Score(MetricBase):
    """Calculate R2 Score between output and target."""

    __metric_name__ = 'r2score'

    def __init__(self):
        """Init R2 Score metric."""
        self.ess = 0
        self.tss = 0
        self.sum = 0
        self.num = 0
        self.pfm = 0
        self.targets = None
        self.outputs = None

    def __call__(self, output, target, *args, **kwargs):
        """Forward and calculate r2 score."""
        output, target = torch.squeeze(output), torch.squeeze(target)
        temp_mean = torch.sum(target) / target.size(0)
        temp_ess = F.mse_loss(target, output, reduction='sum').cpu().item()
        temp_tss = F.mse_loss(target, temp_mean.repeat(target.size(0)), reduction='sum').cpu().item()
        temp_r2_score = 1 - temp_ess / temp_tss
        self.sum += torch.sum(target)
        self.num += target.size(0)
        mean = self.sum / self.num
        self.ess += temp_ess
        if self.targets is not None:
            self.targets = torch.cat((self.targets, target), dim=0)
        else:
            self.targets = target
        self.tss = F.mse_loss(self.targets, mean.repeat(self.num), reduction='sum').cpu().item()
        self.pfm = 1 - self.ess / self.tss
        return temp_r2_score

    @property
    def objective(self):
        """Define reward mode, default is max."""
        return 'MAX'

    def reset(self):
        """Reset states for new evaluation after each epoch."""
        self.ess = 0
        self.tss = 0
        self.sum = 0
        self.num = 0
        self.pfm = 0
        self.targets = None
        self.outputs = None

    def summary(self):
        """Summary all cached records, here is the last pfm record."""
        return self.pfm
avg_line_length: 33.575342 | max_line_length: 101 | alphanum_fraction: 0.631987

hexsha: 8ff1ea505080691bf556ef93bdd8302cde815e94 | size: 270 | ext: py | lang: Python
max_stars: minecraft_learns/__init__.py @ Nathan-Nesbitt/Minecraft_Learns (head 089dd51c9514fa94b24630a034c257306722f64b, licenses ["MIT"], count null, events null to null)
max_issues: minecraft_learns/__init__.py @ Nathan-Nesbitt/Minecraft_Learns (head 089dd51c9514fa94b24630a034c257306722f64b, licenses ["MIT"], count 1, events 2021-04-22T01:02:50.000Z to 2021-04-22T01:02:50.000Z)
max_forks: minecraft_learns/__init__.py @ Nathan-Nesbitt/Minecraft_Learns (head 089dd51c9514fa94b24630a034c257306722f64b, licenses ["MIT"], count null, events null to null)
content:
"""
Init file for the PIP package config.
Written By: Nathan Nesbitt
Date: 2021-01-15
"""
from minecraft_learns.common import *
from minecraft_learns.data import Data
from minecraft_learns.errors import *
from minecraft_learns.graphing import *
avg_line_length: 22.5 | max_line_length: 42 | alphanum_fraction: 0.725926

hexsha: a3e475c559318500b7dc6651e374a8bbc09396d9 | size: 1,442 | ext: py | lang: Python
max_stars: roster_own.py @ nocibambi/ds-practice (head 9b6c4414f4700fb0bd017101b5a61c9d824a9b98, licenses ["MIT"], count null, events null to null)
max_issues: roster_own.py @ nocibambi/ds-practice (head 9b6c4414f4700fb0bd017101b5a61c9d824a9b98, licenses ["MIT"], count null, events null to null)
max_forks: roster_own.py @ nocibambi/ds-practice (head 9b6c4414f4700fb0bd017101b5a61c9d824a9b98, licenses ["MIT"], count null, events null to null)
content:
import json
import sqlite3

conn = sqlite3.connect('rosterdb.sqlite')
cur = conn.cursor()

cur.executescript('''
DROP TABLE IF EXISTS User;
DROP TABLE IF EXISTS Member;
DROP TABLE IF EXISTS Course;

CREATE TABLE User (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    name TEXT UNIQUE
);

CREATE TABLE Course (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    title TEXT UNIQUE
);

CREATE TABLE Member (
    user_id INTEGER,
    course_id INTEGER,
    role INTEGER,
    -- This is a composite primary key
    -- There can be multiple instances of the same foreign keys
    -- but their combinations need to be unique
    PRIMARY KEY (user_id, course_id)
)
''')

fname = input("File to load:")
if not fname: fname = ("roster_data.json")

json_text = open(fname).read()
json_data = json.loads(json_text)

for item in json_data:
    user_name = item[0]
    course_code = item[1]
    role = item[2]

    cur.execute('INSERT OR IGNORE INTO User (name) VALUES (?)', (user_name,))
    cur.execute('SELECT id FROM User WHERE name = ?', (user_name,))
    user_id = cur.fetchone()[0]

    cur.execute('INSERT OR IGNORE INTO Course (title) VALUES (?)', (course_code,))
    cur.execute('SELECT id FROM Course WHERE title = ?', (course_code,))
    course_id = cur.fetchone()[0]

    cur.execute('INSERT OR REPLACE INTO Member (user_id, course_id, role) VALUES (?, ?, ?)', (user_id, course_id, role))

conn.commit()
conn.close()
avg_line_length: 25.298246 | max_line_length: 120 | alphanum_fraction: 0.685853

hexsha: e85d1c4a596eddac093a85c83d259b88dc47884b | size: 2,215 | ext: py | lang: Python
max_stars: nets/TSP_edge_classification/gcn_net.py @ SauravMaheshkar/benchmarking-gnns (head 4665316322527634c23307556b63291c69dac4b0, licenses ["MIT"], count null, events null to null)
max_issues: nets/TSP_edge_classification/gcn_net.py @ SauravMaheshkar/benchmarking-gnns (head 4665316322527634c23307556b63291c69dac4b0, licenses ["MIT"], count 6, events 2022-01-06T21:27:06.000Z to 2022-01-19T06:28:56.000Z)
max_forks: nets/TSP_edge_classification/gcn_net.py @ SauravMaheshkar/benchmarking-gnns (head 4665316322527634c23307556b63291c69dac4b0, licenses ["MIT"], count null, events null to null)
content:
import torch
import torch.nn as nn
import torch.nn.functional as F

"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""

from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout


class GCNNet(nn.Module):
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params["in_dim"]
        hidden_dim = net_params["hidden_dim"]
        out_dim = net_params["out_dim"]
        n_classes = net_params["n_classes"]
        in_feat_dropout = net_params["in_feat_dropout"]
        dropout = net_params["dropout"]
        n_layers = net_params["L"]
        self.readout = net_params["readout"]
        self.batch_norm = net_params["batch_norm"]
        self.residual = net_params["residual"]
        self.n_classes = n_classes
        self.device = net_params["device"]

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList(
            [
                GCNLayer(
                    hidden_dim,
                    hidden_dim,
                    F.relu,
                    dropout,
                    self.batch_norm,
                    self.residual,
                )
                for _ in range(n_layers - 1)
            ]
        )
        self.layers.append(
            GCNLayer(
                hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual
            )
        )
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)

    def forward(self, g, h, e):
        h = self.embedding_h(h.float())
        h = self.in_feat_dropout(h)
        for conv in self.layers:
            h = conv(g, h)
        g.ndata["h"] = h

        def _edge_feat(edges):
            e = torch.cat([edges.src["h"], edges.dst["h"]], dim=1)
            e = self.MLP_layer(e)
            return {"e": e}

        g.apply_edges(_edge_feat)
        return g.edata["e"]

    def loss(self, pred, label):
        criterion = nn.CrossEntropyLoss(weight=None)
        loss = criterion(pred, label)
        return loss
avg_line_length: 29.932432 | max_line_length: 109 | alphanum_fraction: 0.565688

hexsha: c29ed961b8e78f2e37551abe2ae2d0b836436ea7 | size: 1,102 | ext: py | lang: Python
max_stars: game/game/middleware.py @ ClaytonTurner/pylti1.3-django-example (head 2ee32aa531f526144a523f92fcf88442396060fe, licenses ["MIT"], count 10, events 2020-04-11T13:43:42.000Z to 2021-12-30T18:46:31.000Z)
max_issues: game/game/middleware.py @ ClaytonTurner/pylti1.3-django-example (head 2ee32aa531f526144a523f92fcf88442396060fe, licenses ["MIT"], count 12, events 2020-05-06T01:49:32.000Z to 2022-01-31T17:20:08.000Z)
max_forks: game/game/middleware.py @ ClaytonTurner/pylti1.3-django-example (head 2ee32aa531f526144a523f92fcf88442396060fe, licenses ["MIT"], count 13, events 2020-04-01T15:15:35.000Z to 2022-03-29T02:48:52.000Z)
content:
import django
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin


class SameSiteMiddleware(MiddlewareMixin):
    def process_response(self, request, response):
        django_support_samesite_none = django.VERSION[0] > 3 \
            or (django.VERSION[0] == 3 and django.VERSION[1] >= 1)
        if request.is_secure() and not django_support_samesite_none:
            session_cookie_samesite = getattr(settings, 'SESSION_COOKIE_SAMESITE', None)
            csrf_cookie_samesite = getattr(settings, 'CSRF_COOKIE_SAMESITE', None)
            session_cookie_name = getattr(settings, 'SESSION_COOKIE_NAME', None)
            csrf_cookie_name = getattr(settings, 'CSRF_COOKIE_NAME', None)
            if session_cookie_samesite is None and session_cookie_name in response.cookies:
                response.cookies[session_cookie_name]['samesite'] = 'None'
            if csrf_cookie_samesite is None and csrf_cookie_name in response.cookies:
                response.cookies[csrf_cookie_name]['samesite'] = 'None'
        return response
avg_line_length: 47.913043 | max_line_length: 93 | alphanum_fraction: 0.69147

hexsha: 668202189074a94e7243f7bc079586a21f4c2753 | size: 87 | ext: py | lang: Python
max_stars: plugins/digitalocean/komand_digitalocean/actions/retrieve_existing_floating_ip/__init__.py @ lukaszlaszuk/insightconnect-plugins (head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892, licenses ["MIT"], count 46, events 2019-06-05T20:47:58.000Z to 2022-03-29T10:18:01.000Z)
max_issues: plugins/digitalocean/komand_digitalocean/actions/retrieve_existing_floating_ip/__init__.py @ lukaszlaszuk/insightconnect-plugins (head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892, licenses ["MIT"], count 386, events 2019-06-07T20:20:39.000Z to 2022-03-30T17:35:01.000Z)
max_forks: plugins/digitalocean/komand_digitalocean/actions/retrieve_existing_floating_ip/__init__.py @ lukaszlaszuk/insightconnect-plugins (head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892, licenses ["MIT"], count 43, events 2019-07-09T14:13:58.000Z to 2022-03-28T12:04:46.000Z)
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import RetrieveExistingFloatingIp
avg_line_length: 29 | max_line_length: 46 | alphanum_fraction: 0.816092

hexsha: 9b586a30d8d76e1420241fd5f90cdd0687ce4f9d | size: 2,110 | ext: py | lang: Python
max_stars: setup.py @ andrewrfreed/flask-oidc-ext (head ae2715ca037ebffabc01449c8c78ce78cdf39209, licenses ["BSD-2-Clause"], count null, events null to null)
max_issues: setup.py @ andrewrfreed/flask-oidc-ext (head ae2715ca037ebffabc01449c8c78ce78cdf39209, licenses ["BSD-2-Clause"], count null, events null to null)
max_forks: setup.py @ andrewrfreed/flask-oidc-ext (head ae2715ca037ebffabc01449c8c78ce78cdf39209, licenses ["BSD-2-Clause"], count null, events null to null)
content:
import os.path
import io
import sys
from setuptools import setup

# This check is to make sure we checkout docs/_themes before running sdist
if not os.path.exists("./docs/_themes/README"):
    print("Please make sure you have docs/_themes checked out while running setup.py!")
    if os.path.exists(".git"):
        print(
            "You seem to be using a git checkout, please execute the following commands to get the docs/_themes directory:"
        )
        print(" - git submodule init")
        print(" - git submodule update")
    else:
        print(
            "You seem to be using a release. Please use the release tarball from PyPI instead of the archive from GitHub"
        )
    sys.exit(1)

here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, "README.rst")) as f:
    readme = f.read()

setup(
    name="flask-oidc-ext",
    description="OpenID Connect extension for Flask",
    long_description=readme,
    url="https://github.com/svintit/flask-oidc-ext",
    author="Erica Ehrhardt, Patrick Uiterwijk, Traian Svinti",
    author_email="traian.svinti@gmail.com",
    version="1.4.2",
    packages=["flask_oidc_ext"],
    install_requires=["Flask", "itsdangerous", "oauth2client", "six",],
    tests_require=["nose", "mock"],
    entry_points={
        "console_scripts": ["oidc-register=flask_oidc_ext.registration_util:main"],
    },
    zip_safe=False,
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Flask",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
avg_line_length: 35.762712 | max_line_length: 123 | alphanum_fraction: 0.636019

hexsha: f4e38dba3b74174aef831001c5060cda0b66c9db | size: 476 | ext: py | lang: Python
max_stars: hackerearth/Algorithms/Equalize strings (1)/test.py @ ATrain951/01.python-com_Qproject (head c164dd093954d006538020bdf2e59e716b24d67c, licenses ["MIT"], count 4, events 2020-07-24T01:59:50.000Z to 2021-07-24T15:14:08.000Z)
max_issues: hackerearth/Algorithms/Equalize strings (1)/test.py @ ATrain951/01.python-com_Qproject (head c164dd093954d006538020bdf2e59e716b24d67c, licenses ["MIT"], count null, events null to null)
max_forks: hackerearth/Algorithms/Equalize strings (1)/test.py @ ATrain951/01.python-com_Qproject (head c164dd093954d006538020bdf2e59e716b24d67c, licenses ["MIT"], count null, events null to null)
content:
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch


class TestQ(unittest.TestCase):
    @patch('builtins.input', side_effect=[
        '4',
        '1101',
        '0011',
    ])
    def test_case_0(self, input_mock=None):
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(), '2\n')


if __name__ == '__main__':
    unittest.main()
avg_line_length: 21.636364 | max_line_length: 53 | alphanum_fraction: 0.640756

hexsha: f35ccc749791230b2303bfb2ab807b1ea680cbeb | size: 1,756 | ext: py | lang: Python
max_stars: src/plugins/dfplayer/Platform.py @ andreaswatch/piTomation (head 140bff77ad0b84ad17898106c7be7dc48a2d0783, licenses ["MIT"], count null, events null to null)
max_issues: src/plugins/dfplayer/Platform.py @ andreaswatch/piTomation (head 140bff77ad0b84ad17898106c7be7dc48a2d0783, licenses ["MIT"], count null, events null to null)
max_forks: src/plugins/dfplayer/Platform.py @ andreaswatch/piTomation (head 140bff77ad0b84ad17898106c7be7dc48a2d0783, licenses ["MIT"], count null, events null to null)
content:
from modules.base.Configuration import *
from modules.base.Instances import *


@configuration
class DFPlayerPlatformConfiguration(PlatformConfiguration):
    '''Configuration settings for the DFPlayer'''

    @validator('platform')
    def check_platform(cls, v):
        if "plugins.dfplayer" not in v:
            raise ValueError("wrong platform: plugins.dfplayer, is: " + v)
        return v

    tx_pin: str = "GPIO5"
    '''GPIO pin for TX communication to the DFPlayer Mini'''

    baud_rate: Optional[int] = 9600
    '''Baudrate for the serial interface, 9600 by default'''


class Platform(BasePlatform, Logging):
    '''This platform opens a serial console through the tx_pin to the DFPlayer.
    Use the Action to invoke commands like 'play'.
    '''

    def __init__(self, parent: Stackable, config: DFPlayerPlatformConfiguration) -> None:
        super().__init__(parent, config)
        self.configuration = config
        tx_pin = int(str(self.configuration.tx_pin).replace("GPIO", ""))

        try:
            import pigpio
        except Exception as e:
            self.log_error("pigpio library not installed, DFPlayer Mini not initialized")

        try:
            import sys, os

            #find actual path
            realpath = os.path.realpath(__file__)
            dirname = os.path.dirname(realpath)

            #add modules & plugins
            app_path = os.path.join(dirname, "SimpleDFPlayerMini_for_RaspberryPi")
            sys.path.append(app_path)

            from dfplayer import SimpleDFPlayerMini #type: ignore
            self.player = SimpleDFPlayerMini(tx_pin, self.configuration.baud_rate)
        except Exception as e:
            self.log_error(e)
            self.player = None
avg_line_length: 32.518519 | max_line_length: 89 | alphanum_fraction: 0.64123

hexsha: 62185d630b2e4affddab33d145ea543861f963d8 | size: 7,179 | ext: py | lang: Python
max_stars: examples/angular_spec_doa.py @ hwp/apkit (head 35fea20e4cb5b175c43c9a29aade167e886c8368, licenses ["MIT"], count 3, events 2021-04-09T09:33:09.000Z to 2021-12-23T02:34:20.000Z)
max_issues: examples/angular_spec_doa.py @ hwp/apkit (head 35fea20e4cb5b175c43c9a29aade167e886c8368, licenses ["MIT"], count null, events null to null)
max_forks: examples/angular_spec_doa.py @ hwp/apkit (head 35fea20e4cb5b175c43c9a29aade167e886c8368, licenses ["MIT"], count 2, events 2018-11-06T00:44:11.000Z to 2020-12-14T16:11:23.000Z)
content:
#!/usr/bin/env python
"""
angular_spec_doa.py

Written by Weipeng He <heweipeng@gmail.com>
"""

import time # debug
import sys # debug
import math
import argparse

import numpy as np

import apkit

# Microphone 3D coordinates (unit is meter)
_MICROPHONE_COORDINATES = np.array([[-0.0267, 0.0343, 0.2066],
                                    [-0.0267, -0.0343, 0.2066],
                                    [0.0313, 0.0343, 0.2066],
                                    [0.0313, -0.0343, 0.2066]])

# Use signal up to 8 kHz for prediction
_MAX_FREQ = 8000


def load_ncov(path, win_size, hop_size):
    fs, sig = apkit.load_wav(path)
    nfbin = _MAX_FREQ * win_size // fs  # 0-8kHz
    tf = apkit.stft(sig, apkit.cola_hamming, win_size, hop_size)
    tf = tf[:, :, :nfbin]
    return apkit.cov_matrix(tf)


def main(infile, outdir, afunc, win_size, hop_size, block_size, block_hop,
         min_sc):
    stime = time.time()

    # load candidate DOAs
    pts = apkit.load_pts_on_sphere()
    pts = pts[pts[:, 2] > -0.05]  # use upper half of the sphere
    # NOTE: alternatively use only points on the horizontal plane
    # pts = apkit.load_pts_horizontal(360)
    print('%.3fs: load points (%d)' % (time.time() - stime, len(pts)),
          file=sys.stderr)

    # compute neighbors (for peak finding)
    nlist = apkit.neighbor_list(pts, math.pi / 180.0 * 8.0)
    print('%.3fs: neighbor list' % (time.time() - stime), file=sys.stderr)

    # load signal
    fs, sig = apkit.load_wav(infile)
    print('%.3fs: load signal' % (time.time() - stime), file=sys.stderr)

    # compute delays (delay for each candidate DOA and each microphone)
    delays = apkit.compute_delay(_MICROPHONE_COORDINATES, pts, fs=fs)
    print('%.3fs: compute delays' % (time.time() - stime), file=sys.stderr)

    # compute empirical covariance matrix
    tf = apkit.stft(sig, apkit.cola_hamming, win_size, hop_size)
    max_fbin = _MAX_FREQ * win_size // fs  # int
    assert max_fbin <= win_size // 2
    tf = tf[:, :, :max_fbin]  # 0-8kHz
    fbins = np.arange(max_fbin, dtype=float) / win_size
    if block_size is None:
        ecov = apkit.empirical_cov_mat(tf)
    else:
        ecov = apkit.empirical_cov_mat_by_block(tf, block_size, block_hop)
    nch, _, nblock, nfbin = ecov.shape
    print('%.3fs: empirical cov matrix (nfbin=%d)' %
          (time.time() - stime, nfbin),
          file=sys.stderr)

    # local angular spectrum function
    phi = afunc(ecov, delays, fbins)
    print('%.3fs: compute phi' % (time.time() - stime), file=sys.stderr)

    # find local maxima
    lmax = apkit.local_maxima(phi, nlist, th_phi=min_sc)
    print('%.3fs: find local maxima' % (time.time() - stime), file=sys.stderr)

    # merge predictions that have similar azimuth predicitons
    # NOTE: skip this step if the candinate DOAs are on the horizontal plane
    lmax = apkit.merge_lm_on_azimuth(phi, lmax, pts, math.pi / 180.0 * 5.0)
    print('%.3fs: refine local maxima' % (time.time() - stime),
          file=sys.stderr)

    # save results
    # each file contains the predicted angular spectrum for each frame/block
    # each line has five tokens:
    #   (1) x coordinate of the candidate DOA
    #   (2) y coordinate of the candidate DOA
    #   (3) z coordinate of the candidate DOA
    #   (4) angular spectrum value
    #   (5) 1 if this is a local maximum, otherwise 0
    for t in range(nblock):
        with open(f'{outdir}/{t:06d}', 'w') as f:
            for i in range(len(pts)):
                print('%g %g %g %g %d' % (pts[i, 0], pts[i, 1], pts[i, 2],
                                          phi[i, t], 1 if i in lmax[t] else 0),
                      file=f)
    print('%.3fs: save results' % (time.time() - stime), file=sys.stderr)


_FUNCTIONS = {
    'mvdr': apkit.phi_mvdr,
    'mvdr-snr': apkit.phi_mvdr_snr,
    'srp-phat': apkit.phi_srp_phat,
    'srp-nonlin': apkit.phi_srp_phat_nonlin,
    'sevd-music': apkit.sevd_music
}

_FUNCS_WITH_NCOV = {
    'mvdr-ncov': apkit.MVDR_NCOV,
    'mvdr-ncov-snr': apkit.MVDR_NCOV_SNR,
    'mvdr-ncov-sig': apkit.MVDR_NCOV_SIG,
    'music': apkit.MUSIC,
    'gsvd-music': apkit.GSVD_MUSIC
}

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='DOA estimation using angular spectrum methods')
    parser.add_argument('infile',
                        metavar='INPUT_FILE',
                        type=argparse.FileType('rb'),
                        help='input wav file')
    parser.add_argument('outdir',
                        metavar='OUTPUT_DIR',
                        type=str,
                        help='output directory')
    parser.add_argument('-w',
                        '--window-size',
                        metavar='WIN_SIZE',
                        type=int,
                        default=2048,
                        help='(default 2048) analysis window size')
    parser.add_argument('-o',
                        '--hop-size',
                        metavar='HOP_SIZE',
                        type=int,
                        default=1024,
                        help='(default 1024) hop size, '
                        'number of samples between windows')
    parser.add_argument('--block-size',
                        metavar='BLOCK_SIZE',
                        type=int,
                        default=7,
                        help='(default 7) if not None, compute '
                        'spatial covariance matrix in blocks, each include '
                        'block_size frames.')
    parser.add_argument('--block-hop',
                        metavar='BLOCK_HOP',
                        type=int,
                        default=4,
                        help='(default 4) used with block_size, '
                        'number of frames between consecutive blocks.')
    parser.add_argument('-f',
                        '--function',
                        metavar='FUNC',
                        choices=list(_FUNCTIONS.keys()) +
                        list(_FUNCS_WITH_NCOV.keys()),
                        required=True,
                        help='local angular spectrum function')
    parser.add_argument('-n',
                        '--noise',
                        metavar='NOISE_FILE',
                        type=argparse.FileType('rb'),
                        default=None,
                        help='(default None) sample background noise file')
    parser.add_argument('--min-score',
                        metavar='SCORE',
                        type=float,
                        default=0.0,
                        help='(default 0.0) minimun score for peaks')
    args = parser.parse_args()

    if args.function in _FUNCTIONS:
        func = _FUNCTIONS[args.function]
    elif args.function in _FUNCS_WITH_NCOV:
        ncov = load_ncov(args.noise, args.window_size, args.hop_size)
        func = _FUNCS_WITH_NCOV[args.function](ncov)
        args.noise.close()
    else:
        raise KeyError

    main(args.infile, args.outdir, func, args.window_size, args.hop_size,
         args.block_size, args.block_hop, args.min_score)
    args.infile.close()

# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
avg_line_length: 37.586387 | max_line_length: 79 | alphanum_fraction: 0.551887

hexsha: 7103f51c3cb2aebf4df9a2e6e9acc6343f26e93f | size: 4,143 | ext: py | lang: Python
max_stars: zippyshare_downloader/network.py @ TheKrakenss/zippyshare-downloader (head 94a03628319bbaf3815a82665614375fe9d221e8, licenses ["MIT"], count null, events null to null)
max_issues: zippyshare_downloader/network.py @ TheKrakenss/zippyshare-downloader (head 94a03628319bbaf3815a82665614375fe9d221e8, licenses ["MIT"], count null, events null to null)
max_forks: zippyshare_downloader/network.py @ TheKrakenss/zippyshare-downloader (head 94a03628319bbaf3815a82665614375fe9d221e8, licenses ["MIT"], count null, events null to null)
content:
import requests
import aiohttp
import asyncio

__all__ = (
    'Net', 'NetworkObject',
    'set_proxy', 'clear_proxy'
)


# Modified requests session class with __del__ handler
# so the session will be closed properly
class requestsProxiedSession(requests.Session):
    def __init__(self, trust_env=True) -> None:
        super().__init__()
        self.trust_env = trust_env

    def __del__(self):
        self.close()


# Because aiohttp doesn't support proxy from session
# we need to subclass it to proxy each requests without
# add "proxy" parameter to each requests
class aiohttpProxiedSession(aiohttp.ClientSession):
    def __init__(self, proxy, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.proxy = proxy

    def set_proxy(self, proxy):
        self.proxy = proxy

    def remove_proxy(self):
        self.proxy = None

    async def _request(self, *args, **kwargs):
        kwargs.update(proxy=self.proxy)
        return await super()._request(*args, **kwargs)


class NetworkObject:
    def __init__(self, proxy=None, trust_env=False) -> None:
        self._proxy = proxy
        self._aiohttp = None  # type: aiohttpProxiedSession
        self._trust_env = trust_env

        # This will be disable proxy from environtments
        self._requests = requestsProxiedSession(trust_env=self._trust_env)

    @property
    def proxy(self):
        """Return HTTP/SOCKS proxy, return ``None`` if not configured"""
        return self._proxy

    @proxy.setter
    def proxy(self, proxy):
        self.set_proxy(proxy)

    @property
    def trust_env(self):
        """Return ``True`` if http/socks proxy are grabbed from env"""
        return self._trust_env

    @trust_env.setter
    def trust_env(self, yes):
        self._trust_env = yes
        if self.aiohttp:
            self.aiohttp._trust_env = yes
        self._requests.trust_env = yes

    def is_proxied(self):
        """Return ``True`` if requests/aiohttp from :class:`NetworkObject`
        are configured using proxy.
        """
        return self.proxy is not None

    def set_proxy(self, proxy):
        """Setup HTTP/SOCKS proxy for aiohttp/requests"""
        if proxy is None:
            self.clear_proxy()

        self._proxy = proxy
        pr = {
            'http': proxy,
            'https': proxy
        }
        self._requests.proxies.update(pr)
        if self.aiohttp:
            self.aiohttp.set_proxy(proxy)

    def clear_proxy(self):
        """Remove all proxy from aiohttp/requests"""
        self._proxy = None
        self._requests.proxies.clear()
        if self.aiohttp:
            self.aiohttp.remove_proxy()

    @property
    def aiohttp(self):
        """Return proxied aiohttp (if configured)"""
        self._create_aiohttp()
        return self._aiohttp

    @property
    def requests(self):
        """Return proxied requests (if configured)"""
        return self._requests

    def _create_aiohttp(self):
        # Check if current asyncio loop is running
        # if running create aiohttp session
        # if not don't create it
        loop = asyncio.get_event_loop()

        # Raise error if using in another thread
        if self._aiohttp and self._aiohttp._loop != loop:
            raise RuntimeError('created aiohttp session cannot be used in different thread')

        if self._aiohttp is None:
            self._aiohttp = aiohttpProxiedSession(self.proxy)

    def close(self):
        """Close requests session only"""
        self._requests.close()
        self._requests = requestsProxiedSession(self._trust_env)

    async def close_async(self):
        """Close aiohttp & requests session"""
        self.close()
        if not self._aiohttp.closed:
            await self._aiohttp.close()
        self._aiohttp = None


Net = NetworkObject()


def set_proxy(proxy):
    """Setup HTTP/SOCKS proxy for aiohttp/requests

    This is shortcut for :meth:`NetworkObject.set_proxy`.
    """
    Net.set_proxy(proxy)


def clear_proxy():
    """Remove all proxy from aiohttp/requests

    This is shortcut for :meth:`NetworkObject.clear_proxy`.
    """
    Net.clear_proxy()
avg_line_length: 28.770833 | max_line_length: 92 | alphanum_fraction: 0.632875

hexsha: 60e28139e2f3a78e288527dd757b4a6a10686f9f | size: 2,012 | ext: py | lang: Python
max_stars: tutorials/data_augmentation.py @ pmeier/kornia (head 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf, licenses ["ECL-2.0", "Apache-2.0"], count 5, events 2021-04-15T01:20:01.000Z to 2022-01-12T14:12:54.000Z)
max_issues: tutorials/data_augmentation.py @ pmeier/kornia (head 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf, licenses ["ECL-2.0", "Apache-2.0"], count null, events null to null)
max_forks: tutorials/data_augmentation.py @ pmeier/kornia (head 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf, licenses ["ECL-2.0", "Apache-2.0"], count 1, events 2020-10-20T06:57:07.000Z to 2020-10-20T06:57:07.000Z)
content:
"""
Data augmentation on the GPU
============================
In this data you learn how to use `kornia` modules in order to perform the data augmentatio on the GPU in batch mode.
"""
################################
# 1. Create a dummy data loader
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
# from: https://gist.github.com/edgarriba/a781de516c508826f79568d08598efdb
class DummyDataset(Dataset):
def __init__(self, data_root=None):
self.data_root = data_root
self.data_index = self.build_index(self.data_root)
def build_index(self, data_root):
return range(10)
def __len__(self):
return len(self.data_index)
def __getitem__(self, idx):
# get data sample
sample = self.data_index[idx]
# load data, NOTE: modify by cv2.imread(...)
image = torch.rand(3, 240, 320)
label = torch.rand(1, 240, 320)
return dict(images=image, labels=label)
################################
# 2. Define the data augmentation operations
# Thanks to the `kornia` design all the operators can be placed inside inside a `nn.Sequential`.
import kornia
transform = nn.Sequential(
kornia.enhance.AdjustBrightness(0.5),
kornia.enhance.AdjustGamma(gamma=2.),
kornia.enhance.AdjustContrast(0.7),
)
################################
# 3. Run the dataset and perform the data augmentation
# NOTE: change device to 'cuda'
device = torch.device('cpu')
print(f"Running with device: {device}")
# create the dataloader
dataset = DummyDataset()
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)
# get samples and perform the data augmentation
for i_batch, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
labels = sample_batched['labels'].to(device)
# perform the transforms
images = transform(images)
labels = transform(labels)
print(f"Iteration: {i_batch} Image shape: {images.shape}")
avg_line_length: 27.944444 | max_line_length: 117 | alphanum_fraction: 0.66501

hexsha: fa084c04bf22c29c12e71a9eb5bcc3af5b78b976 | size: 916 | ext: py | lang: Python
max_stars: samples/client/petstore/python/test/test_outer_composite.py @ wwadge/swagger-codegen (head 777619d4d106b7b387f8ee8469f4ec43f3cdfdc7, licenses ["Apache-2.0"], count 3, events 2017-05-24T11:22:36.000Z to 2021-02-09T15:26:41.000Z)
max_issues: samples/client/petstore/python/test/test_outer_composite.py @ wwadge/swagger-codegen (head 777619d4d106b7b387f8ee8469f4ec43f3cdfdc7, licenses ["Apache-2.0"], count 5, events 2019-03-06T07:41:25.000Z to 2020-01-20T12:21:53.000Z)
max_forks: samples/client/petstore/python/test/test_outer_composite.py @ wwadge/swagger-codegen (head 777619d4d106b7b387f8ee8469f4ec43f3cdfdc7, licenses ["Apache-2.0"], count 11, events 2017-07-07T18:07:15.000Z to 2021-11-10T02:12:04.000Z)
content:
# coding: utf-8

"""
    Swagger Petstore

    This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\

    OpenAPI spec version: 1.0.0
    Contact: apiteam@swagger.io
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import os
import sys
import unittest

import petstore_api
from petstore_api.rest import ApiException
from petstore_api.models.outer_composite import OuterComposite


class TestOuterComposite(unittest.TestCase):
    """ OuterComposite unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testOuterComposite(self):
        """
        Test OuterComposite
        """
        model = petstore_api.models.outer_composite.OuterComposite()


if __name__ == '__main__':
    unittest.main()
avg_line_length: 21.302326 | max_line_length: 160 | alphanum_fraction: 0.708515

hexsha: f978b44c67f56e47239226e06943d3d8441c95a1 | size: 1,091 | ext: py | lang: Python
max_stars: lambdata/style_example.py @ B-Meister/ds18_lambdata (head 8b5983219513a25357d81a1c4403a6d643b7b6d5, licenses ["MIT"], count null, events null to null)
max_issues: lambdata/style_example.py @ B-Meister/ds18_lambdata (head 8b5983219513a25357d81a1c4403a6d643b7b6d5, licenses ["MIT"], count null, events null to null)
max_forks: lambdata/style_example.py @ B-Meister/ds18_lambdata (head 8b5983219513a25357d81a1c4403a6d643b7b6d5, licenses ["MIT"], count null, events null to null)
content:
""" This file was to practice PEP8 style"""
import math
import sys
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple = (1, 2, 3, 'a')
some_variable = {
'long': 'Long code lines should be wrapped within 79 characters.',
'other': [math.pi, 100, 200, 300, 9876543210,
'This is a long string that goes on'],
'more': {
'inner': 'This whole logical line should be wrapped.',
some_tuple: [1, 20, 300, 40000, 500000000, 60000000000000000]}}
return (some_tuple, some_variable)
def example2():
return {'has_key() is deprecated': True}.has_key({'f': 2}.has_key(''))
# TODO - fix this!
# class Example3(object):
# def __init__ ( self, bar ):
# # Comments should have a space after the hash.
# if bar : bar += 1;
# return bar
# else:
# bar = bar * bar;
# return (sys.path, some_string)
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
avg_line_length: 28.710526 | max_line_length: 75 | alphanum_fraction: 0.604033

hexsha: 76cf07c2e0e759dfcd4fe24ebdaeabd0936e3647 | size: 1,048 | ext: py | lang: Python
max_stars: setup.py @ marujore/stmetrics (head 16beeb262bde7942d0c9bb49772a409e0e2e2f1f, licenses ["MIT"], count 12, events 2020-04-15T14:19:59.000Z to 2022-03-01T09:25:19.000Z)
max_issues: setup.py @ andersonreisoares/time-metrics (head dd988d406fa090738a4d50461e9a46492d2f078e, licenses ["MIT"], count 6, events 2020-07-04T12:57:22.000Z to 2021-06-10T02:28:51.000Z)
max_forks: setup.py @ andersonreisoares/time-metrics (head dd988d406fa090738a4d50461e9a46492d2f078e, licenses ["MIT"], count 4, events 2020-03-17T14:52:00.000Z to 2021-05-14T11:29:34.000Z)
content:
import setuptools

with open("README.rst", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="stmetrics",
    version="0.1.7",
    author="Brazil Data Cube Team",
    author_email="brazildatacube@dpi.inpe.br",
    description="A package to compute features from Satellite Image Time Series (SITS).",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/brazil-data-cube/stmetrics/",
    packages=['stmetrics'],
    install_requires=[
        'scipy',
        'sklearn',
        'pandas',
        'numpy',
        'matplotlib',
        'shapely',
        'descartes',
        'nolds',
        'dtaidistance',
        'rasterio',
        'geopandas',
        'pointpats',
        'fastremap',
        'connected-components-3d',
        'rasterstats',
        'xarray',
        'affine',
        'numba',
        'tqdm'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta"
    ],
)
avg_line_length: 23.818182 | max_line_length: 89 | alphanum_fraction: 0.610687

hexsha: d23c74c48bd63a487c69cc10088dfba3386fa662 | size: 581 | ext: py | lang: Python
max_stars: main.py @ OleHenFo/hyttemon (head 8cacd61c395b330736287db0b8489dd27f72cabc, licenses ["MIT"], count null, events null to null)
max_issues: main.py @ OleHenFo/hyttemon (head 8cacd61c395b330736287db0b8489dd27f72cabc, licenses ["MIT"], count null, events null to null)
max_forks: main.py @ OleHenFo/hyttemon (head 8cacd61c395b330736287db0b8489dd27f72cabc, licenses ["MIT"], count null, events null to null)
content:
import time,random,os
from PIL import Image,ImageDraw,ImageFont

fontTemp = ImageFont.truetype("font/Roboto-Medium.ttf", 136)
textColor = (200,200,200,150)

tempUte = 14
tempInne = 23

while(True):
    im = Image.open("img/bg.png")
    draw = ImageDraw.Draw(im)

    tempUte = 14 + random.randint(0,1)-1
    tempInne = 23 + random.randint(0,1)-1

    draw.text((50, 100),str(tempUte)+"°",textColor,font=fontTemp)
    draw.text((560, 290),str(tempInne)+"°",textColor,font=fontTemp)

    im.save("img/image.png")
    os.system("sudo fbi --autozoom --noverbose --vt 1 img/image.png")
    time.sleep(2)
avg_line_length: 27.666667 | max_line_length: 67 | alphanum_fraction: 0.69191

hexsha: 876c9d812d6289519cca4791c9c09fed7ae12d24 | size: 13,419 | ext: py | lang: Python
max_stars: gpvdm_gui/gui/scan_io.py @ roderickmackenzie/gpvdm (head 914fd2ee93e7202339853acaec1d61d59b789987, licenses ["BSD-3-Clause"], count 12, events 2016-09-13T08:58:13.000Z to 2022-01-17T07:04:52.000Z)
max_issues: gpvdm_gui/gui/scan_io.py @ roderickmackenzie/gpvdm (head 914fd2ee93e7202339853acaec1d61d59b789987, licenses ["BSD-3-Clause"], count 3, events 2017-11-11T12:33:02.000Z to 2019-03-08T00:48:08.000Z)
max_forks: gpvdm_gui/gui/scan_io.py @ roderickmackenzie/gpvdm (head 914fd2ee93e7202339853acaec1d61d59b789987, licenses ["BSD-3-Clause"], count 6, events 2019-01-03T06:17:12.000Z to 2022-01-01T15:59:00.000Z)
content:
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package scan_io
# IO functions for the scanning simulation parameters.
#
import sys
import os
import shutil
import gc
from inp import inp_get_token_value
from scan_tree import tree_load_flat_list
from scan_tree import tree_gen_flat_list
from scan_tree import tree_gen
from scan_tree import tree_save_flat_list
from server_io import server_find_simulations_to_run
from server import server_break
from numpy import std
from error_dlg import error_dlg
from scan_tree import scan_tree_leaf
from process_events import process_events
import i18n
_ = i18n.language.gettext
import zipfile
from util_zip import archive_add_dir
from inp import inp
from scan_program_line import scan_program_line
from clean_sim import ask_to_delete
from progress_class import progress_class
def scan_next_archive(sim_dir):
i=0
while(1):
name="archive"+str(i)+".zip"
full_name=os.path.join(sim_dir,name)
if os.path.isfile(full_name)==False:
return name
i=i+1
def scan_archive(sim_dir,progress_window=None):
own_progress_window=False
if progress_window==None:
progress_window=progress_class()
progress_window.show()
progress_window.start()
own_progress_window=True
archive_path=os.path.join(sim_dir,"build_archive.zip")
if os.path.isfile(archive_path)==True:
os.remove(archive_path)
zf = zipfile.ZipFile(archive_path, 'a',zipfile.ZIP_DEFLATED)
l=os.listdir(sim_dir)
for i in range(0,len(l)):
dir_to_zip=os.path.join(sim_dir,l[i])
if os.path.isdir(dir_to_zip)==True:
archive_add_dir(archive_path,dir_to_zip,sim_dir,zf=zf,remove_src_dir=True,exclude=["gmon.out"])
progress_window.set_fraction(float(i)/float(len(l)))
progress_window.set_text(_("Adding: ")+l[i])
#if server_break()==True:
# break
process_events()
zf.close()
os.rename(archive_path, os.path.join(sim_dir,scan_next_archive(sim_dir)))
if own_progress_window==True:
progress_window.stop()
def scan_list_simulations(dir_to_search):
found_dirs=[]
for root, dirs, files in os.walk(dir_to_search):
for name in files:
# full_name=os.path.join(root, name)
if name=="sim.gpvdm":
found_dirs.append(root)
return found_dirs
def scan_plot_fits(dir_to_search):
files=os.listdir(dir_to_search)
for i in range(0,len(files)):
if files[i].endswith(".jpg"):
os.remove(os.path.join(dir_to_search,files[i]))
#print("remove",os.path.join(dir_to_search,files[i]))
sim_dirs=tree_load_flat_list(dir_to_search)
for i in range(0,len(sim_dirs)):
os.chdir(sim_dirs[i])
name=sim_dirs[i].replace("/","_")
os.system("gnuplot fit.plot >plot.eps")
os.system("gs -dNOPAUSE -r600 -dEPSCrop -sDEVICE=jpeg -sOutputFile="+os.path.join(dir_to_search,name+".jpg")+" plot.eps -c quit")
os.chdir(dir_to_search)
def scan_get_converged_status(fit_log_path):
error=False
if os.path.isfile(fit_log_path):
f = open(fit_log_path, "r")
lines = f.readlines()
f.close()
for l in range(0, len(lines)):
lines[l]=lines[l].rstrip()
if len(lines)>4:
error=float(lines[len(lines)-2].split()[1])
return error
def scan_list_unconverged_simulations(dir_to_search):
found_dirs=[]
sim_dirs=tree_load_flat_list(dir_to_search)
for i in range(0,len(sim_dirs)):
add=False
fit_log=os.path.join(sim_dirs[i],'fitlog.dat')
error=scan_get_converged_status(fit_log)
if error==False:
add=True
elif error>0.1:
add=True
if add==True:
found_dirs.append(sim_dirs[i])
return found_dirs
class report_token():
def __init__(self,file_name,token):
self.file_name=file_name
self.token=token
self.values=[]
def scan_gen_report(path):
tokens=[]
tokens.append(report_token("dos0.inp","#Etrape"))
tokens.append(report_token("dos0.inp","#mueffe"))
tokens.append(report_token("dos0.inp","#Ntrape"))
tokens.append(report_token("dos0.inp","#srhsigman_e"))
tokens.append(report_token("dos0.inp","#srhsigmap_e"))
tokens.append(report_token("dos0.inp","#srhsigman_h"))
tokens.append(report_token("dos0.inp","#srhsigmap_h"))
tokens.append(report_token("sim/thick_light/sim_info.dat","#jv_pmax_tau"))
tokens.append(report_token("sim/thick_light/sim_info.dat","#jv_pmax_mue"))
tokens.append(report_token("sim/thick_light/sim_info.dat","#jv_pmax_muh"))
tokens.append(report_token("jv1.inp","#jv_Rcontact"))
tokens.append(report_token("jv1.inp","#jv_Rshunt"))
simulation_dirs=tree_load_flat_list(path)
errors=[]
for i in range(0,len(simulation_dirs)):
print(simulation_dirs[i])
error=scan_get_converged_status(os.path.join(simulation_dirs[i],"fitlog.dat"))
print("error",error)
errors.append(error)
for ii in range(0,len(tokens)):
value=inp_get_token_value(os.path.join(simulation_dirs[i],tokens[ii].file_name), tokens[ii].token)
#print(os.path.join(simulation_dirs[i],tokens[ii].file_name), tokens[ii].token,value)
if value!=None:
print(tokens[ii].token,str(value))
tokens[ii].values.append(float(value))
print("Errors:",errors)
for ii in range(0,len(tokens)):
print(tokens[ii].token,tokens[ii].values,sum(tokens[ii].values)/len(tokens[ii].values),std(tokens[ii].values))
for ii in range(0,len(tokens)):
print(tokens[ii].token,sum(tokens[ii].values)/len(tokens[ii].values),std(tokens[ii].values))
def scan_build_nested_simulation(root_simulation,nest_simulation):
if os.path.isdir(nest_simulation)==False:
print("Path ",nest_simulation,"does not exist")
sys.exit(0)
progress_window=progress_class()
progress_window.show()
progress_window.start()
process_events()
nest_simulation_name=os.path.basename(nest_simulation)
program_list=tree_load_program(nest_simulation)
files = os.listdir(root_simulation)
simulations=[]
for i in range(0,len(files)):
if os.path.isfile(os.path.join(root_simulation,files[i],"sim.gpvdm"))==True:
simulations.append(files[i])
flat_simulation_list=[]
path=os.getcwd()
for i in range(0,len(simulations)):
dest_name=os.path.join(root_simulation,simulations[i],nest_simulation_name)
base_dir=os.path.join(root_simulation,simulations[i])
#print(">>>",dest_name,base_dir,"<<",nest_simulation_name)
tree_gen(dest_name,flat_simulation_list,program_list,base_dir)
progress_window.set_fraction(float(i)/float(len(simulations)))
progress_window.set_text(simulations[i])
process_events()
progress_window.stop()
os.chdir(path)
flat_simulation_list=tree_gen_flat_list(root_simulation,level=1)
#print(flat_simulation_list)
tree_save_flat_list(root_simulation,flat_simulation_list)
return
def scan_clean_nested_simulation(root_simulation,nest_simulation):
files = os.listdir(root_simulation)
simulations=[]
for i in range(0,len(files)):
if os.path.isfile(os.path.join(root_simulation,files[i],"sim.gpvdm"))==True:
simulations.append(files[i])
for i in range(0,len(simulations)):
dest_name=os.path.join(root_simulation,simulations[i])
files = os.listdir(dest_name)
for file in files:
if file.endswith(".inp") or file.endswith(".gpvdm") or file.endswith(".dat") :
os.remove(os.path.join(dest_name,file))
return
def scan_clean_unconverged(parent,dir_to_clean):
dirs_to_del=[]
dirs_to_del=scan_list_unconverged_simulations(dir_to_clean)
ask_to_delete(parent,dirs_to_del)
def scan_push_to_hpc(base_dir,only_unconverged):
config_file=os.path.join(os.getcwd(),"server.inp")
#print(config_file)
hpc_path=inp_get_token_value(config_file, "#hpc_dir")
hpc_path=os.path.abspath(hpc_path)
if os.path.isdir(hpc_path)==True:
hpc_files=[]
hpc_files=scan_list_simulations(hpc_path)
#print("hpc files=",hpc_files)
delete_files(hpc_files)
files=[]
if only_unconverged==True:
files=scan_list_unconverged_simulations(base_dir)
else:
files=scan_list_simulations(base_dir)
#print("copy files=",files)
for i in range(0,len(files)):
dest_path=os.path.join(hpc_path,files[i][len(base_dir)+1:])
#print(dest_path)
shutil.copytree(files[i], dest_path,symlinks=True)
else:
print("HPC dir not found",hpc_path)
def scan_import_from_hpc(base_dir):
config_file=os.path.join(os.getcwd(),"server.inp")
hpc_path=inp_get_token_value(config_file, "#hpc_dir")
hpc_path=os.path.abspath(hpc_path)
if os.path.isdir(hpc_path)==True:
hpc_files=scan_list_simulations(hpc_path)
for i in range(0,len(hpc_files)):
if hpc_files[i].endswith("orig")==False:
src_path=hpc_files[i]
dest_path=os.path.join(base_dir,hpc_files[i][len(hpc_path)+1:])
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.copytree(src_path, dest_path, symlinks=False, ignore=None)
#print(src_path,dest_path)
else:
print("HPC dir not found",hpc_path)
class scan_io:
def __init__(self):
self.parent_window=None
self.interactive=True
self.scan_dir=None
self.base_dir=None
self.human_name=None
self.config_file=None
self.program_list=[]
self.myserver=None
def load(self,file_name):
self.program_list=[]
self.config_file=file_name
f=inp()
f.load(self.config_file)
self.human_name=f.get_token("#scan_name")
self.scan_dir=os.path.join(os.path.dirname(self.config_file),self.human_name)
pos=2
mylen=int(f.lines[pos])
pos=pos+1
for i in range(0, mylen):
item=scan_program_line()
#item.file=f.lines[pos]
#item.token=f.lines[pos+1]
item.human_name=f.lines[pos+2]
item.values=f.lines[pos+3]
item.opp=f.lines[pos+4]
self.program_list.append(item)
pos=pos+6
#print(f.lines)
def save(self):
f=inp()
f.lines=[]
f.lines.append("#scan_name")
f.lines.append(self.human_name)
#print(self.tab.rowCount())
f.lines.append(str(len(self.program_list)))
for item in self.program_list:
#print(i)
f.lines.append("notused")
f.lines.append("notused")
f.lines.append(item.human_name)
f.lines.append(item.values)
f.lines.append(item.opp)
f.lines.append("notused")
f.save_as(self.config_file)
if os.path.isfile(os.path.join(self.scan_dir,"scan_config.inp"))==False:
a = open(os.path.join(self.scan_dir,"scan_config.inp"), "w")
a.write("#scan_config_args\n")
a.write("\n")
a.write("#scan_config_compress\n")
a.write("false\n")
a.write("#end\n")
a.close()
def set_path(self,scan_dir):
self.scan_dir=scan_dir
def set_base_dir(self,base_dir):
self.base_dir=base_dir
def clean_dir(self):
dirs_to_del=[]
listing=os.listdir(self.scan_dir)
for i in range(0,len(listing)):
full_path=os.path.join(self.scan_dir,listing[i])
if os.path.isdir(full_path)==True:
dirs_to_del.append(full_path)
ask_to_delete(self.parent_window,dirs_to_del,interactive=self.interactive)
def apply_constants_to_dir(self,folder):
leaf=scan_tree_leaf()
leaf.json_load(os.path.join(folder,"sim.json"))
leaf.directory=folder
leaf.program_list=self.program_list
leaf.apply_constants()
leaf.apply_python_scripts()
leaf.json_save()
def run(self,run_simulation=True,generate_simulations=True,args=""):
f=inp()
f.load(os.path.join(self.scan_dir,"scan_config.inp"))
args=f.get_token("#scan_config_args")
if args==False:
args=""
args=args+" --mindbustx"
if self.scan_dir=="":
error_dlg(self.parent_window,_("No sim dir name"))
return
self.make_dir()
if generate_simulations==True:
self.build_scan()
if run_simulation==True:
commands=tree_load_flat_list(self.scan_dir)
if commands==False:
error_dlg(self.parent_window,_("I can't load flat_list.inp. This usually means there is a problem with how you have set up your scan."))
return
for i in range(0, len(commands)):
self.myserver.add_job(commands[i],args)
#print("Adding job"+commands[i])
self.myserver.start()
gc.collect()
def build_scan(self):
self.clean_dir()
flat_simulation_list=[]
path=os.getcwd()
#print(self.scan_dir,flat_simulation_list,self.program_list,self.base_dir)
if tree_gen(self.scan_dir,flat_simulation_list,self.program_list,self.base_dir)==False:
error_dlg(self.parent_window,_("Problem generating tree."))
return False
os.chdir(path)
tree_save_flat_list(self.scan_dir,flat_simulation_list)
def make_dir(self):
if os.path.isdir(self.scan_dir)==False:
os.makedirs(self.scan_dir)
def rename(self,new_name):
new_path=os.path.join(os.path.dirname(self.scan_dir),new_name)
f=inp()
f.load(self.config_file)
f.set_token("#scan_name",new_name)
f.save()
self.human_name=new_name
shutil.move(self.scan_dir, new_path)
self.scan_dir=new_path
def clone(self,new_human,new_config_file):
self.scan_dir=os.path.join(os.path.dirname(self.scan_dir),new_human)
print(self.config_file)
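# Usage sketch (illustrative, not part of the original file): a scan is
# normally driven from the gpvdm GUI, which supplies parent_window and
# myserver before run() is called. The paths and file name below are
# made-up examples, shown only to outline the scan_io API defined above.
#
#   scan = scan_io()
#   scan.load("/path/to/simulation/scan0.inp")
#   scan.set_base_dir("/path/to/simulation")
#   scan.run(run_simulation=False, generate_simulations=True)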
| 27.441718
| 141
| 0.743274
|
c4260d4f1439eba4b39cd2a41b38b213237c83b7
| 19,696
|
py
|
Python
|
gtm.py
|
hkaneko1985/structure_generator_based_on_r_group
|
56fa945d9caf0f342b4457a9293d33af1a152bf8
|
[
"MIT"
] | 5
|
2019-05-19T19:45:14.000Z
|
2020-10-30T09:01:44.000Z
|
gtm.py
|
hkaneko1985/structure_generator_based_on_r_group
|
56fa945d9caf0f342b4457a9293d33af1a152bf8
|
[
"MIT"
] | null | null | null |
gtm.py
|
hkaneko1985/structure_generator_based_on_r_group
|
56fa945d9caf0f342b4457a9293d33af1a152bf8
|
[
"MIT"
] | 4
|
2019-08-25T06:09:44.000Z
|
2022-02-01T01:19:14.000Z
|
# -*- coding: utf-8 -*-
# %reset -f
"""
@author: Hiromasa Kaneko
"""
# GTM (generative topographic mapping) class
import math
import numpy as np
import numpy.matlib
from scipy.spatial.distance import cdist
from scipy.stats import norm, multivariate_normal
from sklearn.decomposition import PCA
class GTM:
def __init__(self, shape_of_map=[30, 30], shape_of_rbf_centers=[10, 10],
variance_of_rbfs=4, lambda_in_em_algorithm=0.001,
number_of_iterations=200, display_flag=1, sparse_flag=False):
self.shape_of_map = shape_of_map
self.shape_of_rbf_centers = shape_of_rbf_centers
self.variance_of_rbfs = variance_of_rbfs
self.lambda_in_em_algorithm = lambda_in_em_algorithm
self.number_of_iterations = number_of_iterations
self.display_flag = display_flag
self.sparse_flag = sparse_flag
def calculate_grids(self, num_x, num_y):
"""
Calculate grid coordinates on the GTM map
Parameters
----------
num_x : int
number_of_x_grids
num_y : int
number_of_y_grids
"""
grids_x, grids_y = np.meshgrid(np.arange(0.0, num_x), np.arange(0.0, num_y))
grids = np.c_[np.ndarray.flatten(grids_x)[:, np.newaxis],
np.ndarray.flatten(grids_y)[:, np.newaxis]]
max_grids = grids.max(axis=0)
grids[:, 0] = 2 * (grids[:, 0] - max_grids[0] / 2) / max_grids[0]
grids[:, 1] = 2 * (grids[:, 1] - max_grids[1] / 2) / max_grids[1]
return grids
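# Example (illustrative): calculate_grids(3, 3) returns the nine points of a
# regular 3 x 3 grid rescaled to span [-1, 1] x [-1, 1], i.e. every
# combination of {-1, 0, 1} in the two map coordinates.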
def fit(self, input_dataset):
"""
Train the GTM map
Parameters
----------
input_dataset : numpy.array or pandas.DataFrame
Training dataset for GTM.
input_dataset must be autoscaled.
"""
input_dataset = np.array(input_dataset)
self.success_flag = True
self.shape_of_map = [int(self.shape_of_map[0]), int(self.shape_of_map[1])]
self.shape_of_rbf_centers = [int(self.shape_of_rbf_centers[0]), int(self.shape_of_rbf_centers[1])]
# make rbf grids
self.rbf_grids = self.calculate_grids(self.shape_of_rbf_centers[0],
self.shape_of_rbf_centers[1])
# make map grids
self.map_grids = self.calculate_grids(self.shape_of_map[0],
self.shape_of_map[1])
# calculate phi of map_grids and rbf_grids
distance_between_map_and_rbf_grids = cdist(self.map_grids, self.rbf_grids,
'sqeuclidean')
self.phi_of_map_rbf_grids = np.exp(-distance_between_map_and_rbf_grids / 2.0
/ self.variance_of_rbfs)
# PCA for initializing W and beta
pca_model = PCA(n_components=3)
pca_model.fit_transform(input_dataset)
if np.linalg.matrix_rank(self.phi_of_map_rbf_grids) < min(self.phi_of_map_rbf_grids.shape):
self.success_flag = False
return
self.W = np.linalg.pinv(self.phi_of_map_rbf_grids).dot(
self.map_grids.dot(pca_model.components_[0:2, :]))
self.beta = min(pca_model.explained_variance_[2], 1 / (
(
cdist(self.phi_of_map_rbf_grids.dot(self.W),
self.phi_of_map_rbf_grids.dot(self.W))
+ np.diag(np.ones(np.prod(self.shape_of_map)) * 10 ** 100)
).min(axis=0).mean() / 2))
self.bias = input_dataset.mean(axis=0)
self.mixing_coefficients = np.ones(np.prod(self.shape_of_map)) / np.prod(self.shape_of_map)
# EM algorithm
phi_of_map_rbf_grids_with_one = np.c_[self.phi_of_map_rbf_grids,
np.ones((np.prod(self.shape_of_map), 1))]
for iteration in range(self.number_of_iterations):
responsibilities = self.responsibility(input_dataset)
phi_t_G_phi_etc = phi_of_map_rbf_grids_with_one.T.dot(
np.diag(responsibilities.sum(axis=0)).dot(phi_of_map_rbf_grids_with_one)
) + self.lambda_in_em_algorithm / self.beta * np.identity(
phi_of_map_rbf_grids_with_one.shape[1])
if 1 / np.linalg.cond(phi_t_G_phi_etc) < 10 ** -15:
self.success_flag = False
break
self.W_with_one = np.linalg.inv(phi_t_G_phi_etc).dot(
phi_of_map_rbf_grids_with_one.T.dot(responsibilities.T.dot(input_dataset)))
self.beta = input_dataset.size / (responsibilities
* cdist(input_dataset,
phi_of_map_rbf_grids_with_one.dot(self.W_with_one)) ** 2).sum()
self.W = self.W_with_one[:-1, :]
self.bias = self.W_with_one[-1, :]
if self.sparse_flag == True:
self.mixing_coefficients = sum(responsibilities) / input_dataset.shape[0]
if self.display_flag:
print("{0}/{1} ... likelihood: {2}".format(iteration + 1, self.number_of_iterations,
self.likelihood_value))
def calculate_distance_between_phi_w_and_input_distances(self, input_dataset):
"""
Calculate squared Euclidean distances between phi*W (the map grid points mapped into data space) and the input dataset
Parameters
----------
input_dataset : numpy.array
Training dataset for GTM.
Returns
-------
distance : squared Euclidean distances between phi*W and the input dataset
"""
distance = cdist(
input_dataset,
self.phi_of_map_rbf_grids.dot(self.W)
+ np.ones((np.prod(self.shape_of_map), 1)).dot(
np.reshape(self.bias, (1, len(self.bias)))
),
'sqeuclidean')
return distance
def responsibility(self, input_dataset):
"""
Get responsibilities and likelihood.
Parameters
----------
input_dataset : numpy.array or pandas.DataFrame
Training dataset for GTM.
input_dataset must be autoscaled.
Returns
-------
reponsibilities : numpy.array
Responsibilities of input_dataset for each grid point.
likelihood_value : float
likelihood of input_dataset.
"""
input_dataset = np.array(input_dataset)
distance = self.calculate_distance_between_phi_w_and_input_distances(input_dataset)
rbf_for_responsibility = np.exp(-self.beta / 2.0 * distance) * self.mixing_coefficients
sum_of_rbf_for_responsibility = rbf_for_responsibility.sum(axis=1)
zero_sample_index = np.where(sum_of_rbf_for_responsibility == 0)[0]
if len(zero_sample_index):
sum_of_rbf_for_responsibility[zero_sample_index] = 1
rbf_for_responsibility[zero_sample_index, :] = 1 / rbf_for_responsibility.shape[1]
reponsibilities = rbf_for_responsibility / np.reshape(sum_of_rbf_for_responsibility,
(rbf_for_responsibility.shape[0], 1))
self.likelihood_value = (np.log((self.beta / 2.0 / np.pi) ** (input_dataset.shape[1] / 2.0) /
np.prod(self.shape_of_map) * rbf_for_responsibility.sum(axis=1))).sum()
return reponsibilities
def likelihood(self, input_dataset):
"""
Get likelihood.
Parameters
----------
input_dataset : numpy.array or pandas.DataFrame
Training dataset for GTM.
input_dataset must be autoscaled.
Returns
-------
likelihood : scalar
likelihood of input_dataset.
"""
input_dataset = np.array(input_dataset)
distance = self.calculate_distance_between_phi_w_and_input_distances(input_dataset)
return (np.log((self.beta / 2.0 / np.pi) ** (input_dataset.shape[1] / 2.0) *
(np.exp(-self.beta / 2.0 * distance) * self.mixing_coefficients).sum(axis=1))).sum()
def mlr(self, X, y):
"""
Train the MLR model
Parameters
----------
X, y : numpy.array or pandas.DataFrame
Both X and y must NOT be autoscaled.
"""
X = np.array(X)
y = np.array(y)
y = np.reshape(y, (len(y), 1))
# autoscaling
self.Xmean = X.mean(axis=0)
self.Xstd = X.std(axis=0, ddof=1)
autoscaled_X = (X - self.Xmean) / self.Xstd
self.y_mean = y.mean(axis=0)
self.ystd = y.std(axis=0, ddof=1)
autoscaled_y = (y - self.y_mean) / self.ystd
self.regression_coefficients = np.linalg.inv(
np.dot(autoscaled_X.T, autoscaled_X)
).dot(autoscaled_X.T.dot(autoscaled_y))
calculated_y = (autoscaled_X.dot(self.regression_coefficients)
* self.ystd + self.y_mean)
self.sigma = sum((y - calculated_y) ** 2) / len(y)
def mlr_predict(self, X):
"""
Predict y-values from X-values using the MLR model
Parameters
----------
X : numpy.array or pandas.DataFrame
X must NOT be autoscaled.
"""
autoscaled_X = (X - self.Xmean) / self.Xstd
return (autoscaled_X.dot(self.regression_coefficients)
* self.ystd + self.y_mean)
def inverse_gtm_mlr(self, target_y_value):
"""
Predict X-values from a y-value using the MLR model
Parameters
----------
target_y_value : a target y-value
scalar
Returns
-------
responsibilities_inverse can be used to discuss assigned grids on
the GTM map.
"""
# target_y_values = np.ndarray.flatten(np.array(target_y_values))
myu_i = self.phi_of_map_rbf_grids.dot(self.W) + np.ones(
(np.prod(self.shape_of_map), 1)).dot(np.reshape(self.bias, (1, len(self.bias))))
sigma_i = np.diag(np.ones(len(self.regression_coefficients))) / self.beta
inverse_sigma_i = np.diag(np.ones(len(self.regression_coefficients))) * self.beta
delta_i = np.linalg.inv(inverse_sigma_i
+ self.regression_coefficients.dot(self.regression_coefficients.T) / self.sigma)
# for target_y_value in target_y_values:
pxy_means = np.empty(myu_i.shape)
for i in range(pxy_means.shape[0]):
pxy_means[i, :] = np.ndarray.flatten(
delta_i.dot(
self.regression_coefficients / self.sigma * target_y_value
+ inverse_sigma_i.dot(np.reshape(myu_i[i, :], [myu_i.shape[1], 1]))
))
pyz_means = myu_i.dot(self.regression_coefficients)
pyz_var = self.sigma + self.regression_coefficients.T.dot(
sigma_i.dot(self.regression_coefficients))
pyzs = np.empty(len(pyz_means))
for i in range(len(pyz_means)):
pyzs[i] = norm.pdf(target_y_value, pyz_means[i], pyz_var ** (1 / 2))
responsibilities_inverse = pyzs / pyzs.sum()
estimated_x_mean = responsibilities_inverse.dot(pxy_means)
estimated_x_mode = pxy_means[np.argmax(responsibilities_inverse), :]
# pyzs : vector of probability of y given zi, which can be used to
# discuss applicability domains
return estimated_x_mean, estimated_x_mode, responsibilities_inverse
def gtmr_predict(self, input_variables, numbers_of_input_variables, numbers_of_output_variables):
"""
Predict values of variables for forward analysis (regression) and inverse analysis
Parameters
----------
input_variables: numpy.array or pandas.DataFrame
(autoscaled) m x n matrix of input variables of training data or test data,
m is the number of samples and
n is the number of input variables
When this is X-variables, it is forward analysis (regression) and
when this is Y-variables, it is inverse analysis
numbers_of_input_variables: list or numpy.array
vector of numbers of input variables
When this is numbers of X-variables, it is forward analysis (regression) and
when this is numbers of Y-variables, it is inverse analysis
numbers_of_output_variables: list or numpy.array
vector of numbers of output variables
When this is numbers of Y-variables, it is forward analysis (regression) and
when this is numbers of X-variables, it is inverse analysis
Returns
-------
estimated_y_mean : numpy.array
(autoscaled) m x k matrix of output variables estimated using the weighted mean,
k is the number of output variables
estimated_y_mode : numpy.array
(autoscaled) m x k matrix of output variables estimated using the mode of the weights
responsibilities : numpy.array
m x l matrix of responsibilities (weights), l is the number of map grid points
px : numpy.array
m x l matrix of probability densities of the input variables for each grid point
"""
input_variables = np.array(input_variables)
if input_variables.ndim == 0:
input_variables = np.reshape(input_variables, (1, 1))
elif input_variables.ndim == 1:
input_variables = np.reshape(input_variables, (1, input_variables.shape[0]))
if self.success_flag:
means = self.phi_of_map_rbf_grids.dot(self.W) + np.ones(
(np.prod(self.shape_of_map), 1)
).dot(np.reshape(self.bias, (1, len(self.bias))))
input_means = means[:, numbers_of_input_variables]
output_means = means[:, numbers_of_output_variables]
input_covariances = np.diag(np.ones(len(numbers_of_input_variables))) / self.beta
px = np.empty([input_variables.shape[0], input_means.shape[0]])
for sample_number in range(input_means.shape[0]):
px[:, sample_number] = multivariate_normal.pdf(input_variables, input_means[sample_number, :],
input_covariances)
responsibilities = px.T / px.T.sum(axis=0)
responsibilities = responsibilities.T
estimated_y_mean = responsibilities.dot(output_means)
estimated_y_mode = output_means[np.argmax(responsibilities, axis=1), :]
else:
estimated_y_mean = np.zeros(input_variables.shape[0])
estimated_y_mode = np.zeros(input_variables.shape[0])
px = np.empty([input_variables.shape[0], np.prod(self.shape_of_map)])
responsibilities = np.empty([input_variables.shape[0], np.prod(self.shape_of_map)])
return estimated_y_mean, estimated_y_mode, responsibilities, px
def gtmr_cv_opt(self, dataset, numbers_of_output_variables, candidates_of_shape_of_map,
candidates_of_shape_of_rbf_centers,
candidates_of_variance_of_rbfs, candidates_of_lambda_in_em_algorithm, fold_number,
number_of_iterations):
self.display_flag = 0
self.number_of_iterations = number_of_iterations
dataset = np.array(dataset)
numbers_of_output_variables = np.array(numbers_of_output_variables)
numbers_of_input_variables = np.arange(dataset.shape[1])
numbers_of_input_variables = np.delete(numbers_of_input_variables, numbers_of_output_variables)
min_number = math.floor(dataset.shape[0] / fold_number)
mod_number = dataset.shape[0] - min_number * fold_number
index = np.matlib.repmat(np.arange(1, fold_number + 1, 1), 1, min_number).ravel()
if mod_number != 0:
index = np.r_[index, np.arange(1, mod_number + 1, 1)]
# np.random.seed(999)
fold_index_in_cv = np.random.permutation(index)
np.random.seed()
# grid search
y = np.ravel(dataset[:, numbers_of_output_variables])
parameters_and_r2_cv = []
all_calculation_numbers = len(candidates_of_shape_of_map) * len(candidates_of_shape_of_rbf_centers) * len(
candidates_of_variance_of_rbfs) * len(candidates_of_lambda_in_em_algorithm)
calculation_number = 0
for shape_of_map_grid in candidates_of_shape_of_map:
for shape_of_rbf_centers_grid in candidates_of_shape_of_rbf_centers:
for variance_of_rbfs_grid in candidates_of_variance_of_rbfs:
for lambda_in_em_algorithm_grid in candidates_of_lambda_in_em_algorithm:
calculation_number += 1
estimated_y_in_cv = np.zeros([dataset.shape[0], len(numbers_of_output_variables)])
success_flag_cv = True
for fold_number_in_cv in np.arange(1, fold_number + 1, 1):
dataset_train_in_cv = dataset[fold_index_in_cv != fold_number_in_cv, :]
dataset_test_in_cv = dataset[fold_index_in_cv == fold_number_in_cv, :]
self.shape_of_map = [shape_of_map_grid, shape_of_map_grid]
self.shape_of_rbf_centers = [shape_of_rbf_centers_grid, shape_of_rbf_centers_grid]
self.variance_of_rbfs = variance_of_rbfs_grid
self.lambda_in_em_algorithm = lambda_in_em_algorithm_grid
self.fit(dataset_train_in_cv)
if self.success_flag:
estimated_y_mean, estimated_y_mode, responsibilities, px = self.gtmr_predict(
dataset_test_in_cv[:, numbers_of_input_variables], numbers_of_input_variables,
numbers_of_output_variables)
estimated_y_in_cv[fold_index_in_cv == fold_number_in_cv, :] = estimated_y_mode
else:
success_flag_cv = False
break
if success_flag_cv:
y_pred = np.ravel(estimated_y_in_cv)
r2_cv = float(1 - sum((y - y_pred) ** 2) / sum((y - y.mean()) ** 2))
else:
r2_cv = -10 ** 10
parameters_and_r2_cv.append(
[shape_of_map_grid, shape_of_rbf_centers_grid, variance_of_rbfs_grid,
lambda_in_em_algorithm_grid,
r2_cv])
print([calculation_number, all_calculation_numbers, r2_cv])
# optimized GTMR
parameters_and_r2_cv = np.array(parameters_and_r2_cv)
optimized_hyperparameter_number = \
np.where(parameters_and_r2_cv[:, 4] == np.max(parameters_and_r2_cv[:, 4]))[0][0]
self.shape_of_map = [int(parameters_and_r2_cv[optimized_hyperparameter_number, 0]),
int(parameters_and_r2_cv[optimized_hyperparameter_number, 0])]
self.shape_of_rbf_centers = [int(parameters_and_r2_cv[optimized_hyperparameter_number, 1]),
int(parameters_and_r2_cv[optimized_hyperparameter_number, 1])]
self.variance_of_rbfs = parameters_and_r2_cv[optimized_hyperparameter_number, 2]
self.lambda_in_em_algorithm = parameters_and_r2_cv[optimized_hyperparameter_number, 3]
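# --- Usage sketch (illustrative, not part of the original file) ---
# Fit the GTM defined above on random autoscaled data and map each sample to
# 2-D coordinates via its responsibilities. The dataset and hyperparameter
# values below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    np.random.seed(0)
    dataset = np.random.rand(100, 5)
    autoscaled_dataset = (dataset - dataset.mean(axis=0)) / dataset.std(axis=0, ddof=1)
    model = GTM(shape_of_map=[10, 10], shape_of_rbf_centers=[5, 5],
                variance_of_rbfs=4, lambda_in_em_algorithm=0.001,
                number_of_iterations=50, display_flag=0)
    model.fit(autoscaled_dataset)
    if model.success_flag:
        responsibilities = model.responsibility(autoscaled_dataset)
        # responsibility-weighted mean position of each sample on the map
        map_coordinates = responsibilities.dot(model.map_grids)
        print('map coordinates of the first sample:', map_coordinates[0, :])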
| 46.895238
| 117
| 0.600579
|
f13a803e440d714d719cbe16ae982eb996d79408
| 1,563
|
py
|
Python
|
tools/tiny-test-fw/CIAssignExampleTest.py
|
Mixerito/esp-idf
|
20a662936483f44ee9c8d16f3251a5a1191ca6e5
|
[
"Apache-2.0"
] | 14
|
2018-04-23T20:34:38.000Z
|
2022-02-03T05:06:57.000Z
|
tools/tiny-test-fw/CIAssignExampleTest.py
|
Mixerito/esp-idf
|
20a662936483f44ee9c8d16f3251a5a1191ca6e5
|
[
"Apache-2.0"
] | null | null | null |
tools/tiny-test-fw/CIAssignExampleTest.py
|
Mixerito/esp-idf
|
20a662936483f44ee9c8d16f3251a5a1191ca6e5
|
[
"Apache-2.0"
] | 3
|
2018-11-25T06:51:59.000Z
|
2019-07-14T15:47:48.000Z
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line tool to assign example tests to CI test jobs.
"""
# TODO: Need to handle running examples on different chips
import os
import sys
import re
import argparse
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path:
sys.path.insert(0, test_fw_path)
from Utility.CIAssignTest import AssignTest
class CIExampleAssignTest(AssignTest):
CI_TEST_JOB_PATTERN = re.compile(r"^example_test_.+")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
parser.add_argument("ci_config_file",
help="gitlab ci config file")
parser.add_argument("output_path",
help="output path of config files")
args = parser.parse_args()
assign_test = CIExampleAssignTest(args.test_case, args.ci_config_file)
assign_test.assign_cases()
assign_test.output_configs(args.output_path)
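# Example invocation (illustrative; the placeholders below only mirror the
# three positional arguments parsed above and are not real paths):
#   python CIAssignExampleTest.py <test_case_folder> <gitlab_ci_config_file> <output_path>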
| 31.897959
| 74
| 0.725528
|
d6ddd88faba42caf19ab392483d9cec0fa476884
| 391
|
py
|
Python
|
test_mo/asgi.py
|
Qmanes/test_mo
|
f175f3d623d499473d88d2357769a3259b9b7484
|
[
"MIT"
] | null | null | null |
test_mo/asgi.py
|
Qmanes/test_mo
|
f175f3d623d499473d88d2357769a3259b9b7484
|
[
"MIT"
] | null | null | null |
test_mo/asgi.py
|
Qmanes/test_mo
|
f175f3d623d499473d88d2357769a3259b9b7484
|
[
"MIT"
] | null | null | null |
"""
ASGI config for test_mo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_mo.settings')
application = get_asgi_application()
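# Deployment sketch (illustrative): any ASGI server can serve this callable,
# for example with uvicorn installed:
#   uvicorn test_mo.asgi:application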
| 23
| 78
| 0.785166
|
ebcaf9aa7dbc00bc9ed32c8b5cf12ff21cc8fe5e
| 1,982
|
py
|
Python
|
robotframework-ls/src/robotframework_ls/__init__.py
|
JohanMabille/robotframework-lsp
|
610f0257fdcd79b8c38107a0ecf600f60160bc1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robotframework-ls/src/robotframework_ls/__init__.py
|
JohanMabille/robotframework-lsp
|
610f0257fdcd79b8c38107a0ecf600f60160bc1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robotframework-ls/src/robotframework_ls/__init__.py
|
JohanMabille/robotframework-lsp
|
610f0257fdcd79b8c38107a0ecf600f60160bc1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
__version__ = "0.6.0"
version_info = [int(x) for x in __version__.split(".")]
import os.path
import sys
__file__ = os.path.abspath(__file__)
if __file__.endswith((".pyc", ".pyo")):
__file__ = __file__[:-1]
def import_robocorp_ls_core():
"""
Helper function to make sure that robocorp_ls_core is imported properly
(either in dev or in release mode).
"""
try:
import robocorp_ls_core
except ImportError:
log_contents = []
use_folder = None
try:
src_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
log_contents.append("Source folder: %s" % (src_folder,))
src_core_folder = os.path.abspath(
os.path.join(src_folder, "..", "..", "robocorp-python-ls-core", "src")
)
if os.path.isdir(src_core_folder):
log_contents.append("Dev mode detected. Found: %s" % (src_core_folder,))
use_folder = src_core_folder
else:
vendored_folder = os.path.join(
src_folder, "robotframework_ls", "vendored"
)
log_contents.append(
"Using vendored mode. Found: %s" % (vendored_folder,)
)
use_folder = vendored_folder
assert os.path.isdir(
use_folder
), "Expected: %s to exist and be a directory." % (use_folder,)
sys.path.append(use_folder)
import robocorp_ls_core
except:
try:
if use_folder:
log_contents.append(
"%s contents:\n%s" % (use_folder, os.listdir(use_folder))
)
except:
log_contents.append("Error in os.listdir('%s')." % (use_folder,))
raise ImportError(
"Error importing robocorp_ls_core. Log: %s" % "\n".join(log_contents)
)
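# Usage sketch (illustrative): callers inside this package are expected to run
# the helper above before importing robocorp_ls_core, e.g.
#
#   import_robocorp_ls_core()
#   import robocorp_ls_core
#
# so that either the dev-tree copy or the vendored copy is placed on sys.path.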
| 33.59322
| 88
| 0.534309
|
0ed6c9352b89cb95c37f903654e23611852b093a
| 25,197
|
py
|
Python
|
src/persistence/response_cache.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | 39
|
2020-06-19T05:38:11.000Z
|
2022-03-28T04:35:31.000Z
|
src/persistence/response_cache.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | null | null | null |
src/persistence/response_cache.py
|
nostalgebraist/nostalgebraist-autoresponder
|
622349c4cad2a7aec1017837416c58a678151aae
|
[
"MIT"
] | 2
|
2021-04-13T18:12:03.000Z
|
2021-12-16T23:20:12.000Z
|
"""
Originally for caching tumblr API responses to help w/ ratelimiting.
Scope creep has caused this to be more of a general cache for lots of stuff, so it now
holds a lot of stuff needed for persistent-over-time elements of bot operation, like
the mood feature.
"""
import subprocess
from collections import namedtuple, defaultdict
from enum import Enum
from datetime import datetime, timedelta
import pytumblr
import time
import os
import pickle
from util.times import now_pst
from util.cloudsave import resilient_pickle_load, resilient_pickle_save, CLOUDSAVE_BUCKET
import config.bot_config_singleton
bot_specific_constants = config.bot_config_singleton.bot_specific_constants
NO_REBLOG_IDS = bot_specific_constants.NO_REBLOG_IDS
blogName = bot_specific_constants.blogName
PostIdentifier = namedtuple("PostIdentifier", "blog_name id_")
ReplyIdentifier = namedtuple("ReplyIdentifier", "blog_name id_ timestamp")
CachedResponseType = Enum("CachedResponseType", "POSTS NOTES")
UserInputType = Enum("UserInputType", "ASK REBLOG REPLY")
UserInputIdentifier = namedtuple(
"UserInputIdentifier", "input_type blog_name id_ timestamp"
)
class ResponseCache:
def __init__(
self, client: pytumblr.TumblrRestClient, path: str, backup_path: str, cache: dict = None
):
self.client = client
self.path = path
self.backup_path = backup_path
self.cache = cache
if self.cache is None:
self.cache = {rtype: {} for rtype in CachedResponseType}
self.cache["reblogs_handled"] = set()
if "reblogs_handled" not in self.cache:
self.cache["reblogs_handled"] = set()
if "replies_handled" not in self.cache:
self.cache["replies_handled"] = set()
if "user_input_sentiments" not in self.cache:
self.cache["user_input_sentiments"] = {}
if "blocked_by_users" not in self.cache:
self.cache["blocked_by_users"] = set()
if "last_accessed_time" not in self.cache:
self.cache["last_accessed_time"] = {}
if "dash_post_judgments" not in self.cache:
self.cache["dash_post_judgments"] = {}
if "last_seen_ts" not in self.cache:
self.cache["last_seen_ts"] = defaultdict(int)
if "following_names" not in self.cache:
self.cache["following_names"] = set()
@staticmethod
def load(client=None,
path=f"data/response_cache.pkl.gz",
backup_path="data/cloudsave_backups/response_cache.pkl.gz",
verbose=True):
cache = resilient_pickle_load(path=path)
if verbose:
lengths = {k: len(cache[k]) for k in cache.keys()}
print(f"loaded response cache with lengths {lengths}")
loaded = ResponseCache(client, path, backup_path, cache)
loaded.remove_oldest()
return loaded
def save(self, verbose=True):
self.remove_oldest()
t1 = time.time()
resilient_pickle_save(obj=self.cache, path=self.path, backup_path=self.backup_path)
_tsave = time.time()
print(f"response_cache save 1: {_tsave-t1:.3f}s sec")
if verbose:
lengths = {k: len(self.cache[k]) for k in CachedResponseType}
print(f"saved response cache with lengths {lengths}")
def decomposed_save(self, directory: str):
# for tracking sizes of individual parts
os.makedirs(directory, exist_ok=True)
for k, v in self.cache.items():
path = os.path.join(directory, f"{repr(k)}.pkl.gz")
with open(path, "wb") as f:
pickle.dump({k: self.cache[k]}, f)
print(f"wrote {repr(k)} to {path}")
def remove_oldest(self, max_hours=18, dryrun=False):
lat = self.cache["last_accessed_time"]
existing_p = self.cache[CachedResponseType.POSTS]
existing_n = self.cache[CachedResponseType.NOTES]
existing_dpj = self.cache["dash_post_judgments"]
last_allowed_time = now_pst() - timedelta(hours=max_hours)
allowed_p = {pi for pi, t in lat.items() if t >= last_allowed_time}
new_p = {pi: existing_p[pi] for pi in existing_p if pi in allowed_p}
new_n = {pi: existing_n[pi] for pi in existing_n if pi in allowed_p}
new_lat = {pi: lat[pi] for pi in lat if pi in allowed_p}
new_dpj = {pi: existing_dpj[pi] for pi in existing_dpj if pi in allowed_p}
before_len_p = len(existing_p)
before_len_n = len(existing_n)
before_len_lat = len(lat)
before_len_dpj = len(existing_dpj)
delta_len_p = before_len_p - len(new_p)
delta_len_n = before_len_n - len(new_n)
delta_len_lat = before_len_lat - len(new_lat)
delta_len_dpj = before_len_dpj - len(new_dpj)
if dryrun:
print(f"remove_oldest: would drop {delta_len_p} of {before_len_p} POSTS")
print(f"remove_oldest: would drop {delta_len_n} of {before_len_n} NOTES")
print(f"remove_oldest: would drop {delta_len_lat} of {before_len_lat} last_accessed_time")
print(f"remove_oldest: would drop {delta_len_dpj} of {before_len_dpj} dash_post_judgments")
else:
print(f"remove_oldest: dropping {delta_len_p} of {before_len_p} POSTS")
print(f"remove_oldest: dropping {delta_len_n} of {before_len_n} NOTES")
print(f"remove_oldest: dropping {delta_len_lat} of {before_len_lat} last_accessed_time")
print(f"remove_oldest: dropping {delta_len_dpj} of {before_len_dpj} dash_post_judgments")
self.cache[CachedResponseType.POSTS] = new_p
self.cache[CachedResponseType.NOTES] = new_n
self.cache["last_accessed_time"] = new_lat
self.cache["dash_post_judgments"] = new_dpj
def record_response_to_cache(
self, response: dict, care_about_notes=True, care_about_likes=False
):
if response.get("response") == "You do not have permission to view this blog":
# TODO: make this work properly
user = response.get("blog", {}).get("name", None)
if user is not None:
self.mark_blocked_by_user(user)
return response
if "posts" not in response:
print(f"weirdness: {response}")
return response
for response_core in response["posts"]:
identifier = PostIdentifier(response_core["blog_name"], response_core["id"])
post_payload = {k: v for k, v in response_core.items() if k != "notes"}
notes = self.normalized_lookup(CachedResponseType.NOTES, identifier)
if notes is None:
notes = []
timestamps = {n["timestamp"] for n in notes}
payload_notes = response_core.get("notes", [])
new_notes = [n for n in payload_notes if n["timestamp"] not in timestamps]
if care_about_notes and len(payload_notes) > 0:
compare_on_conversational = (
len(self.conversational_notes(payload_notes)) > 0
)
if compare_on_conversational:
latest_obtained_ts = self.latest_stored_conversational_note_ts(
identifier
)
else:
latest_obtained_ts = self.latest_stored_note_ts(identifier)
reference_note_ts = self.earliest_conversational_note_ts(payload_notes)
notes.extend(new_notes)
if care_about_notes and len(payload_notes) > 0:
expected_notes = response_core["note_count"] + 1
cache_up_to_date = (
False
if len(timestamps) == 0
else (reference_note_ts < latest_obtained_ts)
)
if not cache_up_to_date and response_core["id"] not in NO_REBLOG_IDS:
done_calling_notes_endpt = False
# need this to get the links
time.sleep(0.33)
print(f"\thave {len(notes)} notes of {expected_notes}")
note_response = self.client.notes(
identifier.blog_name, id=identifier.id_, mode="conversation"
)
payload_notes = note_response["notes"]
new_notes = [
n for n in payload_notes if n["timestamp"] not in timestamps
]
notes.extend(new_notes)
done_calling_notes_endpt = (
False
if len(timestamps) == 0
else (
self.earliest_conversational_note_ts(payload_notes)
< latest_obtained_ts
)
)
while (not done_calling_notes_endpt) and (
"_links" in note_response
):
time.sleep(0.33)
note_response = self.client.notes(
identifier.blog_name,
id=identifier.id_,
mode="conversation",
before_timestamp=note_response["_links"]["next"][
"query_params"
]["before_timestamp"],
)
payload_notes = note_response["notes"]
new_notes = [
n for n in payload_notes if n["timestamp"] not in timestamps
]
notes.extend(new_notes)
print(f"\thave {len(notes)} notes of {expected_notes}")
done_calling_notes_endpt = (
False
if len(timestamps) == 0
else (
self.earliest_conversational_note_ts(payload_notes)
< latest_obtained_ts
)
)
self.cache[CachedResponseType.NOTES][identifier] = sorted(
notes, key=lambda n: n["timestamp"], reverse=True
)
self.cache[CachedResponseType.POSTS][identifier] = post_payload
return response
def _cached_note_count(self, rtype, identifier, use_overrides=True):
if ((identifier, "note_count_override") in self.cache[rtype]) and use_overrides:
return self.cache[rtype][(identifier, "note_count_override")]
return max(0, len(self.cache[rtype][identifier]))
def _note_cache_uptodate(
self,
identifier: PostIdentifier,
expected_notes: int,
reference_note_ts: dict,
compare_on_conversational=True,
):
if expected_notes is None and reference_note_ts is None:
print(f"matchers not provided, pulling fresh notes for {identifier}")
return False
normalized_ident = self.get_normalized_ident(
CachedResponseType.NOTES, identifier
)
if normalized_ident is None:
print(f"note cache unavailable for {identifier}")
return False
if reference_note_ts is not None:
if compare_on_conversational:
latest_stored_ts = self.latest_stored_conversational_note_ts(
normalized_ident
)
else:
latest_stored_ts = self.latest_stored_note_ts(normalized_ident)
if latest_stored_ts < reference_note_ts:
print(f"_note_cache_uptodate: NOT up to date")
print(
f"_note_cache_uptodate: got latest_stored_ts={latest_stored_ts} vs reference_note_ts={reference_note_ts}"
)
print(f"_note_cache_uptodate: {latest_stored_ts >= reference_note_ts}")
return latest_stored_ts >= reference_note_ts
cached_notes = self._cached_note_count(
CachedResponseType.NOTES, normalized_ident
)
cache_uptodate = expected_notes <= cached_notes
if not cache_uptodate:
print(
f"note cache stale for {normalized_ident}: expected {expected_notes} notes but have {cached_notes} in cache"
)
return cache_uptodate
def _api_call_for_rtype(
self,
rtype: CachedResponseType,
identifier: PostIdentifier,
care_about_notes=True,
care_about_likes=False,
notes_field=None, # TODO: use this properly
):
time.sleep(0.33)
response = self.client.posts(
identifier.blog_name, id=identifier.id_, notes_info=True
)
self.record_response_to_cache(
response,
care_about_notes=care_about_notes,
care_about_likes=care_about_likes,
)
def _can_use_cached(
self,
rtype: CachedResponseType,
identifier: PostIdentifier,
expected_notes: int = None,
notes_field: list = None,
):
is_in_cache = self.get_normalized_ident(rtype, identifier) is not None
cache_uptodate = True
if rtype == CachedResponseType.NOTES and is_in_cache:
reference_note_ts = self.earliest_conversational_note_ts(notes_field)
if reference_note_ts is None:
cache_uptodate = True
else:
compare_on_conversational = (
True
if notes_field is None
else len(self.conversational_notes(notes_field)) > 0
)
cache_uptodate = self._note_cache_uptodate(
identifier,
expected_notes,
reference_note_ts,
compare_on_conversational=compare_on_conversational,
)
return is_in_cache and cache_uptodate
def _record_unexpected_note_counts(self, rtype, identifier, expected_notes):
cached_notes = self._cached_note_count(rtype, identifier, use_overrides=False)
if cached_notes != expected_notes:
print(
f"cache note count {cached_notes} still != expected {expected_notes} for {identifier}, marking {expected_notes} as override"
)
self.cache[rtype][(identifier, "note_count_override")] = expected_notes
if (cached_notes == expected_notes) and (
(identifier, "note_count_override") in self.cache[rtype]
):
print(
f"cache note count {cached_notes} = expected {expected_notes} for {identifier}, unsetting override"
)
del self.cache[rtype][(identifier, "note_count_override")]
def get_normalized_ident(self, rtype, identifier):
identifier_int = PostIdentifier(identifier.blog_name, int(identifier.id_))
identifier_str = PostIdentifier(identifier.blog_name, str(identifier.id_))
if identifier_int in self.cache[rtype]:
self.cache["last_accessed_time"][identifier_int] = now_pst()
return identifier_int
if identifier_str in self.cache[rtype]:
self.cache["last_accessed_time"][identifier_str] = now_pst()
return identifier_str
return None
def normalized_lookup(self, rtype, identifier, expect_in_cache=False):
normalized_ident = self.get_normalized_ident(rtype, identifier)
if normalized_ident is None:
if expect_in_cache:
print(f"{identifier} should be in {rtype} cache but isn't")
return None
return self.cache[rtype][normalized_ident]
def query(
self,
rtype: CachedResponseType,
identifier: PostIdentifier,
expected_notes: int = None,
notes_field: list = None,
care_about_notes=True,
care_about_likes=False,
):
if care_about_likes:
notes_field = None
if not self._can_use_cached(rtype, identifier, expected_notes, notes_field):
self._api_call_for_rtype(
rtype,
identifier,
care_about_notes=care_about_notes,
care_about_likes=care_about_likes,
notes_field=notes_field,
)
return self.normalized_lookup(rtype, identifier, expect_in_cache=True)
def mark_handled(self, identifier: PostIdentifier):
identifier_normalized = PostIdentifier(
blog_name=identifier.blog_name, id_=str(identifier.id_)
)
tip = self.cached_trail_tip(identifier_normalized)
if tip is not None:
if tip != identifier and tip.blog_name != blogName:
print(
f"mark_handled: for {identifier}, also marking tip {tip} as handled"
)
self.cache["reblogs_handled"].add(tip)
else:
print(f"mark_handled: for {identifier}, found no tip {tip} to mark")
self.cache["reblogs_handled"].add(identifier_normalized)
def mark_unhandled(self, identifier: PostIdentifier):
tip = self.cached_trail_tip(identifier)
if tip is not None and tip in self.cache["reblogs_handled"]:
if tip != identifier:
print(
f"mark_unhandled: for {identifier}, also marking tip {tip} as unhandled"
)
self.cache["reblogs_handled"].remove(tip)
if identifier in self.cache["reblogs_handled"]:
self.cache["reblogs_handled"].remove(identifier)
@staticmethod
def trail_tip(trail: list):
if trail is None:
return None
ordered_trail = sorted(trail, key=lambda x: x.get("post", {}).get("id", "-1"))
if len(ordered_trail) > 0:
tip = ordered_trail[-1]
tip_ident = PostIdentifier(
tip.get("blog", {}).get("name", ""),
str(tip.get("post", {}).get("id", "-1")),
)
return tip_ident
def cached_trail_tip(self, identifier: PostIdentifier):
cached_post = self.normalized_lookup(CachedResponseType.POSTS, identifier)
if cached_post is not None:
tip = ResponseCache.trail_tip(cached_post.get("trail"))
return tip
def is_handled(self, identifier: PostIdentifier, check_tip=True):
identifier_normalized = PostIdentifier(
blog_name=identifier.blog_name, id_=str(identifier.id_)
)
handled_at_ident = identifier_normalized in self.cache["reblogs_handled"]
handled_at_tip = None
tip = self.cached_trail_tip(identifier_normalized) if check_tip else None
if tip is not None:
handled_at_tip = self.is_handled(tip, check_tip=False)
if handled_at_tip is not None:
# print(f"identifier: handled_at_ident={handled_at_ident}")
# print(f"identifier: handled_at_tip={handled_at_tip}")
handled = handled_at_tip or handled_at_ident
else:
handled = handled_at_ident
return handled
def mark_reply_handled(self, identifier: ReplyIdentifier):
identifier_normalized = ReplyIdentifier(
blog_name=identifier.blog_name,
id_=str(identifier.id_),
timestamp=identifier.timestamp,
)
self.cache["replies_handled"].add(identifier_normalized)
def is_reply_handled(self, identifier: ReplyIdentifier):
identifier_normalized = ReplyIdentifier(
blog_name=identifier.blog_name,
id_=str(identifier.id_),
timestamp=identifier.timestamp,
)
return identifier_normalized in self.cache["replies_handled"]
def mark_user_input_sentiment(
self, identifier: UserInputIdentifier, sentiment: dict
):
identifier_normalized = UserInputIdentifier(
input_type=identifier.input_type,
blog_name=identifier.blog_name,
id_=str(identifier.id_) if identifier.id_ is not None else None,
timestamp=identifier.timestamp,
)
self.cache["user_input_sentiments"][identifier_normalized] = sentiment
def get_cached_user_input_sentiment(self, identifier: UserInputIdentifier):
identifier_normalized = UserInputIdentifier(
input_type=identifier.input_type,
blog_name=identifier.blog_name,
id_=str(identifier.id_) if identifier.id_ is not None else None,
timestamp=identifier.timestamp,
)
return self.cache["user_input_sentiments"].get(identifier_normalized)
def mark_dash_post_judgments(
self, identifier: PostIdentifier, judgments: dict
):
identifier_normalized = PostIdentifier(
blog_name=identifier.blog_name, id_=str(identifier.id_)
)
self.cache["dash_post_judgments"][identifier_normalized] = judgments
def get_cached_dash_post_judgments(
self, identifier: PostIdentifier
):
return self.normalized_lookup('dash_post_judgments', identifier)
def mark_blocked_by_user(self, blog_name: str):
self.cache["blocked_by_users"].add(blog_name)
@staticmethod
def conversational_notes(notes_field: list):
# return [n for n in notes_field if n.get('type') != "like"]
added_text_fields = ["added_text", "reply_text"]
return [
n
for n in notes_field
if any([field in n for field in added_text_fields])
or n.get("type") == "posted"
]
@staticmethod
def conversational_notes_with_fallback(notes_field: list, direction="earliest"):
conv_notes = ResponseCache.conversational_notes(notes_field)
if len(conv_notes) > 0:
return conv_notes
# fallback
if direction == "earliest":
return sorted(notes_field, key=lambda n: n.get("timestamp", -1))[:1]
else:
return sorted(notes_field, key=lambda n: n.get("timestamp", -1))[-1:]
@staticmethod
def conversational_notes_ts_with_fallback(
notes_field: list, direction="earliest", debug=False
):
conv_notes = ResponseCache.conversational_notes_with_fallback(
notes_field, direction=direction
)
notes_ts = [n.get("timestamp") for n in conv_notes if "timestamp" in n]
if debug:
print(
f"\nnotes_ts={notes_ts}\n\tgot notes_field={notes_field}\n\tgot conv_notes={conv_notes}\n"
)
return notes_ts
@staticmethod
def earliest_conversational_note_ts(notes_field: list):
if notes_field is None:
return None
return min(
ResponseCache.conversational_notes_ts_with_fallback(
notes_field, direction="earliest"
)
)
@staticmethod
def latest_conversational_note_ts(notes_field: list):
if notes_field is None:
return None
return max(
ResponseCache.conversational_notes_ts_with_fallback(
notes_field, direction="latest"
)
)
def latest_stored_conversational_note_ts(self, identifier: PostIdentifier):
notes = self.normalized_lookup(CachedResponseType.NOTES, identifier)
if notes is not None:
return self.latest_conversational_note_ts(notes)
return None
def latest_stored_note_ts(self, identifier: PostIdentifier):
notes = self.normalized_lookup(CachedResponseType.NOTES, identifier)
if notes is not None:
notes_ts = [n.get("timestamp") for n in notes if "timestamp" in n]
return -1 if len(notes_ts) == 0 else max(notes_ts)
return -1
def get_last_seen_ts(self, key):
return self.cache['last_seen_ts'][key]
def update_last_seen_ts(self, key, ts):
prev = self.get_last_seen_ts(key)
print(
f"updating {key}: {prev} --> {ts} (+{ts-prev})"
)
self.cache['last_seen_ts'][key] = ts
def set_following_names(self, following_names):
self.cache["following_names"] = following_names
def follow(self, name, dashboard_client):
self.cache["following_names"].add(name)
dashboard_client.follow(name)
def unfollow(self, name, dashboard_client):
self.cache["following_names"].remove(name)
dashboard_client.unfollow(name)
@property
def reblogs_handled(self):
return self.cache["reblogs_handled"]
@property
def replies_handled(self):
return self.cache["replies_handled"]
@property
def text_selector_probs(self):
return self.cache["text_selector_probs"]
@property
def text_sentiments(self):
return self.cache["text_sentiments"]
@property
def user_input_sentiments(self):
return self.cache["user_input_sentiments"]
@property
def blocked_by_users(self):
return self.cache["blocked_by_users"]
@property
def following_names(self):
return self.cache["following_names"]
| 39.49373
| 140
| 0.613168
|
ed3bbae33cd01724c5151b3ac4c16ed8a05d72bf
| 125
|
py
|
Python
|
sourcehold/maps/sections/section1021.py
|
J-T-de/sourcehold-maps
|
330ab1b3426dbd93b5de3b0c031419e54f6a1618
|
[
"MIT"
] | null | null | null |
sourcehold/maps/sections/section1021.py
|
J-T-de/sourcehold-maps
|
330ab1b3426dbd93b5de3b0c031419e54f6a1618
|
[
"MIT"
] | null | null | null |
sourcehold/maps/sections/section1021.py
|
J-T-de/sourcehold-maps
|
330ab1b3426dbd93b5de3b0c031419e54f6a1618
|
[
"MIT"
] | null | null | null |
from .types import TileCompressedMapSection
class Section1021(TileCompressedMapSection):
_TYPE_ = "H"
_CLASS_ = int
| 20.833333
| 44
| 0.768
|
eb21c84cf904c163f20c464c1db0f9d3948b53f3
| 7,464
|
py
|
Python
|
sort.py
|
qadeer7194/-Dyanmic-Traffic-Light-Management-System
|
4b34fdf74b07967b49c2d03807bb6a5ef310208e
|
[
"MIT"
] | null | null | null |
sort.py
|
qadeer7194/-Dyanmic-Traffic-Light-Management-System
|
4b34fdf74b07967b49c2d03807bb6a5ef310208e
|
[
"MIT"
] | null | null | null |
sort.py
|
qadeer7194/-Dyanmic-Traffic-Light-Management-System
|
4b34fdf74b07967b49c2d03807bb6a5ef310208e
|
[
"MIT"
] | null | null | null |
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016 Alex Bewley alex@dynamicdetection.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
from numba import jit
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from filterpy.kalman import KalmanFilter
@jit
def iou(bb_test,bb_gt):
"""
Computes IOU between two bboxes in the form [x1,y1,x2,y2]
"""
xx1 = np.maximum(bb_test[0], bb_gt[0])
yy1 = np.maximum(bb_test[1], bb_gt[1])
xx2 = np.minimum(bb_test[2], bb_gt[2])
yy2 = np.minimum(bb_test[3], bb_gt[3])
w = np.maximum(0., xx2 - xx1)
h = np.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])
+ (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)
return(o)
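# Worked example (illustrative): for bb_test = [0, 0, 2, 2] and
# bb_gt = [1, 1, 3, 3], the intersection is the unit square [1, 2] x [1, 2],
# so iou = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143.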
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w = bbox[2]-bbox[0]
h = bbox[3]-bbox[1]
x = bbox[0]+w/2.
y = bbox[1]+h/2.
s = w*h #scale is just area
r = w/float(h)
return np.array([x,y,s,r]).reshape((4,1))
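# Worked example (illustrative): convert_bbox_to_z([0, 0, 4, 2]) gives
# w = 4, h = 2, centre (2, 1), scale s = 8 and aspect ratio r = 2, i.e. the
# column vector [2, 1, 8, 2].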
def convert_x_to_bbox(x,score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(x[2]*x[3])
h = x[2]/w
if score is None:
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
else:
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
class KalmanBoxTracker(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self,bbox):
"""
Initialises a tracker using initial bounding box.
"""
#define constant velocity model
self.kf = KalmanFilter(dim_x=7, dim_z=4)
self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
self.kf.R[2:,2:] *= 10.
self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
self.kf.P *= 10.
self.kf.Q[-1,-1] *= 0.01
self.kf.Q[4:,4:] *= 0.01
self.kf.x[:4] = convert_bbox_to_z(bbox)
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 0
self.hit_streak = 0
self.age = 0
def update(self,bbox):
"""
Updates the state vector with observed bbox.
"""
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1
self.kf.update(convert_bbox_to_z(bbox))
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if((self.kf.x[6]+self.kf.x[2])<=0):
self.kf.x[6] *= 0.0
self.kf.predict()
self.age += 1
if(self.time_since_update>0):
self.hit_streak = 0
self.time_since_update += 1
self.history.append(convert_x_to_bbox(self.kf.x))
return self.history[-1]
def get_state(self):
"""
Returns the current bounding box estimate.
"""
return convert_x_to_bbox(self.kf.x)
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
"""
Assigns detections to tracked object (both represented as bounding boxes)
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
if(len(trackers)==0) or (len(detections)==0):
return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)
for d,det in enumerate(detections):
for t,trk in enumerate(trackers):
iou_matrix[d,t] = iou(det,trk)
matched_indices = linear_assignment(-iou_matrix)
unmatched_detections = []
for d,det in enumerate(detections):
if(d not in matched_indices[:,0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t,trk in enumerate(trackers):
if(t not in matched_indices[:,1]):
unmatched_trackers.append(t)
#filter out matched with low IOU
matches = []
for m in matched_indices:
if(iou_matrix[m[0],m[1]]<iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1,2))
if(len(matches)==0):
matches = np.empty((0,2),dtype=int)
else:
matches = np.concatenate(matches,axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class Sort(object):
def __init__(self,max_age=1,min_hits=3):
"""
Sets key parameters for SORT
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.frame_count = 0
def update(self,dets):
"""
Params:
dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
Requires: this method must be called once for each frame even with empty detections.
Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
if len(dets)==0:
return np.empty((0))
self.frame_count += 1
#get predicted locations from existing trackers.
trks = np.zeros((len(self.trackers),5))
to_del = []
ret = []
for t,trk in enumerate(trks):
pos = self.trackers[t].predict()[0]
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
if(np.any(np.isnan(pos))):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)
#update matched trackers with assigned detections
for t,trk in enumerate(self.trackers):
if(t not in unmatched_trks):
d = matched[np.where(matched[:,1]==t)[0],0]
trk.update(dets[d,:][0])
#create and initialise new trackers for unmatched detections
for i in unmatched_dets:
trk = KalmanBoxTracker(dets[i,:])
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
d = trk.get_state()[0]
if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
i -= 1
#remove dead tracklet
if(trk.time_since_update > self.max_age):
self.trackers.pop(i)
if(len(ret)>0):
return np.concatenate(ret)
return np.empty((0,5))
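# --- Usage sketch (illustrative, not part of the original file) ---
# Run the tracker above on two frames of fabricated detections in
# [x1,y1,x2,y2,score] form. Note that sklearn.utils.linear_assignment_
# (imported at the top of this file) has been removed from newer scikit-learn
# releases, so running this file requires an older scikit-learn.
if __name__ == '__main__':
    mot_tracker = Sort(max_age=1, min_hits=1)
    frame1 = np.array([[10., 10., 50., 50., 0.9],
                       [100., 100., 150., 160., 0.8]])
    frame2 = np.array([[12., 11., 52., 51., 0.9],
                       [101., 102., 151., 162., 0.7]])
    for dets in (frame1, frame2):
        tracks = mot_tracker.update(dets)
        # each row is [x1, y1, x2, y2, track_id]
        print(tracks)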
| 33.470852
| 141
| 0.649652
|
74d2597968e3350550b601757925a3fa106e4c36
| 63,112
|
py
|
Python
|
geoalchemy2/_functions.py
|
jimfulton/geoalchemy2
|
0ed84a38cc5a224c6d3f9dc6281800bd0f158f9b
|
[
"MIT"
] | 388
|
2015-01-05T05:18:04.000Z
|
2022-03-04T23:24:08.000Z
|
geoalchemy2/_functions.py
|
jimfulton/geoalchemy2
|
0ed84a38cc5a224c6d3f9dc6281800bd0f158f9b
|
[
"MIT"
] | 233
|
2015-01-01T20:16:10.000Z
|
2022-03-23T14:05:46.000Z
|
geoalchemy2/_functions.py
|
jimfulton/geoalchemy2
|
0ed84a38cc5a224c6d3f9dc6281800bd0f158f9b
|
[
"MIT"
] | 81
|
2015-05-04T15:45:33.000Z
|
2022-03-02T13:51:27.000Z
|
# -*- coding: utf-8 -*-
# flake8: noqa
from . import types
_FUNCTIONS = [
('AddGeometryColumn', None,
'''Adds a geometry column to an existing table.'''),
('DropGeometryColumn', None,
'''Removes a geometry column from a spatial table.'''),
('DropGeometryTable', None,
'''Drops a table and all its references in geometry_columns.'''),
('Find_SRID', None,
'''Returns the SRID defined for a geometry column.'''),
('Populate_Geometry_Columns', None,
'''Ensures geometry columns are defined with type modifiers or have appropriate spatial constraints.'''),
('UpdateGeometrySRID', None,
'''Updates the SRID of all features in a geometry column, and the table metadata.'''),
('ST_Collect', types.Geometry,
'''Creates a GeometryCollection or Multi* geometry from a set of geometries.'''),
('ST_LineFromMultiPoint', types.Geometry,
'''Creates a LineString from a MultiPoint geometry.'''),
('ST_MakeEnvelope', types.Geometry,
'''Creates a rectangular Polygon from minimum and maximum coordinates.'''),
('ST_MakeLine', types.Geometry,
'''Creates a Linestring from Point, MultiPoint, or LineString geometries.'''),
('ST_MakePoint', types.Geometry,
'''Creates a 2D, 3DZ or 4D Point.'''),
('ST_MakePointM', types.Geometry,
'''Creates a Point from X, Y and M values.'''),
('ST_MakePolygon', types.Geometry,
'''Creates a Polygon from a shell and optional list of holes.'''),
('ST_Point', types.Geometry,
'''Creates a Point with the given coordinate values. Alias for ST_MakePoint.'''),
('ST_Polygon', types.Geometry,
'''[geometry] Creates a Polygon from a LineString with a specified SRID.\nOR\n[raster] Returns a multipolygon geometry formed by the union of pixels that have a pixel value that is not no data value. If no band number is specified, band num defaults to 1.'''),
('ST_TileEnvelope', types.Geometry,
'''Creates a rectangular Polygon in Web Mercator (SRID:3857) using the XYZ tile system.'''),
('GeometryType', None,
'''Returns the type of a geometry as text.'''),
('ST_Boundary', types.Geometry,
'''Returns the boundary of a geometry.'''),
('ST_CoordDim', None,
'''Return the coordinate dimension of a geometry.'''),
('ST_Dimension', None,
'''Returns the topological dimension of a geometry.'''),
('ST_Dump', types.GeometryDump,
'''Returns a set of geometry_dump rows for the components of a geometry.'''),
('ST_DumpPoints', types.GeometryDump,
'''Returns a set of geometry_dump rows for the points in a geometry.'''),
('ST_DumpRings', types.GeometryDump,
'''Returns a set of geometry_dump rows for the exterior and interior rings of a Polygon.'''),
('ST_EndPoint', types.Geometry,
'''Returns the last point of a LineString or CircularLineString.'''),
('ST_Envelope', types.Geometry,
'''[geometry] Returns a geometry representing the bounding box of a geometry.\nOR\n[raster] Returns the polygon representation of the extent of the raster.'''),
('ST_BoundingDiagonal', types.Geometry,
'''Returns the diagonal of a geometry's bounding box.'''),
('ST_ExteriorRing', types.Geometry,
'''Returns a LineString representing the exterior ring of a Polygon.'''),
('ST_GeometryN', types.Geometry,
'''Return the Nth geometry element of a geometry collection.'''),
('ST_GeometryType', None,
'''Returns the SQL-MM type of a geometry as text.'''),
('ST_HasArc', None,
'''Tests if a geometry contains a circular arc'''),
('ST_InteriorRingN', types.Geometry,
'''Returns the Nth interior ring (hole) of a Polygon.'''),
('ST_IsPolygonCCW', None,
'''Tests if Polygons have exterior rings oriented counter-clockwise and interior rings oriented clockwise.'''),
('ST_IsPolygonCW', None,
'''Tests if Polygons have exterior rings oriented clockwise and interior rings oriented counter-clockwise.'''),
('ST_IsClosed', None,
'''Tests if a LineStrings's start and end points are coincident. For a PolyhedralSurface tests if it is closed (volumetric).'''),
('ST_IsCollection', None,
'''Tests if a geometry is a geometry collection type.'''),
('ST_IsEmpty', None,
'''[geometry] Tests if a geometry is empty.\nOR\n[raster] Returns true if the raster is empty (width = 0 and height = 0). Otherwise, returns false.'''),
('ST_IsRing', None,
'''Tests if a LineString is closed and simple.'''),
('ST_IsSimple', None,
'''Tests if a geometry has no points of self-intersection or self-tangency.'''),
('ST_M', None,
'''Returns the M coordinate of a Point.'''),
('ST_MemSize', None,
'''[geometry] Returns the amount of memory space a geometry takes.\nOR\n[raster] Returns the amount of space (in bytes) the raster takes.'''),
('ST_NDims', None,
'''Returns the coordinate dimension of a geometry.'''),
('ST_NPoints', None,
'''Returns the number of points (vertices) in a geometry.'''),
('ST_NRings', None,
'''Returns the number of rings in a polygonal geometry.'''),
('ST_NumGeometries', None,
'''Returns the number of elements in a geometry collection.'''),
('ST_NumInteriorRings', None,
'''Returns the number of interior rings (holes) of a Polygon.'''),
('ST_NumInteriorRing', None,
'''Returns the number of interior rings (holes) of a Polygon. Alias for ST_NumInteriorRings'''),
('ST_NumPatches', None,
'''Return the number of faces on a Polyhedral Surface. Will return null for non-polyhedral geometries.'''),
('ST_NumPoints', None,
'''Returns the number of points in a LineString or CircularString.'''),
('ST_PatchN', types.Geometry,
'''Returns the Nth geometry (face) of a PolyhedralSurface.'''),
('ST_PointN', types.Geometry,
'''Returns the Nth point in the first LineString or circular LineString in a geometry.'''),
('ST_Points', types.Geometry,
'''Returns a MultiPoint containing all the coordinates of a geometry.'''),
('ST_StartPoint', types.Geometry,
'''Returns the first point of a LineString.'''),
('ST_Summary', None,
'''[geometry] Returns a text summary of the contents of a geometry.\nOR\n[raster] Returns a text summary of the contents of the raster.'''),
('ST_X', None,
'''Returns the X coordinate of a Point.'''),
('ST_Y', None,
'''Returns the Y coordinate of a Point.'''),
('ST_Z', None,
'''Returns the Z coordinate of a Point.'''),
('ST_Zmflag', None,
'''Returns a code indicating the ZM coordinate dimension of a geometry.'''),
('ST_AddPoint', types.Geometry,
'''Add a point to a LineString.'''),
('ST_CollectionExtract', types.Geometry,
'''Given a (multi)geometry, return a (multi)geometry consisting only of elements of the specified type.'''),
('ST_CollectionHomogenize', types.Geometry,
'''Given a geometry collection, return the \"simplest\" representation of the contents.'''),
('ST_Force2D', types.Geometry,
'''Force the geometries into a \"2-dimensional mode\".'''),
('ST_Force3D', types.Geometry,
('''Force the geometries into XYZ mode. This is an alias for ST_Force3DZ.''', 'ST_Force_3D')),
('ST_Force3DZ', types.Geometry,
('''Force the geometries into XYZ mode.''', 'ST_Force_3DZ')),
('ST_Force3DM', types.Geometry,
('''Force the geometries into XYM mode.''', 'ST_Force_3DM')),
('ST_Force4D', types.Geometry,
('''Force the geometries into XYZM mode.''', 'ST_Force_4D')),
('ST_ForcePolygonCCW', types.Geometry,
'''Orients all exterior rings counter-clockwise and all interior rings clockwise.'''),
('ST_ForceCollection', types.Geometry,
('''Convert the geometry into a GEOMETRYCOLLECTION.''', 'ST_Force_Collection')),
('ST_ForcePolygonCW', types.Geometry,
'''Orients all exterior rings clockwise and all interior rings counter-clockwise.'''),
('ST_ForceSFS', types.Geometry,
'''Force the geometries to use SFS 1.1 geometry types only.'''),
('ST_ForceRHR', types.Geometry,
'''Force the orientation of the vertices in a polygon to follow the Right-Hand-Rule.'''),
('ST_ForceCurve', types.Geometry,
'''Upcast a geometry into its curved type, if applicable.'''),
('ST_LineMerge', types.Geometry,
'''Return a (set of) LineString(s) formed by sewing together a MULTILINESTRING.'''),
('ST_Multi', types.Geometry,
'''Return the geometry as a MULTI* geometry.'''),
('ST_Normalize', types.Geometry,
'''Return the geometry in its canonical form.'''),
('ST_QuantizeCoordinates', types.Geometry,
'''Sets least significant bits of coordinates to zero'''),
('ST_RemovePoint', types.Geometry,
'''Remove point from a linestring.'''),
('ST_Reverse', types.Geometry,
'''Return the geometry with vertex order reversed.'''),
('ST_Segmentize', types.Geometry,
'''Return a modified geometry/geography having no segment longer than the given distance.'''),
('ST_SetPoint', types.Geometry,
'''Replace point of a linestring with a given point.'''),
('ST_SnapToGrid', types.Geometry,
'''[geometry] Snap all points of the input geometry to a regular grid.\nOR\n[raster] Resample a raster by snapping it to a grid. New pixel values are computed using the NearestNeighbor (english or american spelling), Bilinear, Cubic, CubicSpline or Lanczos resampling algorithm. Default is NearestNeighbor.'''),
('ST_Snap', types.Geometry,
'''Snap segments and vertices of input geometry to vertices of a reference geometry.'''),
('ST_SwapOrdinates', types.Geometry,
'''Returns a version of the given geometry with given ordinate values swapped.'''),
('ST_IsValid', None,
'''Tests if a geometry is well-formed in 2D.'''),
('ST_IsValidDetail', None,
'''Returns a valid_detail row stating if a geometry is valid, and if not a reason why and a location.'''),
('ST_IsValidReason', None,
'''Returns text stating if a geometry is valid, or a reason for invalidity.'''),
('ST_SetSRID', types.Geometry,
'''[geometry] Set the SRID on a geometry to a particular integer value.\nOR\n[raster] Sets the SRID of a raster to a particular integer srid defined in the spatial_ref_sys table.'''),
('ST_SRID', None,
'''[geometry] Returns the spatial reference identifier for the ST_Geometry as defined in spatial_ref_sys table.\nOR\n[raster] Returns the spatial reference identifier of the raster as defined in spatial_ref_sys table.'''),
('ST_Transform', types.Geometry,
'''[geometry] Return a new geometry with its coordinates transformed to a different spatial reference system.\nOR\n[raster] Reprojects a raster in a known spatial reference system to another known spatial reference system using specified resampling algorithm. Options are NearestNeighbor, Bilinear, Cubic, CubicSpline, Lanczos defaulting to NearestNeighbor.'''),
('ST_BdPolyFromText', types.Geometry,
'''Construct a Polygon given an arbitrary collection of closed linestrings as a MultiLineString Well-Known text representation.'''),
('ST_BdMPolyFromText', types.Geometry,
'''Construct a MultiPolygon given an arbitrary collection of closed linestrings as a MultiLineString text representation Well-Known text representation.'''),
('ST_GeogFromText', types.Geography,
'''Return a specified geography value from Well-Known Text (WKT) or Extended Well-Known Text (EWKT) representation.'''),
('ST_GeographyFromText', types.Geography,
'''Return a specified geography value from Well-Known Text (WKT) or Extended Well-Known Text (EWKT) representation.'''),
('ST_GeomCollFromText', types.Geometry,
'''Makes a collection Geometry from collection WKT with the given SRID. If SRID is not given, it defaults to 0.'''),
('ST_GeomFromEWKT', types.Geometry,
'''Return a specified ST_Geometry value from Extended Well-Known Text representation (EWKT).'''),
('ST_GeometryFromText', types.Geometry,
'''Return a specified ST_Geometry value from Well-Known Text representation (WKT). This is an alias name for ST_GeomFromText'''),
('ST_GeomFromText', types.Geometry,
'''Return a specified ST_Geometry value from Well-Known Text representation (WKT).'''),
('ST_LineFromText', types.Geometry,
'''Makes a Geometry from WKT representation with the given SRID. If SRID is not given, it defaults to 0.'''),
('ST_MLineFromText', types.Geometry,
'''Return a specified ST_MultiLineString value from WKT representation.'''),
('ST_MPointFromText', types.Geometry,
'''Makes a Geometry from WKT with the given SRID. If SRID is not given, it defaults to 0.'''),
('ST_MPolyFromText', types.Geometry,
'''Makes a MultiPolygon Geometry from WKT with the given SRID. If SRID is not given, it defaults to 0.'''),
('ST_PointFromText', types.Geometry,
'''Makes a point Geometry from WKT with the given SRID. If SRID is not given, it defaults to unknown.'''),
('ST_PolygonFromText', types.Geometry,
'''Makes a Geometry from WKT with the given SRID. If SRID is not given, it defaults to 0.'''),
('ST_WKTToSQL', types.Geometry,
'''Return a specified ST_Geometry value from Well-Known Text representation (WKT). This is an alias name for ST_GeomFromText'''),
('ST_GeogFromWKB', types.Geography,
'''Creates a geography instance from a Well-Known Binary geometry representation (WKB) or extended Well Known Binary (EWKB).'''),
('ST_GeomFromEWKB', types.Geometry,
'''Return a specified ST_Geometry value from Extended Well-Known Binary representation (EWKB).'''),
('ST_GeomFromWKB', types.Geometry,
'''Creates a geometry instance from a Well-Known Binary geometry representation (WKB) and optional SRID.'''),
('ST_LineFromWKB', types.Geometry,
'''Makes a LINESTRING from WKB with the given SRID'''),
('ST_LinestringFromWKB', types.Geometry,
'''Makes a geometry from WKB with the given SRID.'''),
('ST_PointFromWKB', types.Geometry,
'''Makes a geometry from WKB with the given SRID'''),
('ST_WKBToSQL', types.Geometry,
'''Return a specified ST_Geometry value from Well-Known Binary representation (WKB). This is an alias name for ST_GeomFromWKB that takes no srid'''),
('ST_Box2dFromGeoHash', types.Geometry,
'''Return a BOX2D from a GeoHash string.'''),
('ST_GeomFromGeoHash', types.Geometry,
'''Return a geometry from a GeoHash string.'''),
('ST_GeomFromGML', types.Geometry,
'''Takes as input GML representation of geometry and outputs a PostGIS geometry object'''),
('ST_GeomFromGeoJSON', types.Geometry,
'''Takes as input a geojson representation of a geometry and outputs a PostGIS geometry object'''),
('ST_GeomFromKML', types.Geometry,
'''Takes as input KML representation of geometry and outputs a PostGIS geometry object'''),
('ST_GeomFromTWKB', types.Geometry,
'''Creates a geometry instance from a TWKB (\"Tiny Well-Known Binary\") geometry representation.'''),
('ST_GMLToSQL', types.Geometry,
'''Return a specified ST_Geometry value from GML representation. This is an alias name for ST_GeomFromGML'''),
('ST_LineFromEncodedPolyline', types.Geometry,
'''Creates a LineString from an Encoded Polyline.'''),
('ST_PointFromGeoHash', types.Geometry,
'''Return a point from a GeoHash string.'''),
('ST_AsEWKT', None,
'''Return the Well-Known Text (WKT) representation of the geometry with SRID meta data.'''),
('ST_AsText', None,
'''Return the Well-Known Text (WKT) representation of the geometry/geography without SRID metadata.'''),
('ST_AsBinary', None,
'''Return the Well-Known Binary (WKB) representation of the geometry/geography without SRID meta data.'''),
('ST_AsEWKB', None,
'''Return the Well-Known Binary (WKB) representation of the geometry with SRID meta data.'''),
('ST_AsHEXEWKB', None,
'''Returns a Geometry in HEXEWKB format (as text) using either little-endian (NDR) or big-endian (XDR) encoding.'''),
('ST_AsEncodedPolyline', None,
'''Returns an Encoded Polyline from a LineString geometry.'''),
('ST_AsGeobuf', None,
'''Return a Geobuf representation of a set of rows.'''),
('ST_AsGML', None,
'''Return the geometry as a GML version 2 or 3 element.'''),
('ST_AsKML', None,
'''Return the geometry as a KML element. Several variants. Default version=2, default maxdecimaldigits=15'''),
('ST_AsLatLonText', None,
'''Return the Degrees, Minutes, Seconds representation of the given point.'''),
('ST_AsMVTGeom', types.Geometry,
'''Transform a geometry into the coordinate space of a Mapbox Vector Tile.'''),
('ST_AsMVT', None,
'''Aggregate function returning a Mapbox Vector Tile representation of a set of rows.'''),
('ST_AsSVG', None,
'''Returns SVG path data for a geometry.'''),
('ST_AsTWKB', None,
'''Returns the geometry as TWKB, aka \"Tiny Well-Known Binary\"'''),
('ST_AsX3D', None,
'''Returns a Geometry in X3D xml node element format: ISO-IEC-19776-1.2-X3DEncodings-XML'''),
('ST_GeoHash', None,
'''Return a GeoHash representation of the geometry.'''),
('ST_3DIntersects', None,
'''Returns TRUE if the Geometries \"spatially intersect\" in 3D - only for points, linestrings, polygons, polyhedral surface (area).'''),
('ST_Contains', None,
'''[geometry] Returns true if and only if no points of B lie in the exterior of A, and at least one point of the interior of B lies in the interior of A.\nOR\n[raster] Return true if no points of raster rastB lie in the exterior of raster rastA and at least one point of the interior of rastB lies in the interior of rastA.'''),
('ST_ContainsProperly', None,
'''[geometry] Returns true if B intersects the interior of A but not the boundary (or exterior). A does not contain properly itself, but does contain itself.\nOR\n[raster] Return true if rastB intersects the interior of rastA but not the boundary or exterior of rastA.'''),
('ST_Covers', None,
'''[geometry] Returns 1 (TRUE) if no point in Geometry B is outside Geometry A\nOR\n[raster] Return true if no points of raster rastB lie outside raster rastA.'''),
('ST_CoveredBy', None,
'''[geometry] Returns 1 (TRUE) if no point in Geometry/Geography A is outside Geometry/Geography B\nOR\n[raster] Return true if no points of raster rastA lie outside raster rastB.'''),
('ST_Crosses', None,
'''Returns TRUE if the supplied geometries have some, but not all, interior points in common.'''),
('ST_LineCrossingDirection', None,
'''Given 2 linestrings, returns a number between -3 and 3 denoting what kind of crossing behavior. 0 is no crossing.'''),
('ST_Disjoint', None,
'''[geometry] Returns TRUE if the Geometries do not \"spatially intersect\" - if they do not share any space together.\nOR\n[raster] Return true if raster rastA does not spatially intersect rastB.'''),
('ST_Equals', None,
'''Returns true if the given geometries represent the same geometry. Directionality is ignored.'''),
('ST_Intersects', None,
'''[geometry] Returns TRUE if the Geometries/Geography \"spatially intersect in 2D\" - (share any portion of space) and FALSE if they don't (they are Disjoint). For geography tolerance is 0.00001 meters (so any points that close are considered to intersect)\nOR\n[raster] Return true if raster rastA spatially intersects raster rastB.'''),
('ST_OrderingEquals', None,
'''Returns true if the given geometries represent the same geometry and points are in the same directional order.'''),
('ST_Overlaps', None,
'''[geometry] Returns TRUE if the Geometries share space, are of the same dimension, but are not completely contained by each other.\nOR\n[raster] Return true if raster rastA and rastB intersect but one does not completely contain the other.'''),
('ST_PointInsideCircle', None,
'''Is the point geometry inside the circle defined by center_x, center_y, radius'''),
('ST_Relate', None,
'''Returns true if this Geometry is spatially related to anotherGeometry, by testing for intersections between the Interior, Boundary and Exterior of the two geometries as specified by the values in the intersectionMatrixPattern. If no intersectionMatrixPattern is passed in, then returns the maximum intersectionMatrixPattern that relates the 2 geometries.'''),
('ST_RelateMatch', None,
'''Returns true if intersectionMatrixPattern1 implies intersectionMatrixPattern2'''),
('ST_Touches', None,
'''[geometry] Returns TRUE if the geometries have at least one point in common, but their interiors do not intersect.\nOR\n[raster] Return true if raster rastA and rastB have at least one point in common but their interiors do not intersect.'''),
('ST_Within', None,
'''[geometry] Returns true if the geometry A is completely inside geometry B\nOR\n[raster] Return true if no points of raster rastA lie in the exterior of raster rastB and at least one point of the interior of rastA lies in the interior of rastB.'''),
('ST_3DDWithin', None,
'''For 3d (z) geometry type Returns true if two geometries 3d distance is within number of units.'''),
('ST_3DDFullyWithin', None,
'''Returns true if all of the 3D geometries are within the specified distance of one another.'''),
('ST_DFullyWithin', None,
'''[geometry] Returns true if all of the geometries are within the specified distance of one another\nOR\n[raster] Return true if rasters rastA and rastB are fully within the specified distance of each other.'''),
('ST_DWithin', None,
'''[geometry] Returns true if the geometries are within the specified distance of one another. For geometry units are in those of spatial reference and for geography units are in meters and measurement is defaulted to use_spheroid=true (measure around spheroid), for faster check, use_spheroid=false to measure along sphere.\nOR\n[raster] Return true if rasters rastA and rastB are within the specified distance of each other.'''),
('ST_Area', None,
'''Returns the area of a polygonal geometry.'''),
('ST_Azimuth', None,
'''Returns the north-based azimuth as the angle in radians measured clockwise from the vertical on pointA to pointB.'''),
('ST_Angle', None,
'''Returns the angle between 3 points, or between 2 vectors (4 points or 2 lines).'''),
('ST_ClosestPoint', types.Geometry,
'''Returns the 2D point on g1 that is closest to g2. This is the first point of the shortest line.'''),
('ST_3DClosestPoint', types.Geometry,
'''Returns the 3D point on g1 that is closest to g2. This is the first point of the 3D shortest line.'''),
('ST_Distance', None,
'''Returns the distance between two geometry or geography values.'''),
('ST_3DDistance', None,
'''Returns the 3D cartesian minimum distance (based on spatial ref) between two geometries in projected units.'''),
('ST_DistanceSphere', None,
'''Returns minimum distance in meters between two lon/lat geometries using a spherical earth model.'''),
('ST_DistanceSpheroid', None,
'''Returns the minimum distance between two lon/lat geometries using a spheroidal earth model.'''),
('ST_FrechetDistance', None,
'''Returns the Fréchet distance between two geometries.'''),
('ST_HausdorffDistance', None,
'''Returns the Hausdorff distance between two geometries.'''),
('ST_Length', None,
'''Returns the 2D length of a linear geometry.'''),
('ST_Length2D', None,
'''Returns the 2D length of a linear geometry. Alias for ST_Length'''),
('ST_3DLength', None,
'''Returns the 3D length of a linear geometry.'''),
('ST_LengthSpheroid', None,
'''Returns the 2D or 3D length/perimeter of a lon/lat geometry on a spheroid.'''),
('ST_LongestLine', types.Geometry,
'''Returns the 2D longest line between two geometries.'''),
('ST_3DLongestLine', types.Geometry,
'''Returns the 3D longest line between two geometries'''),
('ST_MaxDistance', None,
'''Returns the 2D largest distance between two geometries in projected units.'''),
('ST_3DMaxDistance', None,
'''Returns the 3D cartesian maximum distance (based on spatial ref) between two geometries in projected units.'''),
('ST_MinimumClearance', None,
'''Returns the minimum clearance of a geometry, a measure of a geometry's robustness.'''),
('ST_MinimumClearanceLine', types.Geometry,
'''Returns the two-point LineString spanning a geometry's minimum clearance.'''),
('ST_Perimeter', None,
'''Returns the length of the boundary of a polygonal geometry or geography.'''),
('ST_Perimeter2D', None,
'''Returns the 2D perimeter of a polygonal geometry. Alias for ST_Perimeter.'''),
('ST_3DPerimeter', None,
'''Returns the 3D perimeter of a polygonal geometry.'''),
('ST_Project', types.Geography,
'''Returns a point projected from a start point by a distance and bearing (azimuth).'''),
('ST_ShortestLine', types.Geometry,
'''Returns the 2D shortest line between two geometries'''),
('ST_3DShortestLine', types.Geometry,
'''Returns the 3D shortest line between two geometries'''),
('ST_Buffer', types.Geometry,
'''(T) Returns a geometry covering all points within a given distance from the input geometry.'''),
('ST_BuildArea', types.Geometry,
'''Creates an areal geometry formed by the constituent linework of given geometry'''),
('ST_Centroid', types.Geometry,
'''Returns the geometric center of a geometry.'''),
('ST_ClipByBox2D', types.Geometry,
'''Returns the portion of a geometry falling within a rectangle.'''),
('ST_ConcaveHull', types.Geometry,
'''The concave hull of a geometry represents a possibly concave geometry that encloses all geometries within the set. You can think of it as shrink wrapping.'''),
('ST_ConvexHull', types.Geometry,
'''[geometry] Computes the convex hull of a geometry.\nOR\n[raster] Return the convex hull geometry of the raster including pixel values equal to BandNoDataValue. For regular shaped and non-skewed rasters, this gives the same result as ST_Envelope so only useful for irregularly shaped or skewed rasters.'''),
('ST_CurveToLine', types.Geometry,
'''Converts a CIRCULARSTRING/CURVEPOLYGON/MULTISURFACE to a LINESTRING/POLYGON/MULTIPOLYGON'''),
('ST_DelaunayTriangles', types.Geometry,
'''Return a Delaunay triangulation around the given input points.'''),
('ST_Difference', types.Geometry,
'''Returns a geometry that represents that part of geometry A that does not intersect with geometry B.'''),
('ST_FlipCoordinates', types.Geometry,
'''Returns a version of the given geometry with X and Y axis flipped. Useful for people who have built latitude/longitude features and need to fix them.'''),
('ST_GeneratePoints', types.Geometry,
'''Converts a polygon or multi-polygon into a multi-point composed of randomly located points within the original areas.'''),
('ST_GeometricMedian', types.Geometry,
'''Returns the geometric median of a MultiPoint.'''),
('ST_Intersection', types.Geometry,
'''[geometry] (T) Returns a geometry that represents the shared portion of geomA and geomB.\nOR\n[raster] Returns a raster or a set of geometry-pixelvalue pairs representing the shared portion of two rasters or the geometrical intersection of a vectorization of the raster and a geometry.'''),
('ST_LineToCurve', types.Geometry,
'''Converts a LINESTRING/POLYGON to a CIRCULARSTRING, CURVEPOLYGON'''),
('ST_MakeValid', types.Geometry,
'''Attempts to make an invalid geometry valid without losing vertices.'''),
('ST_MemUnion', types.Geometry,
'''Same as ST_Union, only memory-friendly (uses less memory and more processor time).'''),
('ST_MinimumBoundingCircle', types.Geometry,
'''Returns the smallest circle polygon that can fully contain a geometry. Default uses 48 segments per quarter circle.'''),
('ST_MinimumBoundingRadius', None,
'''Returns the center point and radius of the smallest circle that can fully contain a geometry.'''),
('ST_OrientedEnvelope', types.Geometry,
'''Returns a minimum rotated rectangle enclosing a geometry.'''),
('ST_Polygonize', types.Geometry,
'''Aggregate. Creates a GeometryCollection containing possible polygons formed from the constituent linework of a set of geometries.'''),
('ST_Node', types.Geometry,
'''Node a set of linestrings.'''),
('ST_OffsetCurve', types.Geometry,
'''Return an offset line at a given distance and side from an input line. Useful for computing parallel lines about a center line'''),
('ST_PointOnSurface', types.Geometry,
'''Returns a POINT guaranteed to lie on the surface.'''),
('ST_RemoveRepeatedPoints', types.Geometry,
'''Returns a version of the given geometry with duplicated points removed.'''),
('ST_SharedPaths', types.Geometry,
'''Returns a collection containing paths shared by the two input linestrings/multilinestrings.'''),
('ST_ShiftLongitude', types.Geometry,
('''Toggle geometry coordinates between -180..180 and 0..360 ranges.''', 'ST_Shift_Longitude')),
('ST_WrapX', types.Geometry,
'''Wrap a geometry around an X value.'''),
('ST_Simplify', types.Geometry,
'''Returns a \"simplified\" version of the given geometry using the Douglas-Peucker algorithm.'''),
('ST_SimplifyPreserveTopology', types.Geometry,
'''Returns a \"simplified\" version of the given geometry using the Douglas-Peucker algorithm. Will avoid creating derived geometries (polygons in particular) that are invalid.'''),
('ST_SimplifyVW', types.Geometry,
'''Returns a \"simplified\" version of the given geometry using the Visvalingam-Whyatt algorithm'''),
('ST_ChaikinSmoothing', types.Geometry,
'''Returns a \"smoothed\" version of the given geometry using the Chaikin algorithm'''),
('ST_FilterByM', types.Geometry,
'''Filters vertex points based on their m-value'''),
('ST_SetEffectiveArea', types.Geometry,
'''Sets the effective area for each vertex, storing the value in the M ordinate. A simplified geometry can then be generated by filtering on the M ordinate.'''),
('ST_Split', types.Geometry,
'''Returns a collection of geometries resulting by splitting a geometry.'''),
('ST_SymDifference', types.Geometry,
'''Returns a geometry that represents the portions of A and B that do not intersect. It is called a symmetric difference because ST_SymDifference(A,B) = ST_SymDifference(B,A).'''),
('ST_Subdivide', types.Geometry,
'''Returns a set of geometry where no geometry in the set has more than the specified number of vertices.'''),
('ST_Union', types.Geometry,
'''[geometry] Returns a geometry that represents the point set union of the Geometries.\nOR\n[raster] Returns the union of a set of raster tiles into a single raster composed of 1 or more bands.'''),
('ST_UnaryUnion', types.Geometry,
'''Like ST_Union, but working at the geometry component level.'''),
('ST_VoronoiLines', types.Geometry,
'''Returns the boundaries between the cells of the Voronoi diagram constructed from the vertices of a geometry.'''),
('ST_VoronoiPolygons', types.Geometry,
'''Returns the cells of the Voronoi diagram constructed from the vertices of a geometry.'''),
('ST_Affine', types.Geometry,
'''Apply a 3D affine transformation to a geometry.'''),
('ST_Rotate', types.Geometry,
'''Rotates a geometry about an origin point.'''),
('ST_RotateX', types.Geometry,
'''Rotates a geometry about the X axis.'''),
('ST_RotateY', types.Geometry,
'''Rotates a geometry about the Y axis.'''),
('ST_RotateZ', types.Geometry,
'''Rotates a geometry about the Z axis.'''),
('ST_Scale', types.Geometry,
'''Scales a geometry by given factors.'''),
('ST_Translate', types.Geometry,
'''Translates a geometry by given offsets.'''),
('ST_TransScale', types.Geometry,
'''Translates and scales a geometry by given offsets and factors.'''),
('ST_ClusterDBSCAN', None,
'''Window function that returns a cluster id for each input geometry using the DBSCAN algorithm.'''),
('ST_ClusterIntersecting', types.Geometry,
'''Aggregate function that clusters the input geometries into connected sets.'''),
('ST_ClusterKMeans', None,
'''Window function that returns a cluster id for each input geometry using the K-means algorithm.'''),
('ST_ClusterWithin', types.Geometry,
'''Aggregate function that clusters the input geometries by separation distance.'''),
('Box2D', types.Geometry,
('''Returns a BOX2D representing the 2D extent of the geometry.''', 'Box2D_type')),
('Box3D', types.Geometry,
('''[geometry] Returns a BOX3D representing the 3D extent of the geometry.\nOR\n[raster] Returns the box 3d representation of the enclosing box of the raster.''', 'Box3D_type')),
('ST_EstimatedExtent', types.Geometry,
'''Return the 'estimated' extent of a spatial table.'''),
('ST_Expand', types.Geometry,
'''Returns a bounding box expanded from another bounding box or a geometry.'''),
('ST_Extent', types.Geometry,
'''an aggregate function that returns the bounding box that bounds rows of geometries.'''),
('ST_3DExtent', types.Geometry,
'''an aggregate function that returns the 3D bounding box that bounds rows of geometries.'''),
('ST_MakeBox2D', types.Geometry,
'''Creates a BOX2D defined by two 2D point geometries.'''),
('ST_3DMakeBox', types.Geometry,
'''Creates a BOX3D defined by two 3D point geometries.'''),
('ST_XMax', None,
'''Returns the X maxima of a 2D or 3D bounding box or a geometry.'''),
('ST_XMin', None,
'''Returns the X minima of a 2D or 3D bounding box or a geometry.'''),
('ST_YMax', None,
'''Returns the Y maxima of a 2D or 3D bounding box or a geometry.'''),
('ST_YMin', None,
'''Returns the Y minima of a 2D or 3D bounding box or a geometry.'''),
('ST_ZMax', None,
'''Returns the Z maxima of a 2D or 3D bounding box or a geometry.'''),
('ST_ZMin', None,
'''Returns the Z minima of a 2D or 3D bounding box or a geometry.'''),
('ST_LineInterpolatePoint', types.Geometry,
'''Returns a point interpolated along a line. Second argument is a float8 between 0 and 1 representing fraction of total length of linestring the point has to be located.'''),
('ST_3DLineInterpolatePoint', types.Geometry,
'''Returns a point interpolated along a line in 3D. Second argument is a float8 between 0 and 1 representing fraction of total length of linestring the point has to be located.'''),
('ST_LineInterpolatePoints', types.Geometry,
'''Returns one or more points interpolated along a line.'''),
('ST_LineLocatePoint', None,
'''Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.'''),
('ST_LineSubstring', types.Geometry,
'''Return a linestring being a substring of the input one starting and ending at the given fractions of total 2d length. Second and third arguments are float8 values between 0 and 1.'''),
('ST_LocateAlong', types.Geometry,
'''Return a derived geometry collection value with elements that match the specified measure. Polygonal elements are not supported.'''),
('ST_LocateBetween', types.Geometry,
'''Return a derived geometry collection value with elements that match the specified range of measures inclusively.'''),
('ST_LocateBetweenElevations', types.Geometry,
'''Return a derived geometry (collection) value with elements that intersect the specified range of elevations inclusively.'''),
('ST_InterpolatePoint', None,
'''Return the value of the measure dimension of a geometry at the point closest to the provided point.'''),
('ST_AddMeasure', types.Geometry,
'''Return a derived geometry with measure elements linearly interpolated between the start and end points.'''),
('ST_IsValidTrajectory', None,
'''Returns true if the geometry is a valid trajectory.'''),
('ST_ClosestPointOfApproach', None,
'''Returns the measure at which points interpolated along two trajectories are closest.'''),
('ST_DistanceCPA', None,
'''Returns the distance between the closest point of approach of two trajectories.'''),
('ST_CPAWithin', None,
'''Returns true if the closest point of approach of two trajectories is within the specified distance.'''),
('postgis_sfcgal_version', None,
'''Returns the version of SFCGAL in use'''),
('ST_Extrude', types.Geometry,
'''Extrude a surface to a related volume'''),
('ST_StraightSkeleton', types.Geometry,
'''Compute a straight skeleton from a geometry'''),
('ST_ApproximateMedialAxis', types.Geometry,
'''Compute the approximate medial axis of an areal geometry.'''),
('ST_IsPlanar', None,
'''Check if a surface is or not planar'''),
('ST_Orientation', None,
'''Determine surface orientation'''),
('ST_ForceLHR', types.Geometry,
'''Force LHR orientation'''),
('ST_MinkowskiSum', types.Geometry,
'''Performs Minkowski sum'''),
('ST_ConstrainedDelaunayTriangles', types.Geometry,
'''Return a constrained Delaunay triangulation around the given input geometry.'''),
('ST_3DIntersection', types.Geometry,
'''Perform 3D intersection'''),
('ST_3DDifference', types.Geometry,
'''Perform 3D difference'''),
('ST_3DUnion', types.Geometry,
'''Perform 3D union'''),
('ST_3DArea', None,
'''Computes area of 3D surface geometries. Will return 0 for solids.'''),
('ST_Tesselate', types.Geometry,
'''Perform surface Tesselation of a polygon or polyhedralsurface and returns as a TIN or collection of TINS'''),
('ST_Volume', None,
'''Computes the volume of a 3D solid. If applied to surface (even closed) geometries will return 0.'''),
('ST_MakeSolid', types.Geometry,
'''Cast the geometry into a solid. No check is performed. To obtain a valid solid, the input geometry must be a closed Polyhedral Surface or a closed TIN.'''),
('ST_IsSolid', None,
'''Test if the geometry is a solid. No validity check is performed.'''),
('AddAuth', None,
'''Adds an authorization token to be used in the current transaction.'''),
('CheckAuth', None,
'''Creates a trigger on a table to prevent/allow updates and deletes of rows based on authorization token.'''),
('DisableLongTransactions', None,
'''Disables long transaction support.'''),
('EnableLongTransactions', None,
'''Enables long transaction support.'''),
('LockRow', None,
'''Sets lock/authorization for a row in a table.'''),
('UnlockRows', None,
'''Removes all locks held by an authorization token.'''),
('PostGIS_Extensions_Upgrade', None,
'''Packages and upgrades postgis extensions (e.g. postgis_raster, postgis_topology, postgis_sfcgal) to latest available version.'''),
('PostGIS_Full_Version', None,
'''Reports full postgis version and build configuration infos.'''),
('PostGIS_GEOS_Version', None,
'''Returns the version number of the GEOS library.'''),
('PostGIS_Liblwgeom_Version', None,
'''Returns the version number of the liblwgeom library. This should match the version of PostGIS.'''),
('PostGIS_LibXML_Version', None,
'''Returns the version number of the libxml2 library.'''),
('PostGIS_Lib_Build_Date', None,
'''Returns build date of the PostGIS library.'''),
('PostGIS_Lib_Version', None,
'''Returns the version number of the PostGIS library.'''),
('PostGIS_PROJ_Version', None,
'''Returns the version number of the PROJ4 library.'''),
('PostGIS_Wagyu_Version', None,
'''Returns the version number of the internal Wagyu library.'''),
('PostGIS_Scripts_Build_Date', None,
'''Returns build date of the PostGIS scripts.'''),
('PostGIS_Scripts_Installed', None,
'''Returns version of the postgis scripts installed in this database.'''),
('PostGIS_Scripts_Released', None,
'''Returns the version number of the postgis.sql script released with the installed postgis lib.'''),
('PostGIS_Version', None,
'''Returns PostGIS version number and compile-time options.'''),
('postgis.backend', None,
('''The backend to service a function where GEOS and SFCGAL overlap. Options: geos or sfcgal. Defaults to geos.''', 'postgis_backend')),
('postgis.gdal_datapath', None,
('''A configuration option to assign the value of GDAL's GDAL_DATA option. If not set, the environmentally set GDAL_DATA variable is used.''', 'postgis_gdal_datapath')),
('postgis.gdal_enabled_drivers', None,
('''A configuration option to set the enabled GDAL drivers in the PostGIS environment. Affects the GDAL configuration variable GDAL_SKIP.''', 'postgis_gdal_enabled_drivers')),
('postgis.enable_outdb_rasters', None,
('''A boolean configuration option to enable access to out-db raster bands.''', 'postgis_enable_outdb_rasters')),
('PostGIS_AddBBox', types.Geometry,
'''Add bounding box to the geometry.'''),
('PostGIS_DropBBox', types.Geometry,
'''Drop the bounding box cache from the geometry.'''),
('PostGIS_HasBBox', None,
'''Returns TRUE if the bbox of this geometry is cached, FALSE otherwise.'''),
('ST_AddBand', types.Raster,
('''Returns a raster with the new band(s) of given type added with given initial value in the given index location. If no index is specified, the band is added to the end.''', 'RT_ST_AddBand')),
('ST_AsRaster', types.Raster,
('''Converts a PostGIS geometry to a PostGIS raster.''', 'RT_ST_AsRaster')),
('ST_Band', types.Raster,
('''Returns one or more bands of an existing raster as a new raster. Useful for building new rasters from existing rasters.''', 'RT_ST_Band')),
('ST_MakeEmptyCoverage', types.Raster,
('''Cover georeferenced area with a grid of empty raster tiles.''', 'RT_ST_MakeEmptyCoverage')),
('ST_MakeEmptyRaster', types.Raster,
('''Returns an empty raster (having no bands) of given dimensions (width & height), upperleft X and Y, pixel size and rotation (scalex, scaley, skewx & skewy) and reference system (srid). If a raster is passed in, returns a new raster with the same size, alignment and SRID. If srid is left out, the spatial ref is set to unknown (0).''', 'RT_ST_MakeEmptyRaster')),
('ST_Tile', types.Raster,
('''Returns a set of rasters resulting from the split of the input raster based upon the desired dimensions of the output rasters.''', 'RT_ST_Tile')),
('ST_Retile', types.Raster,
('''Return a set of configured tiles from an arbitrarily tiled raster coverage.''', 'RT_ST_Retile')),
('ST_FromGDALRaster', types.Raster,
('''Returns a raster from a supported GDAL raster file.''', 'RT_ST_FromGDALRaster')),
('ST_GeoReference', None,
('''Returns the georeference meta data in GDAL or ESRI format as commonly seen in a world file. Default is GDAL.''', 'RT_ST_GeoReference')),
('ST_Height', None,
('''Returns the height of the raster in pixels.''', 'RT_ST_Height')),
('ST_MetaData', None,
('''Returns basic meta data about a raster object such as pixel size, rotation (skew), upper, lower left, etc.''', 'RT_ST_MetaData')),
('ST_NumBands', None,
('''Returns the number of bands in the raster object.''', 'RT_ST_NumBands')),
('ST_PixelHeight', None,
('''Returns the pixel height in geometric units of the spatial reference system.''', 'RT_ST_PixelHeight')),
('ST_PixelWidth', None,
('''Returns the pixel width in geometric units of the spatial reference system.''', 'RT_ST_PixelWidth')),
('ST_ScaleX', None,
('''Returns the X component of the pixel width in units of coordinate reference system.''', 'RT_ST_ScaleX')),
('ST_ScaleY', None,
('''Returns the Y component of the pixel height in units of coordinate reference system.''', 'RT_ST_ScaleY')),
('ST_RasterToWorldCoord', None,
('''Returns the raster's upper left corner as geometric X and Y (longitude and latitude) given a column and row. Column and row starts at 1.''', 'RT_ST_RasterToWorldCoord')),
('ST_RasterToWorldCoordX', None,
('''Returns the geometric X coordinate upper left of a raster, column and row. Numbering of columns and rows starts at 1.''', 'RT_ST_RasterToWorldCoordX')),
('ST_RasterToWorldCoordY', None,
('''Returns the geometric Y coordinate upper left corner of a raster, column and row. Numbering of columns and rows starts at 1.''', 'RT_ST_RasterToWorldCoordY')),
('ST_Rotation', None,
('''Returns the rotation of the raster in radian.''', 'RT_ST_Rotation')),
('ST_SkewX', None,
('''Returns the georeference X skew (or rotation parameter).''', 'RT_ST_SkewX')),
('ST_SkewY', None,
('''Returns the georeference Y skew (or rotation parameter).''', 'RT_ST_SkewY')),
('ST_UpperLeftX', None,
('''Returns the upper left X coordinate of raster in projected spatial ref.''', 'RT_ST_UpperLeftX')),
('ST_UpperLeftY', None,
('''Returns the upper left Y coordinate of raster in projected spatial ref.''', 'RT_ST_UpperLeftY')),
('ST_Width', None,
('''Returns the width of the raster in pixels.''', 'RT_ST_Width')),
('ST_WorldToRasterCoord', None,
('''Returns the upper left corner as column and row given geometric X and Y (longitude and latitude) or a point geometry expressed in the spatial reference coordinate system of the raster.''', 'RT_ST_WorldToRasterCoord')),
('ST_WorldToRasterCoordX', None,
('''Returns the column in the raster of the point geometry (pt) or a X and Y world coordinate (xw, yw) represented in world spatial reference system of raster.''', 'RT_ST_WorldToRasterCoordX')),
('ST_WorldToRasterCoordY', None,
('''Returns the row in the raster of the point geometry (pt) or a X and Y world coordinate (xw, yw) represented in world spatial reference system of raster.''', 'RT_ST_WorldToRasterCoordY')),
('ST_BandMetaData', None,
('''Returns basic meta data for a specific raster band. Band num 1 is assumed if none is specified.''', 'RT_ST_BandMetaData')),
('ST_BandNoDataValue', None,
('''Returns the value in a given band that represents no data. If no band is specified, band num 1 is assumed.''', 'RT_ST_BandNoDataValue')),
('ST_BandIsNoData', None,
('''Returns true if the band is filled with only nodata values.''', 'RT_ST_BandIsNoData')),
('ST_BandPath', None,
('''Returns system file path to a band stored in file system. If no bandnum specified, 1 is assumed.''', 'RT_ST_BandPath')),
('ST_BandFileSize', None,
('''Returns the file size of a band stored in file system. If no bandnum specified, 1 is assumed.''', 'RT_ST_BandFileSize')),
('ST_BandFileTimestamp', None,
('''Returns the file timestamp of a band stored in file system. If no bandnum specified, 1 is assumed.''', 'RT_ST_BandFileTimestamp')),
('ST_BandPixelType', None,
('''Returns the type of pixel for given band. If no bandnum specified, 1 is assumed.''', 'RT_ST_BandPixelType')),
('ST_MinPossibleValue', None,
'''Returns the minimum value this pixeltype can store.'''),
('ST_HasNoBand', None,
('''Returns true if there is no band with given band number. If no band number is specified, then band number 1 is assumed.''', 'RT_ST_HasNoBand')),
('ST_PixelAsPolygon', types.Geometry,
('''Returns the polygon geometry that bounds the pixel for a particular row and column.''', 'RT_ST_PixelAsPolygon')),
('ST_PixelAsPolygons', None,
('''Returns the polygon geometry that bounds every pixel of a raster band along with the value, the X and the Y raster coordinates of each pixel.''', 'RT_ST_PixelAsPolygons')),
('ST_PixelAsPoint', types.Geometry,
('''Returns a point geometry of the pixel's upper-left corner.''', 'RT_ST_PixelAsPoint')),
('ST_PixelAsPoints', None,
('''Returns a point geometry for each pixel of a raster band along with the value, the X and the Y raster coordinates of each pixel. The coordinates of the point geometry are of the pixel's upper-left corner.''', 'RT_ST_PixelAsPoints')),
('ST_PixelAsCentroid', types.Geometry,
('''Returns the centroid (point geometry) of the area represented by a pixel.''', 'RT_ST_PixelAsCentroid')),
('ST_PixelAsCentroids', None,
('''Returns the centroid (point geometry) for each pixel of a raster band along with the value, the X and the Y raster coordinates of each pixel. The point geometry is the centroid of the area represented by a pixel.''', 'RT_ST_PixelAsCentroids')),
('ST_Value', None,
('''Returns the value of a given band in a given columnx, rowy pixel or at a particular geometric point. Band numbers start at 1 and are assumed to be 1 if not specified. If exclude_nodata_value is set to false, then all pixels, including nodata pixels, are considered to intersect and return a value. If exclude_nodata_value is not passed in, then it is read from the metadata of the raster.''', 'RT_ST_Value')),
('ST_NearestValue', None,
('''Returns the nearest non-NODATA value of a given band's pixel specified by a columnx and rowy or a geometric point expressed in the same spatial reference coordinate system as the raster.''', 'RT_ST_NearestValue')),
('ST_Neighborhood', None,
('''Returns a 2-D double precision array of the non-NODATA values around a given band's pixel specified by either a columnX and rowY or a geometric point expressed in the same spatial reference coordinate system as the raster.''', 'RT_ST_Neighborhood')),
('ST_SetValue', types.Raster,
('''Returns modified raster resulting from setting the value of a given band in a given columnx, rowy pixel or the pixels that intersect a particular geometry. Band numbers start at 1 and assumed to be 1 if not specified.''', 'RT_ST_SetValue')),
('ST_SetValues', types.Raster,
('''Returns modified raster resulting from setting the values of a given band.''', 'RT_ST_SetValues')),
('ST_DumpValues', None,
('''Get the values of the specified band as a 2-dimension array.''', 'RT_ST_DumpValues')),
('ST_PixelOfValue', None,
('''Get the columnx, rowy coordinates of the pixel whose value equals the search value.''', 'RT_ST_PixelOfValue')),
('ST_SetGeoReference', types.Raster,
('''Set Georeference 6 georeference parameters in a single call. Numbers should be separated by white space. Accepts inputs in GDAL or ESRI format. Default is GDAL.''', 'RT_ST_SetGeoReference')),
('ST_SetRotation', types.Raster,
('''Set the rotation of the raster in radian.''', 'RT_ST_SetRotation')),
('ST_SetScale', types.Raster,
('''Sets the X and Y size of pixels in units of coordinate reference system. Number units/pixel width/height.''', 'RT_ST_SetScale')),
('ST_SetSkew', types.Raster,
('''Sets the georeference X and Y skew (or rotation parameter). If only one is passed in, sets X and Y to the same value.''', 'RT_ST_SetSkew')),
('ST_SetUpperLeft', types.Raster,
('''Sets the value of the upper left corner of the pixel of the raster to projected X and Y coordinates.''', 'RT_ST_SetUpperLeft')),
('ST_Resample', types.Raster,
('''Resample a raster using a specified resampling algorithm, new dimensions, an arbitrary grid corner and a set of raster georeferencing attributes defined or borrowed from another raster.''', 'RT_ST_Resample')),
('ST_Rescale', types.Raster,
('''Resample a raster by adjusting only its scale (or pixel size). New pixel values are computed using the NearestNeighbor (english or american spelling), Bilinear, Cubic, CubicSpline or Lanczos resampling algorithm. Default is NearestNeighbor.''', 'RT_ST_Rescale')),
('ST_Reskew', types.Raster,
('''Resample a raster by adjusting only its skew (or rotation parameters). New pixel values are computed using the NearestNeighbor (english or american spelling), Bilinear, Cubic, CubicSpline or Lanczos resampling algorithm. Default is NearestNeighbor.''', 'RT_ST_Reskew')),
('ST_Resize', types.Raster,
('''Resize a raster to a new width/height''', 'RT_ST_Resize')),
('ST_SetBandNoDataValue', types.Raster,
('''Sets the value for the given band that represents no data. Band 1 is assumed if no band is specified. To mark a band as having no nodata value, set the nodata value = NULL.''', 'RT_ST_SetBandNoDataValue')),
('ST_SetBandIsNoData', types.Raster,
('''Sets the isnodata flag of the band to TRUE.''', 'RT_ST_SetBandIsNoData')),
('ST_SetBandPath', types.Raster,
('''Update the external path and band number of an out-db band''', 'RT_ST_SetBandPath')),
('ST_SetBandIndex', types.Raster,
('''Update the external band number of an out-db band''', 'RT_ST_SetBandIndex')),
('ST_Count', None,
('''Returns the number of pixels in a given band of a raster or raster coverage. If no band is specified defaults to band 1. If exclude_nodata_value is set to true, will only count pixels that are not equal to the nodata value.''', 'RT_ST_Count')),
('ST_CountAgg', None,
('''Aggregate. Returns the number of pixels in a given band of a set of rasters. If no band is specified defaults to band 1. If exclude_nodata_value is set to true, will only count pixels that are not equal to the NODATA value.''', 'RT_ST_CountAgg')),
('ST_Histogram', None,
('''Returns a set of records summarizing a raster or raster coverage data distribution over separate bin ranges. The number of bins is autocomputed if not specified.''', 'RT_ST_Histogram')),
('ST_Quantile', None,
('''Compute quantiles for a raster or raster table coverage in the context of the sample or population. Thus, a value could be examined to be at the raster's 25%, 50%, 75% percentile.''', 'RT_ST_Quantile')),
('ST_SummaryStats', None,
('''Returns summarystats consisting of count, sum, mean, stddev, min, max for a given raster band of a raster or raster coverage. Band 1 is assumed if no band is specified.''', 'RT_ST_SummaryStats')),
('ST_SummaryStatsAgg', None,
('''Aggregate. Returns summarystats consisting of count, sum, mean, stddev, min, max for a given raster band of a set of rasters. Band 1 is assumed if no band is specified.''', 'RT_ST_SummaryStatsAgg')),
('ST_ValueCount', None,
('''Returns a set of records containing a pixel band value and count of the number of pixels in a given band of a raster (or a raster coverage) that have a given set of values. If no band is specified, defaults to band 1. By default nodata value pixels are not counted, and all other values in the pixel are output; pixel band values are rounded to the nearest integer.''', 'RT_ST_ValueCount')),
('ST_RastFromWKB', types.Raster,
('''Return a raster value from a Well-Known Binary (WKB) raster.''', 'RT_ST_RastFromWKB')),
('ST_RastFromHexWKB', types.Raster,
('''Return a raster value from a Hex representation of Well-Known Binary (WKB) raster.''', 'RT_ST_RastFromHexWKB')),
('ST_AsBinary/ST_AsWKB', None,
('''Return the Well-Known Binary (WKB) representation of the raster.''', 'RT_ST_AsBinary')),
('ST_AsHexWKB', None,
('''Return the Well-Known Binary (WKB) in Hex representation of the raster.''', 'RT_ST_AsHexWKB')),
('ST_AsGDALRaster', None,
('''Return the raster tile in the designated GDAL Raster format. Raster formats are one of those supported by your compiled library. Use ST_GDALDrivers() to get a list of formats supported by your library.''', 'RT_ST_AsGDALRaster')),
('ST_AsJPEG', None,
('''Return the raster tile selected bands as a single Joint Photographic Experts Group (JPEG) image (byte array). If no band is specified and 1 or more than 3 bands, then only the first band is used. If only 3 bands then all 3 bands are used and mapped to RGB.''', 'RT_ST_AsJPEG')),
('ST_AsPNG', None,
('''Return the raster tile selected bands as a single portable network graphics (PNG) image (byte array). If 1, 3, or 4 bands in raster and no bands are specified, then all bands are used. If 2 or more than 4 bands and no bands are specified, then only band 1 is used. Bands are mapped to RGB or RGBA space.''', 'RT_ST_AsPNG')),
('ST_AsTIFF', None,
('''Return the raster selected bands as a single TIFF image (byte array). If no band is specified or any of specified bands does not exist in the raster, then will try to use all bands.''', 'RT_ST_AsTIFF')),
('ST_Clip', types.Raster,
('''Returns the raster clipped by the input geometry. If band number is not specified, all bands are processed. If crop is not specified or TRUE, the output raster is cropped.''', 'RT_ST_Clip')),
('ST_ColorMap', types.Raster,
('''Creates a new raster of up to four 8BUI bands (grayscale, RGB, RGBA) from the source raster and a specified band. Band 1 is assumed if not specified.''', 'RT_ST_ColorMap')),
('ST_Grayscale', types.Raster,
('''Creates a new one-8BUI band raster from the source raster and specified bands representing Red, Green and Blue''', 'RT_ST_Grayscale')),
('ST_MapAlgebra', None,
('''[raster] Callback function version - Returns a one-band raster given one or more input rasters, band indexes and one user-specified callback function.\nOR\n[raster] Expression version - Returns a one-band raster given one or two input rasters, band indexes and one or more user-specified SQL expressions.''', 'RT_ST_MapAlgebra')),
('ST_MapAlgebraExpr', types.Raster,
('''[raster] 1 raster band version: Creates a new one band raster formed by applying a valid PostgreSQL algebraic operation on the input raster band and of pixeltype provided. Band 1 is assumed if no band is specified.\nOR\n[raster] 2 raster band version: Creates a new one band raster formed by applying a valid PostgreSQL algebraic operation on the two input raster bands and of pixeltype provided. band 1 of each raster is assumed if no band numbers are specified. The resulting raster will be aligned (scale, skew and pixel corners) on the grid defined by the first raster and have its extent defined by the \"extenttype\" parameter. Values for \"extenttype\" can be: INTERSECTION, UNION, FIRST, SECOND.''', 'RT_ST_MapAlgebraExpr')),
('ST_MapAlgebraFct', types.Raster,
('''[raster] 1 band version - Creates a new one band raster formed by applying a valid PostgreSQL function on the input raster band and of pixeltype provided. Band 1 is assumed if no band is specified.\nOR\n[raster] 2 band version - Creates a new one band raster formed by applying a valid PostgreSQL function on the 2 input raster bands and of pixeltype provided. Band 1 is assumed if no band is specified. Extent type defaults to INTERSECTION if not specified.''', 'RT_ST_MapAlgebraFct')),
('ST_MapAlgebraFctNgb', types.Raster,
('''1-band version: Map Algebra Nearest Neighbor using user-defined PostgreSQL function. Return a raster whose values are the result of a PLPGSQL user function involving a neighborhood of values from the input raster band.''', 'RT_ST_MapAlgebraFctNgb')),
('ST_Reclass', types.Raster,
('''Creates a new raster composed of band types reclassified from original. The nband is the band to be changed. If nband is not specified assumed to be 1. All other bands are returned unchanged. Use case: convert a 16BUI band to a 8BUI and so forth for simpler rendering as viewable formats.''', 'RT_ST_Reclass')),
('ST_Distinct4ma', None,
('''Raster processing function that calculates the number of unique pixel values in a neighborhood.''', 'RT_ST_Distinct4ma')),
('ST_InvDistWeight4ma', None,
('''Raster processing function that interpolates a pixel's value from the pixel's neighborhood.''', 'RT_ST_InvDistWeight4ma')),
('ST_Max4ma', None,
('''Raster processing function that calculates the maximum pixel value in a neighborhood.''', 'RT_ST_Max4ma')),
('ST_Mean4ma', None,
('''Raster processing function that calculates the mean pixel value in a neighborhood.''', 'RT_ST_Mean4ma')),
('ST_Min4ma', None,
('''Raster processing function that calculates the minimum pixel value in a neighborhood.''', 'RT_ST_Min4ma')),
('ST_MinDist4ma', None,
('''Raster processing function that returns the minimum distance (in number of pixels) between the pixel of interest and a neighboring pixel with value.''', 'RT_ST_MinDist4ma')),
('ST_Range4ma', None,
('''Raster processing function that calculates the range of pixel values in a neighborhood.''', 'RT_ST_Range4ma')),
('ST_StdDev4ma', None,
('''Raster processing function that calculates the standard deviation of pixel values in a neighborhood.''', 'RT_ST_StdDev4ma')),
('ST_Sum4ma', None,
('''Raster processing function that calculates the sum of all pixel values in a neighborhood.''', 'RT_ST_Sum4ma')),
('ST_Aspect', types.Raster,
('''Returns the aspect (in degrees by default) of an elevation raster band. Useful for analyzing terrain.''', 'RT_ST_Aspect')),
('ST_HillShade', types.Raster,
('''Returns the hypothetical illumination of an elevation raster band using provided azimuth, altitude, brightness and scale inputs.''', 'RT_ST_HillShade')),
('ST_Roughness', types.Raster,
('''Returns a raster with the calculated \"roughness\" of a DEM.''', 'RT_ST_Roughness')),
('ST_Slope', types.Raster,
('''Returns the slope (in degrees by default) of an elevation raster band. Useful for analyzing terrain.''', 'RT_ST_Slope')),
('ST_TPI', types.Raster,
('''Returns a raster with the calculated Topographic Position Index.''', 'RT_ST_TPI')),
('ST_TRI', types.Raster,
('''Returns a raster with the calculated Terrain Ruggedness Index.''', 'RT_ST_TRI')),
('ST_DumpAsPolygons', None,
('''Returns a set of geomval (geom,val) rows, from a given raster band. If no band number is specified, band num defaults to 1.''', 'RT_ST_DumpAsPolygons')),
('ST_MinConvexHull', types.Geometry,
('''Return the convex hull geometry of the raster excluding NODATA pixels.''', 'RT_ST_MinConvexHull')),
('ST_SameAlignment', None,
('''Returns true if rasters have same skew, scale, spatial ref, and offset (pixels can be put on same grid without cutting into pixels) and false if they don't, with a notice detailing the issue.''', 'RT_ST_SameAlignment')),
('ST_NotSameAlignmentReason', None,
('''Returns text stating if rasters are aligned and if not aligned, a reason why.''', 'RT_ST_NotSameAlignmentReason')),
('ST_Distance_Sphere', None,
'''Returns minimum distance in meters between two lon/lat geometries. Uses a spherical earth and radius of 6370986 meters. Faster than ``ST_Distance_Spheroid``, but less accurate. PostGIS versions prior to 1.5 only implemented for points.'''),
]
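# --- Usage sketch (illustrative, not part of the registry above) ------------
# Assumption: the (name, return type, doc) tuples above feed a GeoAlchemy2-style
# function registry, so each listed function is callable through SQLAlchemy's
# ``func`` namespace and its result carries the listed return type. The
# ``lake`` table below is hypothetical and exists only for this example
# (SQLAlchemy 1.4+ style).
from sqlalchemy import Column, Integer, MetaData, Table, func, select
from geoalchemy2 import Geometry

lake = Table(
    "lake", MetaData(),
    Column("id", Integer, primary_key=True),
    Column("geom", Geometry(geometry_type="POLYGON", srid=4326)),
)

# ST_Buffer is registered with types.Geometry above, so its expression is typed
# as a geometry column; ST_Area has no return type entry (None) and yields a
# plain numeric value.
buffered_query = select(func.ST_Buffer(lake.c.geom, 10.0))
area_query = select(func.ST_Area(lake.c.geom))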
| 75.855769
| 742
| 0.704034
|
344208f3709486556112f73b94798978c0424768
| 3,925
|
py
|
Python
|
polyjit/experiments/sequences/test_genetic2.py
|
PolyJIT/polyjit.experiments
|
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
|
[
"MIT"
] | null | null | null |
polyjit/experiments/sequences/test_genetic2.py
|
PolyJIT/polyjit.experiments
|
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
|
[
"MIT"
] | 3
|
2017-02-02T15:54:52.000Z
|
2017-06-08T03:52:45.000Z
|
polyjit/experiments/sequences/test_genetic2.py
|
PolyJIT/polyjit.experiments
|
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
|
[
"MIT"
] | 1
|
2017-04-01T15:30:16.000Z
|
2017-04-01T15:30:16.000Z
|
"""This module provides unit tests for the module genetic2.py."""
import unittest
import genetic2
class ChromosomeTestCase(unittest.TestCase):
def setUp(self):
self.gene_sequences = [['a', 'a', 'a'], ['b', 'b', 'b'],
['c', 'c', 'c']]
self.seq_to_fitness = {"['a', 'a', 'a']": 1, "['b', 'b', 'b']": 2,
"['c', 'c', 'c']": 3}
def test_chromosome_fitness_calculation(self):
for genes in self.gene_sequences:
chromosome = genetic2.Chromosome(genes, 'test')
chromosome.calculate_fitness_value(self.seq_to_fitness)
key = str(genes)
self.assertTrue(
chromosome.fitness_value == self.seq_to_fitness[key])
def test_chromosome_equals_method(self):
a = genetic2.Chromosome(self.gene_sequences[0], 'test')
b = genetic2.Chromosome(self.gene_sequences[0], 'test')
c = genetic2.Chromosome(self.gene_sequences[0], 'not_test')
d = genetic2.Chromosome(self.gene_sequences[1], 'test')
e = genetic2.Chromosome(self.gene_sequences[1], 'not_test')
self.assertTrue(a == b)
self.assertFalse(a == c)
self.assertFalse(a == d)
self.assertFalse(a == e)
class PopulationTestCase(unittest.TestCase):
def setUp(self):
self.gene_pool = ['a']
self.population = genetic2.Population('test', size=4,
gene_pool=self.gene_pool,
chromosome_size=2)
def test_population_creation(self):
size1 = 4
chromosome_size1 = -1
gene_pool1 = []
population1 = genetic2.Population('test', size1, gene_pool1,
chromosome_size1)
self.assertTrue(population1.size == genetic2.MIN_POPULATION_SIZE)
self.assertTrue(population1.gene_pool == genetic2.DEFAULT_GENE_POOL)
self.assertTrue(population1.chromosome_size == 0)
for chromosome in population1.chromosomes:
self.assertTrue(len(chromosome.genes) == 0)
size2 = 11
chromosome_size2 = 4
gene_pool2 = ['a', 'b']
population2 = genetic2.Population('test', size2, gene_pool2,
chromosome_size2)
self.assertTrue(population2.size == size2)
self.assertFalse(population2.gene_pool == genetic2.DEFAULT_GENE_POOL)
self.assertTrue(population2.chromosome_size == chromosome_size2)
for chromosome in population2.chromosomes:
self.assertTrue(len(chromosome.genes) == chromosome_size2)
def test_simulate_generation(self):
env = 'test'
size = 10
gene_pool = ['a', 'b']
chromosome_size = 2
seq_to_fitness = {"['a', 'a']": 4, "['a', 'b']": 3, "['b', 'a']": 2,
"['b', 'b']": 1}
population = genetic2.Population(env, size, gene_pool, chromosome_size)
chromosomes = [genetic2.Chromosome(['a', 'a'], env),
genetic2.Chromosome(['a', 'b'], env),
genetic2.Chromosome(['a', 'b'], env),
genetic2.Chromosome(['b', 'a'], env),
genetic2.Chromosome(['b', 'a'], env),
genetic2.Chromosome(['b', 'a'], env),
genetic2.Chromosome(['b', 'b'], env),
genetic2.Chromosome(['b', 'b'], env),
genetic2.Chromosome(['b', 'b'], env),
genetic2.Chromosome(['b', 'b'], env)]
population.chromosomes = chromosomes
population.simulate_generation(seq_to_fitness)
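        # Note: the expected fittest chromosome is ['b', 'b'], which maps to the
        # lowest value in seq_to_fitness, so fitness here appears to be minimized.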
self.assertTrue(
population.fittest_chromosome == genetic2.Chromosome(['b', 'b'],
env))
if __name__ == '__main__':
unittest.main()
| 42.663043
| 79
| 0.543439
|
11a16ed140dd2bf6043005f43276774013688714
| 12,411
|
py
|
Python
|
scripts/credit_poisoning.py
|
bogdan-kulynych/pots
|
908e8fe00f0bb53765eff61462f50c43f339b17f
|
[
"MIT"
] | 7
|
2018-12-09T15:44:49.000Z
|
2020-07-10T19:15:41.000Z
|
scripts/credit_poisoning.py
|
spring-epfl/pots
|
908e8fe00f0bb53765eff61462f50c43f339b17f
|
[
"MIT"
] | null | null | null |
scripts/credit_poisoning.py
|
spring-epfl/pots
|
908e8fe00f0bb53765eff61462f50c43f339b17f
|
[
"MIT"
] | null | null | null |
import sys
import pickle
import argparse
import functools
import pandas as pd
import numpy as np
import xxhash
from sklearn.svm import SVC
from tqdm import tqdm, trange
import src.credit_utils as cred
from src.influence import influence_func
_cached_transformations = {}
class ExpContext:
"""Experimental context: datasets, target model.
Args:
df: Full dataset as DataFrame
df_train: Full training dataset as DataFrame
df_test: Full test dataset as DataFrame
df_X: Full dataset as DataFrame without labels
df_y: Dataset labels as a Series
raw_datasets: Datasets as numpy, see :py:`credit_utils.Datasets`
clf: Target classifier
model_params: Model parameters
"""
def __init__(self, seed=1):
"""Prepare data, train the target model."""
self.df, self.df_X, self.df_y = cred.load_dataframes(
"data/german_credit_data.csv"
)
self.raw_datasets = cred.to_numpy_data(self.df_X, self.df_y, seed=seed)
self.df_train = pd.DataFrame(
np.hstack(
[
self.raw_datasets.X_train,
np.expand_dims(self.raw_datasets.y_train, 1),
]
),
columns=list(self.df_X.columns) + ["Risk_good"],
)
self.df_test = pd.DataFrame(
np.hstack(
[self.raw_datasets.X_test, np.expand_dims(self.raw_datasets.y_test, 1)]
),
columns=list(self.df_X.columns) + ["Risk_good"],
)
self.clf, self.model_params = cred.train_model(
self.raw_datasets.X_train,
self.raw_datasets.y_train,
self.raw_datasets.X_test,
self.raw_datasets.y_test,
)
def select_candidates(
exp_context,
target_group_idxs,
num_best_samples=10,
seed=1,
use_influence_func=False,
):
clf = exp_context.clf
X, y = exp_context.raw_datasets.X, exp_context.raw_datasets.y
X_train = exp_context.raw_datasets.X_train
y_train = exp_context.raw_datasets.y_train
candidate_idxs = [
i for i in range(len(X)) if i not in target_group_idxs and y[i]
]
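    # Candidates for poisoning are positive-label examples that lie outside the
    # target group; their transformed copies can later be injected into training.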
X_train_curr = np.array(X_train)
y_train_curr = np.array(y_train)
# Weight examples by their average similarity to the target group.
transformation_wrapper = cred.make_transformation_wrapper(exp_context.df_X.columns)
static_cols = exp_context.df_X.columns[: transformation_wrapper.amount_start_idx]
# sims = [example_similarity(exp_context, target_group_idxs, static_cols, i)
# for i in candidate_idxs]
# weights = softmax(sims)
# sims = [influence(exp_context, target_group_idxs, X[i]) for i in candidate_idxs]
# weights = softmax(sims / np.sum(sims))
# np.random.seed(seed)
# sampled_idxs = np.random.choice(
# candidate_idxs, size=len(candidate_idxs),
# replace=False, p=weights)
sampled_idxs = sorted(
candidate_idxs,
key=lambda i: -cred.example_similarity(
exp_context, target_group_idxs, static_cols, i
),
)
# sampled_idxs = sorted(candidate_idxs, key=lambda i: -influence(
# exp_context, target_group_idxs, X[i]))
if num_best_samples is not None:
sampled_idxs = sampled_idxs[:num_best_samples]
scored_examples = []
influence_data = pd.DataFrame(columns=["influence", "score", "acc"])
index_progbar = tqdm(sampled_idxs)
for i in index_progbar:
x = X[i]
if i in _cached_transformations:
transformations = _cached_transformations[i]
else:
transformations = cred.generate_all_transformations(
x, exp_context.df_X.columns,
transformation_kwargs=dict(
decrease_amount=True,
decrease_duration=True))
_cached_transformations[i] = list(transformations)
# Pick the transformation with the highest influence.
best_inf_val = 0
best_example = None
for t in transformations:
# Check if this transformation would be accepted.
if clf.predict([t]) != [1]:
break
if use_influence_func:
inf_val = influence_func(exp_context, target_group_idxs, t)
if inf_val > best_inf_val:
best_inf_val = inf_val
best_example = t
else:
X_train_adv = np.vstack([X_train_curr, t])
y_train_adv = np.concatenate([y_train_curr, [1]])
new_clf, _ = cred.train_model(X_train_adv, y_train_adv,
exp_context.raw_datasets.X_test,
exp_context.raw_datasets.y_test, verbose=False)
inf_val = cred.score_group(exp_context, target_group_idxs, custom_clf=new_clf)
if inf_val > best_inf_val:
best_inf_val = inf_val
best_example = t
# Retrain a classifier with the poisoned example-candidate.
score = -1
if best_example is not None:
X_train_adv = np.vstack([X_train_curr, best_example])
y_train_adv = np.concatenate([y_train_curr, [1]])
new_clf, _ = cred.train_model(X_train_adv, y_train_adv,
exp_context.raw_datasets.X_test, exp_context.raw_datasets.y_test, verbose=False)
score = cred.score_group(exp_context, target_group_idxs, custom_clf=new_clf)
scored_examples.append((score, best_example))
acc = new_clf.score(
exp_context.raw_datasets.X[candidate_idxs],
exp_context.raw_datasets.y[candidate_idxs]
)
influence_data = influence_data.append(
dict(influence=best_inf_val, score=score, acc=acc),
ignore_index=True,
)
index_progbar.set_description("Score: %2.4f, Infl: %3.2f" % (
score, best_inf_val))
influence_data.to_csv("influence_data_retrained.csv")
with open("scored_examples.pkl", "wb") as f:
pickle.dump(scored_examples, f)
return scored_examples
def find_poisoning_group(
exp_context,
target_group_idxs,
num_best_samples=10,
score_goal=None,
noise_set_size=50,
seed=1,
max_group_size=10,
load_candidates_from_cache=True,
):
"""Find a poisoning group G_pot."""
if score_goal is None:
score_goal = np.inf
clf = exp_context.clf
X, y = exp_context.raw_datasets.X, exp_context.raw_datasets.y
X_train = exp_context.raw_datasets.X_train
y_train = exp_context.raw_datasets.y_train
X_train_curr = np.array(X_train)
y_train_curr = np.array(y_train)
# Add random people to the training data.
np.random.seed(seed)
candidate_idxs = [
i for i in np.arange(len(X)) if i not in target_group_idxs and y[i]
]
noise_idxs = np.random.choice(
[i for i in exp_context.raw_datasets.test_ind if i not in candidate_idxs],
noise_set_size,
replace=False,
)
X_noise = exp_context.raw_datasets.X[noise_idxs]
y_noise = exp_context.raw_datasets.y[noise_idxs]
X_train_noisy = np.vstack([X_train_curr, X_noise])
y_train_noisy = np.concatenate([y_train_curr, y_noise])
X_train_noisy_adv = X_train_noisy
y_train_noisy_adv = y_train_noisy
print("Size of possible noise additions: %i" % len([
i for i in exp_context.raw_datasets.test_ind if i in candidate_idxs]))
print("Size of the noise additions: %i" % len(noise_idxs))
if load_candidates_from_cache:
with open("scored_examples.pkl", "rb") as f:
scored_examples = pickle.load(f)
else:
scored_examples = select_candidates(
exp_context,
target_group_idxs,
num_best_samples=num_best_samples,
seed=seed,
)
score = 0
group_counter = 0
score_baseline = 0
# Compute initial score and datasets.
group_datasets = [(X_train_noisy, y_train_noisy)]
sorted_examples = sorted(scored_examples, key=lambda t: t[0])
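    # Greedy loop: repeatedly pop the highest-scoring poisoned candidate and keep it
    # only if it improves the group score, until the goal or max group size is hit.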
with tqdm(total=max_group_size) as progbar:
while sorted_examples and score < score_goal and group_counter < max_group_size:
score, best_example = sorted_examples.pop()
# Add the example to the clean poisoned dataset.
X_train_adv = np.vstack([X_train_curr, best_example])
y_train_adv = np.concatenate([y_train_curr, [1]])
X_train_curr, y_train_curr = X_train_adv, y_train_adv
# Add the example to the noisy poisoned dataset.
X_train_noisy_adv = np.vstack([X_train_noisy_adv, best_example])
y_train_noisy_adv = np.concatenate([y_train_noisy_adv, [1]])
new_clf, _ = cred.train_model(
X_train_adv,
y_train_adv,
exp_context.raw_datasets.X_test,
exp_context.raw_datasets.y_test,
verbose=False,
)
score = cred.score_group(exp_context, target_group_idxs, custom_clf=new_clf)
new_clf, _ = cred.train_model(
X_train_noisy_adv,
y_train_noisy_adv,
exp_context.raw_datasets.X_test,
exp_context.raw_datasets.y_test,
verbose=False,
)
noisy_score = cred.score_group(
exp_context, target_group_idxs, custom_clf=new_clf
)
if score > score_baseline:
score_baseline = score
group_datasets.append((X_train_noisy_adv, y_train_noisy_adv))
group_counter += 1
progbar.set_description(
"Accepted. Score: %2.4f. Group size: %i" % (score, group_counter)
)
else:
progbar.set_description(
"Rejected. Score: %2.4f (%2.4f). Group size: %i"
% (score, score_baseline, group_counter)
)
progbar.update()
print("Final score: %2.4f" % noisy_score)
print("Poisoned size: %i" % group_counter)
return group_datasets
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run a poisoning attack on credit scoring."
)
parser.add_argument("--seed", default=1, type=int, help="Random seed")
parser.add_argument(
"--num_best_samples",
default=None,
type=int,
help="Number of samples to consider",
)
parser.add_argument(
"--noise_set_size", default=0, type=int, help="Number of noise samples to add"
)
parser.add_argument(
"--num_simulations", default=1, type=int, help="Number of simulations"
)
parser.add_argument(
"--max_group_size", default=10, type=int, help="Max poisoning group size"
)
parser.add_argument(
"--load_scores_from_cache", default=False, type=bool, help="Load candidate scores from cache"
)
args = parser.parse_args()
exp_context = ExpContext(seed=args.seed)
# Pick a target group to benefit from poisoning.
df = exp_context.df
target_group_sel = (
(df["Checking account_little"] == 1)
& (df["Saving accounts_little"] == 1)
& (df["Risk_good"] == 1)
& (exp_context.clf.predict(exp_context.raw_datasets.X) == 0).astype(bool)
)
print("Target group size:", sum(target_group_sel))
target_group = exp_context.raw_datasets.X[target_group_sel]
target_group_idxs = np.where(target_group_sel)[0]
print(
"Acceptance rate for the target group: %2.6f"
% cred.score_group(exp_context, target_group_idxs, trade_off=0.)
)
for i in range(args.num_simulations):
group_datasets = find_poisoning_group(
exp_context,
target_group_idxs,
seed=args.seed + i,
max_group_size=args.max_group_size,
noise_set_size=args.noise_set_size,
num_best_samples=args.num_best_samples,
load_candidates_from_cache=args.load_scores_from_cache,
)
out_path = "out/group_poisoning_influence_seed_%d_noise_%d_sim_%d" % (args.seed, args.noise_set_size, i)
with open(out_path, "wb") as f:
pickle.dump(group_datasets, f)
| 35.059322
| 112
| 0.624768
|
879b9c191eb0eb80e3c9af4157515e845dde9f42
| 1,293
|
py
|
Python
|
examples/data_augmentation/classifier.py
|
BehaviorPredictionTestingPlatform/VerifAI
|
db05f3573c2e7d98c03029c1b4efca93e6b08edb
|
[
"BSD-3-Clause"
] | 109
|
2019-04-29T03:30:42.000Z
|
2022-03-31T03:06:26.000Z
|
examples/data_augmentation/classifier.py
|
BehaviorPredictionTestingPlatform/VerifAI
|
db05f3573c2e7d98c03029c1b4efca93e6b08edb
|
[
"BSD-3-Clause"
] | 25
|
2019-03-25T00:27:39.000Z
|
2022-03-27T20:29:23.000Z
|
examples/data_augmentation/classifier.py
|
BehaviorPredictionTestingPlatform/VerifAI
|
db05f3573c2e7d98c03029c1b4efca93e6b08edb
|
[
"BSD-3-Clause"
] | 35
|
2019-02-12T20:50:32.000Z
|
2022-01-05T11:25:06.000Z
|
import numpy as np
from dotmap import DotMap
from verifai.client import Client
try:
import tensorflow as tf
except ModuleNotFoundError:
import sys
sys.exit('This functionality requires tensorflow to be installed')
from renderer.kittiLib import getLib
from renderer.generator import genImage
from model.model import Model
class Classifier(Client):
def __init__(self, classifier_data):
port = classifier_data.port
bufsize = classifier_data.bufsize
super().__init__(port, bufsize)
self.sess = tf.Session()
self.nn = Model()
self.nn.init(classifier_data.graph_path, classifier_data.checkpoint_path, self.sess)
self.lib = getLib()
def simulate(self, sample):
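        # Render an image for the sampled scene, use the number of cars as the
        # ground-truth label, and take argmax of the detector output (+1) as the
        # predicted count.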
img, _ = genImage(self.lib, sample)
yTrue = len(sample.cars)
yPred = np.argmax(self.nn.predict(np.array(img))[0]) + 1
res = {}
res['yTrue'] = yTrue
res['yPred'] = yPred
return res
PORT = 8888
BUFSIZE = 4096
classifier_data = DotMap()
classifier_data.port = PORT
classifier_data.bufsize = BUFSIZE
classifier_data.graph_path = './data/car_detector/checkpoint/car-detector-model.meta'
classifier_data.checkpoint_path = './data/car_detector/checkpoint/'
client_task = Classifier(classifier_data)
while True:
if not client_task.run_client():
print("End of all classifier calls")
break
| 24.396226
| 86
| 0.753287
|
1ef64256b8caf749726f1e07c7bb8ecd9f635fee
| 7,335
|
py
|
Python
|
main_nolsm_onlyvid.py
|
nviable/deepfake-blips
|
93f57d5e6b8f5b3ac7bdcae4c81a4b2028f1253b
|
[
"Apache-2.0"
] | 5
|
2019-05-22T02:30:43.000Z
|
2020-02-18T12:23:08.000Z
|
main_nolsm_onlyvid.py
|
nviable/deepfake-blips
|
93f57d5e6b8f5b3ac7bdcae4c81a4b2028f1253b
|
[
"Apache-2.0"
] | null | null | null |
main_nolsm_onlyvid.py
|
nviable/deepfake-blips
|
93f57d5e6b8f5b3ac7bdcae4c81a4b2028f1253b
|
[
"Apache-2.0"
] | 1
|
2020-02-28T06:15:07.000Z
|
2020-02-28T06:15:07.000Z
|
#%%
from datetime import datetime
import os,cv2
#from cv2 import getRotationMatrix2D, warpAffine,getAffineTransform,resize,imread,BORDER_REFLECT
import numpy as np
#KERAS IMPORTS
from keras.applications.vgg16 import VGG16
from keras.callbacks import ProgbarLogger, EarlyStopping, ModelCheckpoint, TensorBoard
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, MaxPooling2D, Conv2DTranspose, Conv2D, concatenate, Dense, Conv1D, TimeDistributed, LSTM, Flatten, Bidirectional, BatchNormalization
from keras.layers.core import Reshape, Activation, Dropout
from keras.preprocessing.image import *
from keras.optimizers import SGD
from dataloader import BlipDatasetLoader
from keras.backend import expand_dims, shape
from keras.utils import plot_model
import matplotlib.pyplot as plt
#%%
time_window = 2
video_w = 512
video_h = 384
video_c = 3
audio_l = 1024
audio_c = 2
n_epochs = 5
G = BlipDatasetLoader(16, frames=time_window, only_vid=True)
train_generator = G.gen(no_timesteps=True)
test_generator = G.gen(False, no_timesteps=True)
validation_generator = G.gen(False, True, no_timesteps=True)
#%%
'''
Model Architecture
using Keras functional API
'''
# first input model
input_rgb = Input(shape=(384, 512, 3))
input_rgb_norm = BatchNormalization(axis=-1)(input_rgb)
conv11a = Conv2D(32, kernel_size=5, padding='same', name="conv11a")(input_rgb)
conv11a = Activation('relu')(conv11a)
conv11b = Conv2D(32, kernel_size=5, padding='same', strides=(2,2), name="conv11b")(conv11a)
conv11b_b = BatchNormalization()(conv11b)
conv11b_a = Activation('relu')(conv11b_b)
conv11b_d = Dropout(0.2)(conv11b_a)
pool11 = MaxPooling2D(pool_size=(3, 3), name="pool11")(conv11b_d)
conv12a = Conv2D(64, kernel_size=3, padding='same', name="conv12a")(pool11)
conv12a = Activation('relu')(conv12a)
conv12b = Conv2D(64, kernel_size=3, padding='same', strides=(2,2), name="conv12b")(conv12a)
conv12b_b = BatchNormalization()(conv12b)
conv12b_a = Activation('relu')(conv12b_b)
conv12b_d = Dropout(0.2)(conv12b_a)
pool12 = MaxPooling2D(pool_size=(2, 2), name="pool12")(conv12b_d)
conv13a = Conv2D(128, kernel_size=3, padding='same', name="conv13a")(pool12)
conv13a = Activation('relu')(conv13a)
conv13b = Conv2D(128, kernel_size=3, padding='same', strides=(2,2), name="conv13b")(conv13a)
conv13b_b = BatchNormalization()(conv13b)
conv13b_a = Activation('relu')(conv13b_b)
conv13b_d = Dropout(0.2)(conv13b_a)
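# NOTE: pool13 through pool17 below take conv12b_d / conv12b as input rather than the
# conv13*-conv17* outputs, so only the conv11*/conv12* blocks feed the final Flatten
# layer; the conv13*-conv17* layers defined here do not contribute to the model output.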
pool13 = MaxPooling2D(pool_size=(2, 2), name="pool13")(conv12b_d)
conv14a = Conv2D(256, kernel_size=3, padding='same', name="conv14a")(pool13)
conv14a = Activation('relu')(conv14a)
conv14b = Conv2D(256, kernel_size=3, padding='same', strides=(2,2), name="conv14b")(conv14a)
conv14b_b = BatchNormalization()(conv14b)
conv14b_a = Activation('relu')(conv14b_b)
conv14b_d = Dropout(0.2)(conv14b_a)
pool14 = MaxPooling2D(pool_size=(2, 2), name="pool14")(conv12b_d)
conv15a = Conv2D(256, kernel_size=3, padding='same', name="conv15a")(pool14)
conv15a = Activation('relu')(conv15a)
conv15b = Conv2D(256, kernel_size=3, padding='same', strides=(2,2), name="conv15b")(conv15a)
conv15b_b = BatchNormalization()(conv15b)
conv15b_a = Activation('relu')(conv15b_b)
conv15b_d = Dropout(0.2)(conv15b_a)
pool15 = MaxPooling2D(pool_size=(2, 2), name="pool15")(conv12b_d)
conv16a = Conv2D(256, kernel_size=3, padding='same', name="conv16a")(pool15)
conv16a = Activation('relu')(conv16a)
conv16b = Conv2D(256, kernel_size=3, padding='same', strides=(2,2), name="conv16b")(conv16a)
conv16b = Activation('relu')(conv16b)
pool16 = MaxPooling2D(pool_size=(2, 2), name="pool16")(conv12b)
conv17a = Conv2D(256, kernel_size=3, padding='same', name="conv17a")(pool16)
conv17a = Activation('relu')(conv17a)
conv17b = Conv2D(256, kernel_size=3, padding='same', strides=(2,2), name="conv17b")(conv17a)
conv17b = Activation('relu')(conv17b)
pool17 = MaxPooling2D(pool_size=(2, 2), name="pool17")(conv12b)
flat1 = Flatten()(pool17)
# conv11 = Conv2D(16, kernel_size=4, padding='valid', activation='relu', name="conv11" ) (input_rgb)
# pool11 = MaxPooling2D(pool_size=(2, 2), name="pool11" ) (conv11)
# conv12 = Conv2D(32, kernel_size=4, padding='valid', activation='relu', name="conv12" ) (pool11)
# pool12 = MaxPooling2D(pool_size=(2, 2), name="pool12" ) (conv12)
# flat1 = Flatten( name="flat1" ) (pool12)
# second input model
# input_stft = Input(shape=(25, 41, 2))
'''
conv21a = Conv2D(16, kernel_size=3, padding='same', name="conv21a")(input_stft)
conv21a = Activation('relu')(conv21a)
conv21b = Conv2D(16, kernel_size=3, padding='same', name="conv21b")(conv21a)
conv21b_b = BatchNormalization()(conv21b)
conv21b_a = Activation('relu')(conv21b_b)
conv21b_d = Dropout(0.2)(conv21b_a)
pool21 = MaxPooling2D(pool_size=(2, 2), name="pool21")(conv21b_d)
conv22a = Conv2D(32, kernel_size=3, padding='same', name="conv22a")(pool21)
conv22a = Activation('relu')(conv22a)
conv22b = Conv2D(32, kernel_size=3, padding='same', name="conv22b")(conv22a)
conv22b_b = BatchNormalization()(conv22b)
conv22b_a = Activation('relu')(conv22b_b)
conv22b_d = Dropout(0.2)(conv22b_a)
pool22 = MaxPooling2D(pool_size=(2, 2), name="pool22")(conv22b_d)
conv23a = Conv2D(32, kernel_size=3, padding='same', name="conv23a")(pool22)
conv23a = Activation('relu')(conv23a)
conv23b = Conv2D(32, kernel_size=3, padding='same', name="conv23b")(conv23a)
conv23b_b = BatchNormalization()(conv23b)
conv23b_a = Activation('relu')(conv23b_b)
conv23b_d = Dropout(0.2)(conv23b_a)
pool23 = MaxPooling2D(pool_size=(2, 2), name="pool23")(conv23b_d)
'''
# conv21 = Conv2D(16, kernel_size=4, activation='relu', name="conv21" )(input_stft)
# pool21 = MaxPooling2D(pool_size=(2, 2), name="pool21" )(conv21)
# conv22 = Conv2D(32, kernel_size=4, activation='relu', name="conv22" )(pool21)
# pool22 = MaxPooling2D(pool_size=(2, 2), name="pool22" )(conv22)
# flat2 = Flatten( name="flat2" )(pool22)
# merge input models
# merge = concatenate([flat1, flat2])
# lstm1 = LSTM(32, return_sequences=True)(merge)
# flatten_lstm = Flatten() (lstm1)
hidden1 = Dense(16, activation='relu') (flat1)
hidden2 = Dense(16, activation='relu') (hidden1)
output = Dense(2, activation='softmax') (hidden2)
model = Model(inputs=input_rgb, outputs=output)
# summarize layers
print(model.summary())
# exit()
model.compile(optimizer=SGD(lr=0.008, decay=1e-6, momentum=0.9, nesterov=True),
loss='mean_squared_error',
metrics=['accuracy'])
history = model.fit_generator(train_generator, steps_per_epoch=100, verbose=1, epochs=n_epochs, validation_data=validation_generator, validation_steps=10, use_multiprocessing=True)
print(model.evaluate_generator(test_generator, steps=100))
plot_model(model, to_file='model.png')
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig("accuracy_nolstm_onlyvid.png")
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig("loss_nolstm_onlyvid.png")
# model.save('final_model')
| 42.155172
| 180
| 0.734015
|
e8c56316ff5d87b0ef64cdc25db0c731179823b1
| 171
|
py
|
Python
|
api/tests/ver2/test_data/office_test_data.py
|
pcf26536/politico-api
|
1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c
|
[
"MIT"
] | 1
|
2019-02-22T19:34:32.000Z
|
2019-02-22T19:34:32.000Z
|
api/tests/ver2/test_data/office_test_data.py
|
pcf26536/politico-api
|
1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c
|
[
"MIT"
] | null | null | null |
api/tests/ver2/test_data/office_test_data.py
|
pcf26536/politico-api
|
1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c
|
[
"MIT"
] | 1
|
2019-02-07T22:12:25.000Z
|
2019-02-07T22:12:25.000Z
|
from api.strings import name_key, type_key
from api.ver1.offices.strings import fed_type
correct_office = {
name_key: "Women Representative",
type_key: fed_type
}
| 24.428571
| 45
| 0.77193
|
9eddc9b44c8e9bb9b2c1b2e233e11af6583e50b9
| 2,380
|
py
|
Python
|
tests/unit/raptiformica/actions/agent/test_agent_already_running.py
|
vdloo/raptiformica
|
e2807e5e913312034161efcbd74525a4b15b37e7
|
[
"MIT"
] | 21
|
2016-09-04T11:27:31.000Z
|
2019-10-30T08:23:14.000Z
|
tests/unit/raptiformica/actions/agent/test_agent_already_running.py
|
vdloo/raptiformica
|
e2807e5e913312034161efcbd74525a4b15b37e7
|
[
"MIT"
] | 5
|
2017-09-17T15:59:37.000Z
|
2018-02-03T14:53:32.000Z
|
tests/unit/raptiformica/actions/agent/test_agent_already_running.py
|
vdloo/raptiformica
|
e2807e5e913312034161efcbd74525a4b15b37e7
|
[
"MIT"
] | 2
|
2017-11-21T18:14:51.000Z
|
2017-11-22T01:20:45.000Z
|
from raptiformica.actions.agent import agent_already_running
from tests.testcase import TestCase
class TestAgentAlreadyRunning(TestCase):
def setUp(self):
self.check_nonzero_exit = self.set_up_patch(
'raptiformica.actions.agent.check_nonzero_exit'
)
self.check_nonzero_exit.return_value = True
self._get_program_name = self.set_up_patch(
'raptiformica.actions.agent._get_program_name'
)
self._get_program_name.return_value = 'raptiformica.actions.spawn'
def test_agent_already_running_checks_if_there_is_another_agent_already_running(self):
agent_already_running()
expected_command = "ps aux | grep 'bin/[r]aptiformica_agent.py' | " \
"grep -v screen -i | grep python3 | grep -v 'sh -c' | " \
"awk '{print $2}' | xargs --no-run-if-empty -I {} " \
"sh -c \"grep -q docker /proc/{}/cgroup 2> /dev/null " \
"&& ! grep -q name=systemd:/docker /proc/1/cgroup || echo {}\" | " \
"wc -l | { read li; test $li -gt 0; }"
self.check_nonzero_exit.assert_called_once_with(
expected_command
)
def test_agent_already_running_allows_2_agents_to_run_if_check_runs_inside_agent(self):
self._get_program_name.return_value = 'raptiformica.actions.agent'
agent_already_running()
expected_command = "ps aux | grep 'bin/[r]aptiformica_agent.py' | " \
"grep -v screen -i | grep python3 | grep -v 'sh -c' | " \
"awk '{print $2}' | xargs --no-run-if-empty -I {} " \
"sh -c \"grep -q docker /proc/{}/cgroup 2> /dev/null " \
"&& ! grep -q name=systemd:/docker /proc/1/cgroup || echo {}\" | " \
"wc -l | { read li; test $li -gt 1; }"
self.check_nonzero_exit.assert_called_once_with(
expected_command
)
def tests_agent_already_running_returns_true_if_check_exits_zero(self):
ret = agent_already_running()
self.assertTrue(ret)
def tests_agent_already_running_returns_false_if_check_exits_nonzero(self):
self.check_nonzero_exit.return_value = False
ret = agent_already_running()
self.assertFalse(ret)
| 43.272727
| 95
| 0.596218
|
040ca3941248932b69f6e566bea6389eea026ab6
| 347
|
py
|
Python
|
tests/test_models.py
|
solocompt/plugs-status
|
1177d3426e8e62801fc435df082ad3ffc2dea0b6
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
solocompt/plugs-status
|
1177d3426e8e62801fc435df082ad3ffc2dea0b6
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
solocompt/plugs-status
|
1177d3426e8e62801fc435df082ad3ffc2dea0b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_plugs-status
------------
Tests for `plugs-status` models module.
"""
from django.test import TestCase
from plugs_status import models
class TestPlugs_status(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
| 13.346154
| 39
| 0.62536
|
f5b1c89503dc67b5dfca05d692d61cdec555d3cc
| 5,689
|
py
|
Python
|
IMLearn/learners/gaussian_estimators.py
|
ereldebel/IML.HUJI
|
1c3d7042071a74ed60f92c013ef6051e2341304c
|
[
"MIT"
] | null | null | null |
IMLearn/learners/gaussian_estimators.py
|
ereldebel/IML.HUJI
|
1c3d7042071a74ed60f92c013ef6051e2341304c
|
[
"MIT"
] | null | null | null |
IMLearn/learners/gaussian_estimators.py
|
ereldebel/IML.HUJI
|
1c3d7042071a74ed60f92c013ef6051e2341304c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import numpy as np
from numpy.linalg import inv, det, slogdet
class UnivariateGaussian:
"""
Class for univariate Gaussian Distribution Estimator
"""
def __init__(self, biased_var: bool = False) -> None:
"""
Estimator for univariate Gaussian mean and variance parameters
Parameters
----------
biased_var : bool, default=False
Should fitted estimator of variance be a biased or unbiased estimator
Attributes
----------
fitted_ : bool
Initialized as false indicating current estimator instance has not been fitted.
To be set as True in `UnivariateGaussian.fit` function.
mu_: float
Estimated expectation initialized as None. To be set in `UnivariateGaussian.fit`
function.
var_: float
Estimated variance initialized as None. To be set in `UnivariateGaussian.fit`
function.
"""
self.biased_ = biased_var
self.fitted_, self.mu_, self.var_ = False, None, None
def fit(self, X: np.ndarray) -> UnivariateGaussian:
"""
Estimate Gaussian expectation and variance from given samples
Parameters
----------
X: ndarray of shape (n_samples, )
Training data
Returns
-------
self : returns an instance of self.
Notes
-----
Sets `self.mu_`, `self.var_` attributes according to calculated estimation (where
estimator is either biased or unbiased). Then sets `self.fitted_` attribute to `True`
"""
self.mu_ = X.mean()
if self.biased_:
self.var_ = X.var()
# == np.mean(np.square(X - self.mu_))
else:
self.var_ = X.var(ddof=1)
# == np.sum(np.square(X - self.mu_)) / (X.size - 1)
self.fitted_ = True
return self
def pdf(self, X: np.ndarray) -> np.ndarray:
"""
Calculate PDF of observations under Gaussian model with fitted estimators
Parameters
----------
X: ndarray of shape (n_samples, )
Samples to calculate PDF for
Returns
-------
pdfs: ndarray of shape (n_samples, )
Calculated values of given samples for PDF function of N(mu_, var_)
Raises
------
ValueError: In case function was called prior fitting the model
"""
if not self.fitted_:
raise ValueError(
"Estimator must first be fitted before calling `pdf` function")
return np.exp(-(X - self.mu_) ** 2 / (2 * self.var_)) / np.sqrt(
2 * np.pi * self.var_)
@staticmethod
def log_likelihood(mu: float, sigma: float, X: np.ndarray) -> float:
"""
Calculate the log-likelihood of the data under a specified Gaussian model
Parameters
----------
mu : float
Expectation of Gaussian
sigma : float
Variance of Gaussian
X : ndarray of shape (n_samples, )
Samples to calculate log-likelihood with
Returns
-------
log_likelihood: float
log-likelihood calculated
"""
return -np.log(2 * np.pi * sigma) * X.size / 2 \
- np.sum(np.power(X - mu, 2)) / (2 * sigma)
class MultivariateGaussian:
"""
Class for multivariate Gaussian Distribution Estimator
"""
def __init__(self):
"""
Initialize an instance of multivariate Gaussian estimator
Attributes
----------
fitted_ : bool
Initialized as false indicating current estimator instance has not been fitted.
To be set as True in `MultivariateGaussian.fit` function.
mu_: ndarray of shape (n_features,)
Estimated expectation initialized as None. To be set in `MultivariateGaussian.fit`
function.
cov_: ndarray of shape (n_features, n_features)
Estimated covariance initialized as None. To be set in `MultivariateGaussian.fit`
function.
"""
self.mu_, self.cov_ = None, None
self.fitted_ = False
def fit(self, X: np.ndarray) -> MultivariateGaussian:
"""
Estimate Gaussian expectation and covariance from given samples
Parameters
----------
X: ndarray of shape (n_samples, n_features)
Training data
Returns
-------
self : returns an instance of self
Notes
-----
Sets `self.mu_`, `self.cov_` attributes according to calculated estimation.
Then sets `self.fitted_` attribute to `True`
"""
self.mu_ = X.mean(axis=0)
self.cov_ = np.cov(X.transpose(), bias=False)
self.fitted_ = True
return self
def pdf(self, X: np.ndarray):
"""
Calculate PDF of observations under Gaussian model with fitted estimators
Parameters
----------
X: ndarray of shape (m_samples, n_features)
Samples to calculate PDF for
Returns
-------
pdfs: ndarray of shape (m_samples, )
Calculated values of given samples for PDF function of N(mu_, cov_)
Raises
------
ValueError: In case function was called prior fitting the model
"""
if not self.fitted_:
raise ValueError(
"Estimator must first be fitted before calling `pdf` function")
x_minus_mu = X - self.mu_
cov_inv = inv(self.cov_)
def sample_pdf(x):
return np.exp(-np.sum(x @ cov_inv @ x.T, axis=0) / 2) / np.sqrt(
np.power(2 * np.pi, X.shape[1]) * det(self.cov_))
return np.apply_along_axis(sample_pdf, 1, x_minus_mu)
@staticmethod
def log_likelihood(mu: np.ndarray, cov: np.ndarray,
X: np.ndarray) -> float:
"""
Calculate the log-likelihood of the data under a specified Gaussian model
Parameters
----------
mu : ndarray of shape (n_features,)
Expectation of Gaussian
cov : ndarray of shape (n_features, n_features)
covariance matrix of Gaussian
X : ndarray of shape (n_samples, n_features)
Samples to calculate log-likelihood with
Returns
-------
log_likelihood: float
log-likelihood calculated over all input data and under given parameters of Gaussian
"""
m = X.shape[0]
d = X.shape[1]
x_minus_mu = X - mu
cov_inv = inv(cov)
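        # log L = -(m*d/2) * log(2*pi) - (m/2) * log|cov| - (1/2) * sum_i (x_i - mu)^T cov^{-1} (x_i - mu)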
return -m * d / 2 * np.log(2 * np.pi) \
- m / 2 * slogdet(cov)[1] \
- np.sum(x_minus_mu @ cov_inv * x_minus_mu) / 2
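# A minimal usage sketch appended for illustration (not part of the original module):
# it assumes only numpy and the two estimators defined above, fits them on random
# Gaussian samples, and prints the fitted parameters and log-likelihoods.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Univariate: fit, evaluate the pdf at the fitted mean, and the log-likelihood.
    uni_samples = rng.normal(loc=5.0, scale=2.0, size=1000)
    uni = UnivariateGaussian().fit(uni_samples)
    print("univariate mu, var:", uni.mu_, uni.var_)
    print("pdf at mu:", uni.pdf(np.array([uni.mu_])))
    print("log-likelihood:", UnivariateGaussian.log_likelihood(uni.mu_, uni.var_, uni_samples))
    # Multivariate: fit on 2-D samples and evaluate the log-likelihood of the fit.
    mean, cov = np.array([0.0, 1.0]), np.array([[1.0, 0.3], [0.3, 2.0]])
    multi_samples = rng.multivariate_normal(mean, cov, size=1000)
    multi = MultivariateGaussian().fit(multi_samples)
    print("multivariate mu:", multi.mu_)
    print("log-likelihood:", MultivariateGaussian.log_likelihood(multi.mu_, multi.cov_, multi_samples))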
| 25.977169
| 87
| 0.679557
|
8a0188795536e668bab21c1eaba05c76a768f22d
| 4,272
|
py
|
Python
|
src/lossfunctions.py
|
Darshan-Ramesh/EmpRecognition
|
c85775659bcbb79f62de29a7a764cc72f1de0674
|
[
"MIT"
] | null | null | null |
src/lossfunctions.py
|
Darshan-Ramesh/EmpRecognition
|
c85775659bcbb79f62de29a7a764cc72f1de0674
|
[
"MIT"
] | null | null | null |
src/lossfunctions.py
|
Darshan-Ramesh/EmpRecognition
|
c85775659bcbb79f62de29a7a764cc72f1de0674
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from torch.autograd import Variable
import numpy as np
class CBFocalLoss(nn.Module):
# from https://github.com/vandit15/Class-balanced-loss-pytorch/blob/master/class_balanced_loss.py
# paper https://arxiv.org/abs/1901.05555
def __init__(self,weights,gamma):
super(CBFocalLoss,self).__init__()
        self.alpha = weights  # per-class weights for Class-Balanced Loss, or 1 for plain focal loss
self.gamma = gamma
def forward(self,logits,labels):
labels = F.one_hot(labels,cfg.CLASSES).float()
BCLoss = F.binary_cross_entropy_with_logits(logits,labels,weight=None,reduction='none')
if self.gamma == 0.0:
modulator = 1.0
else:
modulator = torch.exp(-self.gamma * labels * logits - self.gamma * torch.log(1+torch.exp(-1.0 * logits)))
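        # The modulator is a numerically stable, log-space form of the focal term
        # (1 - p_t) ** gamma for sigmoid probabilities.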
loss = modulator * BCLoss
weighted_loss = self.alpha * loss
focal_loss = torch.sum(weighted_loss)
focal_loss /= torch.sum(labels)
return focal_loss
class FocalLoss(nn.Module):
def __init__(self,alpha= 1,gamma= 1,reduce= True,cls_weights=None):
super(FocalLoss,self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduce = reduce
self.cls_weights = cls_weights
def forward(self,logits,labels):
CE_loss = F.cross_entropy(logits,labels,weight=self.cls_weights,reduction = "none")
pt = torch.exp(-CE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * CE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
def LossFunctions(loss_fn_name,alpha,gamma,cls_weights=None):
#Gamma value chosen from the paper https://arxiv.org/abs/1901.05555
if not loss_fn_name:
print(f'[INFO] Using default loss function - CrossEntropy Loss')
loss_fn_name = 'ce'
if loss_fn_name == 'cb_focal':
loss = CBFocalLoss(weights = cls_weights, gamma = gamma)
return loss
if loss_fn_name == 'focal':
print(f'[INFO] Using {loss_fn_name} as loss..')
loss = FocalLoss(alpha,gamma,True,cls_weights)
return loss
if loss_fn_name == 'ce':
loss = torch.nn.CrossEntropyLoss(cls_weights)
return loss
# ------------------------------------------------------------------------------------------------------
# class CustomSigmoid(nn.Module):
# def __init__(self,weights=None):
# super(CustomSigmoid,self).__init__()
# self.cls_weights = weights
# def forward(self,logits,labels):
# labels = F.one_hot(labels,cfg.CLASSES).float()
# loss = F.binary_cross_entropy_with_logits(input=logits,target=labels,weight=self.cls_weights)
# loss = torch.mean(loss)
# return loss
# ----------------------------------------------------------------------------------------------------
# # testing
# torch.manual_seed(0)
# batch_size, n_classes = 8, 31
# x = torch.randn(batch_size, n_classes)
# batch_target = torch.randint(n_classes, size=(batch_size,), dtype=torch.long)
# fn = ['focal']
# for fn_name in fn:
# loss_fn = LossFunctions(fn_name)
# print(f'[INFO] Function - {fn_name}, Loss - {loss_fn(x,batch_target)}')
# no_of_classes = 5
# logits = torch.rand(10,no_of_classes).float()
# labels = torch.randint(0,no_of_classes, size = (10,))
# beta = 0.9999
# gamma = 2.0
# samples_per_cls = [2,3,1,2,2]
# labels_one_hot = F.one_hot(labels, no_of_classes).float()
# effective_num = 1.0 - np.power(beta, samples_per_cls)
# weights = (1.0 - beta) / np.array(effective_num)
# weights = weights / np.sum(weights) * no_of_classes
# weights = torch.tensor(weights).float()
# weights = weights.unsqueeze(0)
# weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
# weights = weights.sum(1)
# weights = weights.unsqueeze(1)
# weights = weights.repeat(1,no_of_classes)
# print(weights.shape)
# print(logits.shape)
# print(labels_one_hot.shape)
# cb_loss = CBFocalLoss(weights = weights, gamma = gamma)
# loss = cb_loss(logits,labels_one_hot)
# print(loss)
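# A small usage sketch appended for illustration (shapes and class count are made up):
# build a focal loss via the factory above and evaluate it on random logits/labels.
if __name__ == "__main__":
    n_classes = 5
    logits = torch.randn(8, n_classes)
    labels = torch.randint(0, n_classes, size=(8,))
    focal = LossFunctions('focal', alpha=1.0, gamma=2.0)
    print(f'[INFO] focal loss on random batch: {focal(logits, labels).item():.4f}')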
| 34.731707
| 117
| 0.619382
|
0712bbc0f673317187af8e1ab3b26543e2e7e0de
| 443
|
py
|
Python
|
balanced_delimiters.py
|
roopsmall/hackerrank-questions
|
f38d710be7a46491e5dc435155d953ae821d2055
|
[
"MIT"
] | null | null | null |
balanced_delimiters.py
|
roopsmall/hackerrank-questions
|
f38d710be7a46491e5dc435155d953ae821d2055
|
[
"MIT"
] | null | null | null |
balanced_delimiters.py
|
roopsmall/hackerrank-questions
|
f38d710be7a46491e5dc435155d953ae821d2055
|
[
"MIT"
] | null | null | null |
brackets = raw_input()
pairs = {'{':'}', '[':']', '(':')'}
index = -1
indicator = []
for i, bracket in enumerate(brackets):
if index == -1 and bracket not in pairs.keys():
index = 1
break
elif bracket in pairs.keys() or indicator[index] not in pairs.keys() or bracket != pairs[indicator[index]]:
indicator.append(bracket)
index += 1
else:
# bracket == indicator[index]:
del indicator[index]
index -= 1
print index == -1
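# The snippet above is Python 2 (raw_input and the print statement). A minimal
# Python 3 sketch of the same balanced-delimiters check with an explicit stack,
# assuming the input contains only delimiter characters (added for illustration):
def is_balanced(s):
    pairs = {'{': '}', '[': ']', '(': ')'}
    stack = []
    for ch in s:
        if ch in pairs:
            # opening delimiter: remember it
            stack.append(ch)
        elif not stack or pairs[stack.pop()] != ch:
            # closing delimiter with nothing open, or the wrong kind
            return False
    # balanced iff nothing is left open
    return not stack
# Example: is_balanced("{[()]}") is True, is_balanced("{[}") is False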
| 20.136364
| 108
| 0.625282
|
42e42c29ee2f51520678161f21d7e6feade1c79f
| 5,775
|
py
|
Python
|
tests/test_livechat_2.py
|
pedrohbtp/pytchat
|
5b18536972a4cdfbe68aa0beb023c136d41fca16
|
[
"MIT"
] | null | null | null |
tests/test_livechat_2.py
|
pedrohbtp/pytchat
|
5b18536972a4cdfbe68aa0beb023c136d41fca16
|
[
"MIT"
] | null | null | null |
tests/test_livechat_2.py
|
pedrohbtp/pytchat
|
5b18536972a4cdfbe68aa0beb023c136d41fca16
|
[
"MIT"
] | null | null | null |
import asyncio
import re
from aioresponses import aioresponses
from concurrent.futures import CancelledError
from pytchat.core_multithread.livechat import LiveChat
from pytchat.core_async.livechat import LiveChatAsync
from pytchat.processors.dummy_processor import DummyProcessor
def _open_file(path):
with open(path, mode='r', encoding='utf-8') as f:
return f.read()
@aioresponses()
def test_async_live_stream(*mock):
async def test_loop(*mock):
pattern = re.compile(
r'^https://www.youtube.com/live_chat/get_live_chat\?continuation=.*$')
_text = _open_file('tests/testdata/test_stream.json')
mock[0].get(pattern, status=200, body=_text)
chat = LiveChatAsync(video_id='__test_id__', processor=DummyProcessor())
chats = await chat.get()
rawdata = chats[0]["chatdata"]
        # assert fetching livechat data
assert list(rawdata[0]["addChatItemAction"]["item"].keys())[
0] == "liveChatTextMessageRenderer"
assert list(rawdata[1]["addChatItemAction"]["item"].keys())[
0] == "liveChatTextMessageRenderer"
assert list(rawdata[2]["addChatItemAction"]["item"].keys())[
0] == "liveChatPlaceholderItemRenderer"
assert list(rawdata[3]["addLiveChatTickerItemAction"]["item"].keys())[
0] == "liveChatTickerPaidMessageItemRenderer"
assert list(rawdata[4]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidMessageRenderer"
assert list(rawdata[5]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidStickerRenderer"
assert list(rawdata[6]["addLiveChatTickerItemAction"]["item"].keys())[
0] == "liveChatTickerSponsorItemRenderer"
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(test_loop(*mock))
except CancelledError:
assert True
@aioresponses()
def test_async_replay_stream(*mock):
async def test_loop(*mock):
pattern_live = re.compile(
r'^https://www.youtube.com/live_chat/get_live_chat\?continuation=.*$')
pattern_replay = re.compile(
r'^https://www.youtube.com/live_chat_replay/get_live_chat_replay\?continuation=.*$')
# empty livechat -> switch to fetch replaychat
_text_live = _open_file('tests/testdata/finished_live.json')
_text_replay = _open_file('tests/testdata/chatreplay.json')
mock[0].get(pattern_live, status=200, body=_text_live)
mock[0].get(pattern_replay, status=200, body=_text_replay)
chat = LiveChatAsync(video_id='__test_id__', processor=DummyProcessor())
chats = await chat.get()
rawdata = chats[0]["chatdata"]
# assert fetching replaychat data
assert list(rawdata[0]["addChatItemAction"]["item"].keys())[
0] == "liveChatTextMessageRenderer"
assert list(rawdata[14]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidMessageRenderer"
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(test_loop(*mock))
except CancelledError:
assert True
@aioresponses()
def test_async_force_replay(*mock):
async def test_loop(*mock):
pattern_live = re.compile(
r'^https://www.youtube.com/live_chat/get_live_chat\?continuation=.*$')
pattern_replay = re.compile(
r'^https://www.youtube.com/live_chat_replay/get_live_chat_replay\?continuation=.*$')
# valid live data, but force_replay = True
_text_live = _open_file('tests/testdata/test_stream.json')
# valid replay data
_text_replay = _open_file('tests/testdata/chatreplay.json')
mock[0].get(pattern_live, status=200, body=_text_live)
mock[0].get(pattern_replay, status=200, body=_text_replay)
# force replay
chat = LiveChatAsync(
video_id='__test_id__', processor=DummyProcessor(), force_replay=True)
chats = await chat.get()
rawdata = chats[0]["chatdata"]
# assert fetching replaychat data
assert list(rawdata[14]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidMessageRenderer"
        # assert livechat data is not mixed in
assert list(rawdata[2]["addChatItemAction"]["item"].keys())[
0] != "liveChatPlaceholderItemRenderer"
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(test_loop(*mock))
except CancelledError:
assert True
def test_multithread_live_stream(mocker):
_text = _open_file('tests/testdata/test_stream.json')
responseMock = mocker.Mock()
responseMock.status_code = 200
responseMock.text = _text
mocker.patch(
'requests.Session.get').return_value.__enter__.return_value = responseMock
chat = LiveChat(video_id='__test_id__', processor=DummyProcessor())
chats = chat.get()
rawdata = chats[0]["chatdata"]
    # assert fetching livechat data
assert list(rawdata[0]["addChatItemAction"]["item"].keys())[
0] == "liveChatTextMessageRenderer"
assert list(rawdata[1]["addChatItemAction"]["item"].keys())[
0] == "liveChatTextMessageRenderer"
assert list(rawdata[2]["addChatItemAction"]["item"].keys())[
0] == "liveChatPlaceholderItemRenderer"
assert list(rawdata[3]["addLiveChatTickerItemAction"]["item"].keys())[
0] == "liveChatTickerPaidMessageItemRenderer"
assert list(rawdata[4]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidMessageRenderer"
assert list(rawdata[5]["addChatItemAction"]["item"].keys())[
0] == "liveChatPaidStickerRenderer"
assert list(rawdata[6]["addLiveChatTickerItemAction"]["item"].keys())[
0] == "liveChatTickerSponsorItemRenderer"
chat.terminate()
| 40.957447
| 96
| 0.665974
|
94cfea49754e85af29da3a247feda3bf7dc1e1e4
| 955
|
py
|
Python
|
Section_3/Exercise_15.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | null | null | null |
Section_3/Exercise_15.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | null | null | null |
Section_3/Exercise_15.py
|
Szymon-Budziak/WDI_exercises_solutions
|
51ffc9ec8b3cd6809bd55e98ecb8aed759c2d460
|
[
"MIT"
] | 1
|
2021-11-21T09:38:33.000Z
|
2021-11-21T09:38:33.000Z
|
"""
Given a large array t, write a function that returns whether the following condition
holds for the array: "all elements whose index is an element of the Fibonacci sequence
are composite numbers, and among the remaining elements at least one is a prime number".
"""
from random import randint
def fibonacci_index(t):
len_t = len(t)
a = 0
b = 1
for i in range(0, len_t):
result_a = 0
result_b = 0
try:
a_str = str(t[a])
b_str = str(t[b])
for x in range(len(a_str)):
result_a += 1
for y in range(len(b_str)):
result_b += 1
if result_a > 1 and result_b > 1:
a += b
b += a
continue
else:
return False
except IndexError:
continue
return True
t = [randint(1, 100) for _ in range(10)]
print(fibonacci_index(t))
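# Note: the function above only checks that the values at Fibonacci indices have
# more than one digit; it does not test compositeness or look for a prime among the
# remaining elements. A sketch that follows the docstring literally (treating 0 and 1
# as Fibonacci indices; added here for illustration) could look like this:
def is_prime(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def check_condition(t):
    # Collect every Fibonacci number that is a valid index into t.
    fib_idx = set()
    a, b = 0, 1
    while a < len(t):
        fib_idx.add(a)
        a, b = b, a + b
    # All elements at Fibonacci indices must be composite (> 1 and not prime) ...
    if any(t[i] <= 1 or is_prime(t[i]) for i in fib_idx):
        return False
    # ... and at least one of the remaining elements must be prime.
    return any(is_prime(t[i]) for i in range(len(t)) if i not in fib_idx)
print(check_condition(t))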
| 26.527778
| 96
| 0.55288
|
75b99654dbf8921e515d99738725ab16c49c99da
| 22,714
|
py
|
Python
|
pandas/io/tests/parser/c_parser_only.py
|
onesandzeroes/pandas
|
22d982a8afdef3c438c9c93dfe5299cc5ca07de2
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/io/tests/parser/c_parser_only.py
|
onesandzeroes/pandas
|
22d982a8afdef3c438c9c93dfe5299cc5ca07de2
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/io/tests/parser/c_parser_only.py
|
onesandzeroes/pandas
|
22d982a8afdef3c438c9c93dfe5299cc5ca07de2
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tests that apply specifically to the CParser. Unless specifically stated
as a CParser-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas import compat
from pandas.compat import StringIO, range, lrange
from pandas.types.dtypes import CategoricalDtype
class CParserTests(object):
def test_buffer_overflow(self):
# see gh-9205: test certain malformed input files that cause
# buffer overflows in tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
cperr = 'Buffer overflow caught - possible malformed input file.'
for malf in (malfw, malfs, malfl):
try:
self.read_table(StringIO(malf))
except Exception as err:
self.assertIn(cperr, str(err))
def test_buffer_rd_bytes(self):
# see gh-12098: src->buffer in the C parser can be freed twice leading
# to a segfault if a corrupt gzip file is read with 'read_csv' and the
# buffer is filled more than once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception:
pass
def test_delim_whitespace_custom_terminator(self):
# See gh-12912
data = """a b c~1 2 3~4 5 6~7 8 9"""
df = self.read_csv(StringIO(data), lineterminator='~',
delim_whitespace=True)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, expected)
def test_dtype_and_names_error(self):
# see gh-8833: passing both dtype and names
# resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep=r'\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep=r'\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]],
columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep=r'\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# valid but unsupported - fixed width unicode string
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'U8'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_precise_conversion(self):
# see gh-8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
# test numbers between 1 and 2
for num in np.linspace(1., 2., num=500):
# 25 decimal digits of precision
text = 'a\n{0:.25}'.format(num)
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_pass_dtype_as_recarray(self):
if compat.is_platform_windows() and self.low_memory:
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 1: 'S1'}, as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
# see gh-9424
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one.1', dtype='f')], axis=1)
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
data = ''
result = self.read_csv(StringIO(data), names=['one', 'one'],
dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = self.read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = self.read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_passed_int_dtype_with_nas(self):
# see gh-2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(ValueError, self.read_csv, StringIO(data),
sep=",", skipinitialspace=True,
dtype={'DOY': np.int64})
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# see gh-3453
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_grow_boundary_at_cap(self):
# See gh-12494
#
# Cause of error was that the C parser
# was not increasing the buffer size when
# the desired space would fill the buffer
# to capacity, which would later cause a
# buffer overflow error when checking the
# EOF terminator of the CSV stream
def test_empty_header_read(count):
s = StringIO(',' * count)
expected = DataFrame(columns=[
'Unnamed: {i}'.format(i=i)
for i in range(count + 1)])
df = self.read_csv(s)
tm.assert_frame_equal(df, expected)
for count in range(1, 101):
test_empty_header_read(count)
def test_parse_trim_buffers(self):
# This test is part of a bugfix for issue #13703. It attempts
# to stress the system memory allocator, to cause it to move the
# stream buffer and either let the OS reclaim the region, or let
# other memory requests of the parser otherwise modify the contents
# of the memory space where it was formerly located.
# This test is designed to cause a `segfault` with unpatched
# `tokenizer.c`. Sometimes the test fails with a `segfault`, other
# times it fails due to memory corruption, which causes the
# loaded DataFrame to differ from the expected one.
# Generate a large mixed-type CSV file on-the-fly (one record is
# approx 1.5KiB).
record_ = \
"""9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z""" \
"""ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,""" \
"""ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9""" \
"""99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,""" \
"""9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9.""" \
"""99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999.""" \
"""99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ""" \
"""ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ""" \
"""ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z""" \
"""ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,""" \
"""9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,""" \
"""999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,""" \
""",,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999""" \
""",9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9.""" \
"""999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,""" \
""",9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z""" \
"""ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ""" \
""",999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99""" \
""",,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-""" \
"""9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9""" \
""".99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,""" \
""",,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9.""" \
"""99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ""" \
"""ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ""" \
"""-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ""" \
"""ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ""" \
""",9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99""" \
""",99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9""" \
""".99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
# Set the number of lines so that a call to `parser_trim_buffers`
# is triggered: after a couple of full chunks are consumed a
# relatively small 'residual' chunk would cause reallocation
# within the parser.
chunksize, n_lines = 128, 2 * 128 + 15
csv_data = "\n".join([record_] * n_lines) + "\n"
# We will use StringIO to load the CSV from this text buffer.
# pd.read_csv() will iterate over the file in chunks and will
# finally read a residual chunk of really small size.
# Generate the expected output: manually create the dataframe
# by splitting by comma and repeating the `n_lines` times.
row = tuple(val_ if val_ else float("nan")
for val_ in record_.split(","))
expected = pd.DataFrame([row for _ in range(n_lines)],
dtype=object, columns=None, index=None)
# Iterate over the CSV file in chunks of `chunksize` lines
chunks_ = self.read_csv(StringIO(csv_data), header=None,
dtype=object, chunksize=chunksize)
result = pd.concat(chunks_, axis=0, ignore_index=True)
# Check for data corruption if there was no segfault
tm.assert_frame_equal(result, expected)
def test_internal_null_byte(self):
# see gh-14012
#
# The null byte ('\x00') should not be used as a
# true line terminator, escape character, or comment
# character, only as a placeholder to indicate that
# none was specified.
#
# This test should be moved to common.py ONLY when
# Python's csv class supports parsing '\x00'.
names = ['a', 'b', 'c']
data = "1,2,3\n4,\x00,6\n7,8,9"
expected = pd.DataFrame([[1, 2.0, 3], [4, np.nan, 6],
[7, 8, 9]], columns=names)
result = self.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(result, expected)
| 40.27305
| 79
| 0.535881
|
713a97a6d275cb10e46211239c5979143e7c1048
| 2,275
|
py
|
Python
|
tasks/functions/brook.py
|
Aurora-Admin-Panel/backend
|
14caa79773fd505eebcd9c1d578c2994a0f56a8a
|
[
"MIT"
] | 15
|
2020-11-27T04:03:34.000Z
|
2022-03-04T11:00:07.000Z
|
tasks/functions/brook.py
|
Aurora-Admin-Panel/backend
|
14caa79773fd505eebcd9c1d578c2994a0f56a8a
|
[
"MIT"
] | 18
|
2021-06-03T06:03:02.000Z
|
2022-02-21T08:58:09.000Z
|
tasks/functions/brook.py
|
Aurora-Admin-Panel/backend
|
14caa79773fd505eebcd9c1d578c2994a0f56a8a
|
[
"MIT"
] | 26
|
2020-11-26T09:00:03.000Z
|
2022-02-16T04:20:53.000Z
|
from sqlalchemy.orm import Session
from app.db.models.port import Port
from app.db.models.port_forward import MethodEnum
from app.utils.dns import dns_query
from app.utils.ip import is_ip
from tasks.functions.base import AppConfig
class BrookConfig(AppConfig):
method = MethodEnum.BROOK
def __init__(self):
super().__init__()
self.app_name = "brook"
self.app_sync_role_name = "brook_sync"
def apply(self, db: Session, port: Port):
self.local_port = port.num
self.app_command = self.get_app_command(db, port)
self.update_app = not port.server.config.get("brook")
self.applied = True
return self
def get_app_command(self, db: Session, port: Port):
command = port.forward_rule.config.get("command")
if port.forward_rule.config.get("remote_address"):
if not is_ip(port.forward_rule.config.get("remote_address")):
remote_ip = dns_query(port.forward_rule.config.get("remote_address"))
else:
remote_ip = port.forward_rule.config.get("remote_address")
port.forward_rule.config['remote_ip'] = remote_ip
db.add(port.forward_rule)
db.commit()
if command == "relay":
args = (
f"-f :{port.num} "
f"-t {remote_ip}:{port.forward_rule.config.get('remote_port')}"
)
elif command in ("server", "wsserver"):
args = f"-l :{port.num} -p {port.forward_rule.config.get('password')}"
elif command == "client":
args = (
f"--socks5 0.0.0.0:{port.num} "
f"-s {remote_ip}:{port.forward_rule.config.get('remote_port')} "
f"-p {port.forward_rule.config.get('password')}"
)
elif command == "wsclient":
args = (
f"--socks5 0.0.0.0:{port.num} "
f"--wsserver ws://{remote_ip}:{port.forward_rule.config.get('remote_port')} "
f"-p {port.forward_rule.config.get('password')}"
)
else:
args = port.forward_rule.config.get("args")
return f"/usr/local/bin/brook {command} {args}"
@property
def playbook(self):
return "app.yml"
| 36.111111
| 93
| 0.584615
|
ce6e43de40a5631238c29381068669ffeeee0f80
| 9,309
|
py
|
Python
|
documentation/building-with-duim/source/conf.py
|
promovicz/opendylan
|
93a21dbf7fa2096de505bc641c24e63f4de9831f
|
[
"MIT"
] | null | null | null |
documentation/building-with-duim/source/conf.py
|
promovicz/opendylan
|
93a21dbf7fa2096de505bc641c24e63f4de9831f
|
[
"MIT"
] | null | null | null |
documentation/building-with-duim/source/conf.py
|
promovicz/opendylan
|
93a21dbf7fa2096de505bc641c24e63f4de9831f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Building Applications With DUIM documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 6 22:59:06 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../sphinx-extensions'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['dylandomain.dylandomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Building Applications With DUIM'
copyright = u'2011, Dylan Hackers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# The primary domain.
primary_domain = 'dylan'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'opendylan-docs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Building Applications With DUIM'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BuildingApplicationsWithDUIM'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BuildingApplicationsWithDUIM.tex', u'Building Applications With DUIM',
u'Dylan Hackers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'buildingapplicationswithduim', u'Building Applications With DUIM',
[u'Dylan Hackers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BuildingApplicationsWithDUIM', u'Building Applications With DUIM',
u'Dylan Hackers', 'BuildingApplicationsWithDUIM', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Building Applications With DUIM'
epub_author = u'Dylan Hackers'
epub_publisher = u'Dylan Hackers'
epub_copyright = u'2011, Dylan Hackers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
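# Build sketch (illustrative; the actual project may drive the build through a
# Makefile or other tooling rather than invoking sphinx-build directly):
#   sphinx-build -b html source build/html
# run from the directory that contains this ``source`` folder.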
| 32.211073
| 88
| 0.719089
|
fc5807d9de5dafda2e96ace80792136280cf2938
| 1,360
|
py
|
Python
|
main.py
|
rVnPower/TheShell
|
43016f6569369830aa36d3fd6978824ad5fcf2e9
|
[
"MIT"
] | null | null | null |
main.py
|
rVnPower/TheShell
|
43016f6569369830aa36d3fd6978824ad5fcf2e9
|
[
"MIT"
] | null | null | null |
main.py
|
rVnPower/TheShell
|
43016f6569369830aa36d3fd6978824ad5fcf2e9
|
[
"MIT"
] | null | null | null |
class Character:
def __init__(self, name, health, strength):
self.name = name
self.health = health
self.strength = strength
self.xp = 0
def attack(self, target):
if (self.health > 0):
damage = self.strength
print(
f'{self.name} attacks {target.name} and causes {damage} damage points'
)
target.health -= damage
if (target.health > 0):
print(f'{target.name} has {target.health} health points left')
else:
target.health = 0
bonusXP = 10
print(
f'{self.name} eliminated {target.name} and wins {bonusXP} experience points'
)
self.xp += bonusXP
else:
print(f'{self.name} can\'t attack (they\'ve been eliminated)')
def describe(self):
return f'{self.name} has {self.health} health points, {self.strength} as strength and {self.xp} XP points'
aurora = Character("Aurora", 150, 25)
glacius = Character("Glacius", 130, 30)
print("Welcome to the adventure! Here are our heroes:")
print(aurora.describe())
print(glacius.describe())
monster = Character("Spike", 40, 20)
print("A wild monster has appeared: it's named " + monster.name)
monster.attack(aurora)
monster.attack(glacius)
aurora.attack(monster)
glacius.attack(monster)
glacius.attack(monster)
print(aurora.describe())
print(glacius.describe())
| 27.2
| 110
| 0.647059
|
efdb57da34f026ac3b5db34706bb8650acc74842
| 23,843
|
py
|
Python
|
klvdata/misbEG0104.py
|
stharding/klvdata
|
e34529c4eba7c8cd00fe56834b623d8e5770f47a
|
[
"MIT"
] | 6
|
2016-10-21T00:49:27.000Z
|
2017-08-31T04:36:38.000Z
|
klvdata/misbEG0104.py
|
stharding/klvdata
|
e34529c4eba7c8cd00fe56834b623d8e5770f47a
|
[
"MIT"
] | null | null | null |
klvdata/misbEG0104.py
|
stharding/klvdata
|
e34529c4eba7c8cd00fe56834b623d8e5770f47a
|
[
"MIT"
] | 2
|
2017-05-26T10:44:34.000Z
|
2017-06-23T22:05:55.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2017 Matthew Pare (paretech@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from klvdata.common import hexstr_to_bytes
from klvdata.element import UnknownElement
from klvdata.elementparser import BytesElementParser
from klvdata.elementparser import DateTimeElementParser
from klvdata.elementparser import StringElementParser
from klvdata.elementparser import IEEE754ElementParser
from klvdata.setparser import SetParser
from klvdata.streamparser import StreamParser
class UnknownElement(UnknownElement):
pass
@StreamParser.add_parser
class UAVBasicUniversalMetadataSet(SetParser):
"""MISB EG0104.4 Predator UAV Basic Universal Metadata Set
http://www.gwg.nga.mil/misb/docs/eg/EG0104.4.pdf
"""
#key = hexstr_to_bytes('06 0E 2B 34 - 01 01 01 01 – 02 01 03 00 - 00 00 00 00')
key = hexstr_to_bytes('06 0E 2B 34 - 02 01 01 01 – 0E 01 01 02 - 01 01 00 00')
name = 'UAV Basic Universal Metadata Set'
key_length = 16
parsers = {}
_unknown_element = UnknownElement
@UAVBasicUniversalMetadataSet.add_parser
class PrecisionTimeStamp(DateTimeElementParser):
"""Precision Timestamp represented in microseconds.
Precision Timestamp represented in the number of microseconds elapsed
since midnight (00:00:00), January 1, 1970 not including leap seconds.
See MISB ST 0601.11 for additional details.
"""
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 02 01 01 01 05 00 00")
TAG = 2
UDSKey = "06 0E 2B 34 01 01 01 03 07 02 01 01 01 05 00 00"
LDSName = "Precision Time Stamp"
ESDName = ""
UDSName = "User Defined Time Stamp"
@UAVBasicUniversalMetadataSet.add_parser
class MissionID(StringElementParser):
"""Mission ID is the descriptive mission identifier.
Mission ID value field free text with maximum of 127 characters
describing the event.
"""
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 01 05 05 00 00 00 00 00")
TAG = 3
UDSKey = "06 0E 2B 34 01 01 01 01 01 05 05 00 00 00 00 00"
LDSName = "Mission ID"
ESDName = "Mission Number"
UDSName = "Episode Number"
min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class PlatformHeadingAngle(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 07 07 01 10 01 06 00 00 00")
TAG = 5
UDSKey = "06 0E 2B 34 01 01 01 07 07 01 10 01 06 00 00 00"
LDSName = "Platform Heading Angle"
ESDName = "UAV Heading (INS)"
UDSName = "Platform Heading Angle"
_domain = (0, 2**16-1)
_range = (0, 360)
@UAVBasicUniversalMetadataSet.add_parser
class PlatformPitchAngle(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 07 07 01 10 01 05 00 00 00")
TAG = 6
UDSKey = "06 0E 2B 34 01 01 01 07 07 01 10 01 05 00 00 00"
LDSName = "Platform Pitch Angle"
ESDName = "UAV Pitch (INS)"
UDSName = "Platform Pitch Angle"
_domain = (-(2**15-1), 2**15-1)
_range = (-20, 20)
@UAVBasicUniversalMetadataSet.add_parser
class PlatformRollAngle(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 07 07 01 10 01 04 00 00 00")
TAG = 7
UDSKey = "06 0E 2B 34 01 01 01 07 07 01 10 01 04 00 00 00"
LDSName = "Platform Roll Angle"
ESDName = "UAV Roll (INS)"
UDSName = "Platform Roll Angle"
_domain = (-(2**15-1), 2**15-1)
_range = (-50, 50)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class PlatformDesignation(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 01 01 20 01 00 00 00 00")
TAG = 10
UDSKey = "06 0E 2B 34 01 01 01 01 01 01 20 01 00 00 00 00"
LDSName = "Platform Designation"
ESDName = "Project ID Code"
UDSName = "Device Designation"
min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class ImageSourceSensor(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 04 20 01 02 01 01 00 00")
TAG = 11
UDSKey = "06 0E 2B 34 01 01 01 01 04 20 01 02 01 01 00 00"
LDSName = "Image Source Sensor"
ESDName = "Sensor Name"
UDSName = "Image Source Device"
min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class ImageCoordinateSystem(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 01 01 00 00 00 00")
TAG = 12
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 01 01 00 00 00 00"
LDSName = "Image Coordinate System"
ESDName = "Image Coordinate System"
UDSName = "Image Coordinate System"
min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class SensorLatitude(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 02 04 00 00")
TAG = 13
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 02 04 00 00"
LDSName = "Sensor Latitude"
ESDName = "Sensor Latitude"
UDSName = "Device Latitude"
_domain = (-(2**31-1), 2**31-1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorLatitude1(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 02 04 02 00")
TAG = 13
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 02 04 02 00"
LDSName = "Sensor Latitude"
ESDName = "Sensor Latitude"
UDSName = "Device Latitude"
_domain = (-(2**63-1), 2**63-1)
_range = (-90, 90)
units = 'degrees'
# Note: this key is non-standard; it comes from 1.klv
@UAVBasicUniversalMetadataSet.add_parser
class SensorLatitude2(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 01 01 01 03 07 01 02 01 02 04 02 00")
TAG = 13
UDSKey = "06 0E 01 01 01 03 07 01 02 01 02 04 02 00"
LDSName = "Sensor Latitude"
ESDName = "Sensor Latitude"
UDSName = "Device Latitude"
_domain = (-(2**63-1), 2**63-1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorLongitude(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 02 06 00 00")
TAG = 14
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 02 06 00 00"
LDSName = "Sensor Longitude"
ESDName = "Sensor Longitude"
UDSName = "Device Longitude"
_domain = (-(2**63-1), 2**63-1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorLongitude1(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 02 06 02 00")
TAG = 14
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 02 06 02 00"
LDSName = "Sensor Longitude"
ESDName = "Sensor Longitude"
UDSName = "Device Longitude"
_domain = (-(2**63-1), 2**63-1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorTrueAltitude(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 02 02 00 00")
TAG = 15
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 02 02 00 00"
LDSName = "Sensor True Altitude"
ESDName = "Sensor Altitude"
UDSName = "Device Altitude"
_domain = (0, 2**16-1)
_range = (-99999, 99999)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class SensorHorizontalFieldOfView(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 02 04 20 02 01 01 08 00 00")
TAG = 16
UDSKey = "06 0E 2B 34 01 01 01 02 04 20 02 01 01 08 00 00"
LDSName = "Sensor Horizontal Field of View"
ESDName = "Field of View"
UDSName = "Field of View (FOVHorizontal)"
_domain = (0, 2**16-1)
_range = (0, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorVerticalFieldOfView(IEEE754ElementParser):
key = hexstr_to_bytes("06 0e 2b 34 01 01 01 07 04 20 02 01 01 0a 01 00")
TAG = 17
UDSKey = "06 0e 2b 34 01 01 01 07 04 20 02 01 01 0a 01 00"
LDSName = "Sensor Vertical Field of View"
ESDName = "Vertical Field of View"
UDSName = ""
_domain = (0, 2 ** 16 - 1)
_range = (0, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorRelativeAzimuthAngle(IEEE754ElementParser):
key = hexstr_to_bytes("06 0e 2b 34 01 01 01 01 07 01 10 01 02 00 00 00")
TAG = 18
UDSKey = "06 0e 2b 34 01 01 01 01 07 01 10 01 02 00 00 00"
LDSName = "Sensor Relative Azimuth Angle"
ESDName = "Sensor Relative Azimuth Angle"
UDSName = ""
_domain = (0, 2 ** 32 - 1)
_range = (0, 360)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SensorRelativeElevationAngle(IEEE754ElementParser):
key = hexstr_to_bytes("06 0e 2b 34 01 01 01 01 07 01 10 01 03 00 00 00")
TAG = 19
UDSKey = "06 0e 2b 34 01 01 01 01 07 01 10 01 03 00 00 00"
LDSName = "Sensor Relative Elevation Angle"
ESDName = "Sensor Relative Elevation Angle"
UDSName = ""
_domain = (-(2 ** 31 - 1), 2 ** 31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SlantRange(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 08 01 01 00 00 00")
TAG = 21
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 08 01 01 00 00 00"
LDSName = "Slant Range"
ESDName = "Slant Range"
UDSName = "Slant Range"
_domain = (0, 2**32-1)
_range = (0, +5e6)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class TargetWidth(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00")
TAG = 22
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00"
LDSName = "Target Width"
ESDName = "Target Width"
UDSName = "Target Width"
_domain = (0, 2**16-1)
_range = (0, +10e3)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class FrameCenterLatitude(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 03 02 00 00")
TAG = 23
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 03 02 00 00"
LDSName = "Frame Center Latitude"
ESDName = "Target Latitude"
UDSName = "Frame Center Latitude"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class FrameCenterLongitude(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 03 04 00 00")
TAG = 24
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 03 04 00 00"
LDSName = "Frame Center Longitude"
ESDName = "Target Longitude"
UDSName = "Frame Center Longitude"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint1(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00")
TAG = 26
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00"
LDSName = "Offset Corner Latitude Point 1"
ESDName = "SAR Latitude 4"
UDSName = "Corner Latitude Point 1"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, +0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint1(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00")
TAG = 27
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00"
LDSName = "Offset Corner Longitude Point 1"
ESDName = "SAR Longitude 4"
UDSName = "Corner Longitude Point 1"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint2(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00")
TAG = 28
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00"
LDSName = "Offset Corner Latitude Point 2"
ESDName = "SAR Latitude 1"
UDSName = "Corner Latitude Point 2"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint2(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00")
TAG = 29
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00"
LDSName = "Offset Corner Longitude Point 2"
ESDName = "SAR Longitude 1"
UDSName = "Corner Longitude Point 2"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint3(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00")
TAG = 30
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00"
LDSName = "Offset Corner Latitude Point 3"
ESDName = "SAR Latitude 2"
UDSName = "Corner Latitude Point 3"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint3(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00")
TAG = 31
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00"
LDSName = "Offset Corner Longitude Point 3"
ESDName = "SAR Longitude 2"
UDSName = "Corner Longitude Point 3"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint4(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00")
TAG = 32
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00"
LDSName = "Offset Corner Latitude Point 4"
ESDName = "SAR Latitude 3"
UDSName = "Corner Latitude Point 4"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint4(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00")
TAG = 33
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00"
LDSName = "Offset Corner Longitude Point 4"
ESDName = "SAR Longitude 3"
UDSName = "Corner Longitude Point 4"
_domain = (-(2**15 - 1), 2**15 - 1)
_range = (-0.075, 0.075)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class StartDateTime(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 01 01 00 00")
TAG = 72
UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 01 01 00 00"
LDSName = "Start Date Time - UTC"
UDSName = "Start Date Time - UTC"
min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class EventStartTime(DateTimeElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00")
TAG = 72
UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00"
LDSName = "Event Start Time - UTC"
ESDName = "Mission Start Time, Date, and Date of Collection"
UDSName = "Event Start Date Time - UTC"
@UAVBasicUniversalMetadataSet.add_parser
class RVTLocalSet(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00")
TAG = 73
UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00"
LDSName = "RVT Local Data Set"
ESDName = ""
UDSName = "Remote Video Terminal Local Set"
@UAVBasicUniversalMetadataSet.add_parser
class VMTILocalSet(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 02 0B 01 01 0E 01 03 03 06 00 00 00")
TAG = 74
UDSKey = "06 0E 2B 34 02 0B 01 01 0E 01 03 03 06 00 00 00"
LDSName = "VMTI Local Set"
ESDName = ""
UDSName = "Video Moving Target Indicator Local Set"
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint1Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00")
TAG = 82
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00"
LDSName = "Corner Latitude Point 1 (Full)"
ESDName = "SAR Latitude 4"
UDSName = "Corner Latitude Point 1 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint1Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00")
TAG = 83
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00"
LDSName = "Corner Longitude Point 1 (Full)"
ESDName = "SAR Longitude 4"
UDSName = "Corner Longitude Point 1 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint2Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00")
TAG = 84
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00"
LDSName = "Corner Latitude Point 2 (Full)"
ESDName = "SAR Latitude 1"
UDSName = "Corner Latitude Point 2 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint2Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00")
TAG = 85
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00"
LDSName = "Corner Longitude Point 2 (Full)"
ESDName = "SAR Longitude 1"
UDSName = "Corner Longitude Point 2 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint3Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00")
TAG = 86
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00"
LDSName = "Corner Latitude Point 3 (Full)"
ESDName = "SAR Latitude 2"
UDSName = "Corner Latitude Point 3 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint3Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00")
TAG = 87
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00"
LDSName = "Corner Longitude Point 3 (Full)"
ESDName = "SAR Longitude 2"
UDSName = "Corner Longitude Point 3 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint4Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00")
TAG = 88
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00"
LDSName = "Corner Latitude Point 4 (Full)"
ESDName = "SAR Latitude 3"
UDSName = "Corner Latitude Point 4 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint4Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00")
TAG = 89
UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00"
LDSName = "Corner Longitude Point 4 (Full)"
ESDName = "SAR Longitude 3"
UDSName = "Corner Longitude Point 4 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class PlatformPitchAngleFull(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 07 07 01 10 01 05 00 00 00")
TAG = 90
UDSKey = "06 0E 2B 34 01 01 01 07 07 01 10 01 05 00 00 00"
LDSName = "Platform Pitch Angle (Full)"
ESDName = "UAV Pitch (INS)"
UDSName = "Platform Pitch Angle"
_domain = (-(2**31-1), 2**31-1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class PlatformRollAngleFull(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 07 07 01 10 01 04 00 00 00")
TAG = 91
UDSKey = "06 0E 2B 34 01 01 01 07 07 01 10 01 04 00 00 00"
LDSName = "Platform Roll Angle (Full)"
ESDName = "UAV Roll (INS)"
UDSName = "Platform Roll Angle"
_domain = (-(2**31-1), 2**31-1)
_range = (-90, 90)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class MIISCoreIdentifier(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 0E 01 04 05 03 00 00 00")
TAG = 94
UDSKey = "06 0E 2B 34 01 01 01 01 0E 01 04 05 03 00 00 00"
LDSName = "MIIS Core Identifier"
ESDName = ""
UDSName = "Motion Imagery Identification System Core"
@UAVBasicUniversalMetadataSet.add_parser
class SARMotionImageryLocalSet(StringElementParser):
key = hexstr_to_bytes("06 0E 2B 34 02 0B 01 01 0E 01 03 03 0D 00 00 00")
TAG = 95
UDSKey = "06 0E 2B 34 02 0B 01 01 0E 01 03 03 0D 00 00 00"
LDSName = "SAR Motion Imagery Local Set"
ESDName = ""
UDSName = "SAR Motion Imagery Local Set"
@UAVBasicUniversalMetadataSet.add_parser
class TargetWidthExtended(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00")
TAG = 96
UDSKey = "06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00"
LDSName = "Target Width Extended"
ESDName = "Target Width"
UDSName = "Target Width"
_domain = (0, 2**8-1)
_range = (0, 2**8-1)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class DensityAltitudeExtended(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 0E 01 01 01 10 00 00 00")
TAG = 103
UDSKey = "06 0E 2B 34 01 01 01 01 0E 01 01 01 10 00 00 00"
LDSName = "Density Altitude Extended"
ESDName = "Density Altitude"
UDSName = ""
_domain = (0, 2**16-1)
_range = (-900, 40000)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class SensorEllipsoidHeightExtended(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 0E 01 02 01 82 47 00 00")
TAG = 104
UDSKey = "06 0E 2B 34 01 01 01 01 0E 01 02 01 82 47 00 00"
LDSName = "Sensor Ellipsoid Height Extended"
ESDName = ""
UDSName = ""
_domain = (0, 2**16-1)
_range = (-900, 40000)
units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class AlternatePlatformEllipsoidHeightExtended(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 0E 01 02 01 82 48 00 00")
TAG = 105
UDSKey = "06 0E 2B 34 01 01 01 01 0E 01 02 01 82 48 00 00"
LDSName = "Alternate Platform Ellipsoid Height Extended"
ESDName = ""
UDSName = ""
_domain = (0, 2**16-1)
_range = (-900, 40000)
units = 'meters'
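# Example usage (a hedged sketch; assumes a raw KLV byte stream, e.g. one
# demuxed from an MPEG-TS file, and the StreamParser interface imported above):
# with open('sample.klv', 'rb') as f:
#     for packet in StreamParser(f):
#         packet.structure()  # print the parsed EG0104 element tree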
| 35.375371
| 83
| 0.671644
|
0de6fea6c576b09c0bb5be8fa6b4e4713bf0a512
| 2,843
|
py
|
Python
|
labs/lab-07/plot_words4.py
|
LingCheng3273/OSS-Lab
|
e4ba3755aac3c30649180fdeb6f1da740b8eeb1c
|
[
"MIT"
] | null | null | null |
labs/lab-07/plot_words4.py
|
LingCheng3273/OSS-Lab
|
e4ba3755aac3c30649180fdeb6f1da740b8eeb1c
|
[
"MIT"
] | null | null | null |
labs/lab-07/plot_words4.py
|
LingCheng3273/OSS-Lab
|
e4ba3755aac3c30649180fdeb6f1da740b8eeb1c
|
[
"MIT"
] | null | null | null |
"""
=====
Words
=====
Words/Ladder Graph
------------------
Generate an undirected graph over the 4-letter words in the datafile
`words4_dat.txt.gz` (adapted from the original example, which uses the
5757 5-letter words in `words_dat.txt.gz` and yields 14,135 edges). Two
words are connected by an edge if they differ in one letter. The original
example is described in Section 1.1 of Knuth's book (see [1]_ and [2]_).
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg (hagberg@lanl.gov),
# Brendt Wohlberg,
# hughdbrown@yahoo.com
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import gzip
from string import ascii_lowercase as lowercase
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
G = nx.Graph(name="words")
lookup = dict((c, lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i + 1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j + 1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
def words_graph():
# Modified to read words4_dat.txt.gz so the graph is built over 4-letter words
fh = gzip.open('words4_dat.txt.gz', 'r')
words = set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w = str(line[0:4])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
G = words_graph()
print("Loaded words4_dat.txt containing four-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print("%d connected components" % nx.number_connected_components(G))
for (source, target) in [('cold', 'warm'),
('love', 'hate'),
('good', 'evil'),
('pear', 'beef'),
('make', 'take')]:
print("Shortest path between %s and %s is" % (source, target))
try:
sp = nx.shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
| 31.94382
| 76
| 0.57369
|
ccdb0b58ce162e194092b2f7e77453d2f2336ad0
| 13,970
|
py
|
Python
|
app/extensions/api/namespace.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 1,420
|
2015-11-20T01:25:14.000Z
|
2022-03-22T03:51:33.000Z
|
app/extensions/api/namespace.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 151
|
2016-01-07T09:11:42.000Z
|
2020-11-17T08:37:07.000Z
|
app/extensions/api/namespace.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 389
|
2015-11-23T01:14:31.000Z
|
2022-02-07T08:23:11.000Z
|
# encoding: utf-8
"""
Extended Api Namespace implementation with an application-specific helpers
--------------------------------------------------------------------------
"""
from contextlib import contextmanager
from functools import wraps
import logging
import flask_marshmallow
import sqlalchemy
from flask_restplus_patched.namespace import Namespace as BaseNamespace
from flask_restplus._http import HTTPStatus
from . import http_exceptions
from .webargs_parser import CustomWebargsParser
log = logging.getLogger(__name__)
class Namespace(BaseNamespace):
"""
App-specific API handlers live here.
"""
WEBARGS_PARSER = CustomWebargsParser()
def resolve_object_by_model(self, model, object_arg_name, identity_arg_names=None):
"""
A helper decorator to resolve DB record instance by id.
Arguments:
model (type) - a Flask-SQLAlchemy model class with
``query.get_or_404`` method
object_arg_name (str) - argument name for a resolved object
identity_arg_names (tuple) - a list of argument names holding an
object identity, by default it will be auto-generated as
``%(object_arg_name)s_id``.
Example:
>>> @namespace.resolve_object_by_model(User, 'user')
... def get_user_by_id(user):
... return user
>>> get_user_by_id(user_id=3)
<User(id=3, ...)>
>>> @namespace.resolve_object_by_model(MyModel, 'my_model', ('user_id', 'model_name'))
... def get_object_by_two_primary_keys(my_model):
... return my_model
>>> get_object_by_two_primary_keys(user_id=3, model_name="test")
<MyModel(user_id=3, name="test", ...)>
"""
if identity_arg_names is None:
identity_arg_names = ('%s_id' % object_arg_name, )
elif not isinstance(identity_arg_names, (list, tuple)):
identity_arg_names = (identity_arg_names, )
return self.resolve_object(
object_arg_name,
resolver=lambda kwargs: model.query.get_or_404(
[kwargs.pop(identity_arg_name) for identity_arg_name in identity_arg_names]
)
)
def model(self, name=None, model=None, **kwargs):
# pylint: disable=arguments-differ
"""
A decorator which registers a model (aka schema / definition).
This extended implementation auto-generates a name for
``Flask-Marshmallow.Schema``-based instances by using the class name
with the ``Schema`` suffix stripped off.
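Example (illustrative; ``TeamSchema`` is a hypothetical
Flask-Marshmallow schema):
>>> namespace.model(model=TeamSchema())  # registered under the name "Team"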
"""
if isinstance(model, flask_marshmallow.Schema) and not name:
name = model.__class__.__name__
if name.endswith('Schema'):
name = name[:-len('Schema')]
return super(Namespace, self).model(name=name, model=model, **kwargs)
def login_required(self, oauth_scopes, locations=('headers',)):
"""
A decorator which restricts access for authorized users only.
This decorator automatically applies the following features:
* ``OAuth2.require_oauth`` decorator requires authentication;
* ``permissions.ActiveUserRolePermission`` decorator ensures
minimal authorization level;
* All of the above requirements are put into OpenAPI Specification with
relevant options and in a text description.
Arguments:
oauth_scopes (list): a list of required OAuth2 Scopes (strings)
locations (list): a list of locations (``headers``, ``form``) where
the access token should be looked up.
Example:
>>> class Users(Resource):
... @namespace.login_required(oauth_scopes=['users:read'])
... def get(self):
... return []
...
>>> @namespace.login_required(oauth_scopes=['users:read'])
... class Users(Resource):
... def get(self):
... return []
...
... @namespace.login_required(oauth_scopes=['users:write'])
... def post(self):
... return User()
...
>>> @namespace.login_required(oauth_scopes=[])
... class Users(Resource):
... @namespace.login_required(oauth_scopes=['users:read'])
... def get(self):
... return []
...
... @namespace.login_required(oauth_scopes=['users:write'])
... def post(self):
... return User()
"""
def decorator(func_or_class):
"""
A helper wrapper.
"""
if isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(decorator)
return func_or_class
func = func_or_class
# Avoid circular dependency
from app.extensions import oauth2
from app.modules.users import permissions
# Automatically apply the `permissions.ActiveUserRolePermission`
# guard if none is applied yet.
if getattr(func, '_role_permission_applied', False):
protected_func = func
else:
protected_func = self.permission_required(
permissions.ActiveUserRolePermission()
)(func)
# Ignore the current OAuth2 scopes if another @login_required
# decorator was applied and just copy the already applied scopes.
if hasattr(protected_func, '__apidoc__') \
and 'security' in protected_func.__apidoc__ \
and '__oauth__' in protected_func.__apidoc__['security']:
_oauth_scopes = protected_func.__apidoc__['security']['__oauth__']['scopes']
else:
_oauth_scopes = oauth_scopes
oauth_protection_decorator = oauth2.require_oauth(*_oauth_scopes, locations=locations)
self._register_access_restriction_decorator(protected_func, oauth_protection_decorator)
oauth_protected_func = oauth_protection_decorator(protected_func)
if 'form' in locations:
oauth_protected_func = self.param(
name='access_token',
description=(
"This is an alternative way of passing the access_token, useful for "
"making authenticated requests from the browser native forms."
),
_in='formData',
type='string',
required=False
)(oauth_protected_func)
return self.doc(
security={
# This is a temporary (namespace) configuration which gets
# overridden on namespace registration (in `Api.add_namespace`).
'__oauth__': {
'type': 'oauth',
'scopes': _oauth_scopes,
}
}
)(
self.response(
code=HTTPStatus.UNAUTHORIZED.value,
description=(
"Authentication is required"
if not oauth_scopes else
"Authentication with %s OAuth scope(s) is required" % (
', '.join(oauth_scopes)
)
),
)(oauth_protected_func)
)
return decorator
def permission_required(self, permission, kwargs_on_request=None):
"""
A decorator which restricts access for users with a specific
permissions only.
This decorator puts together permissions restriction code with OpenAPI
Specification documentation.
Arguments:
permission (Permission) - it can be a class or an instance of
:class:``Permission``, which will be applied to a decorated
function, and docstrings of which will be used in OpenAPI
Specification.
kwargs_on_request (func) - a function which should accept only one
``dict`` argument (all kwargs passed to the function), and
must return a ``dict`` of arguments which will be passed to
the ``permission`` object.
Example:
>>> @namespace.permission_required(
... OwnerRolePermission,
... kwargs_on_request=lambda kwargs: {'obj': kwargs['team']}
... )
... def get_team(team):
... # This line will be reached only if OwnerRolePermission check
... # is passed!
... return team
"""
def decorator(func):
"""
A helper wrapper.
"""
# Avoid circular dependency
from app.modules.users import permissions
if getattr(permission, '_partial', False):
# We don't apply partial permissions, we only use them for
# documentation purposes.
protected_func = func
else:
if not kwargs_on_request:
_permission_decorator = permission
else:
def _permission_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
with permission(**kwargs_on_request(kwargs)):
return func(*args, **kwargs)
return wrapper
protected_func = _permission_decorator(func)
self._register_access_restriction_decorator(protected_func, _permission_decorator)
# Apply the `_role_permission_applied` marker for Role Permissions,
# so the `login_required` decorator does not apply redundant
# permissions.
#
# TODO: Change this behaviour when implementing advanced OPTIONS
# method support
if (
isinstance(permission, permissions.RolePermission)
or
(
isinstance(permission, type)
and
issubclass(permission, permissions.RolePermission)
)
):
protected_func._role_permission_applied = True # pylint: disable=protected-access
permission_description = permission.__doc__.strip()
return self.doc(
description="**PERMISSIONS: %s**\n\n" % permission_description
)(
self.response(
code=HTTPStatus.FORBIDDEN.value,
description=permission_description,
)(protected_func)
)
return decorator
def _register_access_restriction_decorator(self, func, decorator_to_register):
# pylint: disable=invalid-name
"""
Helper that registers a decorator on a function so the same access
checks can be performed for the OPTIONS method
"""
if not hasattr(func, '_access_restriction_decorators'):
func._access_restriction_decorators = [] # pylint: disable=protected-access
func._access_restriction_decorators.append(decorator_to_register) # pylint: disable=protected-access
def paginate(self, parameters=None, locations=None):
"""
Endpoint parameters registration decorator special for pagination.
If ``parameters`` is not provided, the default PaginationParameters will be
used.
Any custom Parameters class can be used instead, but it must declare
``limit`` and ``offset`` fields.
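Example (illustrative; the decorated resource method is assumed to return
a SQLAlchemy query over a hypothetical ``Team`` model):
>>> @namespace.paginate()
... def get(self, args):
...     return Team.query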
"""
if not parameters:
# Use default parameters if None specified
from app.extensions.api.parameters import PaginationParameters
parameters = PaginationParameters()
if not all(
mandatory in parameters.declared_fields
for mandatory in ('limit', 'offset')
):
raise AttributeError(
'`limit` and `offset` fields must be in Parameter passed to `paginate()`'
)
def decorator(func):
@wraps(func)
def wrapper(self_, parameters_args, *args, **kwargs):
queryset = func(self_, parameters_args, *args, **kwargs)
total_count = queryset.count()
return (
queryset
.offset(parameters_args['offset'])
.limit(parameters_args['limit']),
HTTPStatus.OK,
{'X-Total-Count': total_count}
)
return self.parameters(parameters, locations)(wrapper)
return decorator
@contextmanager
def commit_or_abort(self, session, default_error_message="The operation failed to complete"):
"""
Context manager to simplify a workflow in resources
Args:
session: db.session instance
default_error_message: Custom error message
Example:
>>> with api.commit_or_abort(db.session):
... team = Team(**args)
... db.session.add(team)
... return team
"""
try:
with session.begin():
yield
except ValueError as exception:
log.info("Database transaction was rolled back due to: %r", exception)
http_exceptions.abort(code=HTTPStatus.CONFLICT, message=str(exception))
except sqlalchemy.exc.IntegrityError as exception:
log.info("Database transaction was rolled back due to: %r", exception)
http_exceptions.abort(
code=HTTPStatus.CONFLICT,
message=default_error_message
)
| 39.6875
| 109
| 0.567788
|
a4cce7a07f3a273ec79d081e724abcf6b5e8b117
| 3,336
|
py
|
Python
|
prepare_data.py
|
dajor/keras
|
17b4df003c0445ed022401923d99495d8f1bb5fe
|
[
"MIT"
] | null | null | null |
prepare_data.py
|
dajor/keras
|
17b4df003c0445ed022401923d99495d8f1bb5fe
|
[
"MIT"
] | null | null | null |
prepare_data.py
|
dajor/keras
|
17b4df003c0445ed022401923d99495d8f1bb5fe
|
[
"MIT"
] | null | null | null |
import os
import glob
import argparse
import pdf2image
import simplejson
from tqdm import tqdm
from invoicenet import FIELDS, FIELD_TYPES
from invoicenet.common import util
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--data_dir", type=str, required=True,
help="path to directory containing invoice PDFs")
ap.add_argument("--out_dir", type=str, default='processed_data/',
help="path to save prepared data")
ap.add_argument("--val_size", type=float, default=0.2,
help="validation split ratio")
args = ap.parse_args()
os.makedirs(os.path.join(args.out_dir, 'train'), exist_ok=True)
os.makedirs(os.path.join(args.out_dir, 'val'), exist_ok=True)
filenames = [os.path.abspath(f) for f in glob.glob(args.data_dir + "**/*.pdf", recursive=True)]
idx = int(len(filenames) * args.val_size)
train_files = filenames[idx:]
val_files = filenames[:idx]
print("Total: {}".format(len(filenames)))
print("Training: {}".format(len(train_files)))
print("Validation: {}".format(len(val_files)))
for phase, filenames in [('train', train_files), ('val', val_files)]:
print("Preparing {} data...".format(phase))
for filename in tqdm(filenames):
try:
page = pdf2image.convert_from_path(filename)[0]
page.save(os.path.join(args.out_dir, phase, os.path.basename(filename)[:-3] + 'png'))
height = page.size[1]
width = page.size[0]
ngrams = util.create_ngrams(page)
for ngram in ngrams:
if "amount" in ngram["parses"]:
ngram["parses"]["amount"] = util.normalize(ngram["parses"]["amount"], key="amount")
if "date" in ngram["parses"]:
ngram["parses"]["date"] = util.normalize(ngram["parses"]["date"], key="date")
with open(filename[:-3] + 'json', 'r') as fp:
labels = simplejson.loads(fp.read())
fields = {}
for field in FIELDS:
if field in labels:
if FIELDS[field] == FIELD_TYPES["amount"]:
fields[field] = util.normalize(labels[field], key="amount")
elif FIELDS[field] == FIELD_TYPES["date"]:
fields[field] = util.normalize(labels[field], key="date")
else:
fields[field] = labels[field]
else:
fields[field] = ''
data = {
"fields": fields,
"nGrams": ngrams,
"height": height,
"width": width,
"filename": os.path.abspath(
os.path.join(args.out_dir, phase, os.path.basename(filename)[:-3] + 'png'))
}
with open(os.path.join(args.out_dir, phase, os.path.basename(filename)[:-3] + 'json'), 'w') as fp:
fp.write(simplejson.dumps(data, indent=2))
except Exception as exp:
print("Skipping {} : {}".format(filename, exp))
continue
if __name__ == '__main__':
main()
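# Hypothetical invocation of this script (paths are placeholders; pdf2image also
# needs the poppler utilities installed on the system):
#
#   python prepare_data.py --data_dir invoices/ --out_dir processed_data/ --val_size 0.2
#
# Each PDF's first page is rendered to PNG and paired with a JSON file containing
# the normalized field labels and n-grams.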
| 36.659341
| 114
| 0.525779
|
98fb620b005388dc7d024319f2748b4e85ad011b
| 350
|
py
|
Python
|
nequip/utils/tp_utils.py
|
schiotz/nequip
|
c343ce25ecfeb64f6df92e96022e673a7714e3a6
|
[
"MIT"
] | 153
|
2021-06-20T20:12:01.000Z
|
2022-03-31T13:57:45.000Z
|
nequip/utils/tp_utils.py
|
schiotz/nequip
|
c343ce25ecfeb64f6df92e96022e673a7714e3a6
|
[
"MIT"
] | 25
|
2021-06-17T16:00:16.000Z
|
2022-03-29T07:04:00.000Z
|
nequip/utils/tp_utils.py
|
schiotz/nequip
|
c343ce25ecfeb64f6df92e96022e673a7714e3a6
|
[
"MIT"
] | 25
|
2021-06-21T22:25:22.000Z
|
2022-03-30T04:39:46.000Z
|
from e3nn import o3
def tp_path_exists(irreps_in1, irreps_in2, ir_out):
irreps_in1 = o3.Irreps(irreps_in1).simplify()
irreps_in2 = o3.Irreps(irreps_in2).simplify()
ir_out = o3.Irrep(ir_out)
for _, ir1 in irreps_in1:
for _, ir2 in irreps_in2:
if ir_out in ir1 * ir2:
return True
return False
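# Minimal usage sketch (assumes e3nn is installed; the irreps strings are
# illustrative). A "1o" output is reachable from 0e x 1o, so the first call
# returns True, while 0e x 0e can only produce 0e:
#
# >>> tp_path_exists("1x0e + 1x1o", "1x1o", "1o")
# True
# >>> tp_path_exists("1x0e", "1x0e", "1o")
# False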
| 25
| 51
| 0.642857
|
a75f653a20f7585f1ef5fc7ccc21a3541a64f2f1
| 2,489
|
py
|
Python
|
athanor_faction/models.py
|
volundmush/athanor_faction
|
4f2e886d9587222b157ff6ede1b30900555ce7df
|
[
"BSD-3-Clause"
] | null | null | null |
athanor_faction/models.py
|
volundmush/athanor_faction
|
4f2e886d9587222b157ff6ede1b30900555ce7df
|
[
"BSD-3-Clause"
] | null | null | null |
athanor_faction/models.py
|
volundmush/athanor_faction
|
4f2e886d9587222b157ff6ede1b30900555ce7df
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from evennia.typeclasses.models import SharedMemoryModel
class AllianceBridge(SharedMemoryModel):
db_object = models.OneToOneField('objects.ObjectDB', related_name='alliance_bridge', primary_key=True,
on_delete=models.CASCADE)
db_name = models.CharField(max_length=255, null=False, blank=False)
db_iname = models.CharField(max_length=255, null=False, blank=False, unique=True)
db_cname = models.CharField(max_length=255, null=False, blank=False)
db_abbreviation = models.CharField(max_length=20, null=True, blank=False)
db_iabbreviation = models.CharField(max_length=20, null=True, blank=False, unique=True)
db_system_identifier = models.CharField(max_length=255, null=True, blank=False, unique=True)
class FactionBridge(SharedMemoryModel):
db_object = models.OneToOneField('objects.ObjectDB', related_name='faction_bridge', primary_key=True,
on_delete=models.CASCADE)
db_alliance = models.ForeignKey(AllianceBridge, related_name='factions', on_delete=models.PROTECT, null=True)
db_name = models.CharField(max_length=255, null=False, blank=False)
db_iname = models.CharField(max_length=255, null=False, blank=False, unique=True)
db_cname = models.CharField(max_length=255, null=False, blank=False)
db_abbreviation = models.CharField(max_length=20, null=True, blank=False)
db_iabbreviation = models.CharField(max_length=20, null=True, blank=False, unique=True)
db_system_identifier = models.CharField(max_length=255, null=True, blank=False, unique=True)
class Meta:
verbose_name = 'Faction'
verbose_name_plural = 'Factions'
class DivisionBridge(SharedMemoryModel):
db_object = models.OneToOneField('objects.ObjectDB', related_name='division_bridge', primary_key=True,
on_delete=models.CASCADE)
db_faction = models.ForeignKey(FactionBridge, related_name='divisions', on_delete=models.PROTECT)
db_name = models.CharField(max_length=255, null=False, blank=False)
db_iname = models.CharField(max_length=255, null=False, blank=False)
db_cname = models.CharField(max_length=255, null=False, blank=False)
db_system_identifier = models.CharField(max_length=255, null=True, blank=False, unique=True)
class Meta:
unique_together = (('db_faction', 'db_iname'),)
verbose_name = 'Division'
verbose_name_plural = 'Divisions'
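# Hedged usage sketch (assumes Evennia ObjectDB instances `obj` and `other_obj`
# already exist; the name values are illustrative). A FactionBridge row attaches
# faction metadata to an in-game object, and divisions hang off the faction:
#
# faction = FactionBridge.objects.create(
#     db_object=obj, db_name="Red Sash", db_iname="red sash", db_cname="Red Sash")
# DivisionBridge.objects.create(
#     db_object=other_obj, db_faction=faction,
#     db_name="Scouts", db_iname="scouts", db_cname="Scouts")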
| 56.568182
| 113
| 0.732021
|
a117ceff7ebc0f5f7a0c5a8f796d2775125f5b67
| 650
|
py
|
Python
|
init_model.py
|
positivedefinite/SAMMY
|
46c9f6712262db95d3d0b56103f36537b99b4b9e
|
[
"MIT"
] | 1
|
2018-03-04T14:41:26.000Z
|
2018-03-04T14:41:26.000Z
|
init_model.py
|
positivedefinite/SAMMY
|
46c9f6712262db95d3d0b56103f36537b99b4b9e
|
[
"MIT"
] | null | null | null |
init_model.py
|
positivedefinite/SAMMY
|
46c9f6712262db95d3d0b56103f36537b99b4b9e
|
[
"MIT"
] | null | null | null |
from keras.models import model_from_json
def init(json_path,h5_path):
json_file = open(json_path,'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
loaded_model.load_weights(h5_path)
print("Loaded Model from disk")
#compile and evaluate loaded model
# loaded_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# return loaded_model
#loss,accuracy = model.evaluate(X_test,y_test)
#print('loss:', loss)
#print('accuracy:', accuracy)
#graph = tf.get_default_graph()
#return loaded_model,graph
return loaded_model
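# Hypothetical usage (file paths and input array are placeholders):
#
# model = init("model.json", "model_weights.h5")
# model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# predictions = model.predict(x_test)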
| 30.952381
| 93
| 0.78
|
65d1fc31a73f889613000bfd08b63c6d7cb3c272
| 10,812
|
py
|
Python
|
PathPlanning/BidirectionalAStar/bidirectional_a_star.py
|
ll7/PythonRobotics
|
86a67adde743e85974a42e47c95f083284cfd492
|
[
"MIT"
] | 14
|
2021-09-26T06:37:01.000Z
|
2022-03-31T12:30:58.000Z
|
PathPlanning/BidirectionalAStar/bidirectional_a_star.py
|
taka-robo/PythonRobotics
|
3607d72b60cd500806e0f026ac8beb82850a01f9
|
[
"MIT"
] | null | null | null |
PathPlanning/BidirectionalAStar/bidirectional_a_star.py
|
taka-robo/PythonRobotics
|
3607d72b60cd500806e0f026ac8beb82850a01f9
|
[
"MIT"
] | 7
|
2020-11-18T02:15:30.000Z
|
2022-03-13T06:47:00.000Z
|
"""
Bidirectional A* grid planning
author: Erwin Lejeune (@spida_rwin)
See Wikipedia article (https://en.wikipedia.org/wiki/Bidirectional_search)
"""
import math
import matplotlib.pyplot as plt
show_animation = True
class BidirectionalAStarPlanner:
def __init__(self, ox, oy, reso, rr):
"""
        Initialize grid map for A* planning
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
reso: grid resolution [m]
rr: robot radius[m]
"""
self.reso = reso
self.rr = rr
self.calc_obstacle_map(ox, oy)
self.motion = self.get_motion_model()
class Node:
def __init__(self, x, y, cost, pind):
self.x = x # index of grid
self.y = y # index of grid
self.cost = cost
self.pind = pind
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.pind)
def planning(self, sx, sy, gx, gy):
"""
Bidirectional A star path search
input:
sx: start x position [m]
sy: start y position [m]
gx: goal x position [m]
gy: goal y position [m]
output:
rx: x position list of the final path
ry: y position list of the final path
"""
nstart = self.Node(self.calc_xyindex(sx, self.minx),
self.calc_xyindex(sy, self.miny), 0.0, -1)
ngoal = self.Node(self.calc_xyindex(gx, self.minx),
self.calc_xyindex(gy, self.miny), 0.0, -1)
open_set_A, closed_set_A = dict(), dict()
open_set_B, closed_set_B = dict(), dict()
open_set_A[self.calc_grid_index(nstart)] = nstart
open_set_B[self.calc_grid_index(ngoal)] = ngoal
current_A = nstart
current_B = ngoal
while 1:
if len(open_set_A) == 0:
print("Open set A is empty..")
break
if len(open_set_B) == 0:
print("Open set B is empty..")
break
c_id_A = min(
open_set_A,
key=lambda o: self.find_total_cost(open_set_A, o, current_B))
current_A = open_set_A[c_id_A]
c_id_B = min(
open_set_B,
key=lambda o: self.find_total_cost(open_set_B, o, current_A))
current_B = open_set_B[c_id_B]
# show graph
if show_animation: # pragma: no cover
plt.plot(self.calc_grid_position(current_A.x, self.minx),
self.calc_grid_position(current_A.y, self.miny), "xc")
plt.plot(self.calc_grid_position(current_B.x, self.minx),
self.calc_grid_position(current_B.y, self.miny), "xc")
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event:
[exit(0) if event.key == 'escape'
else None])
if len(closed_set_A.keys()) % 10 == 0:
plt.pause(0.001)
if current_A.x == current_B.x and current_A.y == current_B.y:
print("Found goal")
meetpointA = current_A
meetpointB = current_B
break
# Remove the item from the open set
del open_set_A[c_id_A]
del open_set_B[c_id_B]
# Add it to the closed set
closed_set_A[c_id_A] = current_A
closed_set_B[c_id_B] = current_B
# expand_grid search grid based on motion model
for i, _ in enumerate(self.motion):
c_nodes = [self.Node(current_A.x + self.motion[i][0],
current_A.y + self.motion[i][1],
current_A.cost + self.motion[i][2],
c_id_A),
self.Node(current_B.x + self.motion[i][0],
current_B.y + self.motion[i][1],
current_B.cost + self.motion[i][2],
c_id_B)]
n_ids = [self.calc_grid_index(c_nodes[0]),
self.calc_grid_index(c_nodes[1])]
# If the node is not safe, do nothing
continue_ = self.check_nodes_and_sets(c_nodes, closed_set_A,
closed_set_B, n_ids)
if not continue_[0]:
if n_ids[0] not in open_set_A:
# discovered a new node
open_set_A[n_ids[0]] = c_nodes[0]
else:
if open_set_A[n_ids[0]].cost > c_nodes[0].cost:
# This path is the best until now. record it
open_set_A[n_ids[0]] = c_nodes[0]
if not continue_[1]:
if n_ids[1] not in open_set_B:
# discovered a new node
open_set_B[n_ids[1]] = c_nodes[1]
else:
if open_set_B[n_ids[1]].cost > c_nodes[1].cost:
# This path is the best until now. record it
open_set_B[n_ids[1]] = c_nodes[1]
rx, ry = self.calc_final_bidirectional_path(
meetpointA, meetpointB, closed_set_A, closed_set_B)
return rx, ry
# takes two sets and two meeting nodes and return the optimal path
def calc_final_bidirectional_path(self, n1, n2, setA, setB):
rx_A, ry_A = self.calc_final_path(n1, setA)
rx_B, ry_B = self.calc_final_path(n2, setB)
rx_A.reverse()
ry_A.reverse()
rx = rx_A + rx_B
ry = ry_A + ry_B
return rx, ry
def calc_final_path(self, ngoal, closedset):
# generate final course
rx, ry = [self.calc_grid_position(ngoal.x, self.minx)], [
self.calc_grid_position(ngoal.y, self.miny)]
pind = ngoal.pind
while pind != -1:
n = closedset[pind]
rx.append(self.calc_grid_position(n.x, self.minx))
ry.append(self.calc_grid_position(n.y, self.miny))
pind = n.pind
return rx, ry
def check_nodes_and_sets(self, c_nodes, closedSet_A, closedSet_B, n_ids):
continue_ = [False, False]
if not self.verify_node(c_nodes[0]) or n_ids[0] in closedSet_A:
continue_[0] = True
if not self.verify_node(c_nodes[1]) or n_ids[1] in closedSet_B:
continue_[1] = True
return continue_
@staticmethod
def calc_heuristic(n1, n2):
w = 1.0 # weight of heuristic
d = w * math.hypot(n1.x - n2.x, n1.y - n2.y)
return d
def find_total_cost(self, open_set, lambda_, n1):
g_cost = open_set[lambda_].cost
h_cost = self.calc_heuristic(n1, open_set[lambda_])
f_cost = g_cost + h_cost
return f_cost
def calc_grid_position(self, index, minp):
"""
calc grid position
:param index:
:param minp:
:return:
"""
pos = index * self.reso + minp
return pos
def calc_xyindex(self, position, min_pos):
return round((position - min_pos) / self.reso)
def calc_grid_index(self, node):
return (node.y - self.miny) * self.xwidth + (node.x - self.minx)
def verify_node(self, node):
px = self.calc_grid_position(node.x, self.minx)
py = self.calc_grid_position(node.y, self.miny)
if px < self.minx:
return False
elif py < self.miny:
return False
elif px >= self.maxx:
return False
elif py >= self.maxy:
return False
# collision check
if self.obmap[node.x][node.y]:
return False
return True
def calc_obstacle_map(self, ox, oy):
self.minx = round(min(ox))
self.miny = round(min(oy))
self.maxx = round(max(ox))
self.maxy = round(max(oy))
print("minx:", self.minx)
print("miny:", self.miny)
print("maxx:", self.maxx)
print("maxy:", self.maxy)
self.xwidth = round((self.maxx - self.minx) / self.reso)
self.ywidth = round((self.maxy - self.miny) / self.reso)
print("xwidth:", self.xwidth)
print("ywidth:", self.ywidth)
# obstacle map generation
self.obmap = [[False for _ in range(self.ywidth)]
for _ in range(self.xwidth)]
for ix in range(self.xwidth):
x = self.calc_grid_position(ix, self.minx)
for iy in range(self.ywidth):
y = self.calc_grid_position(iy, self.miny)
for iox, ioy in zip(ox, oy):
d = math.hypot(iox - x, ioy - y)
if d <= self.rr:
self.obmap[ix][iy] = True
break
@staticmethod
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
def main():
print(__file__ + " start!!")
# start and goal position
sx = 10.0 # [m]
sy = 10.0 # [m]
gx = 50.0 # [m]
gy = 50.0 # [m]
grid_size = 2.0 # [m]
robot_radius = 1.0 # [m]
# set obstacle positions
ox, oy = [], []
for i in range(-10, 60):
ox.append(i)
oy.append(-10.0)
for i in range(-10, 60):
ox.append(60.0)
oy.append(i)
for i in range(-10, 61):
ox.append(i)
oy.append(60.0)
for i in range(-10, 61):
ox.append(-10.0)
oy.append(i)
for i in range(-10, 40):
ox.append(20.0)
oy.append(i)
for i in range(0, 40):
ox.append(40.0)
oy.append(60.0 - i)
if show_animation: # pragma: no cover
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "og")
plt.plot(gx, gy, "ob")
plt.grid(True)
plt.axis("equal")
bidir_a_star = BidirectionalAStarPlanner(ox, oy, grid_size, robot_radius)
rx, ry = bidir_a_star.planning(sx, sy, gx, gy)
if show_animation: # pragma: no cover
plt.plot(rx, ry, "-r")
plt.pause(.0001)
plt.show()
if __name__ == '__main__':
main()
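# Minimal usage sketch outside the demo above (units in meters; assumes ox/oy
# enclose the region to search, as the walls built in main() do). Note that with
# show_animation = True the call also drives the matplotlib plot:
#
# planner = BidirectionalAStarPlanner(ox, oy, reso=2.0, rr=1.0)
# rx, ry = planner.planning(sx=10.0, sy=10.0, gx=50.0, gy=50.0)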
| 31.521866
| 79
| 0.503607
|
4fc7c16f71239cf74629b8028742a25ffda46386
| 1,145
|
py
|
Python
|
2017/midterm/problem4_closestPower.py
|
codxse/mitx-600.1
|
c1cfa51d5ffe426cb1e84b9c1f2d133213b83962
|
[
"MIT"
] | null | null | null |
2017/midterm/problem4_closestPower.py
|
codxse/mitx-600.1
|
c1cfa51d5ffe426cb1e84b9c1f2d133213b83962
|
[
"MIT"
] | null | null | null |
2017/midterm/problem4_closestPower.py
|
codxse/mitx-600.1
|
c1cfa51d5ffe426cb1e84b9c1f2d133213b83962
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 12:26:38 2017
@author: Nadiar
"""
def closest_power(base, num):
'''
base: base of the exponential, integer > 1
num: number you want to be closest to, integer > 0
Find the integer exponent such that base**exponent is closest to num.
Note that the base**exponent may be either greater or smaller than num.
In case of a tie, return the smaller value.
Returns the exponent.
'''
exp = 0
while True:
if num - base**exp < 0:
temp = exp
exp -= 1
mmap = {}
mmap[abs(num-base**exp)] = exp
mmap[abs(num-base**temp)] = temp
flor = min(mmap.keys())
if len(mmap) == 1:
return mmap[flor] - 1
else:
return mmap[flor]
elif num - base**exp == 0:
return exp
else:
exp += 1
print(closest_power(3,12)) #2
print(closest_power(4,12)) #2
print(closest_power(4,1)) #0
print(closest_power(2, 192)) #7
print(closest_power(5, 375.0)) #3
print(closest_power(10, 550.0)) #2
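# Worked check of the tie rule in the docstring: for base=3, num=2 both 3**0=1
# and 3**1=3 are distance 1 from num, so the smaller exponent is returned.
print(closest_power(3, 2)) #0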
| 27.261905
| 75
| 0.551092
|
81a2cd2cc844c3dfe496f2e2c1b1f8df8ca7ea00
| 1,110
|
py
|
Python
|
benchmarks/misc/baseline_reduction.py
|
liberix/taichi
|
bbd4ba13e021b32dbf9a52507f637aff0851fe39
|
[
"MIT"
] | null | null | null |
benchmarks/misc/baseline_reduction.py
|
liberix/taichi
|
bbd4ba13e021b32dbf9a52507f637aff0851fe39
|
[
"MIT"
] | null | null | null |
benchmarks/misc/baseline_reduction.py
|
liberix/taichi
|
bbd4ba13e021b32dbf9a52507f637aff0851fe39
|
[
"MIT"
] | null | null | null |
from utils import dtype_size, scale_repeat
import taichi as ti
def reduction(arch, dtype, dsize, repeat=10):
repeat = scale_repeat(arch, dsize, repeat)
n = dsize // dtype_size[dtype]
## fill x
x = ti.field(dtype, shape=n)
if dtype in [ti.f32, ti.f64]:
@ti.kernel
def fill_const(n: ti.i32):
for i in range(n):
x[i] = 0.1
else:
@ti.kernel
def fill_const(n: ti.i32):
for i in range(n):
x[i] = 1
# compile the kernel first
fill_const(n)
## reduce
y = ti.field(dtype, shape=())
if dtype in [ti.f32, ti.f64]:
y[None] = 0.0
else:
y[None] = 0
@ti.kernel
def reduction(n: ti.i32):
for i in range(n):
y[None] += ti.atomic_add(y[None], x[i])
# compile the kernel first
reduction(n)
ti.clear_kernel_profile_info()
for i in range(repeat):
reduction(n)
kernelname = reduction.__name__
suffix = "_c"
quering_result = ti.query_kernel_profile_info(kernelname + suffix)
return quering_result.min
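# Hedged usage sketch (the arch token expected by scale_repeat() is defined in the
# local utils module and only assumed here; also assumes ti.init() was already
# called by the benchmark harness; dsize is the buffer size in bytes):
#
# min_time = reduction(ti.cpu, ti.f32, dsize=4 * 1024 * 1024)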
| 21.764706
| 70
| 0.564865
|
4af6e5802c67db6349d95644164290c003945905
| 1,423
|
py
|
Python
|
uer/encoders/gpt_encoder.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | 12
|
2020-12-19T05:26:49.000Z
|
2022-03-30T13:20:46.000Z
|
uer/encoders/gpt_encoder.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | null | null | null |
uer/encoders/gpt_encoder.py
|
nju-websoft/TSQA
|
d0b3f0c3a5e55a46fc5d281cae09597aa7f76e2e
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding:utf-8 -*-
import torch
import torch.nn as nn
from uer.layers.layer_norm import LayerNorm
from uer.layers.position_ffn import PositionwiseFeedForward
from uer.layers.multi_headed_attn import MultiHeadedAttention
from uer.layers.transformer import TransformerLayer
class GptEncoder(nn.Module):
"""
    GPT encoder exploits 12 or 24 transformer layers to extract features.
"""
def __init__(self, args):
super(GptEncoder, self).__init__()
self.layers_num = args.layers_num
self.transformer = nn.ModuleList([
TransformerLayer(args) for _ in range(self.layers_num)
])
def forward(self, emb, seg):
"""
Args:
emb: [batch_size x seq_length x emb_size]
seg: [batch_size x seq_length]
Returns:
hidden: [batch_size x seq_length x hidden_size]
"""
batch_size, seq_length, _ = emb.size()
# Generate mask according to segment indicators.
# mask: [batch_size x 1 x seq_length x seq_length]
mask = torch.ones(seq_length, seq_length, device=emb.device)
mask = torch.tril(mask)
mask = (1.0 - mask) * -10000
mask = mask.repeat(batch_size, 1, 1, 1)
hidden = emb
for i in range(self.layers_num):
hidden = self.transformer[i](hidden, mask)
return hidden
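# Standalone check of the causal-mask construction used in forward() above
# (illustrative seq_length=4; not part of the encoder itself):
#
# m = torch.tril(torch.ones(4, 4))
# bias = (1.0 - m) * -10000
# # bias[0] == tensor([0., -10000., -10000., -10000.]): position 0 attends only
# # to itself, and each later position sees everything to its left.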
| 33.093023
| 75
| 0.618412
|
d9d3b6767e109b83b4417637015bb4185b5efc85
| 3,979
|
py
|
Python
|
frontend/myapp/views.py
|
TanmayHiremath/Traffic-Man
|
2c44780b336fd99fc6d4a4914a1c0a9716b19d3f
|
[
"MIT"
] | null | null | null |
frontend/myapp/views.py
|
TanmayHiremath/Traffic-Man
|
2c44780b336fd99fc6d4a4914a1c0a9716b19d3f
|
[
"MIT"
] | null | null | null |
frontend/myapp/views.py
|
TanmayHiremath/Traffic-Man
|
2c44780b336fd99fc6d4a4914a1c0a9716b19d3f
|
[
"MIT"
] | 1
|
2020-06-27T14:15:55.000Z
|
2020-06-27T14:15:55.000Z
|
from django.shortcuts import render
# Create your views here.
from myapp.models import *
from .serializers import *
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
# Not currently used:
# from django.db.models import Q
# import numpy as np
# import requests
# from django.views.decorators.csrf import csrf_exempt
from sys import path
from os import getcwd
path.append(getcwd()+'/Traffic Program')
print(path)
from classes import Traffic_Light
print(Traffic_Light)
context={'a':'b'}
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
class CurrentImagesViewSet(viewsets.ModelViewSet):
queryset = CurrentImages.objects.all()
serializer_class = CurrentImagesSerializer
def startedProgram():
pass
def get_images():
img0=Image.objects.get(sn=1)
img1=Image.objects.get(sn=2)
img2=Image.objects.get(sn=3)
img3=Image.objects.get(sn=4)
global context
context = {'img0': img0,'img1': img1,'img2': img2,'img3': img3}
def my_view(request):
get_images()
return render(request,'index.html',context)
class getCurrentImage(APIView):
permission_classes = ()
authentication_classes = ()
def get(self, request, pk, format=None):
currentimage = CurrentImages.objects.get(pk=pk)
serializer = CurrentImagesSerializer(currentimage)
return Response(serializer.data)
def put(self, request, pk, format=None):
currentimage = CurrentImages.objects.get(pk=pk)
if currentimage is not None:
serializer = CurrentImagesSerializer(currentimage, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
serializer = CurrentImagesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
print(serializer.errors)
class getTrafficLight(APIView):
permission_classes = ()
authentication_classes = ()
def get(self, request, sn, format=None):
trafficlight = TrafficLight.objects.get(sn=sn)
serializer = TrafficLightSerializer(trafficlight)
return Response(serializer.data)
def put(self, request, sn, format=None):
trafficlight = TrafficLight.objects.get(sn=sn)
if trafficlight is not None:
serializer = TrafficLightSerializer(trafficlight, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
serializer = TrafficLightSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
print(serializer.errors)
# class getImage(APIView):
# def get_object(self, pk):
# try:
# return Image.objects.get(pk=pk)
#         except Image.DoesNotExist:
# return None
# def get(self, request, pk, format=None):
# Image = self.get_object(pk)
# serializer = ImageSerializer(Image)
# return Response(serializer.data)
# @register.filter(name='update_filter')
# def update_variable(variable):
# get_images()
# if(variable=='img0'):
# print('0')
# elif(variable=='img1'):
# print('1')
# elif(variable=='img2'):
# print('2')
# elif(variable=='img3'):
# print('3')
# return 'hello'
| 29.474074
| 81
| 0.614727
|
86884156f9b808b026a7183f029318de4c0a4046
| 8,037
|
py
|
Python
|
cfgov/v1/tests/test_signals.py
|
Colin-Seifer/consumerfinance.gov
|
a1a943f7170b498707d642d6be97b9a97a2b52e3
|
[
"CC0-1.0"
] | 156
|
2015-01-16T15:16:46.000Z
|
2020-08-04T04:48:01.000Z
|
cfgov/v1/tests/test_signals.py
|
Colin-Seifer/consumerfinance.gov
|
a1a943f7170b498707d642d6be97b9a97a2b52e3
|
[
"CC0-1.0"
] | 3,604
|
2015-01-05T22:09:12.000Z
|
2020-08-14T17:09:19.000Z
|
cfgov/v1/tests/test_signals.py
|
Colin-Seifer/consumerfinance.gov
|
a1a943f7170b498707d642d6be97b9a97a2b52e3
|
[
"CC0-1.0"
] | 102
|
2015-01-28T14:51:18.000Z
|
2020-08-10T00:00:39.000Z
|
from unittest import TestCase, mock
from django.contrib.auth.models import User
from django.test import TestCase as DjangoTestCase
from django.utils import timezone
from wagtail.core.models import Site
from model_bakery import baker
from teachers_digital_platform.models import ActivityPage, ActivitySetUp
from v1.models import (
BlogPage,
CFGOVPageCategory,
LearnPage,
NewsroomLandingPage,
NewsroomPage,
SublandingFilterablePage,
)
from v1.signals import invalidate_filterable_list_caches
class UserSaveTestCase(TestCase):
def make_user(self, password, is_superuser=False):
user = baker.prepare(User, is_superuser=is_superuser)
user.set_password(password)
user.save()
return user
def test_user_save_new_password_makes_history_item(self):
user = self.make_user(password="foo")
first_phi = user.passwordhistoryitem_set.latest()
user.set_password("bar")
user.save()
new_phi = user.passwordhistoryitem_set.latest()
self.assertNotEqual(first_phi, new_phi)
self.assertEqual(user.password, new_phi.encrypted_password)
def test_user_save_new_password_not_expired(self):
user = self.make_user(password="foo")
user.set_password("bar")
user.save()
new_phi = user.passwordhistoryitem_set.latest()
self.assertGreater(new_phi.expires_at, timezone.now())
def test_user_save_new_password_locks_password(self):
user = self.make_user(password="foo")
user.set_password("bar")
user.save()
new_phi = user.passwordhistoryitem_set.latest()
self.assertGreater(new_phi.locked_until, timezone.now())
def test_user_save_same_password_no_history_item(self):
user = self.make_user(password="foo")
first_phi = user.passwordhistoryitem_set.latest()
user.save()
new_phi = user.passwordhistoryitem_set.latest()
self.assertEqual(first_phi, new_phi)
self.assertEqual(user.password, new_phi.encrypted_password)
def test_user_created_expires_password(self):
user = self.make_user(password="foo")
first_phi = user.passwordhistoryitem_set.latest()
self.assertLess(first_phi.expires_at, timezone.now())
def test_user_created_unlocks_password(self):
user = self.make_user(password="foo")
first_phi = user.passwordhistoryitem_set.latest()
self.assertLess(first_phi.locked_until, timezone.now())
def test_superuser_created_does_not_expire_password(self):
user = self.make_user(password="foo", is_superuser=True)
first_phi = user.passwordhistoryitem_set.latest()
self.assertGreater(first_phi.expires_at, timezone.now())
def test_superuser_created_unlocks_password(self):
user = self.make_user(password="foo", is_superuser=True)
first_phi = user.passwordhistoryitem_set.latest()
self.assertLess(first_phi.locked_until, timezone.now())
class FilterableListInvalidationTestCase(TestCase):
def setUp(self):
self.root_page = Site.objects.first().root_page
self.filterable_list_page = SublandingFilterablePage(title="Blog")
self.root_page.add_child(instance=self.filterable_list_page)
self.filterable_list_page.save()
self.category_filterable_list_page = NewsroomLandingPage(title="News")
self.root_page.add_child(instance=self.category_filterable_list_page)
self.category_filterable_list_page.save()
self.newsroom_page = NewsroomPage(title="News event")
self.category_filterable_list_page.add_child(
instance=self.newsroom_page
)
self.newsroom_page.save()
self.blog_page = BlogPage(title="test blog")
self.filterable_list_page.add_child(instance=self.blog_page)
self.blog_page.categories.add(CFGOVPageCategory(name="op-ed"))
self.blog_page.save()
self.non_filterable_page = LearnPage(title="Page")
self.root_page.add_child(instance=self.non_filterable_page)
self.non_filterable_page.save()
@mock.patch("v1.signals.AkamaiBackend.purge_by_tags")
@mock.patch("v1.signals.cache")
def test_invalidate_filterable_list_caches(
self,
mock_cache,
mock_purge,
):
invalidate_filterable_list_caches(None, instance=self.blog_page)
for cache_key_prefix in (
self.filterable_list_page.get_cache_key_prefix(),
self.category_filterable_list_page.get_cache_key_prefix(),
):
mock_cache.delete.assert_any_call(
f"{cache_key_prefix}-all_filterable_results"
)
mock_cache.delete.assert_any_call(f"{cache_key_prefix}-page_ids")
mock_cache.delete.assert_any_call(f"{cache_key_prefix}-topics")
mock_cache.delete.assert_any_call(f"{cache_key_prefix}-authors")
mock_purge.assert_called_once()
self.assertIn(
self.filterable_list_page.slug, mock_purge.mock_calls[0].args[0]
)
@mock.patch("v1.signals.AkamaiBackend.purge_by_tags")
@mock.patch("django.core.cache.cache")
def test_invalidate_filterable_list_caches_does_nothing(
self, mock_cache, mock_purge
):
invalidate_filterable_list_caches(
None, instance=self.non_filterable_page
)
mock_cache.delete.assert_not_called()
mock_purge.assert_not_called()
class RefreshActivitiesTestCase(DjangoTestCase):
fixtures = ["tdp_minimal_data"]
def setUp(self):
self.root_page = Site.objects.first().root_page
self.activity_page = ActivityPage(
title="activity 1",
live=False,
summary="Summary",
big_idea="Big Idea",
essential_questions="Essential Questions",
objectives="Objectives",
what_students_will_do="What students will do",
activity_duration_id=1,
activity_file_id=8335,
)
self.root_page.add_child(instance=self.activity_page)
self.activity_page.save()
self.activity_page2 = ActivityPage(
title="activity 2",
live=False,
summary="Summary 2",
big_idea="Big Idea",
essential_questions="Essential Questions",
objectives="Objectives",
what_students_will_do="What students will do",
activity_duration_id=1,
activity_file_id=8335,
)
self.root_page.add_child(instance=self.activity_page2)
self.activity_page2.save()
def test_setup_object_missing(self):
self.assertFalse(ActivitySetUp.objects.exists())
def test_publishing_creates_setup_object_with_reference(self):
self.activity_page.save_revision().publish()
self.assertTrue(ActivitySetUp.objects.exists())
self.assertIn(
str(self.activity_page.pk),
ActivitySetUp.objects.first().card_setup,
)
self.assertEqual(len(ActivitySetUp.objects.first().card_setup), 1)
def test_publish(self):
self.activity_page.save_revision().publish()
self.activity_page2.save_revision().publish()
self.assertEqual(len(ActivitySetUp.objects.first().card_setup), 2)
for page in [self.activity_page, self.activity_page2]:
self.assertIn(
str(page.pk), ActivitySetUp.objects.first().card_setup
)
    def test_unpublish(self):
self.activity_page.save_revision().publish()
self.activity_page2.save_revision().publish()
self.activity_page2.refresh_from_db()
self.assertTrue(self.activity_page2.live)
self.activity_page2.unpublish()
self.activity_page2.refresh_from_db()
self.assertFalse(self.activity_page2.live)
setup = ActivitySetUp.objects.first()
self.assertNotIn(str(self.activity_page2.pk), setup.card_setup)
self.assertEqual(len(ActivitySetUp.objects.first().card_setup), 1)
| 37.208333
| 78
| 0.690929
|
603e57613bd9d3d31e54f9ca35e2542eb7da5236
| 662
|
py
|
Python
|
bagdiscovery/models.py
|
RockefellerArchiveCenter/ursa_major
|
44bc4a23a37ec749553ecfcc4914357ab621d4f3
|
[
"MIT"
] | null | null | null |
bagdiscovery/models.py
|
RockefellerArchiveCenter/ursa_major
|
44bc4a23a37ec749553ecfcc4914357ab621d4f3
|
[
"MIT"
] | 99
|
2018-09-26T17:06:36.000Z
|
2022-03-28T15:53:46.000Z
|
bagdiscovery/models.py
|
RockefellerArchiveCenter/ursa_major
|
44bc4a23a37ec749553ecfcc4914357ab621d4f3
|
[
"MIT"
] | null | null | null |
from asterism.models import BasePackage
from django.db import models
class Accession(models.Model):
data = models.JSONField(null=True, blank=True)
created = models.DateTimeField(auto_now=True)
last_modified = models.DateTimeField(auto_now_add=True)
class Bag(BasePackage):
CREATED = 1
DISCOVERED = 2
DELIVERED = 3
PROCESS_STATUS_CHOICES = (
(CREATED, "Created"),
(DISCOVERED, "Discovered"),
(DELIVERED, "Delivered")
)
accession = models.ForeignKey(Accession, on_delete=models.CASCADE, null=True, blank=True)
process_status = models.IntegerField(choices=PROCESS_STATUS_CHOICES, default=CREATED)
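# Hedged usage sketch (field values are illustrative; any required fields
# inherited from asterism's BasePackage are assumed to have defaults here):
#
# accession = Accession.objects.create(data={"title": "2021 transfer"})
# Bag.objects.create(accession=accession, process_status=Bag.DISCOVERED)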
| 30.090909
| 93
| 0.719033
|
d7303df29c2aaa039b1532df9e3acd9c65407786
| 126,962
|
py
|
Python
|
mrcnn/model.py
|
ejcv/Mask_RCNN
|
7f2ebb55c4fb8b8b0e06ce5cb796d9958fbd8d5b
|
[
"MIT"
] | null | null | null |
mrcnn/model.py
|
ejcv/Mask_RCNN
|
7f2ebb55c4fb8b8b0e06ce5cb796d9958fbd8d5b
|
[
"MIT"
] | null | null | null |
mrcnn/model.py
|
ejcv/Mask_RCNN
|
7f2ebb55c4fb8b8b0e06ce5cb796d9958fbd8d5b
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
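# Illustrative call (the array is a placeholder):
#
# log("gt_boxes", np.arange(12).reshape(3, 4))
# # prints the padded name followed by "shape: (3, 4)", min 0, max 11 and int64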
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via a setting in the Config class) and
    functions as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
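# Worked example (assumes the common config values IMAGE_SHAPE = [1024, 1024, 3]
# and BACKBONE_STRIDES = [4, 8, 16, 32, 64]; both values are assumptions here):
#
# compute_backbone_shapes(config, [1024, 1024, 3])
# # -> [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]]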
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
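# Worked example of the delta arithmetic above (plain numbers, no TF required):
# a box (y1, x1, y2, x2) = (0, 0, 10, 10) has h = w = 10 and center (5, 5);
# deltas (0.1, 0.2, 0.0, 0.0) move the center to (6, 7) and leave the size
# unchanged, giving the refined box (1, 2, 11, 12).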
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
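        # Worked example of the assignment above: on a 1024x1024 input, a ROI
        # covering 224x224 pixels has sqrt(h*w) = 224/1024 in normalized
        # coordinates, so roi_level = log2(1) = 0 and the ROI maps to P4.
        # A ROI with four times the area maps to P5, a quarter of the area to P3.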
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
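# Worked IoU example for the graph above: boxes (0, 0, 2, 2) and (1, 1, 3, 3)
# overlap in a 1x1 square, each has area 4, so IoU = 1 / (4 + 4 - 1) ~= 0.143.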
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_overlaps)[1], 0),
true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
    # where depth is [dy, dx, log(dh), log(dw)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
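# Illustrative usage sketch (added for documentation; not part of the original
# model): building the shared RPN once and applying it to every FPN level, as
# MaskRCNN.build() does. Assumes `config` is a Config instance and
# `rpn_feature_maps` is the list of Keras tensors [P2, P3, P4, P5, P6].
def _example_run_rpn(config, rpn_feature_maps):
    rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                          len(config.RPN_ANCHOR_RATIOS),
                          config.TOP_DOWN_PYRAMID_SIZE)
    # Each call returns [rpn_class_logits, rpn_probs, rpn_bbox] for that level.
    return [rpn([p]) for p in rpn_feature_maps]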
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two FC layers of size fc_layers_size (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
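# Illustrative usage sketch (added for documentation; not part of the original
# model): attaching the classifier/regressor head to a set of ROIs, mirroring
# the call made in MaskRCNN.build(). Assumes `config` is a Config instance and
# the tensor arguments have the shapes documented in the docstring above.
def _example_fpn_classifier_head(rois, mrcnn_feature_maps, input_image_meta,
                                 config):
    return fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                config.POOL_SIZE, config.NUM_CLASSES,
                                train_bn=config.TRAIN_BN,
                                fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)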
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
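# Illustrative numeric sketch (added for documentation; not part of the original
# model). For a difference d, the loss is 0.5*d^2 when |d| < 1 and |d| - 0.5
# otherwise, so the element-wise result below is [0.005, 0.02, 1.0, 0.0].
def _example_smooth_l1_loss():
    y_true = tf.constant([[0.0, 0.0, 1.0, 1.0]])
    y_pred = tf.constant([[0.1, 0.2, 2.5, 1.0]])
    return smooth_l1_loss(y_true, y_pred)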
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
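# Illustrative usage sketch (added for documentation; not part of the original
# model): wrapping the loss graph in a Keras Lambda layer, as MaskRCNN.build()
# does when assembling the training model. Assumes `input_rpn_match` and
# `rpn_class_logits` are Keras tensors with the shapes documented above.
def _example_rpn_class_loss_layer(input_rpn_match, rpn_class_logits):
    return KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
        [input_rpn_match, rpn_class_logits])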
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Random horizontal flips.
# TODO: will be removed in a future update in favor of augmentation
if augment:
logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
mask = mask.astype(np.bool)
    # Note that some boxes might be all zeros if the corresponding mask got
    # cropped out, so filter them out here.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
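# Illustrative usage sketch (added for documentation; not part of the original
# model): loading ground truth for a single image. Assumes `dataset` is a
# prepared Dataset instance, `config` is a Config instance, and `image_id`
# is a valid entry of dataset.image_ids.
def _example_load_image_gt(dataset, config, image_id):
    image, image_meta, class_ids, bbox, mask = load_image_gt(
        dataset, config, image_id, use_mini_mask=config.USE_MINI_MASK)
    return image, image_meta, class_ids, bbox, mask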
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
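# Illustrative usage sketch (added for documentation; not part of the original
# model): building Stage 2 targets from random ROIs for debugging. Assumes the
# arrays come from load_image_gt() and generate_random_rois(), and that
# gt_boxes is int32 and gt_masks is boolean, as asserted above.
def _example_build_detection_targets(rpn_rois, gt_class_ids, gt_boxes,
                                     gt_masks, config):
    return build_detection_targets(rpn_rois, gt_class_ids, gt_boxes,
                                   gt_masks, config)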
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU, match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
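# Illustrative usage sketch (added for documentation; not part of the original
# model): generating RPN targets for one image, as data_generator() does.
# Assumes `anchors` comes from utils.generate_pyramid_anchors() and `config`
# is a Config instance.
def _example_build_rpn_targets(image, anchors, gt_class_ids, gt_boxes, config):
    rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                            gt_class_ids, gt_boxes, config)
    # rpn_match: [num_anchors] with values in {-1, 0, 1}
    # rpn_bbox: [RPN_TRAIN_ANCHORS_PER_IMAGE, 4] deltas for positive anchors
    return rpn_match, rpn_bbox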
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
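# Illustrative usage sketch (added for documentation; not part of the original
# model): generating debug ROIs around the ground truth boxes of one image, as
# data_generator() does when random_rois > 0. The count of 1000 is arbitrary.
def _example_generate_random_rois(image, gt_class_ids, gt_boxes):
    return generate_random_rois(image.shape, 1000, gt_class_ids, gt_boxes)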
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
random_rois=0, batch_size=1, detection_targets=False,
no_augmentation_sources=None):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training, detection targets are generated by DetectionTargetLayer.
no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
defined in the Dataset class.
Returns a Python generator. Upon calling next() on it, the
generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
no_augmentation_sources = no_augmentation_sources or []
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
# If the image source is not to be augmented pass None as augmentation
if dataset.image_info[image_id]['source'] in no_augmentation_sources:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=None,
use_mini_mask=config.USE_MINI_MASK)
else:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=augmentation,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(batch_size, gt_masks.shape[0], gt_masks.shape[1],
config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
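# Illustrative usage sketch (added for documentation; not part of the original
# model): pulling one batch from the generator directly. Assumes `dataset` is a
# prepared Dataset instance and `config` is a Config instance.
def _example_data_generator_batch(dataset, config):
    gen = data_generator(dataset, config, shuffle=True,
                         batch_size=config.BATCH_SIZE)
    inputs, outputs = next(gen)
    # inputs: [images, image_meta, rpn_match, rpn_bbox,
    #          gt_class_ids, gt_boxes, gt_masks]; outputs is [] in this setup.
    return inputs, outputs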
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # C1 is not used by the FPN heads, so it is discarded below.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3addmodel")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# TODO: clean up (use tf.identify if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
# Conditional import to support versions of Keras before 2.2
# TODO: remove in about 6 months (end of 2018)
try:
from keras.engine import saving
except ImportError:
# Keras before 2.2 used the 'topology' namespace.
from keras.engine import topology as saving
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, layers)
else:
saving.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
        model_path: If None, or a format different from what this code uses,
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the total number of epochs to train, rather than the number for
            this particular call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
        custom_callbacks: Optional. Add custom callbacks to be called
            with the Keras fit_generator method. Must be a list of
            keras.callbacks.Callback instances.
        no_augmentation_sources: Optional. List of sources to exclude from
            augmentation. A source is a string that identifies a dataset and
            is defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE,
no_augmentation_sources=no_augmentation_sources)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
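    # Typical staged-training sketch (assumes `model`, `dataset_train`,
    # `dataset_val`, and `config` are already set up; epoch counts are
    # illustrative):
    #
    #   model.train(dataset_train, dataset_val,
    #               learning_rate=config.LEARNING_RATE, epochs=40,
    #               layers="heads")
    #   model.train(dataset_train, dataset_val,
    #               learning_rate=config.LEARNING_RATE / 10, epochs=120,
    #               layers="all")
    #
    # Because epochs is the total epoch count, the second call trains for 80
    # additional epochs on top of the first 40.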
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
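    # Shape sketch (example config values): for two input images with
    # IMAGE_RESIZE_MODE="square" and IMAGE_MAX_DIM=1024, mold_inputs() returns
    # molded_images of shape [2, 1024, 1024, 3], image_metas of shape
    # [2, 12 + NUM_CLASSES], and windows of shape [2, 4].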
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of the box in the image where
            the real image is, excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
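    # Worked example of the window math above (illustrative numbers): for a
    # 1024x1024 molded image whose real content occupies the window
    # (y1, x1, y2, x2) = (0, 192, 1024, 832), wh covers the full normalized
    # height and ww roughly 63% of the width, so a detection spanning the
    # whole window maps back onto the full original image after the
    # shift/scale and denorm_boxes() steps.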
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
        assert len(images) == self.config.BATCH_SIZE, \
            "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
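    # Usage sketch (assumes an inference-mode model with weights loaded and
    # config.BATCH_SIZE == 1; `image` is an RGB numpy array):
    #
    #   r = model.detect([image], verbose=0)[0]
    #   # r["rois"]: [N, 4], r["class_ids"]: [N], r["scores"]: [N],
    #   # r["masks"]: [H, W, N] boolean instance masks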
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
        if tuple(image_shape) not in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
            # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
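# Debugging sketch for run_graph(). The layer names "ROI" and "rpn_class" exist
# in the standard graph built by build(), but treat them as assumptions if the
# architecture is customized; `model` must be an inference-mode MaskRCNN with
# weights loaded and `image` an RGB numpy array.
def _example_run_graph(model, image):
    outputs = model.run_graph([image], [
        ("proposals", model.keras_model.get_layer("ROI").output),
        ("rpn_class", model.keras_model.get_layer("rpn_class").output),
    ])
    # Each value is a numpy array, e.g. outputs["proposals"] has shape
    # [BATCH_SIZE, POST_NMS_ROIS_INFERENCE, 4] in normalized coordinates.
    return outputs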
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
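# Self-contained round-trip sketch for compose_image_meta()/parse_image_meta().
# The shapes, window, scale, and class count below are arbitrary example values.
def _demo_image_meta_roundtrip():
    import numpy as np
    num_classes = 81
    meta = compose_image_meta(
        image_id=7,
        original_image_shape=(480, 640, 3),
        image_shape=(1024, 1024, 3),
        window=(0, 192, 1024, 832),
        scale=1.6,
        active_class_ids=np.zeros([num_classes], dtype=np.int32))
    parsed = parse_image_meta(meta[np.newaxis])  # parse_* expects a batch axis
    assert int(parsed["image_id"][0]) == 7
    assert tuple(parsed["window"][0]) == (0, 192, 1024, 832)
    return parsed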
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
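# Round-trip sketch for mold_image()/unmold_image(). The config stand-in below
# is only for illustration; in real use the project's Config instance supplies
# MEAN_PIXEL.
def _demo_mold_unmold():
    import types
    import numpy as np
    cfg = types.SimpleNamespace(MEAN_PIXEL=np.array([123.7, 116.8, 103.9]))
    image = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
    molded = mold_image(image, cfg)       # float, zero-centered per channel
    restored = unmold_image(molded, cfg)  # back to uint8 pixel values
    assert restored.dtype == np.uint8
    # Exact up to float rounding just before the uint8 cast.
    assert np.max(np.abs(restored.astype(int) - image.astype(int))) <= 1
    return molded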
############################################################
#  Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
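# Behavior sketch for trim_zeros_graph(). Under TF2 eager execution the call
# returns concrete tensors directly; under the TF1-style graph mode this file
# targets, the same ops would be evaluated inside a session.
def _demo_trim_zeros_graph():
    import tensorflow as tf
    boxes = tf.constant([[1., 2., 3., 4.],
                         [0., 0., 0., 0.],
                         [5., 6., 7., 8.]])
    trimmed, non_zeros = trim_zeros_graph(boxes)
    # trimmed keeps only the two non-zero rows; non_zeros is [True, False, True]
    return trimmed, non_zeros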
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
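# Tiny example of batch_pack_graph(): with counts = [1, 2] it takes one value
# from the first row and two from the second, concatenated into a flat tensor.
def _demo_batch_pack_graph():
    import tensorflow as tf
    x = tf.constant([[10, 11, 12],
                     [20, 21, 22]])
    counts = tf.constant([1, 2])
    packed = batch_pack_graph(x, counts, num_rows=2)
    # packed == [10, 20, 21]
    return packed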
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
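# Round-trip sketch for the two conversions above, runnable under TF2 eager
# execution (under TF1 graph mode the tensors would be evaluated in a session).
def _demo_norm_denorm_boxes_graph():
    import tensorflow as tf
    shape = (1024, 1024)
    # (y2, x2) is exclusive in pixel coordinates, so the full image is
    # [0, 0, 1024, 1024] and maps to [0, 0, 1, 1] in normalized coordinates.
    boxes_px = tf.constant([[0., 0., 1024., 1024.],
                            [100., 200., 300., 400.]])
    normalized = norm_boxes_graph(boxes_px, shape)
    restored = denorm_boxes_graph(normalized, shape)  # back to pixel coordinates
    return normalized, restored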
| 44.26848
| 115
| 0.612089
|
2c10f60b509ebcc65d4bdf4f50fc06e734bf6d15
| 70
|
py
|
Python
|
dummyfile.py
|
LonnonjamesD/EconomyBot
|
17db1398f19b9d9dba0d47b498ddf6b63de49c64
|
[
"MIT"
] | 1
|
2020-01-03T21:12:58.000Z
|
2020-01-03T21:12:58.000Z
|
dummyfile.py
|
LonnonjamesD/EconomyBot
|
17db1398f19b9d9dba0d47b498ddf6b63de49c64
|
[
"MIT"
] | null | null | null |
dummyfile.py
|
LonnonjamesD/EconomyBot
|
17db1398f19b9d9dba0d47b498ddf6b63de49c64
|
[
"MIT"
] | null | null | null |
5993818^83748378783748372873874837482302490340298393283748357940238038
| 70
| 70
| 0.985714
|
7666e95ccdc8587f4c1bb57443acf22a87065147
| 45,256
|
py
|
Python
|
Lib/test/test_dict.py
|
philippeitis/RustPython
|
4e057271ecae2c923eb671b0456f2c06a8449b60
|
[
"MIT"
] | null | null | null |
Lib/test/test_dict.py
|
philippeitis/RustPython
|
4e057271ecae2c923eb671b0456f2c06a8449b60
|
[
"MIT"
] | null | null | null |
Lib/test/test_dict.py
|
philippeitis/RustPython
|
4e057271ecae2c923eb671b0456f2c06a8449b60
|
[
"MIT"
] | null | null | null |
import collections
import collections.abc
# import gc // XXX RustPython
import pickle
import random
import string
import sys
import unittest
import weakref
from test import support
class DictTest(unittest.TestCase):
@unittest.skip("TODO: RUSTPYTHON")
def test_invalid_keyword_arguments(self):
class Custom(dict):
pass
for invalid in {1 : 2}, Custom({1 : 2}):
with self.assertRaises(TypeError):
dict(**invalid)
with self.assertRaises(TypeError):
{}.update(**invalid)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assertIsNot(dict(), {})
@unittest.skip("TODO: RUSTPYTHON")
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [(''.join(random.sample(string.ascii_letters, 8)), i)
for i in range(n)]
random.shuffle(items)
formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
dictliteral = '{' + ', '.join(formatted_items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue({1: 2})
self.assertIs(bool({}), False)
self.assertIs(bool({1: 2}), True)
def test_keys(self):
d = {}
self.assertEqual(set(d.keys()), set())
d = {'a': 1, 'b': 2}
k = d.keys()
self.assertEqual(set(k), {'a', 'b'})
self.assertIn('a', k)
self.assertIn('b', k)
self.assertIn('a', d)
self.assertIn('b', d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
def test_values(self):
d = {}
self.assertEqual(set(d.values()), set())
d = {1:2}
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
def test_items(self):
d = {}
self.assertEqual(set(d.items()), set())
d = {1:2}
self.assertEqual(set(d.items()), {(1, 2)})
self.assertRaises(TypeError, d.items, None)
self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
def test_contains(self):
d = {}
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = {'a': 1, 'b': 2}
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
@unittest.skip("TODO: RUSTPYTHON")
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
class mydict(dict):
def __new__(cls):
return collections.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, collections.UserDict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
class baddict3(dict):
def __new__(cls):
return d
d = {i : i for i in range(10)}
res = d.copy()
res.update(a=None, b=None, c=None)
self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
def test_copy(self):
d = {1: 1, 2: 2, 3: 3}
self.assertIsNot(d.copy(), d)
self.assertEqual(d.copy(), d)
self.assertEqual(d.copy(), {1: 1, 2: 2, 3: 3})
copy = d.copy()
d[4] = 4
self.assertNotEqual(copy, d)
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
@unittest.skip("TODO: RUSTPYTHON")
def test_copy_fuzz(self):
for dict_size in [10, 100, 1000, 10000, 100000]:
dict_size = random.randrange(
dict_size // 2, dict_size + dict_size // 2)
with self.subTest(dict_size=dict_size):
d = {}
for i in range(dict_size):
d[i] = i
d2 = d.copy()
self.assertIsNot(d2, d)
self.assertEqual(d, d2)
d2['key'] = 'value'
self.assertNotEqual(d, d2)
self.assertEqual(len(d2), len(d) + 1)
@unittest.skip("TODO: RUSTPYTHON")
def test_copy_maintains_tracking(self):
class A:
pass
key = A()
for d in ({}, {'a': 1}, {key: 'val'}):
d2 = d.copy()
self.assertEqual(gc.is_tracked(d), gc.is_tracked(d2))
def test_copy_noncompact(self):
# Dicts don't compact themselves on del/pop operations.
# Copy will use a slow merging strategy that produces
        # a compacted copy when roughly 33% of the dict is unused
        # key space (to optimize memory footprint).
# In this test we want to hit the slow/compacting
# branch of dict.copy() and make sure it works OK.
d = {k: k for k in range(1000)}
for k in range(950):
del d[k]
d2 = d.copy()
self.assertEqual(d2, d)
def test_get(self):
d = {}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
d = {'a': 1, 'b': 2}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = {}
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
y = {hashed1: 5}
hashed2 = Hashed()
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_setitem_atomic_at_resize(self):
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
# 5 items
y = {hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3}
hashed2 = Hashed()
# 6th item forces a resize
y[hashed2] = []
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = {}
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutating_iteration(self):
# changing dict size during iteration
d = {}
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i+1] = 1
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mutating_iteration_delete(self):
# change dict content during iteration
d = {}
d[0] = 0
with self.assertRaises(RuntimeError):
for i in d:
del d[0]
d[0] = 0
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mutating_iteration_delete_over_values(self):
# change dict content during iteration
d = {}
d[0] = 0
with self.assertRaises(RuntimeError):
for i in d.values():
del d[0]
d[0] = 0
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mutating_iteration_delete_over_items(self):
# change dict content during iteration
d = {}
d[0] = 0
with self.assertRaises(RuntimeError):
for i in d.items():
del d[0]
d[0] = 0
@unittest.skip("TODO: RUSTPYTHON")
def test_mutating_lookup(self):
# changing dict during a lookup (issue #14417)
class NastyKey:
mutate_dict = None
def __init__(self, value):
self.value = value
def __hash__(self):
# hash collision!
return 1
def __eq__(self, other):
if NastyKey.mutate_dict:
mydict, key = NastyKey.mutate_dict
NastyKey.mutate_dict = None
del mydict[key]
return self.value == other.value
key1 = NastyKey(1)
key2 = NastyKey(2)
d = {key1: 1}
NastyKey.mutate_dict = (d, key1)
d[key2] = 2
self.assertEqual(d, {key2: 2})
def test_repr(self):
d = {}
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = {}
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr_deep(self):
d = {}
for i in range(sys.getrecursionlimit() + 100):
d = {1: d}
self.assertRaises(RecursionError, repr, d)
def test_eq(self):
self.assertEqual({}, {})
self.assertEqual({1: 2}, {1: 2})
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 1
d1 = {BadCmp(): 1}
d2 = {1: 1}
with self.assertRaises(Exc):
d1 == d2
@unittest.skip("TODO: RUSTPYTHON")
def test_keys_contained(self):
self.helper_keys_contained(lambda x: x.keys())
self.helper_keys_contained(lambda x: x.items())
def helper_keys_contained(self, fn):
# Test rich comparisons against dict key views, which should behave the
# same as sets.
empty = fn(dict())
empty2 = fn(dict())
smaller = fn({1:1, 2:2})
larger = fn({1:1, 2:2, 3:3})
larger2 = fn({1:1, 2:2, 3:3})
larger3 = fn({4:1, 2:2, 3:3})
self.assertTrue(smaller < larger)
self.assertTrue(smaller <= larger)
self.assertTrue(larger > smaller)
self.assertTrue(larger >= smaller)
self.assertFalse(smaller >= larger)
self.assertFalse(smaller > larger)
self.assertFalse(larger <= smaller)
self.assertFalse(larger < smaller)
self.assertFalse(smaller < larger3)
self.assertFalse(smaller <= larger3)
self.assertFalse(larger3 > smaller)
self.assertFalse(larger3 >= smaller)
# Inequality strictness
self.assertTrue(larger2 >= larger)
self.assertTrue(larger2 <= larger)
self.assertFalse(larger2 > larger)
self.assertFalse(larger2 < larger)
self.assertTrue(larger == larger2)
self.assertTrue(smaller != larger)
# There is an optimization on the zero-element case.
self.assertTrue(empty == empty2)
self.assertFalse(empty != empty2)
self.assertFalse(empty == smaller)
self.assertTrue(empty != smaller)
# With the same size, an elementwise compare happens
self.assertTrue(larger != larger3)
self.assertFalse(larger == larger3)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_errors_in_view_containment_check(self):
class C:
def __eq__(self, other):
raise RuntimeError
d1 = {1: C()}
d2 = {1: C()}
with self.assertRaises(RuntimeError):
d1.items() == d2.items()
with self.assertRaises(RuntimeError):
d1.items() != d2.items()
with self.assertRaises(RuntimeError):
d1.items() <= d2.items()
with self.assertRaises(RuntimeError):
d1.items() >= d2.items()
d3 = {1: C(), 2: C()}
with self.assertRaises(RuntimeError):
d2.items() < d3.items()
with self.assertRaises(RuntimeError):
d3.items() > d2.items()
@unittest.skip("TODO: RUSTPYTHON")
def test_dictview_set_operations_on_keys(self):
k1 = {1:1, 2:2}.keys()
k2 = {1:1, 2:2, 3:3}.keys()
k3 = {4:4}.keys()
self.assertEqual(k1 - k2, set())
self.assertEqual(k1 - k3, {1,2})
self.assertEqual(k2 - k1, {3})
self.assertEqual(k3 - k1, {4})
self.assertEqual(k1 & k2, {1,2})
self.assertEqual(k1 & k3, set())
self.assertEqual(k1 | k2, {1,2,3})
self.assertEqual(k1 ^ k2, {3})
self.assertEqual(k1 ^ k3, {1,2,4})
@unittest.skip("TODO: RUSTPYTHON")
def test_dictview_set_operations_on_items(self):
k1 = {1:1, 2:2}.items()
k2 = {1:1, 2:2, 3:3}.items()
k3 = {4:4}.items()
self.assertEqual(k1 - k2, set())
self.assertEqual(k1 - k3, {(1,1), (2,2)})
self.assertEqual(k2 - k1, {(3,3)})
self.assertEqual(k3 - k1, {(4,4)})
self.assertEqual(k1 & k2, {(1,1), (2,2)})
self.assertEqual(k1 & k3, set())
self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
self.assertEqual(k1 ^ k2, {(3,3)})
self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dictview_mixed_set_operations(self):
# Just a few for .keys()
self.assertTrue({1:1}.keys() == {1})
self.assertTrue({1} == {1:1}.keys())
self.assertEqual({1:1}.keys() | {2}, {1, 2})
self.assertEqual({2} | {1:1}.keys(), {1, 2})
# And a few for .items()
self.assertTrue({1:1}.items() == {(1,1)})
self.assertTrue({(1,1)} == {1:1}.items())
self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(dict, "__missing__"))
self.assertFalse(hasattr({}, "__missing__"))
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
with self.assertRaises(RuntimeError) as c:
e[42]
self.assertEqual(c.exception.args, (42,))
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
with self.assertRaises(KeyError) as c:
f[42]
self.assertEqual(c.exception.args, (42,))
class G(dict):
pass
g = G()
with self.assertRaises(KeyError) as c:
g[42]
self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = {}
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
# Dictionary lookups should fail if __eq__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __eq__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
with self.assertRaises(CustomException):
exec(stmt, locals())
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
with self.assertRaises(ZeroDivisionError):
d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None}
d = {}
@unittest.skip("TODO: RUSTPYTHON")
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for dictiter and
# dictview objects.
class C(object):
pass
views = (dict.items, dict.values, dict.keys)
for v in views:
obj = C()
ref = weakref.ref(obj)
container = {obj: 1}
obj.v = v(container)
obj.x = iter(obj.v)
del obj, container
gc.collect()
self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@support.cpython_only
def test_track_literals(self):
# Test GC-optimization of dict literals
x, y, z, w = 1.5, "a", (1, None), []
self._not_tracked({})
self._not_tracked({x:(), y:x, z:1})
self._not_tracked({1: "a", "b": 2})
self._not_tracked({1: 2, (None, True, False, ()): int})
self._not_tracked({1: object()})
# Dicts with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked({1: []})
self._tracked({1: ([],)})
self._tracked({1: {}})
self._tracked({1: set()})
@support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically-created dicts
class MyObject(object):
pass
x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
d = dict()
self._not_tracked(d)
d[1] = "a"
self._not_tracked(d)
d[y] = 2
self._not_tracked(d)
d[z] = 3
self._not_tracked(d)
self._not_tracked(d.copy())
d[4] = w
self._tracked(d)
self._tracked(d.copy())
d[4] = None
self._not_tracked(d)
self._not_tracked(d.copy())
# dd isn't tracked right now, but it may mutate and therefore d
# which contains it must be tracked.
d = dict()
dd = dict()
d[1] = dd
self._not_tracked(dd)
self._tracked(d)
dd[1] = d
self._tracked(dd)
d = dict.fromkeys([x, y, z])
self._not_tracked(d)
dd = dict()
dd.update(d)
self._not_tracked(dd)
d = dict.fromkeys([x, y, z, o])
self._tracked(d)
dd = dict()
dd.update(d)
self._tracked(dd)
d = dict(x=x, y=y, z=z)
self._not_tracked(d)
d = dict(x=x, y=y, z=z, w=w)
self._tracked(d)
d = dict()
d.update(x=x, y=y, z=z)
self._not_tracked(d)
d.update(w=w)
self._tracked(d)
d = dict([(x, y), (z, 1)])
self._not_tracked(d)
d = dict([(x, y), (z, w)])
self._tracked(d)
d = dict()
d.update([(x, y), (z, 1)])
self._not_tracked(d)
d.update([(x, y), (z, w)])
self._tracked(d)
@support.cpython_only
def test_track_subtypes(self):
# Dict subtypes are always tracked
class MyDict(dict):
pass
self._tracked(MyDict())
def make_shared_key_dict(self, n):
class C:
pass
dicts = []
for i in range(n):
a = C()
a.x, a.y, a.z = 1, 2, 3
dicts.append(a.__dict__)
return dicts
@support.cpython_only
def test_splittable_setdefault(self):
"""split table must be combined when setdefault()
breaks insertion order"""
a, b = self.make_shared_key_dict(2)
a['a'] = 1
size_a = sys.getsizeof(a)
a['b'] = 2
b.setdefault('b', 2)
size_b = sys.getsizeof(b)
b['a'] = 1
self.assertGreater(size_b, size_a)
self.assertEqual(list(a), ['x', 'y', 'z', 'a', 'b'])
self.assertEqual(list(b), ['x', 'y', 'z', 'b', 'a'])
@support.cpython_only
def test_splittable_del(self):
"""split table must be combined when del d[k]"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
del a['y'] # split table is combined
with self.assertRaises(KeyError):
del a['y']
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ['x', 'z'])
self.assertEqual(list(b), ['x', 'y', 'z'])
# Two dicts have different insertion order.
a['y'] = 42
self.assertEqual(list(a), ['x', 'z', 'y'])
self.assertEqual(list(b), ['x', 'y', 'z'])
@support.cpython_only
def test_splittable_pop(self):
"""split table must be combined when d.pop(k)"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
a.pop('y') # split table is combined
with self.assertRaises(KeyError):
a.pop('y')
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ['x', 'z'])
self.assertEqual(list(b), ['x', 'y', 'z'])
# Two dicts have different insertion order.
a['y'] = 42
self.assertEqual(list(a), ['x', 'z', 'y'])
self.assertEqual(list(b), ['x', 'y', 'z'])
@support.cpython_only
def test_splittable_pop_pending(self):
"""pop a pending key in a splitted table should not crash"""
a, b = self.make_shared_key_dict(2)
a['a'] = 4
with self.assertRaises(KeyError):
b.pop('a')
@support.cpython_only
def test_splittable_popitem(self):
"""split table must be combined when d.popitem()"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
item = a.popitem() # split table is combined
self.assertEqual(item, ('z', 3))
with self.assertRaises(KeyError):
del a['z']
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ['x', 'y'])
self.assertEqual(list(b), ['x', 'y', 'z'])
@support.cpython_only
def test_splittable_setattr_after_pop(self):
"""setattr() must not convert combined table into split table."""
# Issue 28147
import _testcapi
class C:
pass
a = C()
a.a = 1
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
# dict.pop() convert it to combined table
a.__dict__.pop('a')
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# But C should not convert a.__dict__ to split table again.
a.a = 1
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# Same for popitem()
a = C()
a.a = 2
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
a.__dict__.popitem()
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
a.a = 3
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
@unittest.skip("TODO: RUSTPYTHON")
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
it = iter(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(data))
@unittest.skip("TODO: RUSTPYTHON")
def test_itemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
# dictviews aren't picklable, only their iterators
itorg = iter(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(dict(it), data)
@unittest.skip("TODO: RUSTPYTHON")
def test_valuesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
# data.values() isn't picklable, only its iterator
it = iter(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data.values()))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(list(data.values())))
@unittest.skip("TODO: RUSTPYTHON")
def test_reverseiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
it = reversed(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data)))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(reversed(data)))
@unittest.skip("TODO: RUSTPYTHON")
def test_reverseitemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
# dictviews aren't picklable, only their iterators
itorg = reversed(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(dict(it), data)
@unittest.skip("TODO: RUSTPYTHON")
def test_reversevaluesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = {1:"a", 2:"b", 3:"c"}
# data.values() isn't picklable, only its iterator
it = reversed(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data.values())))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(data.values()))
def test_instance_dict_getattr_str_subclass(self):
class Foo:
def __init__(self, msg):
self.msg = msg
f = Foo('123')
class _str(str):
pass
self.assertEqual(f.msg, getattr(f, _str('msg')))
self.assertEqual(f.msg, f.__dict__[_str('msg')])
def test_object_set_item_single_instance_non_str_key(self):
class Foo: pass
f = Foo()
f.__dict__[1] = 1
f.a = 'a'
self.assertEqual(f.__dict__, {1:1, 'a':'a'})
def check_reentrant_insertion(self, mutate):
# This object will trigger mutation of the dict when replaced
# by another value. Note this relies on refcounting: the test
# won't achieve its purpose on fully-GCed Python implementations.
class Mutating:
def __del__(self):
mutate(d)
d = {k: Mutating() for k in 'abcdefghijklmnopqr'}
for k in list(d):
d[k] = k
def test_reentrant_insertion(self):
# Reentrant insertion shouldn't crash (see issue #22653)
def mutate(d):
d['b'] = 5
self.check_reentrant_insertion(mutate)
def mutate(d):
d.update(self.__dict__)
d.clear()
self.check_reentrant_insertion(mutate)
def mutate(d):
while d:
d.popitem()
self.check_reentrant_insertion(mutate)
@unittest.skip("TODO: RUSTPYTHON")
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return 0
def __eq__(self, o):
other.clear()
return False
l = [(i,0) for i in range(1, 1337)]
other = dict(l)
other[X()] = 0
d = {X(): 0, 1: 1}
self.assertRaises(RuntimeError, d.update, other)
@unittest.skip("TODO: RUSTPYTHON")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, dict)
support.check_free_after_iterating(self, lambda d: iter(d.keys()), dict)
support.check_free_after_iterating(self, lambda d: iter(d.values()), dict)
support.check_free_after_iterating(self, lambda d: iter(d.items()), dict)
@unittest.skip("TODO: RUSTPYTHON")
def test_equal_operator_modifying_operand(self):
# test fix for seg fault reported in issue 27945 part 3.
class X():
def __del__(self):
dict_b.clear()
def __eq__(self, other):
dict_a.clear()
return True
def __hash__(self):
return 13
dict_a = {X(): 0}
dict_b = {X(): X()}
self.assertTrue(dict_a == dict_b)
def test_fromkeys_operator_modifying_dict_operand(self):
# test fix for seg fault reported in issue 27945 part 4a.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = {} # this is required to exist so that d can be constructed!
d = {X(1): 1, X(2): 2}
try:
dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
def test_fromkeys_operator_modifying_set_operand(self):
# test fix for seg fault reported in issue 27945 part 4b.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = {} # this is required to exist so that d can be constructed!
d = {X(1), X(2)}
try:
dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
@unittest.skip("TODO: RUSTPYTHON")
def test_dictitems_contains_use_after_free(self):
class X:
def __eq__(self, other):
d.clear()
return NotImplemented
d = {0: set()}
(0, X()) in d.items()
def test_init_use_after_free(self):
class X:
def __hash__(self):
pair[:] = []
return 13
pair = [X(), 123]
dict([pair])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_oob_indexing_dictiter_iternextitem(self):
class X(int):
def __del__(self):
d.clear()
d = {i: X(i) for i in range(8)}
def iter_and_mutate():
for result in d.items():
if result[0] == 2:
d[2] = None # free d[2] --> X(2).__del__ was called
self.assertRaises(RuntimeError, iter_and_mutate)
@unittest.skip("TODO: RUSTPYTHON")
def test_reversed(self):
d = {"a": 1, "b": 2, "foo": 0, "c": 3, "d": 4}
del d["foo"]
r = reversed(d)
self.assertEqual(list(r), list('dcba'))
self.assertRaises(StopIteration, next, r)
@unittest.skip("TODO: RUSTPYTHON")
def test_dict_copy_order(self):
# bpo-34320
od = collections.OrderedDict([('a', 1), ('b', 2)])
od.move_to_end('a')
expected = list(od.items())
copy = dict(od)
self.assertEqual(list(copy.items()), expected)
# dict subclass doesn't override __iter__
class CustomDict(dict):
pass
pairs = [('a', 1), ('b', 2), ('c', 3)]
d = CustomDict(pairs)
self.assertEqual(pairs, list(dict(d).items()))
class CustomReversedDict(dict):
def keys(self):
return reversed(list(dict.keys(self)))
__iter__ = keys
def items(self):
return reversed(dict.items(self))
d = CustomReversedDict(pairs)
self.assertEqual(pairs[::-1], list(dict(d).items()))
class CAPITest(unittest.TestCase):
# Test _PyDict_GetItem_KnownHash()
@support.cpython_only
def test_getitem_knownhash(self):
from _testcapi import dict_getitem_knownhash
d = {'x': 1, 'y': 2, 'z': 3}
self.assertEqual(dict_getitem_knownhash(d, 'x', hash('x')), 1)
self.assertEqual(dict_getitem_knownhash(d, 'y', hash('y')), 2)
self.assertEqual(dict_getitem_knownhash(d, 'z', hash('z')), 3)
# not a dict
self.assertRaises(SystemError, dict_getitem_knownhash, [], 1, hash(1))
# key does not exist
self.assertRaises(KeyError, dict_getitem_knownhash, {}, 1, hash(1))
class Exc(Exception): pass
class BadEq:
def __eq__(self, other):
raise Exc
def __hash__(self):
return 7
k1, k2 = BadEq(), BadEq()
d = {k1: 1}
self.assertEqual(dict_getitem_knownhash(d, k1, hash(k1)), 1)
self.assertRaises(Exc, dict_getitem_knownhash, d, k2, hash(k2))
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
if __name__ == "__main__":
unittest.main()
| 31.647552
| 82
| 0.525676
|
6838be9a56714803a88bf9125df40f7e565a157e
| 1,025
|
py
|
Python
|
gspread/exceptions.py
|
MilindGaharwar/gspread
|
bf4f2606f210ac03bda1946810597dd83ab5ed80
|
[
"MIT"
] | 3
|
2021-06-15T15:05:33.000Z
|
2022-01-31T22:56:19.000Z
|
gspread/exceptions.py
|
merbroussard/gspread
|
d0a69c5e9fa0cd1ae807f29c933e93b7bbb58a1b
|
[
"MIT"
] | 15
|
2015-01-06T13:41:52.000Z
|
2022-03-30T10:37:25.000Z
|
gspread/exceptions.py
|
merbroussard/gspread
|
d0a69c5e9fa0cd1ae807f29c933e93b7bbb58a1b
|
[
"MIT"
] | 1
|
2018-01-27T20:13:16.000Z
|
2018-01-27T20:13:16.000Z
|
# -*- coding: utf-8 -*-
"""
gspread.exceptions
~~~~~~~~~~~~~~~~~~
Exceptions used in gspread.
"""
class GSpreadException(Exception):
"""A base class for gspread's exceptions."""
class AuthenticationError(GSpreadException):
"""An error during authentication process."""
class SpreadsheetNotFound(GSpreadException):
"""Trying to open non-existent or inaccessible spreadsheet."""
class WorksheetNotFound(GSpreadException):
"""Trying to open non-existent or inaccessible worksheet."""
class CellNotFound(GSpreadException):
"""Cell lookup exception."""
class NoValidUrlKeyFound(GSpreadException):
"""No valid key found in URL."""
class UnsupportedFeedTypeError(GSpreadException):
pass
class UrlParameterMissing(GSpreadException):
pass
class IncorrectCellLabel(GSpreadException):
"""The cell label is incorrect."""
class UpdateCellError(GSpreadException):
"""Error while setting cell's value."""
class RequestError(GSpreadException):
"""Error while sending API request."""
| 23.837209
| 66
| 0.727805
|
94c59e29177a0d9b2658393bf810adf431801b45
| 6,525
|
py
|
Python
|
web/servicecalls/salesreports.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 8
|
2015-10-27T12:38:54.000Z
|
2018-02-23T03:03:24.000Z
|
web/servicecalls/salesreports.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 3
|
2015-10-28T22:23:58.000Z
|
2016-01-13T04:05:04.000Z
|
web/servicecalls/salesreports.py
|
vacoj/mbodjango
|
e9a6df563862c587e4cc2c2713ed7f8ea0a6e4e3
|
[
"MIT"
] | 9
|
2015-09-28T17:32:17.000Z
|
2018-02-01T00:01:04.000Z
|
import operator
import datetime
from ..models import ReportsCacheModel
import ast
from ..servicecalls.saleservice_gets import GetSales
class SalesReport:
def __init__(self, sales=None, current=True):
self.SudsResult = sales
self.SaleTotalsByDate = None
self.current = current
self.TodayDate = datetime.datetime.today()
def GetSudsResults(self):
if not self.current:
self.SudsResult = GetSales(timedelta=1).Sales.Sale
else:
self.SudsResult = GetSales(timedelta=2).Sales.Sale
def print_sales(self):
for sale in self.SudsResult:
print(sale)
def get_totals_by_dow(self):
name = 'total_dow'
if not self.current:
name += '_nc'
sales = {}
try:
sorted_by_dow = ReportsCacheModel.objects.filter(
datapull_datestamp=self.TodayDate, chart_name=name)[0]
return eval(sorted_by_dow.data_string)
except (IndexError, ReportsCacheModel.DoesNotExist):
if self.SudsResult is None:
self.GetSudsResults()
for sale in self.SudsResult:
sale_total = 0.0
payments = sale.Payments
saleday = str(sale.SaleDate.weekday())
for payment in payments:
sale_total += payment[1][0].Amount
if saleday not in sales:
sales[saleday] = sale_total
else:
sales[saleday] += sale_total
sale_totals_by_dow = sales
sorted_by_dow = sorted(sale_totals_by_dow.items(), key=operator.itemgetter(0))
report = ReportsCacheModel()
report.chart_name = name
report.data_string = str(sorted_by_dow)
report.save()
return sorted_by_dow
def sale_totals_by_date(self):
name = 'total_date'
if not self.current:
name += '_nc'
sales = {}
try:
sorted_by_payment_type = ReportsCacheModel.objects.filter(
datapull_datestamp=self.TodayDate, chart_name=name)[0]
return eval(sorted_by_payment_type.data_string)
except (IndexError, ReportsCacheModel.DoesNotExist):
if self.SudsResult is None:
self.GetSudsResults()
for sale in self.SudsResult:
sale_total = 0.0
payments = sale.Payments
saledate = str(sale.SaleDate).split(' ')[0]
for payment in payments:
sale_total += payment[1][0].Amount
if saledate not in sales:
sales[saledate] = sale_total
else:
sales[saledate] += sale_total
sale_totals_by_date = sales
sorted_by_date = sorted(sale_totals_by_date.items(), key=operator.itemgetter(0))
report = ReportsCacheModel()
report.chart_name = name
report.data_string = str(sorted_by_date)
report.save()
return sorted_by_date
def get_totals_by_hour(self):
name = 'total_hour'
if not self.current:
name += '_nc'
sales = {}
try:
sale_by_hour = ReportsCacheModel.objects.filter(
datapull_datestamp=self.TodayDate, chart_name=name)[0]
return eval(sale_by_hour.data_string)
except (IndexError, ReportsCacheModel.DoesNotExist):
if self.SudsResult is None:
self.GetSudsResults()
for sale in self.SudsResult:
sale_total = 0.0
payments = sale.Payments
salehour = str(sale.SaleDateTime.hour)
if len(salehour) == 1:
salehour = '0' + str(salehour)
else:
salehour = str(salehour)
for payment in payments:
sale_total += payment[1][0].Amount
if salehour not in sales:
sales[salehour] = sale_total
else:
sales[salehour] += sale_total
sale_totals_by_hour = sales
sale_by_hour = sorted(sale_totals_by_hour.items(), key=operator.itemgetter(0))
report = ReportsCacheModel()
report.chart_name = name
report.data_string = str(sale_by_hour)
report.save()
return sale_by_hour
def get_totals_by_payment_type(self):
name = 'total_paymenttype'
if not self.current:
name += '_nc'
sales = {}
try:
sorted_by_payment_type = ReportsCacheModel.objects.filter(
datapull_datestamp=self.TodayDate, chart_name=name)[0]
return ast.literal_eval(sorted_by_payment_type.data_string)
except (IndexError, ReportsCacheModel.DoesNotExist):
if self.SudsResult is None:
self.GetSudsResults()
for sale in self.SudsResult:
sale_total = 0.0
payments = sale.Payments
paytype = ""
for payment in payments:
paytype = str(payment[1][0].Type)
sale_total = payment[1][0].Amount
if paytype not in sales:
sales[paytype] = sale_total
else:
sales[paytype] += sale_total
sale_totals_by_payment_type = sales
sorted_by_payment_type = sorted(
sale_totals_by_payment_type.items(), key=operator.itemgetter(1))
report = ReportsCacheModel()
report.chart_name = name
report.data_string = '\"' + str(sale_totals_by_payment_type) + '\"'
report.save()
return sale_totals_by_payment_type.items
def report_normalizer(report1, report2):
report1 = sorted(eval(report1).items(), key=operator.itemgetter(1))
report2 = sorted(eval(report2).items(), key=operator.itemgetter(1))
for i in report1:
exists = False
for k in report2:
if i[0] == k[0]:
exists = True
if not exists:
report2.insert(report1.index(i), (i[0], 0))
for i in report2:
exists = False
for k in report1:
if i[0] == k[0]:
exists = True
if not exists:
report1.insert(report2.index(i), (i[0], 0))
return report1, report2
| 33.80829
| 92
| 0.555096
|
daea370b5705056386b2c5c2a8bf0ced86144b8e
| 4,298
|
py
|
Python
|
src/sshark.py
|
mfs-git/sshark
|
291d050d0b6fd9f863f48e9cfa86284cd35d575d
|
[
"MIT"
] | null | null | null |
src/sshark.py
|
mfs-git/sshark
|
291d050d0b6fd9f863f48e9cfa86284cd35d575d
|
[
"MIT"
] | null | null | null |
src/sshark.py
|
mfs-git/sshark
|
291d050d0b6fd9f863f48e9cfa86284cd35d575d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
This tool uses tshark to capture packets (pcap file) and insert them into a SQLite file
"""
import argparse
import subprocess
import shlex
import sys
import signal
import sqlite3
import xml.etree.cElementTree as ET
__main_author__ = 'M. Fatemipour'
__email__ = 'm.fatemipour@gmail.com'
__date__ = '2016-Apr-2'
__last_modified_date__ = '2016-Apr-2'
__version__ = '1.0.0'
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
profiles = {}
def handler(signum, frame):
print 'interrupt'
proc.terminate()
def add_profile(prof):
col = []
filed = []
type = []
for f in prof:
filed.append(f.attrib['filed'])
col.append(f.attrib['column'])
type.append(f.attrib['type'])
display_fields = ''
create_table_query = 'CREATE TABLE packets ('
for i in range(0, len(col)):
display_fields += '-e ' + filed[i] + ' '
if i > 0:
create_table_query += ', '
create_table_query += col[i] + ' ' + type[i]
create_table_query += ')'
p = {'captureFilter': prof.attrib['captureFilter'], 'displayFilter': prof.attrib['displayFilter'],
'sqliteName': prof.attrib['sqliteName'], 'pcapName': prof.attrib['pcapName'],
'display_fields': display_fields, 'create_table_query': create_table_query}
profiles[prof.attrib['Name']] = p
def parse_config(config_file):
tree = ET.parse(config_file)
root = tree.getroot()
if root.tag != 'sshark_profiles':
raise Exception('root of profiles xml must be sshark_profiles')
for profile in root:
add_profile(profile)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='/usr/local/config/sshark_config.xml',
help="config file, if not mentioned /usr/local/sshark_config.xml will be used")
parser.add_argument('-p', '--profile', type=str, default='TCP',
help="profile name, if not mentioned TCP will be used")
parser.add_argument('-r', '--input', type=argparse.FileType('r'), default=None,
help="read packets from input file instead of network")
args = parser.parse_args()
print args.config.name
parse_config(args.config.name)
p = args.profile
print 'Using profile ' + p
conn = sqlite3.connect(profiles[p]['sqliteName'])
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS packets')
capture_filter = profiles[p]['captureFilter']
display_fields_str = profiles[p]['display_fields']
c.execute(profiles[p]['create_table_query'])
tshark_command = 'tshark -T fields ' + display_fields_str
if args.input is None and len(profiles[p]['displayFilter']) == 0:
tshark_command += ' -F pcap -w ' + profiles[p]['pcapName']
if args.input is None:
tshark_command += ' -f "' + profiles[p]['captureFilter'] + '"'
if args.input is not None:
tshark_command += ' -r ' + args.input.name
if len(profiles[p]['displayFilter']) > 0:
print bcolors.WARNING + 'When displayFilter has a value, saving the captured file (pcap) is disabled.' +\
bcolors.ENDC
tshark_command += ' -Y "' + profiles[p]['displayFilter'] + '"'
print 'tshark command: ' + bcolors.OKBLUE + tshark_command + bcolors.ENDC
proc = subprocess.Popen(shlex.split(tshark_command), stdout=subprocess.PIPE, stderr=sys.stderr)
signal.signal(signal.SIGINT, handler)
values_to_be_added = ''
i = 0
while True:
line = proc.stdout.readline()
if line != '':
i += 1
if len(values_to_be_added) > 0:
values_to_be_added += ','
values_to_be_added += '("' + line.replace('\t', '","').strip() + '")\n'
if i % 100 == 0:
conn.execute('INSERT INTO packets VALUES ' + values_to_be_added)
conn.commit()
values_to_be_added = ''
else:
break
if len(values_to_be_added) > 0:
conn.execute('INSERT INTO packets VALUES ' + values_to_be_added)
conn.commit()
proc.wait()
print 'Done.'
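# --- Added illustrative sketch (not part of the original script) ---
# A minimal profiles file that parse_config()/add_profile() above would accept.
# The element and attribute names ('sshark_profiles', 'Name', 'captureFilter',
# 'displayFilter', 'sqliteName', 'pcapName', 'filed', 'column', 'type') follow
# what the parser reads; the concrete values are invented for the example.
#
# <sshark_profiles>
#     <profile Name="TCP" captureFilter="tcp" displayFilter=""
#              sqliteName="tcp_packets.sqlite" pcapName="tcp_packets.pcap">
#         <field filed="ip.src" column="src_ip" type="TEXT"/>
#         <field filed="tcp.dstport" column="dst_port" type="INTEGER"/>
#     </profile>
# </sshark_profiles>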
| 32.315789
| 117
| 0.620754
|
73c34348eabdb5d3ac95175b08536dd288346942
| 79
|
py
|
Python
|
ioplin/__init__.py
|
DearCaat/ioplin
|
e4eae4a60d461d08f9aaf886bea2915282872245
|
[
"Apache-2.0"
] | 4
|
2021-06-24T06:04:05.000Z
|
2022-03-19T08:49:32.000Z
|
ioplin/__init__.py
|
DearCaat/ioplin
|
e4eae4a60d461d08f9aaf886bea2915282872245
|
[
"Apache-2.0"
] | null | null | null |
ioplin/__init__.py
|
DearCaat/ioplin
|
e4eae4a60d461d08f9aaf886bea2915282872245
|
[
"Apache-2.0"
] | 1
|
2021-07-04T07:56:40.000Z
|
2021-07-04T07:56:40.000Z
|
from .train import *
from .predict import *
from keras.models import load_model
| 26.333333
| 35
| 0.797468
|
e88459ba2c09f7e054f60aee44fc2dc3052165fa
| 9,926
|
py
|
Python
|
openhgnn/trainerflow/dist_mult.py
|
zsy0828/OpenHGNN
|
7fe0917008c9f50269bbd308e411a1d8199d667d
|
[
"Apache-2.0"
] | null | null | null |
openhgnn/trainerflow/dist_mult.py
|
zsy0828/OpenHGNN
|
7fe0917008c9f50269bbd308e411a1d8199d667d
|
[
"Apache-2.0"
] | null | null | null |
openhgnn/trainerflow/dist_mult.py
|
zsy0828/OpenHGNN
|
7fe0917008c9f50269bbd308e411a1d8199d667d
|
[
"Apache-2.0"
] | null | null | null |
import copy
import dgl
import numpy as np
import torch as th
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from . import BaseFlow, register_flow
from ..tasks import build_task
from ..utils import extract_embed
from collections.abc import Mapping
class NegativeSampler(object):
def __init__(self, g, k):
# caches the probability distribution
self.weights = {
etype: g.in_degrees(etype=etype).float() ** 0.75
for etype in g.canonical_etypes
}
self.k = k
def __call__(self, g, eids_dict):
result_dict = {}
for etype, eids in eids_dict.items():
src, _ = g.find_edges(eids, etype=etype)
src = src.repeat_interleave(self.k)
dst = self.weights[etype].multinomial(len(src), replacement=True)
result_dict[etype] = (src, dst)
return result_dict
@register_flow("distmult")
class DistMult(BaseFlow):
"""Node classification flows."""
def __init__(self, args):
super(DistMult, self).__init__(args)
self.args = args
self.model_name = args.model
self.device = args.device
self.task = build_task(args)
self.hg = self.task.get_graph().to(self.device)
self.loss_fn = self.task.get_loss_fn()
self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg)
self.model = self.model.to(self.device)
self.evaluator = self.task.get_evaluator('mrr')
self.num_rels = self.task.dataset.num_rels
if hasattr(self.model, 'r_embedding'):
para = self.model.parameters()
self.r_embedding = self.model.r_embedding[:self.num_rels]
else:
self.r_embedding = nn.Parameter(th.Tensor(self.num_rels, self.args.out_dim).to(self.device))
nn.init.uniform_(self.r_embedding,a=-1,b=1)
para = [{'params': self.model.parameters()}, {'params': self.r_embedding}]
self.optimizer = (
th.optim.Adam(para, lr=args.lr, weight_decay=args.weight_decay)
)
self.patience = args.patience
self.max_epoch = args.max_epoch
if self.args.mini_batch_flag:
self.hg = self.hg.to('cpu')
train_eid_dict = {
etype: self.hg.edges(etype=etype, form='eid')
for etype in self.hg.canonical_etypes}
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(self.args.n_layers)
self.dataloader = dgl.dataloading.EdgeDataLoader(
self.hg, train_eid_dict, sampler, device=self.device,
negative_sampler=NegativeSampler(self.hg, 1), batch_size=100,
shuffle=True, drop_last=False, num_workers=0
)
else:
self.train_eid_dict = {
etype: self.hg.edges(etype=etype, form='eid')
for etype in self.hg.canonical_etypes}
self.negative_sampler = NegativeSampler(self.hg, 10)
def preprocess(self):
self.test_dataset = self.task.dataset.get_triples('test_mask').to(self.device)
self.val_dataset = self.task.dataset.get_triples('val_mask').to(self.device)
self.train_dataset = self.task.dataset.get_triples('train_mask').to(self.device)
return
def train(self):
self.preprocess()
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
best_score = 0
best_model = copy.deepcopy(self.model)
for epoch in tqdm(range(self.max_epoch), ncols=80):
if self.args.mini_batch_flag:
loss = self._mini_train_step()
else:
loss = self._full_train_setp()
if epoch % 2 == 0:
metric= self._test_step(split='train')
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Val-mrr: {metric:.4f}, Loss:{loss:.4f}"
)
if metric >= best_score:
best_score = metric
best_model = copy.deepcopy(self.model)
patience = 0
else:
patience += 1
if patience == self.patience:
epoch_iter.close()
break
print(f"Valid mrr = {best_score: .4f}")
self.model = best_model
test_mrr = self._test_step(split="test")
val_mrr = self._test_step(split="val")
print(f"Test mrr = {test_mrr:.4f}")
return dict(Test_mrr=test_mrr, ValMrr=val_mrr)
def _mini_train_step(self,):
self.model.train()
all_loss = 0
for input_nodes, positive_graph, negative_graph, blocks in self.dataloader:
blocks = [b.to(self.device) for b in blocks]
positive_graph = positive_graph.to(self.device)
negative_graph = negative_graph.to(self.device)
if type(input_nodes) == th.Tensor:
input_nodes = {self.category: input_nodes}
input_features = extract_embed(self.model.embed_layer(), input_nodes)
logits = self.model(blocks, input_features)[self.category]
loss = self.loss_calculation(positive_graph, negative_graph, logits)
all_loss += loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return all_loss
def loss_calculation(self, positive_graph, negative_graph, logits):
p_score = self.ScorePredictor(positive_graph, logits)
p_label = th.ones(len(p_score), device=self.device)
n_score = self.ScorePredictor(negative_graph, logits)
n_label = th.zeros(len(n_score), device=self.device)
loss = F.binary_cross_entropy_with_logits(th.cat((p_score, n_score)), th.cat((p_label, n_label)))
return loss
def ScorePredictor(self, edge_subgraph, x):
score_list = []
with edge_subgraph.local_scope():
edge_subgraph.ndata['x'] = x
for etype in edge_subgraph.canonical_etypes:
e = self.r_embedding[int(etype[1])]
n = edge_subgraph.num_edges(etype)
edge_subgraph.edata['e'] = {etype: e.expand(n, -1)}
edge_subgraph.apply_edges(
dgl.function.u_mul_e('x', 'e', 's'), etype=etype)
edge_subgraph.apply_edges(
dgl.function.e_mul_v('s', 'x', 'score'), etype=etype)
score = th.sum(edge_subgraph.edata['score'].pop(etype), dim=1)
#score = th.sum(th.mul(edge_subgraph.edata['score'].pop(etype), e), dim=1)
score_list.append(score)
return th.cat(score_list)
def ScorePredictor_(self, edge_subgraph, x):
score_list = []
with edge_subgraph.local_scope():
edge_subgraph.ndata['x'] = x
for etype in edge_subgraph.canonical_etypes:
e = self.r_embedding[int(etype[1])]
n = edge_subgraph.num_edges(etype)
edge_subgraph.edata['e'] = {etype: e.expand(n, -1)}
edge_subgraph.apply_edges(
dgl.function.u_add_e('x', 'e', 's'), etype=etype)
edge_subgraph.apply_edges(
dgl.function.e_sub_v('s', 'x', 'score'), etype=etype)
score = -th.norm(edge_subgraph.edata['score'].pop(etype), p=1, dim=1)
#score = th.sum(th.mul(edge_subgraph.edata['score'].pop(etype), e), dim=1)
score_list.append(score)
return th.cat(score_list)
def regularization_loss(self, embedding):
return th.mean(embedding.pow(2)) + th.mean(self.r_embedding.pow(2))
def construct_negative_graph(self,):
neg_srcdst = self.negative_sampler(self.hg, self.train_eid_dict)
if not isinstance(neg_srcdst, Mapping):
assert len(self.hg.etypes) == 1, \
'graph has multiple or no edge types; '\
'please return a dict in negative sampler.'
neg_srcdst = {self.hg.canonical_etypes[0]: neg_srcdst}
# Get dtype from a tuple of tensors
#dtype = F.dtype(list(neg_srcdst.values())[0][0])
neg_edges = {
etype: neg_srcdst.get(etype, (th.tensor([]), th.tensor([])))
for etype in self.hg.canonical_etypes}
neg_pair_graph = dgl.heterograph(
neg_edges, {ntype: self.hg.number_of_nodes(ntype) for ntype in self.hg.ntypes})
return neg_pair_graph
def _full_train_setp(self):
self.model.train()
negative_graph = self.construct_negative_graph()
#for _ in range(2000):
logits = self.model(self.hg)[self.category]
#reg_loss = self.regularization_loss(logits)
loss = self.loss_calculation(self.hg, negative_graph, logits)
self.optimizer.zero_grad()
loss.backward()
#th.nn.utils.clip_grad_norm_(list(self.model.parameters()) + [self.r_embedding], 1.0)
th.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
self.optimizer.step()
print(loss.item())
return loss.item()
def _test_step(self, split=None, logits=None):
self.model.eval()
with th.no_grad():
logits = logits if logits is not None else self.model(self.hg)[self.category]
metric = self.evaluator(logits, self.r_embedding, self.train_dataset, self.val_dataset, self.train_dataset, hits=[1], eval_p='raw')
# if split == 'val':
# metric = self.evaluator(logits, self.r_embedding, self.val_dataset, hits=[1, 3, 10], eval_p='raw')
# elif split == 'test':
# metric = self.evaluator(logits, self.r_embedding, self.test_dataset, hits=[1, 3, 10], eval_p='raw')
# elif split == 'train':
# metric = self.evaluator(logits, self.r_embedding, self.train_dataset, hits=[1], eval_p='raw')
return metric
| 43.535088
| 143
| 0.599839
|
8c547c276425fb4693184383542a946b66b821f9
| 593
|
py
|
Python
|
kalaida_schvyschkov/server/sv_packages/__init__.py
|
maxkalayda/stud_projects
|
e56a9647ca23694ef8c6983cb3b813dadda40d15
|
[
"MIT"
] | null | null | null |
kalaida_schvyschkov/server/sv_packages/__init__.py
|
maxkalayda/stud_projects
|
e56a9647ca23694ef8c6983cb3b813dadda40d15
|
[
"MIT"
] | null | null | null |
kalaida_schvyschkov/server/sv_packages/__init__.py
|
maxkalayda/stud_projects
|
e56a9647ca23694ef8c6983cb3b813dadda40d15
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from os import path
basedir = path.abspath(path.join(path.dirname(__file__), '..', 'sv_packages/templates'))
app = Flask(__name__)
app.secret_key = 'my supersecret key of all apps'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database/login_params.db'
app.config['SQLALCHEMY_BINDS'] = {
'main': 'sqlite:///database/sqlite.db'
}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
manager = LoginManager(app)
from sv_packages import routes
db.create_all()
| 26.954545
| 88
| 0.767285
|
88d38be15ae470fc5fbf66e20a289b89cb4fec43
| 278
|
py
|
Python
|
web/src/blueprints/__init__.py
|
jphacks/A_2001
|
b8f0dce49dc32041d06272593a467242636539a6
|
[
"MIT"
] | null | null | null |
web/src/blueprints/__init__.py
|
jphacks/A_2001
|
b8f0dce49dc32041d06272593a467242636539a6
|
[
"MIT"
] | 7
|
2020-10-31T09:10:47.000Z
|
2020-11-06T17:42:58.000Z
|
web/src/blueprints/__init__.py
|
jphacks/A_2001
|
b8f0dce49dc32041d06272593a467242636539a6
|
[
"MIT"
] | null | null | null |
from .api import api
from .tasks import tasks
from .auth import auth
from .quests import quests
from .quests_shared import quests_shared
from .subtasks import subtasks
from .users import users
__all__ = ["api", "auth", "quests", "quests_shared", "tasks", "subtasks", "users"]
| 25.272727
| 82
| 0.751799
|
43d8704474f78915a3e4a1975cfea3815e6c948c
| 923
|
py
|
Python
|
2020/day10.py
|
hrushikeshrv/aoc
|
00b3315cb7119acfe5536ae2dd5a3a78a76b0502
|
[
"MIT"
] | 1
|
2020-12-05T03:59:02.000Z
|
2020-12-05T03:59:02.000Z
|
2020/day10.py
|
hrushikeshrv/aoc
|
00b3315cb7119acfe5536ae2dd5a3a78a76b0502
|
[
"MIT"
] | null | null | null |
2020/day10.py
|
hrushikeshrv/aoc
|
00b3315cb7119acfe5536ae2dd5a3a78a76b0502
|
[
"MIT"
] | null | null | null |
"""
#TODO - Add solution to part 2
Problem 10 - https://adventofcode.com/2020/day/10
Part 1 -
Given a set of numbers, find the product of the number of pairs of numbers which differ by 1 and the number of pairs of numbers which differ by 3
Part 1 -
Given the same set of numbers, find the number of ways to arrange them in ascending order such that two consecutive numbers differ by at most 3
"""
# Set up the input
with open('input-10122020.txt', 'r') as file:
j = file.readlines()
j.append('0\n')
jolts = [int(x[:-1]) for x in j]
jolts.append(max(jolts) + 3)
jolts = sorted(jolts)
# Solution to part 1
def solve_1(jolts):
diff = {1: 0, 2: 0, 3: 0}
for i in range(1, len(jolts)):
d = jolts[i] - jolts[i - 1]
diff[d] += 1
return diff[1] * diff[3]
ans_1 = solve_1(jolts)
print(ans_1)
# Answer was 1820
# Solution to part 2
def solve_2(jolts):
raise NotImplementedError
| 23.075
| 149
| 0.661972
|
396d3e47ace5edb0e735045e1705adc468f7681d
| 886
|
py
|
Python
|
autoresolution/src/__init__.py
|
Haehnchen/enigma2-plugins
|
23007eb0b78665cd3a2faf98d1d6145b4f0ada3f
|
[
"OLDAP-2.3"
] | 1
|
2020-01-27T22:53:56.000Z
|
2020-01-27T22:53:56.000Z
|
autoresolution/src/__init__.py
|
Haehnchen/enigma2-plugins
|
23007eb0b78665cd3a2faf98d1d6145b4f0ada3f
|
[
"OLDAP-2.3"
] | null | null | null |
autoresolution/src/__init__.py
|
Haehnchen/enigma2-plugins
|
23007eb0b78665cd3a2faf98d1d6145b4f0ada3f
|
[
"OLDAP-2.3"
] | null | null | null |
# -*- coding: utf-8 -*-
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE
import os,gettext
PluginLanguageDomain = "AutoResolution"
PluginLanguagePath = "SystemPlugins/AutoResolution/locale"
def localeInit():
lang = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
os.environ["LANGUAGE"] = lang # Enigma doesn't set this (or LC_ALL, LC_MESSAGES, LANG). gettext needs it!
#print "[%s] set language to " %(PluginLanguageDomain), lang
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
t = gettext.dgettext(PluginLanguageDomain, txt)
if t == txt:
#print "[%s] fallback to default translation for %s" %(PluginLanguageDomain, txt)
t = gettext.gettext(txt)
return t
localeInit()
language.addCallback(localeInit)
| 35.44
| 106
| 0.766366
|
a64d6b7148b862426e291c518b3eae98ceadb0aa
| 859
|
py
|
Python
|
tcpproxy.py
|
benjaminbrewerton/dlepard
|
f1279ed798c04df80b501a0a6689f047037196ff
|
[
"MIT"
] | null | null | null |
tcpproxy.py
|
benjaminbrewerton/dlepard
|
f1279ed798c04df80b501a0a6689f047037196ff
|
[
"MIT"
] | null | null | null |
tcpproxy.py
|
benjaminbrewerton/dlepard
|
f1279ed798c04df80b501a0a6689f047037196ff
|
[
"MIT"
] | null | null | null |
import asyncio
class TCPProxy(asyncio.Protocol):
def __init__(self, ipv4adr, port, interface, receive_handler, loop=None):
if loop is None:
self.loop = asyncio.get_event_loop()
else:
self.loop = loop
self.running = False
self.ip_addr = ipv4adr
self.port = port
self.interface = interface
self.transport = None # type: asyncio.Transport
self.receive_handler = receive_handler
def connection_made(self, transport):
self.transport = transport
def data_received(self, data: bytes):
self.receive_handler(data)
def send_msg(self, message):
self.transport.write(message)
async def start(self):
coro = self.loop.create_connection(lambda: self, host=self.ip_addr, port=self.port)
await asyncio.wait_for(coro, 5)
| 28.633333
| 91
| 0.6461
|
ef3f19540130364218e18488536191df6983d0a8
| 4,016
|
py
|
Python
|
torchnlp/datasets/wmt.py
|
XingxingZhang/PyTorch-NLP
|
b998dbbd943f7a00f67fd94aacbe5e865577da33
|
[
"BSD-3-Clause"
] | 3
|
2018-06-27T13:43:47.000Z
|
2022-03-11T05:11:13.000Z
|
torchnlp/datasets/wmt.py
|
XingxingZhang/PyTorch-NLP
|
b998dbbd943f7a00f67fd94aacbe5e865577da33
|
[
"BSD-3-Clause"
] | null | null | null |
torchnlp/datasets/wmt.py
|
XingxingZhang/PyTorch-NLP
|
b998dbbd943f7a00f67fd94aacbe5e865577da33
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from torchnlp.utils import download_compressed_directory
from torchnlp.datasets.dataset import Dataset
def wmt_dataset(directory='data/wmt16_en_de',
train=False,
dev=False,
test=False,
train_filename='train.tok.clean.bpe.32000',
dev_filename='newstest2013.tok.bpe.32000',
test_filename='newstest2014.tok.bpe.32000',
check_file='train.tok.clean.bpe.32000.en',
url='https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8'):
"""
The Workshop on Machine Translation (WMT) 2014 English-German dataset.
Initially this dataset was preprocessed by Google Brain. Though this download contains test sets
from 2015 and 2016, the train set differs slightly from WMT 2015 and 2016 and significantly from
WMT 2017.
The provided data is mainly taken from version 7 of the Europarl corpus, which is freely
available. Note that this is the same data as last year, since Europarl is no longer translated
across all 23 official European languages. Additional training data is taken from the new News
Commentary corpus. There are about 50 million words of training data per language from the
Europarl corpus and 3 million words from the News Commentary corpus.
A new data resource from 2013 is the Common Crawl corpus which was collected from web sources.
Each parallel corpus comes with an annotation file that gives the source of each sentence pair.
References:
* https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py # noqa: E501
* http://www.statmt.org/wmt14/translation-task.html
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): Whether to load the training split of the dataset.
dev (bool, optional): Whether to load the dev split of the dataset.
test (bool, optional): Whether to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
dev_filename (str, optional): The filename of the dev split.
test_filename (str, optional): The filename of the test split.
check_file (str, optional): Check that this file exists to verify the download succeeded.
url (str, optional): URL of the dataset `tar.gz` file.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset`: Tuple with the training tokens, dev
tokens and test tokens in order if their respective boolean argument is true.
Example:
>>> from torchnlp.datasets import wmt_dataset
>>> train = wmt_dataset(train=True)
>>> train[:2]
[{
'en': 'Res@@ um@@ ption of the session',
'de': 'Wiederaufnahme der Sitzungsperiode'
}, {
'en': 'I declare resumed the session of the European Parliament ad@@ jour@@ ned on...'
'de': 'Ich erklär@@ e die am Freitag , dem 17. Dezember unterbro@@ ch@@ ene...'
}]
"""
download_compressed_directory(
file_url=url, directory=directory, check_file=check_file, filename='wmt16_en_de.tar.gz')
ret = []
splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
splits = [f for (requested, f) in splits if requested]
for filename in splits:
examples = []
en_path = os.path.join(directory, filename + '.en')
de_path = os.path.join(directory, filename + '.de')
en_file = [l.strip() for l in open(en_path, 'r', encoding='utf-8')]
de_file = [l.strip() for l in open(de_path, 'r', encoding='utf-8')]
assert len(en_file) == len(de_file)
for i in range(len(en_file)):
if en_file[i] != '' and de_file[i] != '':
examples.append({'en': en_file[i], 'de': de_file[i]})
ret.append(Dataset(examples))
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
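# --- Added usage sketch (not part of the original module) ---
# When more than one split is requested, the function returns a tuple in the
# order the splits were asked for, mirroring the `splits` list above. The call
# below is illustrative only and assumes the default directory and filenames.
#
#     train, dev = wmt_dataset(train=True, dev=True)
#     print(train[0]['en'], train[0]['de'])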
| 45.636364
| 126
| 0.65762
|
1d964101cb10bea0a1424a5c53e294517d3b77bc
| 155
|
py
|
Python
|
parser/fase2/team12/src/ENTORNO/Simbolo.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team12/src/ENTORNO/Simbolo.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team12/src/ENTORNO/Simbolo.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
class Symbol():
def __init__(self, id, data_type, valor):
self.id = id
self.data_type = data_type
self.valor = valor
| 19.375
| 45
| 0.554839
|
6aa38d7c7506a6a74034a099a7fff6b3a11a5c15
| 14,846
|
py
|
Python
|
microsoft/testsuites/performance/common.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
microsoft/testsuites/performance/common.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
microsoft/testsuites/performance/common.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import inspect
import pathlib
from typing import Any, Dict, List, Optional, Union, cast
from lisa import Node, RemoteNode, notifier, run_in_parallel
from lisa.environment import Environment
from lisa.messages import (
DiskPerformanceMessage,
DiskSetupType,
DiskType,
NetworkLatencyPerformanceMessage,
NetworkTCPPerformanceMessage,
NetworkUDPPerformanceMessage,
)
from lisa.schema import NetworkDataPath
from lisa.tools import (
FIOMODES,
Fdisk,
Fio,
FIOResult,
Iperf3,
Kill,
Lagscope,
Lscpu,
Mdadm,
Netperf,
Ntttcp,
Sar,
Ssh,
)
from lisa.tools.ntttcp import NTTTCP_TCP_CONCURRENCY, NTTTCP_UDP_CONCURRENCY
from lisa.util.process import ExecutableResult, Process
def perf_disk(
node: Node,
start_iodepth: int,
max_iodepth: int,
filename: str,
core_count: int,
disk_count: int,
disk_setup_type: DiskSetupType,
disk_type: DiskType,
environment: Environment,
test_name: str = "",
num_jobs: Optional[List[int]] = None,
block_size: int = 4,
time: int = 120,
size_gb: int = 0,
numjob: int = 0,
overwrite: bool = False,
cwd: Optional[pathlib.PurePath] = None,
) -> None:
fio_result_list: List[FIOResult] = []
fio = node.tools[Fio]
numjobiterator = 0
for mode in FIOMODES:
iodepth = start_iodepth
numjobindex = 0
while iodepth <= max_iodepth:
if num_jobs:
numjob = num_jobs[numjobindex]
fio_result = fio.launch(
name=f"iteration{numjobiterator}",
filename=filename,
mode=mode.name,
time=time,
size_gb=size_gb,
block_size=f"{block_size}K",
iodepth=iodepth,
overwrite=overwrite,
numjob=numjob,
cwd=cwd,
)
fio_result_list.append(fio_result)
iodepth = iodepth * 2
numjobindex += 1
numjobiterator += 1
other_fields: Dict[str, Any] = {}
other_fields["core_count"] = core_count
other_fields["disk_count"] = disk_count
other_fields["block_size"] = block_size
other_fields["disk_setup_type"] = disk_setup_type
other_fields["disk_type"] = disk_type
if not test_name:
test_name = inspect.stack()[1][3]
fio_messages: List[DiskPerformanceMessage] = fio.create_performance_messages(
fio_result_list,
test_name=test_name,
environment=environment,
other_fields=other_fields,
)
for fio_message in fio_messages:
notifier.notify(fio_message)
def get_nic_datapath(node: Node) -> str:
data_path: str = ""
assert (
node.capability.network_interface
and node.capability.network_interface.data_path
)
if isinstance(node.capability.network_interface.data_path, NetworkDataPath):
data_path = node.capability.network_interface.data_path.value
return data_path
def cleanup_process(environment: Environment, process_name: str) -> None:
for node in environment.nodes.list():
kill = node.tools[Kill]
kill.by_name(process_name)
def reset_partitions(
node: Node,
disk_names: List[str],
) -> List[str]:
fdisk = node.tools[Fdisk]
partition_disks: List[str] = []
for data_disk in disk_names:
fdisk.delete_partitions(data_disk)
partition_disks.append(fdisk.make_partition(data_disk, format=False))
return partition_disks
def stop_raid(node: Node) -> None:
mdadm = node.tools[Mdadm]
mdadm.stop_raid()
def reset_raid(node: Node, disk_list: List[str]) -> None:
stop_raid(node)
mdadm = node.tools[Mdadm]
mdadm.create_raid(disk_list)
def perf_tcp_latency(
environment: Environment,
) -> List[NetworkLatencyPerformanceMessage]:
client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])
client_lagscope = client.tools[Lagscope]
server_lagscope = server.tools[Lagscope]
try:
for lagscope in [client_lagscope, server_lagscope]:
lagscope.set_busy_poll()
server_lagscope.run_as_server(ip=server.internal_address)
latency_perf_messages = client_lagscope.create_latency_peformance_messages(
client_lagscope.run_as_client(server_ip=server.internal_address),
environment,
inspect.stack()[1][3],
)
finally:
for lagscope in [client_lagscope, server_lagscope]:
lagscope.restore_busy_poll()
return latency_perf_messages
def perf_tcp_pps(environment: Environment, test_type: str) -> None:
client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])
client_netperf = client.tools[Netperf]
server_netperf = server.tools[Netperf]
cpu = client.tools[Lscpu]
core_count = cpu.get_core_count()
if "maxpps" == test_type:
ssh = client.tools[Ssh]
ssh.set_max_session()
client.close()
ports = range(30000, 30032)
else:
ports = range(30000, 30001)
for port in ports:
server_netperf.run_as_server(port)
for port in ports:
client_netperf.run_as_client_async(server.internal_address, core_count, port)
client_sar = client.tools[Sar]
server_sar = server.tools[Sar]
server_sar.get_statistics_async()
result = client_sar.get_statistics()
pps_message = client_sar.create_pps_peformance_messages(
result, inspect.stack()[1][3], environment, test_type
)
notifier.notify(pps_message)
def perf_ntttcp(
environment: Environment,
udp_mode: bool = False,
connections: Optional[List[int]] = None,
test_case_name: str = "",
) -> List[Union[NetworkTCPPerformanceMessage, NetworkUDPPerformanceMessage]]:
client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])
if not test_case_name:
# if it's not filled, assume it's called by case directly.
test_case_name = inspect.stack()[1][3]
if connections is None:
if udp_mode:
connections = NTTTCP_UDP_CONCURRENCY
else:
connections = NTTTCP_TCP_CONCURRENCY
client_ntttcp, server_ntttcp = run_in_parallel(
[lambda: client.tools[Ntttcp], lambda: server.tools[Ntttcp]]
)
try:
client_lagscope, server_lagscope = run_in_parallel(
[lambda: client.tools[Lagscope], lambda: server.tools[Lagscope]]
)
for ntttcp in [client_ntttcp, server_ntttcp]:
ntttcp.setup_system(udp_mode)
for lagscope in [client_lagscope, server_lagscope]:
lagscope.set_busy_poll()
data_path = get_nic_datapath(client)
server_nic_name = server.nics.default_nic
client_nic_name = client.nics.default_nic
dev_differentiator = "Hypervisor callback interrupts"
if NetworkDataPath.Sriov.value == data_path:
server_nic_name = server.nics.get_lower_nics()[0]
client_nic_name = client.nics.get_lower_nics()[0]
dev_differentiator = "mlx"
server_lagscope.run_as_server(ip=server.internal_address)
max_server_threads = 64
perf_ntttcp_message_list: List[
Union[NetworkTCPPerformanceMessage, NetworkUDPPerformanceMessage]
] = []
for test_thread in connections:
if test_thread < max_server_threads:
num_threads_p = test_thread
num_threads_n = 1
else:
num_threads_p = max_server_threads
num_threads_n = int(test_thread / num_threads_p)
if 1 == num_threads_n and 1 == num_threads_p:
buffer_size = int(1048576 / 1024)
else:
buffer_size = int(65536 / 1024)
if udp_mode:
buffer_size = int(1024 / 1024)
server_result = server_ntttcp.run_as_server_async(
server_nic_name,
ports_count=num_threads_p,
buffer_size=buffer_size,
dev_differentiator=dev_differentiator,
udp_mode=udp_mode,
)
client_lagscope_process = client_lagscope.run_as_client_async(
server_ip=server.internal_address,
ping_count=0,
run_time_seconds=10,
print_histogram=False,
print_percentile=False,
histogram_1st_interval_start_value=0,
length_of_histogram_intervals=0,
count_of_histogram_intervals=0,
dump_csv=False,
)
client_ntttcp_result = client_ntttcp.run_as_client(
client_nic_name,
server.internal_address,
buffer_size=buffer_size,
threads_count=num_threads_n,
ports_count=num_threads_p,
dev_differentiator=dev_differentiator,
udp_mode=udp_mode,
)
server_ntttcp_result = server_result.wait_result()
server_result_temp = server_ntttcp.create_ntttcp_result(
server_ntttcp_result
)
client_result_temp = client_ntttcp.create_ntttcp_result(
client_ntttcp_result, role="client"
)
client_sar_result = client_lagscope_process.wait_result()
client_average_latency = client_lagscope.get_average(client_sar_result)
if udp_mode:
ntttcp_message: Union[
NetworkTCPPerformanceMessage, NetworkUDPPerformanceMessage
] = client_ntttcp.create_ntttcp_udp_performance_message(
server_result_temp,
client_result_temp,
str(test_thread),
buffer_size,
environment,
test_case_name,
)
else:
ntttcp_message = client_ntttcp.create_ntttcp_tcp_performance_message(
server_result_temp,
client_result_temp,
client_average_latency,
str(test_thread),
buffer_size,
environment,
test_case_name,
)
notifier.notify(ntttcp_message)
perf_ntttcp_message_list.append(ntttcp_message)
finally:
for ntttcp in [client_ntttcp, server_ntttcp]:
ntttcp.restore_system(udp_mode)
for lagscope in [client_lagscope, server_lagscope]:
lagscope.restore_busy_poll()
return perf_ntttcp_message_list
def perf_iperf(
environment: Environment,
connections: List[int],
buffer_length_list: List[int],
udp_mode: bool = False,
) -> None:
client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])
client_iperf3 = client.tools[Iperf3]
server_iperf3 = server.tools[Iperf3]
test_case_name = inspect.stack()[1][3]
iperf3_messages_list: List[Any] = []
if udp_mode:
for node in [client, server]:
ssh = node.tools[Ssh]
ssh.set_max_session()
node.close()
for buffer_length in buffer_length_list:
for connection in connections:
server_iperf3_process_list: List[Process] = []
client_iperf3_process_list: List[Process] = []
client_result_list: List[ExecutableResult] = []
server_result_list: List[ExecutableResult] = []
if connection < 64:
num_threads_p = connection
num_threads_n = 1
else:
num_threads_p = 64
num_threads_n = int(connection / 64)
server_start_port = 750
current_server_port = server_start_port
current_server_iperf_instances = 0
while current_server_iperf_instances < num_threads_n:
current_server_iperf_instances += 1
server_iperf3_process_list.append(
server_iperf3.run_as_server_async(
current_server_port, "g", 10, True, True, False
)
)
current_server_port += 1
client_start_port = 750
current_client_port = client_start_port
current_client_iperf_instances = 0
while current_client_iperf_instances < num_threads_n:
current_client_iperf_instances += 1
client_iperf3_process_list.append(
client_iperf3.run_as_client_async(
server.internal_address,
output_json=True,
report_periodic=1,
report_unit="g",
port=current_client_port,
buffer_length=buffer_length,
run_time_seconds=10,
parallel_number=num_threads_p,
ip_version="4",
udp_mode=udp_mode,
)
)
current_client_port += 1
for client_iperf3_process in client_iperf3_process_list:
client_result_list.append(client_iperf3_process.wait_result())
for server_iperf3_process in server_iperf3_process_list:
server_result_list.append(server_iperf3_process.wait_result())
if udp_mode:
iperf3_messages_list.append(
client_iperf3.create_iperf_udp_performance_message(
server_result_list,
client_result_list,
buffer_length,
connection,
environment,
test_case_name,
)
)
else:
iperf3_messages_list.append(
client_iperf3.create_iperf_tcp_performance_message(
server_result_list[0].stdout,
client_result_list[0].stdout,
buffer_length,
environment,
test_case_name,
)
)
for iperf3_message in iperf3_messages_list:
notifier.notify(iperf3_message)
def calculate_middle_average(values: List[Union[float, int]]) -> float:
"""
This method is used to calculate an average indicator. It discards the max
and min values, and then takes the average of the rest.
"""
total = sum(x for x in values) - min(values) - max(values)
# calculate average
return total / (len(values) - 2)
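# --- Added worked example (not part of the original module) ---
# For values = [1, 2, 3, 10] the min (1) and max (10) are discarded, so the
# result is (2 + 3) / 2 = 2.5, i.e. calculate_middle_average([1, 2, 3, 10]) == 2.5.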
| 36.121655
| 85
| 0.611815
|
10b30fb70ea1bd12e8539b813f96c10d3195f6d6
| 3,244
|
py
|
Python
|
tf-docker/django/webapp/translation_web/settings.py
|
xinmang/TensorFlow-test
|
5eafe60c2d6a5166583c6384473fe2560f6bdeea
|
[
"Apache-2.0"
] | 1
|
2018-06-29T07:16:55.000Z
|
2018-06-29T07:16:55.000Z
|
tf-docker/django/webapp/translation_web/settings.py
|
nciefeiniu/TensorFlow-test
|
e61db477899b36fdf22248db1968d016b9a9a421
|
[
"Apache-2.0"
] | null | null | null |
tf-docker/django/webapp/translation_web/settings.py
|
nciefeiniu/TensorFlow-test
|
e61db477899b36fdf22248db1968d016b9a9a421
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for translation_web project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r#^1d(*6r2*gr*-0-pvtboof6=c@1r@op61ld&ohqfkto5w4ov'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tranweb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'translation_web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'translation_web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
| 26.16129
| 91
| 0.694205
|
2577d8bfce8a3225c50699252016a2b415f9a954
| 869
|
py
|
Python
|
setup.py
|
ngazagna/safe_grid_search
|
bb06740a7b3473578b474b118694de2e24150246
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ngazagna/safe_grid_search
|
bb06740a7b3473578b474b118694de2e24150246
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ngazagna/safe_grid_search
|
bb06740a7b3473578b474b118694de2e24150246
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from distutils.core import setup
from Cython.Build import cythonize
DISTNAME = 'safegridoptim'
DESCRIPTION = 'Coordinate descent solver for elastic net and l1 logistic'
LONG_DESCRIPTION = open('README.md').read()
MAINTAINER = 'Eugene Ndiaye'
MAINTAINER_EMAIL = 'ndiayeeugene@gmail.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/EugeneNdiaye/safe_grid_search'
URL = 'https://github.com/EugeneNdiaye/safe_grid_search'
VERSION = None
setup(name='safegridoptim',
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
packages=['safegridoptim'],
ext_modules=cythonize("safegridoptim/*.pyx"),
include_dirs=[np.get_include()]
)
| 29.965517
| 73
| 0.735328
|
6d51b85a91f8ba204c604eb1ad5333fa51eb0fa8
| 235
|
py
|
Python
|
pacman-arch/test/pacman/tests/sync104.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23
|
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync104.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11
|
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync104.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "-Suu"
sp = pmpkg("dummy", "0.9-1")
lp = pmpkg("dummy", "1.0-1")
self.addpkg2db("sync", sp)
self.addpkg2db("local", lp)
self.args = "-Suu"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=dummy|0.9-1")
| 18.076923
| 39
| 0.655319
|
2832e76a75b14d65485b551b477ee8fec1daeb5c
| 1,807
|
py
|
Python
|
tests/test.py
|
ChristofDubs/DoubleBallBalancer
|
6869220ed9f8c5234b00fc653bf05bb7e0bf6737
|
[
"Apache-2.0"
] | 3
|
2018-04-08T13:32:26.000Z
|
2018-06-29T16:15:50.000Z
|
tests/test.py
|
ChristofDubs/DoubleBallBalancer
|
6869220ed9f8c5234b00fc653bf05bb7e0bf6737
|
[
"Apache-2.0"
] | null | null | null |
tests/test.py
|
ChristofDubs/DoubleBallBalancer
|
6869220ed9f8c5234b00fc653bf05bb7e0bf6737
|
[
"Apache-2.0"
] | 1
|
2020-07-18T03:47:41.000Z
|
2020-07-18T03:47:41.000Z
|
import unittest
import numpy as np
import context
from model_2d.controller_2 import Controller as Controller2
from model_2d.dynamics_2 import StateIndex as StateIndex2
from model_2d.controller_3 import Controller as Controller3
from model_2d.dynamics_3 import StateIndex as StateIndex3
from model_2d.param import getDefaultParam
delta = 1e-6
class TestController(unittest.TestCase):
controller = Controller2(getDefaultParam(2))
K = np.array([0.447213596e+00, 1.03556079e+01, -4.73012271e+01,
3.683281576e+00, 6.05877477e-01, -3.53469304e+01])
def test_gains(self):
# state gain
for i in range(StateIndex2.NUM_STATES):
x0 = np.zeros(StateIndex2.NUM_STATES)
x0[i] = delta
u = self.controller.compute_ctrl_input(x0, 0)
self.assertAlmostEqual(u / delta, -self.K[i])
# input gain
x0 = np.zeros(StateIndex2.NUM_STATES)
u = self.controller.compute_ctrl_input(x0, delta)
self.assertAlmostEqual(u / delta, self.K[0])
class TestController3(unittest.TestCase):
controller = Controller3(getDefaultParam(3))
K = np.array([-0.447213596, 26.62771567, -199.45731763, -327.80147739, -
1.6832815768, 9.73717718, -133.08516459, -137.62380736])
def test_gains(self):
# state gain
for i in range(StateIndex3.NUM_STATES):
x0 = np.zeros(StateIndex3.NUM_STATES)
x0[i] = delta
u = self.controller.compute_ctrl_input(x0, 0)
self.assertAlmostEqual(u / delta, -self.K[i])
# input gain
x0 = np.zeros(StateIndex3.NUM_STATES)
u = self.controller.compute_ctrl_input(x0, delta)
self.assertAlmostEqual(u / delta, self.K[0])
if __name__ == '__main__':
unittest.main()
| 30.116667
| 76
| 0.663531
|
048c97aa6e972722ee96c6f677210e8f09287695
| 35,675
|
py
|
Python
|
pyqt_app/taskwidgetui.py
|
fffoobibi/pyqt_novel
|
08141735ce7937f817f99686bfc239259604558c
|
[
"MIT"
] | null | null | null |
pyqt_app/taskwidgetui.py
|
fffoobibi/pyqt_novel
|
08141735ce7937f817f99686bfc239259604558c
|
[
"MIT"
] | null | null | null |
pyqt_app/taskwidgetui.py
|
fffoobibi/pyqt_novel
|
08141735ce7937f817f99686bfc239259604558c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\task_widget.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(760, 519)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.stackedWidget = QtWidgets.QStackedWidget(Form)
self.stackedWidget.setObjectName("stackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.verticalLayout = QtWidgets.QVBoxLayout(self.page)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_6 = QtWidgets.QFrame(self.page)
self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_6.setObjectName("frame_6")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_6)
self.horizontalLayout_4.setContentsMargins(5, 0, 0, 0)
self.horizontalLayout_4.setSpacing(2)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.pushButton = HoverButton(self.frame_6)
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton.setStyleSheet("QPushButton{image:url(:/ico/close_512px_1175341_easyicon.net.png);border:none;width:23px;height:23px}\n"
"QPushButton:hover{image: url(:/ico/close_128px_1175741_easyicon.net.ico);width:23px;height:23px}\n"
"")
self.pushButton.setText("")
self.pushButton.setIconSize(QtCore.QSize(22, 22))
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_4.addWidget(self.pushButton)
self.info_title = QtWidgets.QLabel(self.frame_6)
self.info_title.setStyleSheet("QLabel{font: bold 14pt}")
self.info_title.setAlignment(QtCore.Qt.AlignCenter)
self.info_title.setObjectName("info_title")
self.horizontalLayout_4.addWidget(self.info_title)
spacerItem = QtWidgets.QSpacerItem(5000, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem)
self.checkBox = QtWidgets.QCheckBox(self.frame_6)
self.checkBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.checkBox.setStyleSheet("QCheckBox{\n"
" spacing: 2px;\n"
"}\n"
"\n"
"/*QCheckBox::indicator{\n"
" width: 15px;\n"
" height:15px;\n"
"}\n"
"\n"
"QCheckBox::indicator:unchecked{\n"
" image:url(:/ico/unchecked_checkbox_64px_1170301_easyicon.net.png);\n"
"}\n"
"\n"
"QCheckBox::indicator:checked{\n"
" image:url(:/ico/checked_checkbox_64px_1101338_easyicon.net.png);\n"
"}*/\n"
"\n"
"")
self.checkBox.setText("")
self.checkBox.setTristate(False)
self.checkBox.setObjectName("checkBox")
self.horizontalLayout_4.addWidget(self.checkBox)
self.sub_button = QtWidgets.QPushButton(self.frame_6)
self.sub_button.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:3px;background:lightgray}")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/ico/check_128px_1138992_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
icon.addPixmap(QtGui.QPixmap(":/ico/close_128px_1138994_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.sub_button.setIcon(icon)
self.sub_button.setIconSize(QtCore.QSize(23, 27))
self.sub_button.setCheckable(True)
self.sub_button.setChecked(False)
self.sub_button.setObjectName("sub_button")
self.horizontalLayout_4.addWidget(self.sub_button)
self.frame_2 = QtWidgets.QFrame(self.frame_6)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frame_2)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.pushButton_2 = QtWidgets.QPushButton(self.frame_2)
self.pushButton_2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton_2.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:3px;background:lightgray}")
self.pushButton_2.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/ico/angle-left.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_2.setIcon(icon1)
self.pushButton_2.setIconSize(QtCore.QSize(23, 27))
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_5.addWidget(self.pushButton_2)
self.pushButton_3 = QtWidgets.QPushButton(self.frame_2)
self.pushButton_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton_3.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:3px;background:lightgray}")
self.pushButton_3.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/ico/angle-right.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_3.setIcon(icon2)
self.pushButton_3.setIconSize(QtCore.QSize(23, 27))
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout_5.addWidget(self.pushButton_3)
self.horizontalLayout_4.addWidget(self.frame_2)
self.horizontalLayout_4.setStretch(1, 10)
self.verticalLayout.addWidget(self.frame_6)
self.frame_4 = QtWidgets.QFrame(self.page)
self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_4.setObjectName("frame_4")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.frame_4)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.frame_13 = QtWidgets.QFrame(self.frame_4)
self.frame_13.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_13.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_13.setObjectName("frame_13")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_13)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.info_img = TaskWidgetSubmitLabel(self.frame_13)
self.info_img.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.info_img.setObjectName("info_img")
self.verticalLayout_5.addWidget(self.info_img)
spacerItem1 = QtWidgets.QSpacerItem(20, 129, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem1)
self.horizontalLayout_9.addWidget(self.frame_13)
self.info_introduced = QtWidgets.QTextEdit(self.frame_4)
font = QtGui.QFont()
font.setFamily("微软雅黑 Light")
font.setPointSize(11)
self.info_introduced.setFont(font)
self.info_introduced.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.info_introduced.setStyleSheet("background-color:rgba(0,0,0,0)")
self.info_introduced.setLocale(QtCore.QLocale(QtCore.QLocale.Chinese, QtCore.QLocale.China))
self.info_introduced.setFrameShape(QtWidgets.QFrame.NoFrame)
self.info_introduced.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.info_introduced.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.info_introduced.setObjectName("info_introduced")
self.horizontalLayout_9.addWidget(self.info_introduced)
self.verticalLayout.addWidget(self.frame_4)
self.line_2 = QtWidgets.QFrame(self.page)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.frame_5 = QtWidgets.QFrame(self.page)
self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_5.setObjectName("frame_5")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.frame_5)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.frame_8 = QtWidgets.QFrame(self.frame_5)
self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_8.setObjectName("frame_8")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.frame_8)
self.horizontalLayout_7.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.pushButton_6 = QtWidgets.QPushButton(self.frame_8)
self.pushButton_6.setStyleSheet("font: 11.5pt;\n"
"text-decoration: underline;\n"
"border:none;\n"
"font-weight:bold")
self.pushButton_6.setObjectName("pushButton_6")
self.horizontalLayout_7.addWidget(self.pushButton_6)
spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem2)
self.frame_3 = QtWidgets.QFrame(self.frame_8)
self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_3)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.pushButton_5 = QtWidgets.QPushButton(self.frame_3)
self.pushButton_5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton_5.setStyleSheet("QPushButton{font: 11.5pt;\n"
"text-decoration: underline;\n"
"font-weight:bold;\n"
"border:none}\n"
"QPushButton:hover{\n"
"color: rgb(200, 105, 255)}")
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout_3.addWidget(self.pushButton_5)
self.label_2 = QtWidgets.QLabel(self.frame_3)
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.horizontalLayout_3.addWidget(self.label_2)
spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.horizontalLayout_7.addWidget(self.frame_3)
self.horizontalLayout_6.addWidget(self.frame_8)
self.fresh_button = GifButton(self.frame_5)
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(9)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.fresh_button.setFont(font)
self.fresh_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.fresh_button.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:checked{border:1px solid lightgray;\n"
"border-radius:2px;background-color: gray}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:2px;background-color:lightgray}")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/ico/refresh_173px_1188146_easyicon.net.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.fresh_button.setIcon(icon3)
self.fresh_button.setObjectName("fresh_button")
self.horizontalLayout_6.addWidget(self.fresh_button)
self.read_button = QtWidgets.QPushButton(self.frame_5)
font = QtGui.QFont()
font.setFamily("黑体")
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.read_button.setFont(font)
self.read_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.read_button.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:2px;background-color:lightgray}")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/ico/Metro_Text_Document_128px_1097814_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.read_button.setIcon(icon4)
self.read_button.setIconSize(QtCore.QSize(23, 23))
self.read_button.setObjectName("read_button")
self.horizontalLayout_6.addWidget(self.read_button)
spacerItem4 = QtWidgets.QSpacerItem(50, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem4)
self.msg_button = QtWidgets.QPushButton(self.frame_5)
self.msg_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.msg_button.setStyleSheet("QPushButton{border:none}\n"
"")
self.msg_button.setText("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/ico/EditDocument_1194764_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.msg_button.setIcon(icon5)
self.msg_button.setIconSize(QtCore.QSize(28, 28))
self.msg_button.setObjectName("msg_button")
self.horizontalLayout_6.addWidget(self.msg_button)
self.info_down = GifButton(self.frame_5)
self.info_down.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.info_down.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:3px;background:lightgray}")
self.info_down.setText("")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/ico/download_1229240_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.info_down.setIcon(icon6)
self.info_down.setIconSize(QtCore.QSize(28, 28))
self.info_down.setObjectName("info_down")
self.horizontalLayout_6.addWidget(self.info_down)
self.pushButton_4 = QtWidgets.QPushButton(self.frame_5)
self.pushButton_4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton_4.setStyleSheet("QPushButton{border:none}\n"
"QPushButton:hover{border:1px solid lightgray;border-radius:3px;background:lightgray}")
self.pushButton_4.setText("")
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/ico/refresh_1233084_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_4.setIcon(icon7)
self.pushButton_4.setIconSize(QtCore.QSize(28, 28))
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout_6.addWidget(self.pushButton_4)
self.verticalLayout.addWidget(self.frame_5)
self.line = QtWidgets.QFrame(self.page)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.frame_7 = QtWidgets.QFrame(self.page)
self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_7.setObjectName("frame_7")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.frame_7)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.frame_9 = QtWidgets.QFrame(self.frame_7)
self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_9.setObjectName("frame_9")
self.gridLayout = QtWidgets.QGridLayout(self.frame_9)
self.gridLayout.setVerticalSpacing(10)
self.gridLayout.setObjectName("gridLayout")
self.label_5 = QtWidgets.QLabel(self.frame_9)
font = QtGui.QFont()
font.setFamily("SimSun-ExtB")
self.label_5.setFont(font)
self.label_5.setStyleSheet("")
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 0, 0, 1, 1)
self.label_13 = TaskInfoLabel(self.frame_9)
self.label_13.setStyleSheet("color: black;\n"
"font-weight:bold;\n"
"")
self.label_13.setOpenExternalLinks(False)
self.label_13.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 0, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(self.frame_9)
self.label_6.setStyleSheet("")
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1)
self.label_14 = TaskInfoLabel(self.frame_9)
self.label_14.setStyleSheet("QLabel{color: gray;font-weight:bold}\n"
"QLabel:hover{color: blue; font-weight:bold}")
self.label_14.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 1, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.frame_9)
self.label_7.setStyleSheet("")
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 2, 0, 1, 1)
self.label_15 = TaskInfoLabel(self.frame_9)
self.label_15.setStyleSheet("font-family:宋体")
self.label_15.setOpenExternalLinks(False)
self.label_15.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.label_15.setObjectName("label_15")
self.gridLayout.addWidget(self.label_15, 2, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.frame_9)
self.label_12.setStyleSheet("")
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 3, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.frame_9)
self.label_16.setObjectName("label_16")
self.gridLayout.addWidget(self.label_16, 3, 1, 1, 1)
self.gridLayout.setColumnStretch(1, 20)
self.horizontalLayout_8.addWidget(self.frame_9)
self.frame_10 = QtWidgets.QFrame(self.frame_7)
self.frame_10.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_10.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_10.setObjectName("frame_10")
self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_10)
self.gridLayout_2.setVerticalSpacing(10)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_8 = QtWidgets.QLabel(self.frame_10)
self.label_8.setStyleSheet("")
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, 0, 0, 1, 1)
self.label_17 = QtWidgets.QLabel(self.frame_10)
self.label_17.setStyleSheet("")
self.label_17.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.label_17.setObjectName("label_17")
self.gridLayout_2.addWidget(self.label_17, 0, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.frame_10)
self.label_9.setStyleSheet("")
self.label_9.setObjectName("label_9")
self.gridLayout_2.addWidget(self.label_9, 1, 0, 1, 1)
self.label_18 = QtWidgets.QLabel(self.frame_10)
self.label_18.setStyleSheet("")
self.label_18.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.label_18.setObjectName("label_18")
self.gridLayout_2.addWidget(self.label_18, 1, 1, 1, 1)
self.label_10 = QtWidgets.QLabel(self.frame_10)
self.label_10.setStyleSheet("")
self.label_10.setObjectName("label_10")
self.gridLayout_2.addWidget(self.label_10, 2, 0, 1, 1)
self.label_19 = QtWidgets.QLabel(self.frame_10)
self.label_19.setStyleSheet("")
self.label_19.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.label_19.setObjectName("label_19")
self.gridLayout_2.addWidget(self.label_19, 2, 1, 1, 1)
self.label_11 = QtWidgets.QLabel(self.frame_10)
self.label_11.setStyleSheet("")
self.label_11.setObjectName("label_11")
self.gridLayout_2.addWidget(self.label_11, 3, 0, 1, 1)
self.label_20 = QtWidgets.QLabel(self.frame_10)
self.label_20.setStyleSheet("")
self.label_20.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.label_20.setObjectName("label_20")
self.gridLayout_2.addWidget(self.label_20, 3, 1, 1, 1)
self.gridLayout_2.setColumnStretch(1, 20)
self.horizontalLayout_8.addWidget(self.frame_10)
self.verticalLayout.addWidget(self.frame_7)
self.frame = QtWidgets.QFrame(self.page)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.progressBar = QtWidgets.QProgressBar(self.frame)
self.progressBar.setStyleSheet("QProgressBar {\n"
" text-align: center; /*进度值居中*/;\n"
" color: rgb(0, 0, 0);\n"
" border:1px solid gray;\n"
" border-radius:5px;\n"
" font-family:华文细黑;\n"
"}\n"
"QProgressBar::chunk {\n"
" background-color: rgb(6, 168, 255);\n"
" width:10px;\n"
"}\n"
"")
self.progressBar.setProperty("value", 0)
self.progressBar.setOrientation(QtCore.Qt.Horizontal)
self.progressBar.setInvertedAppearance(False)
self.progressBar.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout_2.addWidget(self.progressBar)
self.label = QtWidgets.QLabel(self.frame)
self.label.setMinimumSize(QtCore.QSize(50, 0))
self.label.setStyleSheet("font-family:华文细黑;")
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.horizontalLayout_2.setStretch(0, 40)
self.horizontalLayout_2.setStretch(1, 1)
self.verticalLayout.addWidget(self.frame)
self.stackedWidget.addWidget(self.page)
self.page_2 = QtWidgets.QWidget()
self.page_2.setStyleSheet("")
self.page_2.setObjectName("page_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.page_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.frame_12 = QtWidgets.QFrame(self.page_2)
self.frame_12.setStyleSheet("")
self.frame_12.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_12.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_12.setObjectName("frame_12")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_12)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.readbar_widget = ReadBar(self.frame_12)
self.readbar_widget.setStyleSheet("")
self.readbar_widget.setObjectName("readbar_widget")
self.verticalLayout_3.addWidget(self.readbar_widget)
self.stackedWidget_2 = QtWidgets.QStackedWidget(self.frame_12)
self.stackedWidget_2.setObjectName("stackedWidget_2")
self.page_3 = QtWidgets.QWidget()
self.page_3.setObjectName("page_3")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.page_3)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.stackedWidget_3 = QtWidgets.QStackedWidget(self.page_3)
self.stackedWidget_3.setObjectName("stackedWidget_3")
self.page_5 = QtWidgets.QWidget()
self.page_5.setObjectName("page_5")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.page_5)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.textBrowser = TaskReadBrowser(self.page_5)
self.textBrowser.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.textBrowser.setStyleSheet("")
self.textBrowser.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.textBrowser.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout_7.addWidget(self.textBrowser)
self.stackedWidget_3.addWidget(self.page_5)
self.page_6 = QtWidgets.QWidget()
self.page_6.setObjectName("page_6")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.page_6)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.auto_split = AutoSplitContentTaskReadWidget(self.page_6)
self.auto_split.setObjectName("auto_split")
self.verticalLayout_6.addWidget(self.auto_split)
self.stackedWidget_3.addWidget(self.page_6)
self.verticalLayout_4.addWidget(self.stackedWidget_3)
self.frame_11 = QtWidgets.QFrame(self.page_3)
font = QtGui.QFont()
font.setFamily("等线")
font.setPointSize(12)
self.frame_11.setFont(font)
self.frame_11.setStyleSheet("")
self.frame_11.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_11.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_11.setObjectName("frame_11")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.frame_11)
self.horizontalLayout_10.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.chapter_label = QtWidgets.QLabel(self.frame_11)
self.chapter_label.setObjectName("chapter_label")
self.horizontalLayout_10.addWidget(self.chapter_label)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem5)
self.prev_button = ColorButton(self.frame_11)
font = QtGui.QFont()
font.setFamily("等线")
font.setPointSize(10)
self.prev_button.setFont(font)
self.prev_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.prev_button.setText("")
self.prev_button.setCheckable(True)
self.prev_button.setObjectName("prev_button")
self.horizontalLayout_10.addWidget(self.prev_button)
self.next_button = ColorButton(self.frame_11)
font = QtGui.QFont()
font.setFamily("等线")
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.next_button.setFont(font)
self.next_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.next_button.setText("")
self.next_button.setCheckable(True)
self.next_button.setObjectName("next_button")
self.horizontalLayout_10.addWidget(self.next_button)
self.exit_button = ColorButton(self.frame_11)
font = QtGui.QFont()
font.setFamily("等线")
font.setPointSize(10)
self.exit_button.setFont(font)
self.exit_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.exit_button.setText("")
self.exit_button.setCheckable(True)
self.exit_button.setObjectName("exit_button")
self.horizontalLayout_10.addWidget(self.exit_button)
self.sizedown_button = QtWidgets.QPushButton(self.frame_11)
self.sizedown_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.sizedown_button.setText("")
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/ico/font_size_down_1170460_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.sizedown_button.setIcon(icon8)
self.sizedown_button.setCheckable(True)
self.sizedown_button.setObjectName("sizedown_button")
self.horizontalLayout_10.addWidget(self.sizedown_button)
self.sizeup_button = QtWidgets.QPushButton(self.frame_11)
self.sizeup_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.sizeup_button.setText("")
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/ico/font_size_up_1170461_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.sizeup_button.setIcon(icon9)
self.sizeup_button.setCheckable(True)
self.sizeup_button.setObjectName("sizeup_button")
self.horizontalLayout_10.addWidget(self.sizeup_button)
self.other_button = ColorButton(self.frame_11)
self.other_button.setText("")
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(":/hl/其他.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.other_button.setIcon(icon10)
self.other_button.setObjectName("other_button")
self.horizontalLayout_10.addWidget(self.other_button)
self.markup_button = ColorButton(self.frame_11)
self.markup_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.markup_button.setText("")
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(":/hl/bookmark_1223025_easyicon.net.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.markup_button.setIcon(icon11)
self.markup_button.setCheckable(True)
self.markup_button.setObjectName("markup_button")
self.horizontalLayout_10.addWidget(self.markup_button)
self.skin_button = ColorButton(self.frame_11)
self.skin_button.setText("")
self.skin_button.setObjectName("skin_button")
self.horizontalLayout_10.addWidget(self.skin_button)
self.chapters_button_2 = ColorButton(self.frame_11)
self.chapters_button_2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.chapters_button_2.setText("")
self.chapters_button_2.setCheckable(True)
self.chapters_button_2.setObjectName("chapters_button_2")
self.horizontalLayout_10.addWidget(self.chapters_button_2)
self.more_button = ColorButton(self.frame_11)
self.more_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.more_button.setText("")
self.more_button.setCheckable(True)
self.more_button.setObjectName("more_button")
self.horizontalLayout_10.addWidget(self.more_button)
self.verticalLayout_4.addWidget(self.frame_11)
self.verticalLayout_4.setStretch(0, 20)
self.stackedWidget_2.addWidget(self.page_3)
self.verticalLayout_3.addWidget(self.stackedWidget_2)
self.verticalLayout_2.addWidget(self.frame_12)
self.stackedWidget.addWidget(self.page_2)
self.horizontalLayout.addWidget(self.stackedWidget)
self.retranslateUi(Form)
self.stackedWidget.setCurrentIndex(0)
self.stackedWidget_2.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.info_title.setText(_translate("Form", "小说名"))
self.sub_button.setText(_translate("Form", "已订阅"))
self.pushButton_2.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">上一个(Z)</span></p></body></html>"))
self.pushButton_3.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">下一个(C)</span></p></body></html>"))
self.info_img.setText(_translate("Form", "照片"))
self.pushButton_6.setText(_translate("Form", "任务详情"))
self.pushButton_5.setText(_translate("Form", "按章节下载"))
self.label_2.setText(_translate("Form", "几把"))
self.fresh_button.setText(_translate("Form", "获取最新章节"))
self.read_button.setText(_translate("Form", "阅读"))
self.msg_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">下载日志</span></p></body></html>"))
self.info_down.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">开始下载</span></p></body></html>"))
self.pushButton_4.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">重新下载</span></p></body></html>"))
self.label_5.setText(_translate("Form", "文件名称"))
self.label_13.setText(_translate("Form", "TextLabel"))
self.label_6.setText(_translate("Form", "存储目录"))
self.label_14.setText(_translate("Form", "TextLabel"))
self.label_7.setText(_translate("Form", "下载地址"))
self.label_15.setText(_translate("Form", "https://www.baidu.com"))
self.label_12.setText(_translate("Form", "平均速度"))
self.label_16.setText(_translate("Form", "TextLabel"))
self.label_8.setText(_translate("Form", "任务状态"))
self.label_17.setText(_translate("Form", "TextLabel"))
self.label_9.setText(_translate("Form", "文件大小"))
self.label_18.setText(_translate("Form", "2012"))
self.label_10.setText(_translate("Form", "创建时间"))
self.label_19.setText(_translate("Form", "TextLabel"))
self.label_11.setText(_translate("Form", "完成时间"))
self.label_20.setText(_translate("Form", "TextLabel"))
self.progressBar.setFormat(_translate("Form", "%p%"))
self.label.setText(_translate("Form", "21.34k/s "))
self.chapter_label.setText(_translate("Form", "TextLabel"))
self.prev_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">上一章 (快捷键: 1)</span></p></body></html>"))
self.prev_button.setShortcut(_translate("Form", "1"))
self.next_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">下一章 (快捷键: 2)</span></p></body></html>"))
self.next_button.setShortcut(_translate("Form", "2"))
self.exit_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">退出阅读 (快捷键: Esc)</span></p></body></html>"))
self.exit_button.setShortcut(_translate("Form", "Esc"))
self.other_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">翻页方式</span></p></body></html>"))
self.markup_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">添加书签</span></p></body></html>"))
self.skin_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">阅读主题</span></p></body></html>"))
self.chapters_button_2.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">信息</span></p></body></html>"))
self.more_button.setToolTip(_translate("Form", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">设置</span></p></body></html>"))
from .customwidgets import AutoSplitContentTaskReadWidget, ColorButton, GifButton, HoverButton, ReadBar, TaskInfoLabel, TaskReadBrowser, TaskWidgetSubmitLabel
| 56.269716 | 164 | 0.70918 |
19d30a0e8303dd7f558a8039eef41a64b8affc5a | 7,342 | py | Python | catkin_tools/jobs/commands/cmake.py | iwanders/catkin_tools | 76bfe96cc18cdf4b4e88a1f764f73260f77843b5 | ["Apache-2.0"] | null | null | null | catkin_tools/jobs/commands/cmake.py | iwanders/catkin_tools | 76bfe96cc18cdf4b4e88a1f764f73260f77843b5 | ["Apache-2.0"] | null | null | null | catkin_tools/jobs/commands/cmake.py | iwanders/catkin_tools | 76bfe96cc18cdf4b4e88a1f764f73260f77843b5 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
from catkin_tools.execution.io import IOBufferProtocol
from catkin_tools.execution.events import ExecutionEvent
from catkin_tools.terminal_color import fmt
from catkin_tools.terminal_color import sanitize
from catkin_tools.utils import which
CMAKE_EXEC = which('cmake')
def split_to_last_line_break(data):
"""This splits a byte buffer into (head, tail) where head contains the
beginning of the buffer to the last line break (inclusive) and the tail
contains all bytes after that."""
last_break_index = 1 + data.rfind(b'\n')
return data[:last_break_index], data[last_break_index:]
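# Illustrative sketch of the split semantics above (not part of the original
# module): complete lines go to the head, while an incomplete trailing line is
# kept in the tail so it can be finished by the next chunk of output.
#
#   >>> split_to_last_line_break(b'line one\nline tw')
#   (b'line one\n', b'line tw')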
class CMakeIOBufferProtocol(IOBufferProtocol):
"""An asyncio protocol that collects stdout and stderr.
This class also generates `stdout` and `stderr` events.
Since the underlying asyncio API constructs the actual protocols, this
class provides a factory method to inject the job and stage information
into the created protocol.
"""
def __init__(self, label, job_id, stage_label, event_queue, log_path, source_path, *args, **kwargs):
super(CMakeIOBufferProtocol, self).__init__(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
self.source_path = source_path
# These are buffers for incomplete lines that we want to wait to parse
# until we have received them completely
self.stdout_tail = b''
self.stderr_tail = b''
def on_stdout_received(self, data):
data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
def on_stderr_received(self, data):
data_head, self.stderr_tail = split_to_last_line_break(self.stderr_tail + data)
colored = self.color_lines(data_head)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
def close(self):
# Make sure tail buffers are flushed
self.flush_tails()
super(CMakeIOBufferProtocol, self).close()
def flush_tails(self):
"""Write out any unprocessed tail buffers."""
colored = self.color_lines(self.stdout_tail)
super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
self.stdout_tail = b''
colored = self.color_lines(self.stderr_tail)
super(CMakeIOBufferProtocol, self).on_stderr_received(colored)
self.stderr_tail = b''
def color_lines(self, data):
"""Apply colorization rules to each line in data"""
decoded_data = self._decode(data)
# TODO: This will only work if all lines are received at once. Instead
        # of directly splitting lines, we should buffer the data lines until
# the last character is a line break
lines = decoded_data.splitlines(True) # Keep line breaks
colored_lines = [self.colorize_cmake(l) for l in lines]
colored_data = ''.join(colored_lines)
encoded_data = self._encode(colored_data)
return encoded_data
@classmethod
def factory_factory(cls, source_path):
"""Factory factory for constructing protocols that know the source path for this CMake package."""
def factory(label, job_id, stage_label, event_queue, log_path):
            # factory is called by the catkin_tools executor
def init_proxy(*args, **kwargs):
# init_proxy is called by asyncio
return cls(label, job_id, stage_label, event_queue, log_path, source_path, *args, **kwargs)
return init_proxy
return factory
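    # Hedged sketch of how these nested factories are typically chained
    # together (the variable names here are illustrative, not taken from the
    # executor code):
    #
    #   make_protocol = CMakeIOBufferProtocol.factory_factory('/path/to/pkg/src')
    #   init_proxy = make_protocol(label, job_id, stage_label, event_queue, log_path)
    #   # asyncio later calls init_proxy(...) to construct the protocol instance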
def colorize_cmake(self, line):
"""Colorizes output from CMake
This also prepends the source path to the locations of warnings and errors.
:param line: one, new line terminated, line from `cmake` which needs coloring.
:type line: str
"""
# return line
cline = sanitize(line)
if len(cline.strip()) == 0:
return cline
if line.startswith('-- '):
cline = '@{cf}--@| ' + cline[len('-- '):]
if ':' in cline:
split_cline = cline.split(':', 1)
if len(split_cline[1].strip()) > 0:
cline = split_cline[0] + (':@{yf}%s@|' % split_cline[1])
elif line.lower().startswith('warning'):
# WARNING
cline = fmt('@{yf}', reset=False) + cline
elif line.startswith('CMake Warning at '):
# CMake Warning at...
cline = cline.replace('CMake Warning at ', '@{yf}@!CMake Warning@| at ' + self.source_path + os.path.sep)
elif line.startswith('CMake Warning (dev) at '):
# CMake Warning at...
cline = cline.replace(
'CMake Warning (dev) at ', '@{yf}@!CMake Warning (dev)@| at ' + self.source_path + os.path.sep)
elif line.startswith('CMake Warning'):
# CMake Warning...
cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|')
elif line.startswith('ERROR:'):
# ERROR:
cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|')
elif line.startswith('CMake Error at '):
# CMake Error...
cline = cline.replace('CMake Error at ', '@{rf}@!CMake Error@| at ' + self.source_path + os.path.sep)
elif line.startswith('CMake Error'):
# CMake Error...
cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|')
elif line.startswith('Call Stack (most recent call first):'):
# CMake Call Stack
cline = cline.replace('Call Stack (most recent call first):',
'@{cf}@_Call Stack (most recent call first):@|')
return fmt(cline, reset=False)
class CMakeMakeIOBufferProtocol(IOBufferProtocol):
"""An IOBufferProtocol which parses CMake's progree prefixes and emits corresponding STAGE_PROGRESS events."""
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
super(CMakeMakeIOBufferProtocol, self).__init__(
label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
def on_stdout_received(self, data):
super(CMakeMakeIOBufferProtocol, self).on_stdout_received(data)
# Parse CMake Make completion progress
        progress_matches = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
if progress_matches is not None:
self.event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=self.job_id,
stage_label=self.stage_label,
percent=str(progress_matches.groups()[0])))
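# Illustrative note (not in the original file): a make output chunk such as
# b'[ 50%] Building CXX object foo.o\n' matches the pattern above and results
# in a STAGE_PROGRESS event with percent='50'.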
| 41.480226 | 119 | 0.652411 |
a7b32b1c0e08ff923d00c75837198cf6fe90e147 | 14,074 | py | Python | datalad/support/tests/test_fileinfo.py | mslw/datalad | 5adfde7818a9582cd12c2d25fe6f008132de1b2f | ["MIT"] | 298 | 2015-01-25T17:36:29.000Z | 2022-03-20T03:38:47.000Z | datalad/support/tests/test_fileinfo.py | adswa/datalad | c86643fe2e974da8d7403e9799997efcfee97384 | ["MIT"] | 6,387 | 2015-01-02T18:15:01.000Z | 2022-03-31T20:58:58.000Z | datalad/support/tests/test_fileinfo.py | adswa/datalad | c86643fe2e974da8d7403e9799997efcfee97384 | ["MIT"] | 109 | 2015-01-25T17:49:40.000Z | 2022-03-06T06:54:54.000Z |
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test file info getters"""
import os.path as op
from pathlib import Path
import datalad.utils as ut
from datalad.tests.utils import (
assert_dict_equal,
assert_equal,
assert_false,
assert_in,
assert_not_in,
assert_raises,
known_failure_githubci_win,
slow,
with_tempfile,
with_tree,
)
from datalad.distribution.dataset import Dataset
from datalad.support.exceptions import NoSuchPathError
from datalad.support.gitrepo import GitRepo
from datalad.tests.utils import (
assert_repo_status,
get_convoluted_situation,
)
@slow # 10sec on travis
@known_failure_githubci_win
@with_tempfile
def test_get_content_info(path):
repo = GitRepo(path)
assert_equal(repo.get_content_info(), {})
# an invalid reference causes an exception
assert_raises(ValueError, repo.get_content_info, ref='HEAD')
ds = get_convoluted_situation(path)
repopath = ds.repo.pathobj
assert_equal(ds.repo.pathobj, repopath)
assert_equal(ds.pathobj, ut.Path(path))
# verify general rules on fused info records that are incrementally
# assembled: for git content info, amended with annex info on 'HEAD'
# (to get the last committed stage and with it possibly vanished
    # content), and lastly annex info wrt the present worktree, to
# also get info on added/staged content
# this fuses the info reported from
# - git ls-files
# - git annex findref HEAD
# - git annex find --include '*'
for f, r in ds.repo.annexstatus().items():
if f.match('*_untracked'):
assert(r.get('gitshasum', None) is None)
if f.match('*_deleted'):
            assert not f.exists() and not f.is_symlink()
if f.match('subds_*'):
            assert r['type'] == ('dataset' if r.get('gitshasum', None) else 'directory')
if f.match('file_*'):
# which one exactly depends on many things
assert_in(r['type'], ('file', 'symlink'))
if f.match('file_ingit*'):
assert(r['type'] == 'file')
elif '.datalad' not in f.parts and not f.match('.git*') and \
r.get('gitshasum', None) and not f.match('subds*'):
# this should be known to annex, one way or another
            # regardless of whether things are deleted or staged
# or anything in between
assert_in('key', r, f)
assert_in('keyname', r, f)
assert_in('backend', r, f)
assert_in('bytesize', r, f)
# no duplication with path
assert_not_in('file', r, f)
# query full untracked report
res = ds.repo.get_content_info()
assert_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_not_in(repopath.joinpath('dir_untracked'), res)
# query for compact untracked report
res = ds.repo.get_content_info(untracked='normal')
assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_in(repopath.joinpath('dir_untracked'), res)
# query no untracked report
res = ds.repo.get_content_info(untracked='no')
assert_not_in(repopath.joinpath('dir_untracked', 'file_untracked'), res)
assert_not_in(repopath.joinpath('dir_untracked'), res)
# git status integrity
status = ds.repo.status()
for t in ('subds', 'file'):
for s in ('untracked', 'added', 'deleted', 'clean',
'ingit_clean', 'dropped_clean', 'modified',
'ingit_modified'):
for l in ('', ut.PurePosixPath('subdir', '')):
if t == 'subds' and 'ingit' in s or 'dropped' in s:
# invalid combination
continue
if t == 'subds' and s == 'deleted':
# same as subds_unavailable -> clean
continue
p = repopath.joinpath(l, '{}_{}'.format(t, s))
assert p.match('*_{}'.format(status[p]['state'])), p
if t == 'subds':
assert_in(status[p]['type'], ('dataset', 'directory'), p)
else:
assert_in(status[p]['type'], ('file', 'symlink'), p)
# git annex status integrity
annexstatus = ds.repo.annexstatus()
for t in ('file',):
for s in ('untracked', 'added', 'deleted', 'clean',
'ingit_clean', 'dropped_clean', 'modified',
'ingit_modified'):
for l in ('', ut.PurePosixPath('subdir', '')):
p = repopath.joinpath(l, '{}_{}'.format(t, s))
if s in ('untracked', 'ingit_clean', 'ingit_modified'):
# annex knows nothing about these things
assert_not_in('key', annexstatus[p])
continue
assert_in('key', annexstatus[p])
# dear future,
# if the next one fails, git-annex might have changed the
# nature of the path that are being reported by
# `annex find --json`
# when this was written `hashir*` was a native path, but
# `file` was a POSIX path
assert_equal(annexstatus[p]['has_content'], 'dropped' not in s)
# check the different subds evaluation modes
someds = Dataset(ds.pathobj / 'subds_modified' / 'someds')
dirtyds_path = someds.pathobj / 'dirtyds'
assert_not_in(
'state',
someds.repo.status(eval_submodule_state='no')[dirtyds_path]
)
assert_equal(
'clean',
someds.repo.status(eval_submodule_state='commit')[dirtyds_path]['state']
)
assert_equal(
'modified',
someds.repo.status(eval_submodule_state='full')[dirtyds_path]['state']
)
@with_tempfile
def test_compare_content_info(path):
# TODO remove when `create` is RF to return the new Dataset
ds = Dataset(path).create()
assert_repo_status(path)
# for a clean repo HEAD and worktree query should yield identical results
# minus a 'bytesize' report that is readily available for HEAD, but would
    # require a stat call per file for the worktree, and is not done ATM
wt = ds.repo.get_content_info(ref=None)
assert_dict_equal(
wt,
{f: {k: v for k, v in p.items() if k != 'bytesize'}
for f, p in ds.repo.get_content_info(ref='HEAD').items()}
)
@with_tempfile
def test_subds_path(path):
# a dataset with a subdataset with a file, all neatly tracked
ds = Dataset(path).create()
subds = ds.create('sub')
assert_repo_status(path)
with (subds.pathobj / 'some.txt').open('w') as f:
f.write(u'test')
ds.save(recursive=True)
assert_repo_status(path)
# querying the toplevel dataset repo for a subdspath should
# report the subdataset record in the dataset
# (unlike `git status`, which is silent for subdataset paths),
# but definitely not report the subdataset as deleted
# https://github.com/datalad/datalad-revolution/issues/17
stat = ds.repo.status(paths=[op.join('sub', 'some.txt')])
assert_equal(list(stat.keys()), [subds.repo.pathobj])
assert_equal(stat[subds.repo.pathobj]['state'], 'clean')
@with_tempfile
def test_report_absent_keys(path):
ds = Dataset(path).create()
# create an annexed file
testfile = ds.pathobj / 'dummy'
testfile.write_text(u'nothing')
ds.save()
# present in a full report and in a partial report
# based on worktree of HEAD ref
for ai in (
ds.repo.get_content_annexinfo(eval_availability=True),
ds.repo.get_content_annexinfo(
paths=['dummy'],
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
paths=['dummy'],
eval_availability=True)):
assert_in(testfile, ai)
assert_equal(ai[testfile]['has_content'], True)
# drop the key, not available anywhere else
ds.drop('dummy', check=False)
# does not change a thing, except the key is gone
for ai in (
ds.repo.get_content_annexinfo(eval_availability=True),
ds.repo.get_content_annexinfo(
paths=['dummy'],
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
eval_availability=True),
ds.repo.get_content_annexinfo(
ref='HEAD',
paths=['dummy'],
eval_availability=True)):
assert_in(testfile, ai)
assert_equal(ai[testfile]['has_content'], False)
@with_tempfile
def test_annexinfo_init(path):
ds = Dataset(path).create()
foo = ds.pathobj / "foo"
foo_cont = b"foo content"
foo.write_bytes(foo_cont)
bar = ds.pathobj / "bar"
bar.write_text(u"bar content")
ds.save()
# Custom init limits report, with original dict getting updated.
cinfo_custom_init = ds.repo.get_content_annexinfo(
init={foo: {"bytesize": 0,
"this-is-surely-only-here": "right?"}})
assert_not_in(bar, cinfo_custom_init)
assert_in(foo, cinfo_custom_init)
assert_equal(cinfo_custom_init[foo]["bytesize"], len(foo_cont))
assert_equal(cinfo_custom_init[foo]["this-is-surely-only-here"],
"right?")
# "git" injects get_content_info() values.
cinfo_init_git = ds.repo.get_content_annexinfo(init="git")
assert_in("gitshasum", cinfo_init_git[foo])
# init=None, on the other hand, does not.
cinfo_init_none = ds.repo.get_content_annexinfo(init=None)
assert_in(foo, cinfo_init_none)
assert_in(bar, cinfo_init_none)
assert_not_in("gitshasum", cinfo_init_none[foo])
@with_tempfile
def test_info_path_inside_submodule(path):
ds = Dataset(path).create()
subds = ds.create("submod")
foo = (subds.pathobj / "foo")
foo.write_text("foo")
ds.save(recursive=True)
cinfo = ds.repo.get_content_info(
ref="HEAD", paths=[foo.relative_to(ds.pathobj)])
assert_in("gitshasum", cinfo[subds.pathobj])
@with_tempfile
def test_get_content_info_dotgit(path):
ds = Dataset(path).create()
# Files in .git/ won't be reported, though this takes a kludge on our side
# before Git 2.25.
assert_false(ds.repo.get_content_info(paths=[op.join(".git", "config")]))
@with_tempfile
def test_get_content_info_paths_empty_list(path):
ds = Dataset(path).create()
# Unlike None, passing any empty list as paths to get_content_info() does
# not report on all content.
assert_false(ds.repo.get_content_info(paths=[]))
assert_false(ds.repo.get_content_info(paths=[], ref="HEAD"))
    # Add annex content to make sure it's not reported.
(ds.pathobj / "foo").write_text("foo")
ds.save()
# Same for get_content_annexinfo()...
assert_false(ds.repo.get_content_annexinfo(paths=[]))
assert_false(ds.repo.get_content_annexinfo(paths=[], init=None))
assert_false(ds.repo.get_content_annexinfo(paths=[], ref="HEAD"))
assert_false(
ds.repo.get_content_annexinfo(paths=[], ref="HEAD", init=None))
# ... where whatever was passed for init will be returned as is.
assert_equal(
ds.repo.get_content_annexinfo(
paths=[], ref="HEAD", init={"random": {"entry": "a"}}),
{"random": {"entry": "a"}})
@with_tempfile
def test_status_paths_empty_list(path):
ds = Dataset(path).create()
assert_equal(ds.repo.status(paths=[]), {})
@with_tree(tree=(('ingit.txt', 'ingit'),
('inannex.txt', 'inannex'),
('dir1', {'dropped': 'dropped'}),
('dir2', {'d21': 'd21', 'd22': 'd22'})))
def test_get_file_annexinfo(path):
ds = Dataset(path).create(force=True)
ds.save('ingit.txt', to_git=True)
ds.save()
# have some content-less component for testing
ds.drop(ds.pathobj / 'dir1', check=False)
repo = ds.repo
# only handles a single file at a time
assert_raises(ValueError, repo.get_file_annexinfo, repo.pathobj / 'dir2')
    # however, what matters functionally is that there is only a single file
    # to report on, not that the exact query path matches; the matching path
    # is included in the report
assert_equal(
repo.pathobj / 'dir1' / 'dropped',
repo.get_file_annexinfo(repo.pathobj / 'dir1')['path'])
# does not raise on a non-annex file, instead it returns no properties
assert_equal(repo.get_file_annexinfo('ingit.txt'), {})
    # but does raise on a path that doesn't exist
assert_raises(NoSuchPathError, repo.get_file_annexinfo, 'nothere')
# check return properties for utility
props = repo.get_file_annexinfo('inannex.txt')
# to replace get_file_backend()
assert_equal(props['backend'], 'MD5E')
# to replace get_file_key()
assert_equal(props['key'], 'MD5E-s7--3b158c5b0a18c247ebad28c09fc3e180.txt')
# for size reporting
assert_equal(props['bytesize'], 7)
# all records have a pathobj
assert_equal(props['path'], repo.pathobj / 'inannex.txt')
# test if `eval_availability` has desired effect
assert_not_in('has_content', props)
# extended set of properties, after more expensive availability check
props = repo.get_file_annexinfo('inannex.txt', eval_availability=True)
# to replace file_has_content()
assert_equal(props['has_content'], True)
# to replace get_contentlocation()
assert_equal(
Path(props['objloc']).read_text(),
'inannex')
# make sure has_content is not always True
props = repo.get_file_annexinfo(
ds.pathobj / 'dir1' / 'dropped', eval_availability=True)
assert_equal(props['has_content'], False)
assert_not_in('objloc', props)
| 37.631016 | 87 | 0.62619 |
694af4168970c99ad8aea7a0af5a3636e2ee04fe | 1,565 | py | Python | book/book/pipelines.py | qingtianfei/Dangdangspider | bdfc5c83fbda4ba0f5afef31265d17b9dc8f1f5c | ["MIT"] | null | null | null | book/book/pipelines.py | qingtianfei/Dangdangspider | bdfc5c83fbda4ba0f5afef31265d17b9dc8f1f5c | ["MIT"] | null | null | null | book/book/pipelines.py | qingtianfei/Dangdangspider | bdfc5c83fbda4ba0f5afef31265d17b9dc8f1f5c | ["MIT"] | 1 | 2019-11-16T12:11:24.000Z | 2019-11-16T12:11:24.000Z |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
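# For example, a minimal ITEM_PIPELINES entry in settings.py might look like
# the following (the dotted path is an assumption based on this project's
# book/book/pipelines.py layout):
#
#   ITEM_PIPELINES = {
#       'book.pipelines.BookPipeline': 300,
#   }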
import requests
import os
import pymysql
db = pymysql.connect(host="localhost", port=3306, user="root", password="", db="bookstore", charset='utf8')
class BookPipeline(object):
def process_item(self, item, spider):
print("+"*10)
print("hello, pipline")
        try:
            os.mkdir(r"D:\PictureList")
        except OSError:
            print("创建文件失败")  # directory creation failed (it may already exist)
for i in range(1, min(len(item['title']), len(item['pic']), len(item['price']), len(item['author']), len(item['time']), len(item['publish']))):
title = item['title'][i]
price = item['price'][i]
pic = item['pic'][i-1]
author = item['author'][i]
publish = item['publish'][i]
time = item['time'][i]
            cursor = db.cursor()
            name = "D:/PictureList/" + pic[-18:]
sql = """ insert into booklist (title, price, pic, author, publish, time, picpath) values(%s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(sql, (title, price, pic, author, publish, time, name))
db.commit()
picT = requests.get(pic).content
name = "D:/PictureList/" + pic[-18:]
with open(name , 'wb') as f:
f.write(picT)
return item
| 30.096154 | 151 | 0.514377 |
125b79dd286390a2c4eb5d9b9401b2b8216a6a7d | 40,843 | py | Python | residue/crud/orm.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | 1 | 2018-02-26T19:03:19.000Z | 2018-02-26T19:03:19.000Z | residue/crud/orm.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | null | null | null | residue/crud/orm.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | 2 | 2018-02-05T19:49:30.000Z | 2018-02-24T18:10:30.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 the Residue team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""CRUD support for SQLAlchemy orm objects."""
from __future__ import absolute_import
import collections
import inspect
import re
import uuid
from collections import defaultdict, Mapping
from copy import deepcopy
from itertools import chain
from datetime import date, datetime, time
import six
from pockets import cached_classproperty, cached_property, classproperty, collect_superclass_attr_names, \
is_data, is_listy, mappify
from pockets.autolog import log
from sqlalchemy import orm
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.orm.properties import ColumnProperty, RelationshipProperty
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import ClauseElement
from sqlalchemy.types import Boolean, DateTime, Integer, String, Text, UnicodeText
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache # noqa: F401
__all__ = [
'get_model_by_table', 'get_primary_key_column_names', 'get_unique_constraint_column_names',
'get_one_to_many_foreign_key_column_name', 'CrudModelMixin', 'crudable', 'crud_validation',
'text_length_validation', 'regex_validation']
@lru_cache()
def get_model_by_table(base, table):
"""
Returns declarative class associated with given table.
Arguments:
base (sqlalchemy.ext.declarative.api.Base): Declarative model base or
a subclass of the declarative model base.
table (sqlalchemy.sql.schema.Table): SQLAlchemy Table object.
Returns:
class: Declarative class or None if not found.
"""
for cls in base._decl_class_registry.values():
if hasattr(cls, '__table__') and cls.__table__ is table:
return cls
return None
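# Hedged usage sketch (illustrative names, not from this module): given a
# declarative Base and a mapped class Player, the lookup resolves the class
# from its table object.
#
#   >>> get_model_by_table(Base, Player.__table__) is Player
#   True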
@lru_cache()
def get_one_to_many_foreign_key_column_name(model, name):
"""
Returns the constituent column names for the foreign key on the remote
table of the one-to-many relationship specified by name.
Args:
model (class or object): The given model class or model instance.
name (string): The name of the attribute on `model` which is a
one-to-many relationship.
Return:
list: One-to-many foreign key column names as a list of strings.
"""
if not inspect.isclass(model):
return get_one_to_many_foreign_key_column_name(model.__class__, name)
attr = getattr(model, name, None)
if not attr:
# Unknown attribute.
return []
remote_columns = getattr(attr.property, 'remote_side', None)
if not remote_columns:
# This is not a one-to-many relationship.
return []
remote_tables = set(c.table.name for c in remote_columns)
if len(remote_tables) > 1:
# This is a many-to-many relationship with a cross reference table.
return []
foreign_key_column_names = []
for remote_column in remote_columns:
if getattr(remote_column, 'foreign_keys', False):
foreign_key_column_names.append(remote_column.name)
else:
remote_model = get_model_by_table(model, remote_column.table)
if remote_model:
# Quasi foreign keys don't actually have foreign_keys set,
# but they need to be treated as though they did.
foreign_keys = getattr(remote_model, 'quasi_foreign_keys', [])
if remote_column.name in foreign_keys:
foreign_key_column_names.append(remote_column.name)
return foreign_key_column_names
@lru_cache()
def get_primary_key_column_names(model):
"""
Returns the constituent column names for the primary key of the given
model.
Args:
model (class or object): The given model class or model instance.
Return:
list: Primary key column names as a list of strings.
"""
return [column.name for column in model.__table__.primary_key.columns]
@lru_cache()
def get_unique_constraint_column_names(model):
"""
Returns the constituent column names for each unique constraint on the
given model.
Args:
model (class or object): The given model class or model instance.
Return:
list: Unique constraint column names as a list of lists of strings.
"""
return [[column.name for column in constraint.columns]
for constraint in model.__table__.constraints
if isinstance(constraint, UniqueConstraint)]
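# Illustrative return shape (assumed example): for a model declaring
# UniqueConstraint('first_name', 'last_name') and UniqueConstraint('email'),
# this would yield [['first_name', 'last_name'], ['email']] (constraint order
# follows the table's constraint collection).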
class CrudModelMixin(object):
extra_defaults = []
type_casts = {uuid.UUID: str}
type_map = {}
type_map_defaults = {
int: 'int',
six.binary_type: 'string',
six.text_type: 'string',
float: 'float',
datetime: 'date',
date: 'date',
time: 'date',
bool: 'boolean',
uuid.UUID: 'string',
String: 'string',
UnicodeText: 'string',
Text: 'string',
DateTime: 'date',
Integer: 'int',
Boolean: 'boolean',
}
# Override what attributes will show in the repr. Defaults to primary keys
# and unique constraints.
_repr_attr_names = ()
# In addition to any default attributes, also show these in the repr.
_extra_repr_attr_names = ()
@classmethod
def _create_or_fetch(cls, session, value, **backref_mapping):
"""
Fetch an existing or create a new instance of this class. Fetching uses
the values from the value positional argument (the id if available, or
if any keys that correspond to unique constraints are present). In both
cases the instance will still need to be updated using whatever new
values you want.
Args:
cls (class): The class object we're going to fetch or create
session (Session): the session object
value (any): the dictionary value to fetch with
**backref_mapping: the backref key name and value of the "parent"
object of the object you're fetching or about to create. If
the backref value of a fetched instance is not the same as the
value of what's passed in, we will instead create a new
instance. This is because we want to prevent "stealing" an
existing object in a one-to-one relationship unless an id is
explicitly passed.
Returns:
A previously existing or newly created (and added to the session)
model instance.
"""
assert len(backref_mapping) <= 1, 'only one backref key is allowed at this time: {}'.format(backref_mapping)
if backref_mapping:
backref_name = list(backref_mapping.keys())[0]
parent_id = backref_mapping[backref_name]
else:
backref_name, parent_id = None, None
id = None
if isinstance(value, Mapping):
id = value.get('id', None)
elif isinstance(value, six.string_types):
id = value
instance = None
if id is not None:
try:
instance = session.query(cls).filter(cls.id == id).first()
except Exception:
log.error('Unable to fetch instance based on id value {!r}', value, exc_info=True)
raise TypeError('Invalid instance ID type for relation: {0.__name__} (value: {1})'.format(cls, value))
elif isinstance(value, Mapping):
# if there's no id, check to see if we're provided a dictionary
# that includes all of the columns associated with a UniqueConstraint.
for column_names in cls.unique_constraint_column_names:
if all((name in value and value[name]) for name in column_names):
# all those column names are provided,
# use that to query by chaining together all the necessary
# filters to construct that query
q = session.query(cls)
filter_kwargs = {name: value[name] for name in column_names}
try:
instance = q.filter_by(**filter_kwargs).one()
except NoResultFound:
continue
except MultipleResultsFound:
log.error('multiple results found for {} unique constraint: {}', cls.__name__, column_names)
raise
else:
break
else:
log.debug('unable to search using unique constraints: {} with {}', column_names, value)
if instance and id is None and backref_mapping and getattr(instance, backref_name, None) != parent_id:
log.warning(
'Attempting to change the owner of {} without an explicitly passed id; '
'a new {} instance will be used instead', instance, cls.__name__)
instance = None
if not instance:
log.debug('creating new: {} with id {}', cls.__name__, id)
if id is None:
instance = cls()
else:
instance = cls(id=id)
session.add(instance)
return instance
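    # Hedged sketch of the lookup behaviour above (class and column names are
    # illustrative): a plain id string fetches by primary key, a dict carrying
    # every column of a unique constraint fetches by that constraint, and
    # anything else falls through to creating a new, session-added instance.
    #
    #   player = Player._create_or_fetch(session,
    #                                    {'team_id': team.id, 'number': 23},
    #                                    team_id=team.id)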
@cached_property
def _type_casts_for_to_dict(self):
type_casts = CrudModelMixin.type_casts.copy()
type_casts.update(self.type_casts)
return defaultdict(lambda: lambda x: x, type_casts)
@cached_classproperty
def to_dict_default_attrs(cls):
attr_names = []
super_attr_names = collect_superclass_attr_names(cls, terminal_class=cls.BaseClass)
for name in super_attr_names:
if not name.startswith('_') or name in cls.extra_defaults:
attr = getattr(cls, name)
class_attr = getattr(cls, '__dict__', {}).get(name)
is_column_prop = isinstance(attr, InstrumentedAttribute) \
and isinstance(attr.property, ColumnProperty)
is_hybrid_prop = isinstance(getattr(attr, 'descriptor', None), hybrid_property)
is_prop = isinstance(attr, (property, InstrumentedAttribute, ClauseElement, AssociationProxy))
is_class_prop = isinstance(class_attr, (classproperty, cached_classproperty))
is_callable = callable(attr)
if is_column_prop or not (is_hybrid_prop or is_prop or is_class_prop or is_callable):
attr_names.append(name)
return attr_names
def to_dict(self, attrs=None, validator=lambda self, name: True):
obj = {}
if attrs is not None:
try:
attrs = mappify(attrs)
except TypeError:
attrs = None
# It's still possible for the client to blacklist this, but by default
# we're going to include them.
if attrs is None or attrs.get('_model', True):
obj['_model'] = self.__class__.__name__
if attrs is None or attrs.get('id', True):
obj['id'] = self.id
def cast_type(value):
# Ensure that certain types are cast appropriately for daily usage
# e.g. we want the result of HashedPasswords to be the string
# representation instead of the object.
return self._type_casts_for_to_dict[value.__class__](value)
if attrs is None:
for name in self.to_dict_default_attrs:
if validator(self, name):
obj[name] = cast_type(getattr(self, name))
else:
for name in self.extra_defaults + list(attrs.keys()):
# If we're not supposed to get the attribute according to the
# validator, OR the client intentionally blacklisted it, then
# skip this value.
if not validator(self, name) or not attrs.get(name, True):
continue
attr = getattr(self, name, None)
if isinstance(attr, self.BaseClass):
obj[name] = attr.to_dict(attrs[name], validator)
elif isinstance(attr, (list, set, tuple, frozenset)):
obj[name] = []
for item in attr:
if isinstance(item, self.BaseClass):
obj[name].append(item.to_dict(attrs[name], validator))
else:
obj[name].append(item)
elif callable(attr):
obj[name] = cast_type(attr())
else:
obj[name] = cast_type(attr)
return obj
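    # Hedged example of the nested attrs whitelist accepted above (attribute
    # names are illustrative): truthy leaves are serialized, falsy ones are
    # skipped, and nested dicts recurse into relationship objects.
    #
    #   team.to_dict({'name': True,
    #                 'players': {'name': True, 'number': True}})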
def from_dict(self, attrs, validator=lambda self, name, val: True):
relations = []
# merge_relations modifies the dictionaries that are passed to it in
# order to support updates in deeply-nested object graphs. To ensure
# that we don't have dirty state between applying updates to different
# model objects, we need a fresh copy
attrs = deepcopy(attrs)
for name, value in attrs.items():
if not name.startswith('_') and validator(self, name, value):
attr = getattr(self.__class__, name)
if isinstance(attr, InstrumentedAttribute) and isinstance(attr.property, RelationshipProperty):
relations.append((name, value))
else:
setattr(self, name, value)
def required(kv):
cols = list(getattr(self.__class__, kv[0]).property.local_columns)
return len(cols) != 1 or cols[0].primary_key or cols[0].nullable
relations.sort(key=required)
for name, value in relations:
self._merge_relations(name, value, validator)
return self
@classproperty
def primary_key_column_names(cls):
return get_primary_key_column_names(cls)
@classproperty
def unique_constraint_column_names(cls):
return get_unique_constraint_column_names(cls)
@classmethod
def one_to_many_foreign_key_column_name(cls, name):
column_names = get_one_to_many_foreign_key_column_name(cls, name)
return column_names[0] if column_names else None
def _merge_relations(self, name, value, validator=lambda self, name, val: True):
attr = getattr(self.__class__, name)
if (not isinstance(attr, InstrumentedAttribute) or
not isinstance(attr.property, RelationshipProperty)):
return
session = orm.Session.object_session(self)
assert session, "cannot call _merge_relations on objects not attached to a session"
property = attr.property
relation_cls = property.mapper.class_
        # e.g., if this is a Team with many Players, and we're handling the attribute name
        # "players", we want to set the team_id on all dictionary representations of those players.
backref_id_name = self.one_to_many_foreign_key_column_name(name)
original_value = getattr(self, name)
if is_listy(original_value):
new_insts = []
if value is None:
value = []
if isinstance(value, six.string_types):
value = [value]
for i in value:
if backref_id_name is not None and isinstance(i, dict) and not i.get(backref_id_name):
i[backref_id_name] = self.id
relation_inst = relation_cls._create_or_fetch(
session, i, **{backref_id_name: self.id} if backref_id_name else {})
if isinstance(i, dict):
if relation_inst._sa_instance_state.identity:
validator = _crud_write_validator
else:
validator = _crud_create_validator
relation_inst.from_dict(i, validator)
new_insts.append(relation_inst)
relation = original_value
remove_insts = [stale_inst for stale_inst in relation if stale_inst not in new_insts]
for stale_inst in remove_insts:
relation.remove(stale_inst)
if property.cascade.delete_orphan:
session.delete(stale_inst)
for new_inst in new_insts:
if new_inst.id is None or new_inst not in relation:
relation.append(new_inst)
elif isinstance(value, (collections.Mapping, six.string_types)):
            # value may be a plain string (e.g. a primary key), so only try to
            # inject the backref id when we were actually handed a dict
            if backref_id_name is not None and isinstance(value, collections.Mapping) \
                    and not value.get(backref_id_name):
# if this is a dictionary, it's possible we're going to be
# creating a new thing, if so, we'll add a backref to the
# "parent" if one isn't already set
value[backref_id_name] = self.id
relation_inst = relation_cls._create_or_fetch(session, value)
stale_inst = original_value
if stale_inst is None or stale_inst.id != relation_inst.id:
if stale_inst is not None and property.cascade.delete_orphan:
session.delete(stale_inst)
if isinstance(value, collections.Mapping):
relation_inst.from_dict(value, validator)
            session.flush([relation_inst])  # we want this to be queryable for other things
setattr(self, name, relation_inst)
elif value is None:
# the first branch handles the case of setting a many-to-one value
# to None. So this is for the one-to-one-mapping case
# Setting a relation to None is nullifying the relationship, which
# has potential side effects in the case of cascades, etc.
setattr(self, name, value)
stale_inst = original_value
if stale_inst is not None and property.cascade.delete_orphan:
session.delete(stale_inst)
else:
            raise TypeError('merging relations on {1} is not supported for values '
                            'of type: {0.__class__.__name__} '
                            '(value: {0})'.format(value, name))
def __setattr__(self, name, value):
if name in getattr(self, '_validators', {}):
for val_dict in self._validators[name]:
if not val_dict['model_validator'](self, value):
raise ValueError('validation failed for {.__class__.__name__}'
'.{} with value {!r}: {}'.format(self, name, value,
val_dict.get('validator_message')))
object.__setattr__(self, name, value)
def crud_read(self, attrs=None):
return self.to_dict(attrs, validator=_crud_read_validator)
def crud_create(self, **kwargs):
return self.from_dict(kwargs, validator=_crud_create_validator)
def crud_update(self, **kwargs):
return self.from_dict(kwargs, validator=_crud_write_validator)
def __repr__(self):
"""
Useful string representation for logging.
Note:
__repr__ does NOT return unicode on Python 2, since python decodes
it using the default encoding: http://bugs.python.org/issue5876.
"""
# If no repr attr names have been set, default to the set of all
# unique constraints. This is unordered normally, so we'll order and
# use it here.
if not self._repr_attr_names:
# this flattens the unique constraint list
_unique_attrs = chain.from_iterable(self.unique_constraint_column_names)
_primary_keys = self.primary_key_column_names
attr_names = tuple(sorted(set(chain(_unique_attrs,
_primary_keys,
self._extra_repr_attr_names))))
else:
attr_names = self._repr_attr_names
if not attr_names and hasattr(self, 'id'):
# there should be SOMETHING, so use id as a fallback
attr_names = ('id',)
if attr_names:
_kwarg_list = ' '.join('%s=%s' % (name, repr(getattr(self, name, 'undefined')))
for name in attr_names)
kwargs_output = ' %s' % _kwarg_list
else:
kwargs_output = ''
# specifically using the string interpolation operator and the repr of
# getattr so as to avoid any "hilarious" encode errors for non-ascii
# characters
u = '<%s%s>' % (self.__class__.__name__, kwargs_output)
return u if six.PY3 else u.encode('utf-8')
def _crud_read_validator(self, name):
_crud_perms = getattr(self, '_crud_perms', None)
if _crud_perms is not None and not _crud_perms.get('read', True):
raise ValueError('Attempt to read non-readable model {}'.format(self.__class__.__name__))
elif name in self.extra_defaults:
return True
elif _crud_perms is None:
return not name.startswith('_')
else:
return name in _crud_perms.get('read', {})
def _crud_write_validator(self, name, value=None):
_crud_perms = getattr(self, '_crud_perms', None)
if getattr(self, name, None) == value:
return True
elif not _crud_perms or not _crud_perms.get('update', False):
raise ValueError('Attempt to update non-updateable model {}'.format(self.__class__.__name__))
elif name not in _crud_perms.get('update', {}):
raise ValueError('Attempt to update non-updateable attribute {}.{}'.format(self.__class__.__name__, name))
else:
return name in _crud_perms.get("update", {})
def _crud_create_validator(self, name, value=None):
_crud_perms = getattr(self, '_crud_perms', {})
if not _crud_perms or not _crud_perms.get('can_create', False):
raise ValueError('Attempt to create non-createable model {}'.format(self.__class__.__name__))
else:
return name in _crud_perms.get("create", {})
class crudable(object):
"""
Decorator that specifies which model attributes are part of the CRUD API.
Intended to be used on SQLAlchemy model classes, for example::
@crudable(
create=True,
read=['__something'],
no_read=['password'],
update=[],
no_update=[],
delete=True,
data_spec={
attr={
read=True,
update=True,
desc='description'
defaultValue=<some default>
validators={<validator_name>, <validator value>}
}
}
)
class MyModelObject(Base):
# ...
The resulting object will have a class attribute named "crud_spec" which
is a dictionary like::
{
create: True/False,
read: {<attribute name>, <attribute name>},
update: {<attribute name>, <attribute name>},
delete: True/False,
data_spec: {
manually_specified_attr: {
desc: 'description',
type: '<type>'
read: True/False # only needed if attribute is unspecified
                update: True/False
}
attr_with_manual_description: {
desc: 'description',
type: '<type>'
}
}
}
Attributes:
never_read (tuple): Names of attributes that default to being not
readable.
        never_update (tuple): Names of attributes that default to being not
updatable.
always_create (tuple): Names of attributes that default to being always
creatable.
default_labels (dict): Attribute name and label pairs, to simplify
setting the same label for each and every instance of an attribute
name.
"""
never_read = ('metadata',)
never_update = ('id',)
always_create = ('id',)
default_labels = {'addr': 'Address'} # TODO: This should be user-definable
def __init__(self, can_create=True,
create=None, no_create=None,
read=None, no_read=None,
update=None, no_update=None,
can_delete=True,
data_spec=None):
"""
Args:
can_create (bool): If True (default), the decorated class can be
created.
create (collections.Iterable): If provided, interpreted as the
attribute names that can be specified when the object is
                created, in addition to the items that are updateable. If not
provided (default) all attributes that can be updated plus the
primary key are allowed to be passed to the create method.
no_create (collections.Iterable): If provided, interpreted as the
attribute names that will not be allowed to be passed to
create, taking precedence over anything specified in the create
parameter. If not provided (default) everything allowed by the
create parameter will be acceptable.
read (collections.Iterable): If provided, interpreted as the
attribute names that can be read, and ONLY these names can be
read. If not provided (default) all attributes not starting
                with an underscore (e.g. __str__, or _hidden) will be readable.
no_read (collections.Iterable): if provided, interpreted as the
attribute names that can't be read, taking precedence over
anything specified in the read parameter. If not provided
(default) everything allowed by the read parameter will be
readable.
update (collections.Iterable): If provided, interpreted as the
attribute names that can be updated, in addition to the list of
                items that are readable. If None (default), defaults to the list of
readable attributes. Pass an empty iterable to use the default
behavior listed under the read docstring if there were
attributes passed to read that you don't want update to default
to.
no_update (collections.Iterable): if provided, interpreted as the
attribute names that can't be updated, taking precedence over
                anything specified in the update parameter. If None (default),
                defaults to the list of non-readable attributes. Pass an empty
iterable to use the default behavior listed under the no_read
docstring if there were attributes passed to no_read that you
don't want no_update to default to.
can_delete (bool): If True (default), the decorated class can be
deleted.
data_spec (dict): Any additional information that should be added
to the `model.get_crud_definition`. See that function for
complete documentation, but the key items are:
"desc" - Human-readable description, will default to docstrings
if available, else not be present in the final spec.
"label" - a Human-readable short label to help remember the
purpose of a particular field, without going into detail.
If not specifically provided, it will not be present in the
spec.
"type" - the human-readable "type" for an attribute meaning
that a conversion to this type will be performed on the
server. If possible this will be determined automatically
using isinstance(), otherwise "auto" will be set:
auto (default) - no type conversion
string - `str`
boolean - `bool`
int - `int`
float - `float`
"defaultValue" - the value that is considered the default,
either because a model instance will use this default value
if unspecified, or a client should present this option as
the default for a user
"validators" - a `dict` mapping a validator name (e.g. "max")
and the value to be used in validation (e.g. 1000, for a
max value of 1000). This is intended to support client side
validation.
"""
self.can_create = can_create
self.can_delete = can_delete
if no_update is not None and create is None:
create = deepcopy(no_update)
self.read = read or []
self.no_read = no_read or []
self.update = update or []
self.no_update = no_update or [x for x in self.no_read if x not in self.update]
self.create = create or []
self.no_create = no_create or [x for x in self.no_update if x not in self.create]
self.no_read.extend(self.never_read)
self.no_update.extend(self.never_update)
self.data_spec = data_spec or {}
def __call__(self, cls):
def _get_crud_perms(cls):
if getattr(cls, '_cached_crud_perms', False):
return cls._cached_crud_perms
crud_perms = {
'can_create': self.can_create,
'can_delete': self.can_delete,
'read': [],
'update': [],
'create': []
}
read = self.read
for name in collect_superclass_attr_names(cls):
if not name.startswith('_'):
attr = getattr(cls, name)
properties = (InstrumentedAttribute, property, ClauseElement)
primitives = (int, float, bool, datetime, date, time, six.binary_type, six.text_type, uuid.UUID)
if isinstance(attr, properties) or isinstance(attr, primitives):
read.append(name)
read = list(set(read))
for name in read:
if not self.no_read or name not in self.no_read:
crud_perms['read'].append(name)
update = self.update + deepcopy(crud_perms['read'])
update = list(set(update))
for name in update:
if not self.no_update or name not in self.no_update:
if name in cls.__table__.columns:
crud_perms['update'].append(name)
else:
attr = getattr(cls, name)
if isinstance(attr, property) and getattr(attr, 'fset', False):
crud_perms['update'].append(name)
elif (isinstance(attr, InstrumentedAttribute) and
isinstance(attr.property, RelationshipProperty) and
attr.property.viewonly != True): # noqa: E712
crud_perms['update'].append(name)
create = self.create + deepcopy(crud_perms['update'])
for name in self.always_create:
create.append(name)
if name in self.no_create:
self.no_create.remove(name)
create = list(set(create))
for name in create:
if not self.no_create or name not in self.no_create:
crud_perms['create'].append(name)
cls._cached_crud_perms = crud_perms
return cls._cached_crud_perms
def _get_crud_spec(cls):
if getattr(cls, '_cached_crud_spec', False):
return cls._cached_crud_spec
crud_perms = cls._crud_perms
field_names = list(set(crud_perms['read']) | set(crud_perms['update']) |
set(crud_perms['create']) | set(self.data_spec.keys()))
fields = {}
for name in field_names:
# json is implicitly unicode, and since this will eventually
# be serialized as json, it's convenient to have it in that
# form early
                # If different validation decorators (or data spec entries)
                # cause multiple values for the same spec kwarg to be
                # specified, we're going to error here on the duplicate keys
                # in dictionaries, since we don't want to allow two different
                # expected values (for maxLength, for example) being sent in
                # a crud spec.
field_validator_kwargs = {
spec_key_name: spec_value
# collect each spec_kwarg for all validators of an attribute
for crud_validator_dict in getattr(cls, '_validators', {}).get(name, [])
for spec_key_name, spec_value in crud_validator_dict.get('spec_kwargs', {}).items()
}
if field_validator_kwargs:
self.data_spec.setdefault(name, {})
# manually specified crud validator keyword arguments
# overwrite the decorator-supplied keyword arguments
field_validator_kwargs.update(self.data_spec[name].get('validators', {}))
self.data_spec[name]['validators'] = field_validator_kwargs
name = six.text_type(name)
field = deepcopy(self.data_spec.get(name, {}))
field['name'] = name
try:
attr = getattr(cls, name)
except AttributeError:
# if the object doesn't have the attribute, AND it's in the field
# list, that means we're assuming it was manually specified in the
# data_spec argument
fields[name] = field
continue
field['read'] = name in crud_perms['read']
field['update'] = name in crud_perms['update']
field['create'] = name in crud_perms['create']
if field['read'] or field['update'] or field['create']:
fields[name] = field
elif name in fields:
del fields[name]
continue
if 'desc' not in field and not is_data(attr):
                    # no desc specified, and there's a relevant docstring, so use it.
                    # If there are 2 consecutive newlines, assume that's a
                    # separator in the docstring and that only the top part
                    # is the description; if not, use the whole thing.
                    # Either way, replace newlines with spaces, since docstrings
                    # often break the same sentence over new lines due to
                    # space constraints.
doc = inspect.getdoc(attr)
if doc:
doc = doc.partition('\n\n')[0].replace('\n', ' ').strip()
field['desc'] = doc
if 'type' not in field:
if isinstance(attr, InstrumentedAttribute) and isinstance(attr.property, ColumnProperty):
field['type'] = cls._type_map.get(type(attr.property.columns[0].type), 'auto')
field_default = getattr(attr.property.columns[0], 'default', None)
# only put the default here if it exists, and it's not an automatic thing like "time.utcnow()"
if field_default is not None and field['type'] != 'auto' \
and not isinstance(field_default.arg, (collections.Callable, property)):
field['defaultValue'] = field_default.arg
elif hasattr(attr, "default"):
field['defaultValue'] = attr.default
else:
field['type'] = cls._type_map.get(type(attr), 'auto')
# only set a default if this isn't a property or some other kind of "constructed attribute"
if field['type'] != 'auto' and not isinstance(attr, (collections.Callable, property)):
field['defaultValue'] = attr
if isinstance(attr, InstrumentedAttribute) and isinstance(attr.property, RelationshipProperty):
field['_model'] = attr.property.mapper.class_.__name__
crud_spec = {'fields': fields}
cls._cached_crud_spec = crud_spec
return cls._cached_crud_spec
def _type_map(cls):
return dict(cls.type_map_defaults, **cls.type_map)
cls._type_map = cached_classproperty(_type_map)
cls._crud_spec = cached_classproperty(_get_crud_spec)
cls._crud_perms = cached_classproperty(_get_crud_perms)
return cls
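# Hedged sketch (illustration only; ``Account`` and its columns are hypothetical and
# assume the same declarative Base used by the models above):
#
#   @crudable(no_read=['password'], data_spec={'name': {'desc': 'Display name'}})
#   class Account(Base):
#       __tablename__ = 'account'
#       id = Column(Integer, primary_key=True)
#       name = Column(UnicodeText)
#       password = Column(UnicodeText)
#
#   'password' in Account._crud_perms['read']       # False -- blacklisted from reads
#   Account._crud_spec['fields']['name']['desc']    # 'Display name'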
class crud_validation(object):
"""
Base class for adding validators to a model.
Supports adding to the crud spec, or to the save action.
"""
def __init__(self, attribute_name, model_validator, validator_message, **spec_kwargs):
"""
Args:
attribute_name (str): The attribute to which this validator applies.
model_validator (callable): A callable that accepts the attribute
value and returns False or None if invalid, or True if the
value is valid.
validator_message (str): Failure message if the validation fails.
**spec_kwargs: The key/value pairs that should be added to the
                crud spec for this attribute name. This generally supports
making the same sorts of validations in a client (e.g.
javascript).
"""
self.attribute_name = attribute_name
self.model_validator = model_validator
self.validator_message = validator_message
self.spec_kwargs = spec_kwargs
def __call__(self, cls):
if not hasattr(cls, '_validators'):
cls._validators = {}
else:
# in case we subclass something with a _validators attribute
cls._validators = deepcopy(cls._validators)
cls._validators.setdefault(self.attribute_name, []).append({
'model_validator': self.model_validator,
'validator_message': self.validator_message,
'spec_kwargs': self.spec_kwargs
})
return cls
class text_length_validation(crud_validation):
def __init__(self, attribute_name, min_length=None, max_length=None,
min_text='The minimum length of this field is {0}.',
max_text='The maximum length of this field is {0}.',
allow_none=True):
def model_validator(instance, text):
if text is None:
return allow_none
text_length = len(six.text_type(text))
return all([min_length is None or text_length >= min_length,
max_length is None or text_length <= max_length])
kwargs = {}
if min_length is not None:
kwargs['minLength'] = min_length
        if min_text is not None:
kwargs['minLengthText'] = min_text
if max_length is not None:
kwargs['maxLength'] = max_length
if max_text is not None:
kwargs['maxLengthText'] = max_text
message = 'Length of value should be between {} and {} (inclusive; None means no min/max).'.format(
min_length, max_length)
crud_validation.__init__(self, attribute_name, model_validator, message, **kwargs)
class regex_validation(crud_validation):
def __init__(self, attribute_name, regex, message):
def regex_validator(instance, text):
# if the field isn't nullable, that will trigger an error later at the sqla level,
# but since None can't be passed to a re.search we want to pass this validation check
if text is None:
return True
# we don't want to actually send across the match object if it did match,
# so leverage the fact that failing searches or matches return None types
return re.search(regex, text) is not None
crud_validation.__init__(self, attribute_name, regex_validator, message,
regexText=message, regexString=regex)
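# Hedged sketch, appended for illustration only (``_ExampleContact`` is a hypothetical
# plain class, not part of this module): the validation decorators just attach a
# ``_validators`` mapping, so their effect is visible without a real SQLAlchemy model.
@regex_validation('email', r'^[^@\s]+@[^@\s]+\.[^@\s]+$',
                  'Must look like an email address.')
@text_length_validation('email', min_length=3, max_length=254)
class _ExampleContact(object):
    email = 'someone@example.com'

# _ExampleContact._validators['email'] now holds two entries whose ``spec_kwargs``
# (minLength/maxLength/regexString and friends) the ``crudable`` decorator above
# would fold into the generated crud spec.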
| 43.729122
| 118
| 0.594251
|
8296c4d4712c50f06a43899ea8df455a1baee006
| 821
|
py
|
Python
|
src/test_workflow/perf_test/perf_test_runner.py
|
naveenpajjuri/opensearch-build
|
855f0296b36ba32b18cf4fc40b096659b5b3f1f0
|
[
"Apache-2.0"
] | null | null | null |
src/test_workflow/perf_test/perf_test_runner.py
|
naveenpajjuri/opensearch-build
|
855f0296b36ba32b18cf4fc40b096659b5b3f1f0
|
[
"Apache-2.0"
] | null | null | null |
src/test_workflow/perf_test/perf_test_runner.py
|
naveenpajjuri/opensearch-build
|
855f0296b36ba32b18cf4fc40b096659b5b3f1f0
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import abc
import os
from manifests.bundle_manifest import BundleManifest
from test_workflow.perf_test.perf_args import PerfArgs
class PerfTestRunner(abc.ABC):
def __init__(self, args: PerfArgs, test_manifest: BundleManifest):
self.args = args
self.test_manifest = test_manifest
self.security = "security" in self.test_manifest.components and not self.args.insecure
self.tests_dir = os.path.join(os.getcwd(), "test-results", "perf-test", f"{'with' if self.security else 'without'}-security")
os.makedirs(self.tests_dir, exist_ok=True)
def run(self):
self.run_tests()
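# Hedged sketch (hypothetical subclass, not part of opensearch-build): a concrete
# runner only needs to provide run_tests(); constructing a real PerfArgs and
# BundleManifest is assumed to happen elsewhere.
class NoopPerfTestRunner(PerfTestRunner):
    def run_tests(self):
        # A real runner would launch the performance suite here and write its
        # results under self.tests_dir.
        print(f"Would run perf tests; results would go to {self.tests_dir}")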
| 32.84
| 133
| 0.73447
|
c7fd094ba0bfa565e25600a10ee0e3e77a78e98f
| 16,830
|
py
|
Python
|
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/415746eb9f6_changes_for_v2.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/415746eb9f6_changes_for_v2.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
c2cgeoportal/scaffolds/update/CONST_alembic/main/versions/415746eb9f6_changes_for_v2.py
|
craxxkid/c2cgeoportal
|
60ca7d5d014d69b0a938f858271c911a30da77c3
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Changes to start the implementation of the version 2
Revision ID: 415746eb9f6
Revises: None
Create Date: 2014-10-23 16:00:47.940216
"""
from alembic import op, context
from sqlalchemy import Column, ForeignKey, Table, MetaData
from sqlalchemy.types import Integer, Boolean, Unicode, Float
# revision identifiers, used by Alembic.
revision = '415746eb9f6'
down_revision = '166ff2dcc48d'
def upgrade():
schema = context.get_context().config.get_main_option('schema')
engine = op.get_bind().engine
if type(engine).__name__ != 'MockConnection' and \
op.get_context().dialect.has_table(
engine, 'interface', schema=schema): # pragma: no cover
return
op.drop_table('user_functionality', schema=schema)
op.create_table(
'interface',
Column(
'id', Integer, primary_key=True
),
Column('name', Unicode),
Column('description', Unicode),
schema=schema,
)
op.create_table(
'interface_layer',
Column(
'interface_id', Integer, ForeignKey(schema + '.interface.id'), primary_key=True
),
Column(
'layer_id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
),
schema=schema,
)
op.create_table(
'interface_theme',
Column(
'interface_id', Integer, ForeignKey(schema + '.interface.id'), primary_key=True
),
Column(
'theme_id', Integer, ForeignKey(schema + '.theme.id'), primary_key=True
),
schema=schema,
)
op.create_table(
'layerv1',
Column(
'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
),
Column('is_checked', Boolean, default=True),
Column('icon', Unicode),
Column('layer_type', Unicode(12)),
Column('url', Unicode),
Column('image_type', Unicode(10)),
Column('style', Unicode),
Column('dimensions', Unicode),
Column('matrix_set', Unicode),
Column('wms_url', Unicode),
Column('wms_layers', Unicode),
Column('query_layers', Unicode),
Column('kml', Unicode),
Column('is_single_tile', Boolean),
Column('legend', Boolean, default=True),
Column('legend_image', Unicode),
Column('legend_rule', Unicode),
Column('is_legend_expanded', Boolean, default=False),
Column('min_resolution', Float),
Column('max_resolution', Float),
Column('disclaimer', Unicode),
Column('identifier_attribute_field', Unicode),
Column('exclude_properties', Unicode),
Column('time_mode', Unicode(8)),
schema=schema,
)
op.execute(
"UPDATE ONLY %(schema)s.treeitem SET type = 'layerv1' "
"WHERE type='layer'" % {'schema': schema}
)
op.execute(
'INSERT INTO %(schema)s.layerv1 ('
'id, is_checked, icon, layer_type, url, image_type, style, dimensions, matrix_set, '
'wms_url, wms_layers, query_layers, kml, is_single_tile, legend, '
'legend_image, legend_rule, is_legend_expanded, min_resolution, max_resolution, '
'disclaimer, identifier_attribute_field, exclude_properties, time_mode) '
'(SELECT '
'id, "isChecked" AS is_checked, icon, "layerType" AS layer_type, url, '
'"imageType" AS image_type, style, dimensions, "matrixSet" AS matrix_set, '
'"wmsUrl" AS wms_url, "wmsLayers" AS wms_layers, "queryLayers" AS query_layers, kml, '
'"isSingleTile" AS is_single_tile, legend, "legendImage" AS legend_image, '
'"legendRule" AS legend_rule, "isLegendExpanded" AS is_legend_expanded, '
'"minResolution" AS min_resolution, "maxResolution" AS max_resolution, disclaimer, '
'"identifierAttributeField" AS identifier_attribute_field, '
'"excludeProperties" AS exclude_properties, "timeMode" AS time_mode '
'FROM %(schema)s.layer)' % {'schema': schema}
)
op.drop_column('layer', 'isChecked', schema=schema)
op.drop_column('layer', 'icon', schema=schema)
op.drop_column('layer', 'layerType', schema=schema)
op.drop_column('layer', 'url', schema=schema)
op.drop_column('layer', 'imageType', schema=schema)
op.drop_column('layer', 'style', schema=schema)
op.drop_column('layer', 'dimensions', schema=schema)
op.drop_column('layer', 'matrixSet', schema=schema)
op.drop_column('layer', 'wmsUrl', schema=schema)
op.drop_column('layer', 'wmsLayers', schema=schema)
op.drop_column('layer', 'queryLayers', schema=schema)
op.drop_column('layer', 'kml', schema=schema)
op.drop_column('layer', 'isSingleTile', schema=schema)
op.drop_column('layer', 'legend', schema=schema)
op.drop_column('layer', 'legendImage', schema=schema)
op.drop_column('layer', 'legendRule', schema=schema)
op.drop_column('layer', 'isLegendExpanded', schema=schema)
op.drop_column('layer', 'minResolution', schema=schema)
op.drop_column('layer', 'maxResolution', schema=schema)
op.drop_column('layer', 'disclaimer', schema=schema)
op.drop_column('layer', 'identifierAttributeField', schema=schema)
op.drop_column('layer', 'excludeProperties', schema=schema)
op.drop_column('layer', 'timeMode', schema=schema)
interface = Table(
'interface', MetaData(),
Column('name', Unicode),
schema=schema,
)
op.bulk_insert(interface, [
{'name': 'main'},
{'name': 'mobile'},
{'name': 'edit'},
{'name': 'routing'},
])
op.execute(
'INSERT INTO %(schema)s.interface_layer (layer_id, interface_id) '
'(SELECT l.id AS layer_id, i.id AS interface_id '
'FROM %(schema)s.layer AS l, %(schema)s.interface AS i '
'WHERE i.name in (\'main\', \'edit\', \'routing\') AND l."inDesktopViewer")' % {
'schema': schema
}
)
op.execute(
'INSERT INTO %(schema)s.interface_layer (layer_id, interface_id) '
'(SELECT l.id AS layer_id, i.id AS interface_id '
'FROM %(schema)s.layer AS l, %(schema)s.interface AS i '
'WHERE i.name = \'mobile\' AND l."inMobileViewer")' % {'schema': schema}
)
op.execute(
'INSERT INTO %(schema)s.interface_theme (theme_id, interface_id) '
'(SELECT l.id AS theme_id, i.id AS interface_id '
'FROM %(schema)s.theme AS l, %(schema)s.interface AS i '
'WHERE i.name in (\'main\', \'edit\', \'routing\') AND l."inDesktopViewer")' % {
'schema': schema
}
)
op.execute(
'INSERT INTO %(schema)s.interface_theme (theme_id, interface_id) '
'(SELECT l.id AS theme_id, i.id AS interface_id '
'FROM %(schema)s.theme AS l, %(schema)s.interface AS i '
'WHERE i.name = \'mobile\' AND l."inMobileViewer")' % {'schema': schema}
)
op.drop_column('layer', 'inMobileViewer', schema=schema)
op.drop_column('layer', 'inDesktopViewer', schema=schema)
op.alter_column('layer', 'geoTable', new_column_name='geo_table', schema=schema)
op.drop_column('theme', 'inMobileViewer', schema=schema)
op.drop_column('theme', 'inDesktopViewer', schema=schema)
op.alter_column('treeitem', 'metadataURL', new_column_name='metadata_url', schema=schema)
op.alter_column('layergroup', 'isExpanded', new_column_name='is_expanded', schema=schema)
op.alter_column('layergroup', 'isInternalWMS', new_column_name='is_internal_wms', schema=schema)
op.alter_column('layergroup', 'isBaseLayer', new_column_name='is_base_layer', schema=schema)
op.create_table(
'layer_internal_wms',
Column(
'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
),
Column('layer', Unicode),
Column('image_type', Unicode(10)),
Column('style', Unicode),
Column('time_mode', Unicode(8)),
schema=schema,
)
op.create_table(
'layer_external_wms',
Column(
'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
),
Column('url', Unicode),
Column('layer', Unicode),
Column('image_type', Unicode(10)),
Column('style', Unicode),
Column('is_single_tile', Boolean),
Column('time_mode', Unicode(8)),
schema=schema,
)
op.create_table(
'layer_wmts',
Column(
'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True,
),
Column('url', Unicode),
Column('layer', Unicode),
Column('style', Unicode),
Column('matrix_set', Unicode),
schema=schema,
)
op.create_table(
'ui_metadata',
Column(
'id', Integer, primary_key=True
),
Column('name', Unicode),
Column('value', Unicode),
Column('description', Unicode),
Column('item_id', Integer, ForeignKey(schema + '.treeitem.id'), nullable=False),
schema=schema,
)
op.create_table(
'wmts_dimension',
Column(
'id', Integer, primary_key=True
),
Column('name', Unicode),
Column('value', Unicode),
Column('description', Unicode),
Column('layer_id', Integer, ForeignKey(schema + '.layer_wmts.id'), nullable=False),
schema=schema,
)
def downgrade():
schema = context.get_context().config.get_main_option('schema')
op.drop_table('wmts_dimension', schema=schema)
op.drop_table('ui_metadata', schema=schema)
op.drop_table('layer_wmts', schema=schema)
op.drop_table('layer_external_wms', schema=schema)
op.drop_table('layer_internal_wms', schema=schema)
op.add_column('layer', Column('inMobileViewer', Boolean, default=False), schema=schema)
op.add_column('layer', Column('inDesktopViewer', Boolean, default=True), schema=schema)
op.alter_column('layer', 'geo_table', new_column_name='geoTable', schema=schema)
op.add_column('theme', Column('inMobileViewer', Boolean, default=False), schema=schema)
op.add_column('theme', Column('inDesktopViewer', Boolean, default=True), schema=schema)
op.alter_column('treeitem', 'metadata_url', new_column_name='metadataURL', schema=schema)
op.alter_column('layergroup', 'is_expanded', new_column_name='isExpanded', schema=schema)
op.alter_column('layergroup', 'is_internal_wms', new_column_name='isInternalWMS', schema=schema)
op.alter_column('layergroup', 'is_base_layer', new_column_name='isBaseLayer', schema=schema)
op.execute(
'UPDATE ONLY %(schema)s.theme AS t '
'SET "inDesktopViewer" = FALSE' % {'schema': schema}
)
op.execute(
'UPDATE ONLY %(schema)s.layer AS t '
'SET "inDesktopViewer" = FALSE' % {'schema': schema}
)
op.execute(
'UPDATE ONLY %(schema)s.theme AS t '
'SET "inMobileViewer" = TRUE '
'FROM %(schema)s.interface AS i, %(schema)s.interface_theme AS it '
'WHERE i.name = \'mobile\' AND i.id = it.interface_id AND it.theme_id = t.id' % {
'schema': schema
}
)
op.execute(
'UPDATE ONLY %(schema)s.theme AS t '
'SET "inDesktopViewer" = TRUE '
'FROM %(schema)s.interface AS i, %(schema)s.interface_theme AS it '
'WHERE i.name = \'main\' AND i.id = it.interface_id AND it.theme_id = t.id' % {
'schema': schema
}
)
op.execute(
'UPDATE ONLY %(schema)s.layer AS l '
'SET "inMobileViewer" = TRUE '
'FROM %(schema)s.interface AS i, %(schema)s.interface_layer AS il '
'WHERE i.name = \'mobile\' AND i.id = il.interface_id AND il.layer_id = l.id' % {
'schema': schema
}
)
op.execute(
'UPDATE ONLY %(schema)s.layer AS l '
'SET "inDesktopViewer" = TRUE '
'FROM %(schema)s.interface AS i, %(schema)s.interface_layer AS il '
'WHERE i.name = \'main\' AND i.id = il.interface_id AND il.layer_id = l.id' % {
'schema': schema
}
)
op.add_column('layer', Column('timeMode', Unicode(8)), schema=schema)
op.add_column('layer', Column('excludeProperties', Unicode), schema=schema)
op.add_column('layer', Column('identifierAttributeField', Unicode), schema=schema)
op.add_column('layer', Column('disclaimer', Unicode), schema=schema)
op.add_column('layer', Column('maxResolution', Float), schema=schema)
op.add_column('layer', Column('minResolution', Float), schema=schema)
op.add_column('layer', Column('isLegendExpanded', Boolean, default=False), schema=schema)
op.add_column('layer', Column('legendRule', Unicode), schema=schema)
op.add_column('layer', Column('legendImage', Unicode), schema=schema)
op.add_column('layer', Column('legend', Boolean, default=True), schema=schema)
op.add_column('layer', Column('isSingleTile', Boolean, default=False), schema=schema)
op.add_column('layer', Column('kml', Unicode), schema=schema)
op.add_column('layer', Column('queryLayers', Unicode), schema=schema)
op.add_column('layer', Column('wmsLayers', Unicode), schema=schema)
op.add_column('layer', Column('wmsUrl', Unicode), schema=schema)
op.add_column('layer', Column('matrixSet', Unicode), schema=schema)
op.add_column('layer', Column('dimensions', Unicode), schema=schema)
op.add_column('layer', Column('style', Unicode), schema=schema)
op.add_column('layer', Column('imageType', Unicode(10)), schema=schema)
op.add_column('layer', Column('url', Unicode), schema=schema)
op.add_column('layer', Column('layerType', Unicode(12)), schema=schema)
op.add_column('layer', Column('icon', Unicode), schema=schema)
op.add_column('layer', Column('isChecked', Boolean, default=True), schema=schema)
op.execute(
'UPDATE %(schema)s.layer AS l SET ('
'id, "isChecked", icon, "layerType", url, "imageType", style, dimensions, "matrixSet", '
'"wmsUrl", "wmsLayers", "queryLayers", kml, "isSingleTile", legend, "legendImage", '
'"legendRule", "isLegendExpanded", "minResolution", "maxResolution", disclaimer, '
'"identifierAttributeField", "excludeProperties", "timeMode"'
') = ('
'o.id, o.is_checked, o.icon, o.layer_type, o.url, o.image_type, o.style, o.dimensions, '
'o.matrix_set, o.wms_url, o.wms_layers, o.query_layers, o.kml, o.is_single_tile, '
'o.legend, o.legend_image, o.legend_rule, o.is_legend_expanded, o.min_resolution, '
'o.max_resolution, o.disclaimer, o.identifier_attribute_field, o.exclude_properties, '
'o.time_mode '
') FROM %(schema)s.layerv1 AS o WHERE o.id = l.id' % {'schema': schema}
)
op.drop_table('layerv1', schema=schema)
op.drop_table('interface_theme', schema=schema)
op.drop_table('interface_layer', schema=schema)
op.drop_table('interface', schema=schema)
op.create_table(
'user_functionality',
Column(
'user_id', Integer,
ForeignKey(schema + '.user.id'), primary_key=True
),
Column(
'functionality_id', Integer,
ForeignKey(schema + '.functionality.id'), primary_key=True
),
schema=schema,
)
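# Hedged sketch (illustration only, not part of the original migration): applying this
# revision programmatically through Alembic's command API; the "alembic.ini" path is an
# assumption about the project layout.
if __name__ == '__main__':
    from alembic import command
    from alembic.config import Config
    cfg = Config('alembic.ini')
    command.upgrade(cfg, revision)  # or command.downgrade(cfg, down_revision)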
| 41.555556
| 100
| 0.644742
|
e486effd351757fcc0884067943b4129ba26f878
| 1,694
|
py
|
Python
|
scripts/export-dos-opportunities.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 1
|
2020-06-23T01:55:31.000Z
|
2020-06-23T01:55:31.000Z
|
scripts/export-dos-opportunities.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 267
|
2015-10-12T12:43:52.000Z
|
2021-08-19T10:38:55.000Z
|
scripts/export-dos-opportunities.py
|
alphagov-mirror/digitalmarketplace-scripts
|
8a7ef9b2b5f5fffea6e012bd676b095a27d35101
|
[
"MIT"
] | 7
|
2015-11-11T16:47:41.000Z
|
2021-04-10T18:03:04.000Z
|
#!/usr/bin/env python3
"""Generate DOS opportunity data export CSV
Loads data from the Brief and BriefResponse API models, filters for
closed/awarded briefs and stores the output in the CSV.
This script generates two CSVs, one with buyer user details and one without.
The CSV without buyer user details is made publically available by uploading to
the communications bucket, the CSV with buyer user details should be available
to admins only so it is uploaded to the reports bucket.
Usage:
scripts/export-dos-opportunities.py [options] <stage>
Options:
-h --help Show this screen.
-v --verbose Print apiclient INFO messages.
--dry-run Generate the file but do not upload to S3
--output-dir=<output_dir> Directory to write csv files to [default: data]
"""
import sys
sys.path.insert(0, '.')
from docopt import docopt
from dmapiclient import DataAPIClient
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.logging_helpers import logging, configure_logger
from dmscripts.export_dos_opportunities import export_dos_opportunities
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == '__main__':
arguments = docopt(__doc__)
STAGE = arguments['<stage>']
OUTPUT_DIR = arguments['--output-dir']
DRY_RUN = arguments['--dry-run']
logging_config = {
'dmapiclient': logging.INFO} if bool(arguments.get('--verbose')) \
else {'dmapiclient': logging.WARNING}
logger = configure_logger(logging_config)
client = DataAPIClient(get_api_endpoint_from_stage(STAGE), get_auth_token('api', STAGE))
export_dos_opportunities(client, logger, STAGE, OUTPUT_DIR, DRY_RUN)
| 33.88
| 92
| 0.753837
|
3832ea4c713b1d009149a89fe5add279d5d6762a
| 363
|
py
|
Python
|
src/rst2text/__main__.py
|
1oglop1/rst2text
|
730b1bf5b9f7d9c223e0252837f37ce6e51b751e
|
[
"MIT"
] | null | null | null |
src/rst2text/__main__.py
|
1oglop1/rst2text
|
730b1bf5b9f7d9c223e0252837f37ce6e51b751e
|
[
"MIT"
] | null | null | null |
src/rst2text/__main__.py
|
1oglop1/rst2text
|
730b1bf5b9f7d9c223e0252837f37ce6e51b751e
|
[
"MIT"
] | null | null | null |
"""
Entrypoint module, in case you use `python -mrst2text`.
Why does this file exist, and why __main__? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/2/using/cmdline.html#cmdoption-m
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
from rst2text.cli import main
if __name__ == "__main__":
main()
| 24.2
| 64
| 0.716253
|
d2b64f99c6695fed163a22dce173932f01aed20e
| 4,737
|
py
|
Python
|
axelrod/tests/integration/test_filtering.py
|
nandhinianandj/Axelrod
|
379b907d64c51816a50abfd8480240276c893953
|
[
"MIT"
] | 596
|
2015-03-30T17:34:14.000Z
|
2022-03-21T19:32:38.000Z
|
axelrod/tests/integration/test_filtering.py
|
nandhinianandj/Axelrod
|
379b907d64c51816a50abfd8480240276c893953
|
[
"MIT"
] | 1,018
|
2015-03-30T14:57:33.000Z
|
2022-03-14T14:57:48.000Z
|
axelrod/tests/integration/test_filtering.py
|
nandhinianandj/Axelrod
|
379b907d64c51816a50abfd8480240276c893953
|
[
"MIT"
] | 263
|
2015-03-31T10:26:28.000Z
|
2022-03-29T09:26:02.000Z
|
import unittest
import warnings
import axelrod as axl
from axelrod.tests.property import strategy_lists
from hypothesis import example, given, settings
from hypothesis.strategies import integers, lists, sampled_from
def classifiers_lists(min_size=1, max_size=5):
"""
    A function to return a hypothesis strategy that generates lists of classifiers.
Parameters
----------
min_size : integer
The minimum number of classifiers to include
max_size : integer
The maximum number of classifiers to include
"""
classifier_list = [
"stochastic",
"long_run_time",
"manipulates_state",
"manipulates_source",
"inspects_source",
]
classifiers = lists(
sampled_from(classifier_list), min_size=min_size, max_size=max_size
)
return classifiers
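def _example_classifier_sample():
    """Hedged helper (illustration only, not part of the Axelrod test suite): draw one
    sample from the strategy for interactive exploration. Hypothesis's ``.example()``
    exists for exactly this, though it is discouraged inside real tests."""
    return classifiers_lists(min_size=2, max_size=3).example()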
class TestFiltersAgainstComprehensions(unittest.TestCase):
"""
Test that the results of filtering strategies via a filterset dict
match the results from using a list comprehension.
"""
def setUp(self) -> None:
# Ignore warnings about classifiers running on instances
warnings.simplefilter("ignore", category=UserWarning)
def tearDown(self) -> None:
warnings.simplefilter("default", category=UserWarning)
@settings(deadline=None)
@given(
strategies=strategy_lists(min_size=20, max_size=20),
classifiers=classifiers_lists(),
)
@example(
strategies=[axl.DBS, axl.Cooperator], classifiers=["long_run_time"]
)
def test_boolean_filtering(self, strategies, classifiers):
comprehension, filterset = strategies, {}
for classifier in classifiers:
comprehension = set(filter(axl.Classifiers[classifier], strategies))
filterset = {classifier: True}
filtered = set(
axl.filtered_strategies(filterset, strategies=strategies)
)
self.assertEqual(comprehension, filtered)
@given(
min_memory_depth=integers(min_value=1, max_value=10),
max_memory_depth=integers(min_value=1, max_value=10),
memory_depth=integers(min_value=1, max_value=10),
strategies=strategy_lists(min_size=20, max_size=20),
)
@example(
min_memory_depth=float("inf"),
max_memory_depth=float("inf"),
memory_depth=float("inf"),
strategies=axl.short_run_time_strategies,
)
@settings(max_examples=5, deadline=None)
def test_memory_depth_filtering(
self, min_memory_depth, max_memory_depth, memory_depth, strategies
):
min_comprehension = set(
[
s
for s in strategies
if axl.Classifiers["memory_depth"](s) >= min_memory_depth
]
)
min_filterset = {"min_memory_depth": min_memory_depth}
min_filtered = set(
axl.filtered_strategies(min_filterset, strategies=strategies)
)
self.assertEqual(min_comprehension, min_filtered)
max_comprehension = set(
[
s
for s in strategies
if axl.Classifiers["memory_depth"](s) <= max_memory_depth
]
)
max_filterset = {"max_memory_depth": max_memory_depth}
max_filtered = set(
axl.filtered_strategies(max_filterset, strategies=strategies)
)
self.assertEqual(max_comprehension, max_filtered)
comprehension = set(
[
s
for s in strategies
if axl.Classifiers["memory_depth"](s) == memory_depth
]
)
filterset = {"memory_depth": memory_depth}
filtered = set(
axl.filtered_strategies(filterset, strategies=strategies)
)
self.assertEqual(comprehension, filtered)
@given(strategies=strategy_lists(min_size=20, max_size=20))
@settings(max_examples=5, deadline=None)
def test_makes_use_of_filtering(self, strategies):
"""
Test equivalent filtering using two approaches.
"""
classifiers = [["game"], ["length"], ["game", "length"]]
for classifier in classifiers:
comprehension = set(
[
s
for s in strategies
if set(classifier).issubset(
set(axl.Classifiers["makes_use_of"](s))
)
]
)
filterset = {"makes_use_of": classifier}
filtered = set(
axl.filtered_strategies(filterset, strategies=strategies)
)
self.assertEqual(
comprehension, filtered, msg="classifier: {}".format(classifier)
)
| 31.58
| 80
| 0.610091
|
9c34799279023f2497ddaff2cf479492b85d1fce
| 2,983
|
py
|
Python
|
tests/unit/algorithms/TransientAdvectionDiffusionTest.py
|
Jimmy-INL/OpenPNM
|
1546fa1ac2204443bde916f2037fac383c5069ae
|
[
"MIT"
] | 1
|
2020-06-08T19:48:00.000Z
|
2020-06-08T19:48:00.000Z
|
tests/unit/algorithms/TransientAdvectionDiffusionTest.py
|
Jimmy-INL/OpenPNM
|
1546fa1ac2204443bde916f2037fac383c5069ae
|
[
"MIT"
] | null | null | null |
tests/unit/algorithms/TransientAdvectionDiffusionTest.py
|
Jimmy-INL/OpenPNM
|
1546fa1ac2204443bde916f2037fac383c5069ae
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
import openpnm as op
class TransientAdvectionDiffusionTest:
def setup_class(self):
np.random.seed(0)
self.net = op.network.Cubic(shape=[4, 3, 1], spacing=1.0)
self.geo = op.geometry.GenericGeometry(network=self.net,
pores=self.net.Ps,
throats=self.net.Ts)
self.phase = op.phases.GenericPhase(network=self.net)
self.phys = op.physics.GenericPhysics(network=self.net,
phase=self.phase,
geometry=self.geo)
self.phys['throat.diffusive_conductance'] = 1e-15
self.phys['throat.hydraulic_conductance'] = 1e-15
self.geo['pore.volume'] = 1e-27
self.geo['throat.conduit_lengths.pore1'] = 0.1
self.geo['throat.conduit_lengths.throat'] = 0.6
self.geo['throat.conduit_lengths.pore2'] = 0.1
def test_transient_advection_diffusion(self):
sf = op.algorithms.StokesFlow(network=self.net, phase=self.phase)
sf.setup(quantity='pore.pressure',
conductance='throat.hydraulic_conductance')
sf.set_value_BC(pores=self.net.pores('back'), values=1)
sf.set_value_BC(pores=self.net.pores('front'), values=0)
sf.run()
self.phase[sf.settings['quantity']] = sf[sf.settings['quantity']]
mod = op.models.physics.ad_dif_conductance.ad_dif
self.phys.add_model(propname='throat.ad_dif_conductance', model=mod,
s_scheme='powerlaw')
self.phys.regenerate_models()
ad = op.algorithms.TransientAdvectionDiffusion(network=self.net,
phase=self.phase)
ad.setup(phase=self.phase, quantity='pore.concentration',
conductance='throat.ad_dif_conductance',
diffusive_conductance='throat.diffusive_conductance',
hydraulic_conductance='throat.hydraulic_conductance',
pressure='pore.pressure', t_initial=0, t_final=100, t_step=1,
t_output=50, t_tolerance=1e-20, t_precision=12,
s_scheme='implicit')
ad.set_IC(0)
ad.set_value_BC(pores=self.net.pores('back'), values=2)
ad.set_value_BC(pores=self.net.pores('front'), values=0)
ad.run()
x = [0., 0., 0.,
0.89653, 0.89653, 0.89653,
1.53924, 1.53924, 1.53924,
2., 2., 2.]
y = np.around(ad[ad.settings['quantity']], decimals=5)
assert np.all(x == y)
def teardown_class(self):
ws = op.Workspace()
ws.clear()
if __name__ == '__main__':
t = TransientAdvectionDiffusionTest()
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
self = t
| 39.773333
| 78
| 0.573584
|
8c60aa0cc192fe4fb0f1695abf7f5f15c83739b4
| 3,144
|
py
|
Python
|
CIM16/IEC61970/Informative/InfLocations/LocationGrant.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Informative/InfLocations/LocationGrant.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Informative/InfLocations/LocationGrant.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 1
|
2021-04-02T18:04:49.000Z
|
2021-04-02T18:04:49.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61968.Common.Agreement import Agreement
class LocationGrant(Agreement):
    """A grant provides a right, as defined by type, for a parcel of land. Note that the association to Location, Asset, Organisation, etc. for the Grant is inherited from Agreement, a type of Document.
    """
def __init__(self, propertyData='', LandProperty=None, *args, **kw_args):
"""Initialises a new 'LocationGrant' instance.
@param propertyData: Property related information that describes the Grant's land parcel. For example, it may be a deed book number, deed book page number, and parcel number.
@param LandProperty: Land property this location grant applies to.
"""
#: Property related information that describes the Grant's land parcel. For example, it may be a deed book number, deed book page number, and parcel number.
self.propertyData = propertyData
self._LandProperty = None
self.LandProperty = LandProperty
super(LocationGrant, self).__init__(*args, **kw_args)
_attrs = ["propertyData"]
_attr_types = {"propertyData": str}
_defaults = {"propertyData": ''}
_enums = {}
_refs = ["LandProperty"]
_many_refs = []
def getLandProperty(self):
"""Land property this location grant applies to.
"""
return self._LandProperty
def setLandProperty(self, value):
if self._LandProperty is not None:
filtered = [x for x in self.LandProperty.LocationGrants if x != self]
self._LandProperty._LocationGrants = filtered
self._LandProperty = value
if self._LandProperty is not None:
if self not in self._LandProperty._LocationGrants:
self._LandProperty._LocationGrants.append(self)
LandProperty = property(getLandProperty, setLandProperty)
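# Hedged sketch (illustration only, assuming Agreement's defaults are sufficient and
# that a ``land`` LandProperty instance exists elsewhere):
#
#   grant = LocationGrant(propertyData="Deed book 12, page 34, parcel 7")
#   grant.LandProperty = land    # also appends the grant to land.LocationGrants
#   grant.LandProperty = None    # and this removes it from land.LocationGrants again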
| 48.369231
| 397
| 0.724237
|
a1b43197d4ca5dd3de486f2766065c87fb56e7b4
| 5,487
|
py
|
Python
|
tests/integration_tests/test_integration.py
|
JimmyBoyle/FeatureToggles
|
7e6ec7619764ee93882fb9c07f7076e076023eb9
|
[
"MIT"
] | null | null | null |
tests/integration_tests/test_integration.py
|
JimmyBoyle/FeatureToggles
|
7e6ec7619764ee93882fb9c07f7076e076023eb9
|
[
"MIT"
] | 1
|
2018-11-02T22:00:08.000Z
|
2018-11-02T22:00:08.000Z
|
tests/integration_tests/test_integration.py
|
JimmyBoyle/FeatureToggles
|
7e6ec7619764ee93882fb9c07f7076e076023eb9
|
[
"MIT"
] | null | null | null |
import boto3
import json
import lambda_functions
import sys
lambda_client = boto3.client('lambda')
def test_basic_load():
updates = {
"operator_id": "tester",
"updates": [
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension1",
"value": True
},
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension2",
"value": False
},
{
"action": "SET",
"toggle_name": "feature2",
"dimension": "dimension3",
"value": True
}
]
}
expected = {
'feature_toggles': {
'feature1': {
'dimension1': True,
'dimension2': False
},
'feature2': {
'dimension3': True
}
}
}
_update_toggles(updates)
assert _load_toggles() == expected
_clear_toggles(expected)
def test_clear_one():
before = {
"operator_id": "tester",
"updates": [
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension1",
"value": True
},
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension2",
"value": False
},
{
"action": "SET",
"toggle_name": "feature2",
"dimension": "dimension3",
"value": True
}
]
}
_update_toggles(before)
updates = {
"operator_id": "tester",
"updates": [
{
"action": "CLEAR",
"toggle_name": "feature1",
"dimension": "dimension1"
}
]
}
expected = {
'feature_toggles': {
'feature1': {
'dimension2': False
},
'feature2': {
'dimension3': True
}
}
}
_update_toggles(updates)
assert _load_toggles() == expected
_clear_toggles(expected)
def test_clear_all():
before = {
"operator_id": "tester",
"updates": [
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension1",
"value": True
},
{
"action": "SET",
"toggle_name": "feature1",
"dimension": "dimension2",
"value": False
},
{
"action": "SET",
"toggle_name": "feature2",
"dimension": "dimension3",
"value": True
}
]
}
_update_toggles(before)
updates = {
"operator_id": "tester",
"updates": [
{
"action": "CLEAR_ALL",
"toggle_name": "feature1"
}
]
}
expected = {
'feature_toggles': {
'feature2': {
'dimension3': True
}
}
}
_update_toggles(updates)
assert _load_toggles() == expected
_clear_toggles(expected)
def test_bad_action():
updates = {
"operator_id": "tester",
"updates": [
{
"action": "SETS",
"toggle_name": "feature1",
"dimension": "dimension1",
"value": True
}
]
}
result = _update_toggles(updates)
assert 'errorMessage' in result
assert 'ValidationError' == result['errorType']
def test_missing_dimension():
updates = {
"operator_id": "tester",
"updates": [
{
'action': 'SET',
'toggle_name': 't1',
'value': True,
}
]
}
result = _update_toggles(updates)
assert 'errorMessage' in result
assert 'ValidationError' == result['errorType']
def test_missing_action():
updates = {
"operator_id": "tester",
"updates": [
{
'toggle_name': 't1',
'value': True,
}
]
}
result = _update_toggles(updates)
assert 'errorMessage' in result
assert 'ValidationError' == result['errorType']
def _load_toggles():
res = lambda_client.invoke(
FunctionName=lambda_functions.load_function,
InvocationType='RequestResponse'
)
return json.loads(res['Payload'].read().decode("utf-8"))
def _update_toggles(updates):
print(lambda_functions.update_function)
res = lambda_client.invoke(
FunctionName=lambda_functions.update_function,
InvocationType='RequestResponse',
Payload=json.dumps(updates)
)
return json.loads(res['Payload'].read().decode("utf-8"))
def _clear_toggles(current_toggles):
updates = {
"operator_id": "tester",
"updates": []
}
for toggle_name in current_toggles['feature_toggles']:
updates['updates'].append(
{
"action": "CLEAR_ALL",
"toggle_name": toggle_name
}
)
_update_toggles(updates)
assert _load_toggles() == {'feature_toggles': {}}
| 24.278761
| 60
| 0.446692
|
9f28a123848cd59592f4ab2a4ba4e8be20ca9391
| 1,829
|
py
|
Python
|
apiV1/routes/driver.py
|
DerKip/Ride-My-Way
|
30600349b4225272c4b6e78851c1dce96e586d31
|
[
"MIT"
] | 1
|
2021-04-06T07:18:06.000Z
|
2021-04-06T07:18:06.000Z
|
apiV1/routes/driver.py
|
DerKip/Ride-My-Way
|
30600349b4225272c4b6e78851c1dce96e586d31
|
[
"MIT"
] | 13
|
2018-06-13T05:09:35.000Z
|
2019-10-21T16:15:50.000Z
|
apiV1/routes/driver.py
|
DerKip/Ride-My-Way
|
30600349b4225272c4b6e78851c1dce96e586d31
|
[
"MIT"
] | 1
|
2019-05-08T14:40:51.000Z
|
2019-05-08T14:40:51.000Z
|
from flask import Blueprint, jsonify, request, Response
from ..app.controllers import registration_controller, rides_controller
from ..models.rides import all_ride_offers
from utils import JSON_MIME_TYPE, json_response
driver_route = Blueprint("route_driver",__name__)
@driver_route.route('/register', methods=['POST'])
def register_driver():
"""Driver registration endpoint"""
if request.content_type != JSON_MIME_TYPE:
error = jsonify({'error': 'Invalid Content Type'})
return json_response(error, 400)
return registration_controller.register_new_driver()
@driver_route.route('/logout', methods=['DELETE'])
def logout_driver():
"""Driver logout endpoint"""
return jsonify({"message":"Successfully logged out"}),200
@driver_route.route('/create_ride', methods=['POST'])
def create_ride():
"""Create ride offer endpoint"""
if request.content_type != JSON_MIME_TYPE:
error = jsonify({'error': 'Invalid Content Type'})
return json_response(error, 400)
return rides_controller.create_new_ride_offer()
@driver_route.route('/rides', methods=['GET'])
def get_all_ride_offers():
"""GET all ride offers endpoint"""
rides = jsonify(all_ride_offers)
return json_response(rides,200)
@driver_route.route('/rides/<int:id>', methods=['GET'])
def get_single_ride_offer(id):
"""GET single ride offer endpoint"""
single_ride_offer = [offer for offer in all_ride_offers if offer['id'] == id]
return jsonify({"Your Ride Offer:":single_ride_offer}),200
@driver_route.route('/rides/<string:driver>', methods=['GET'])
def get_all_my_ride_offers(driver):
"""GET all my ride offers endpoint"""
my_rides =[rides for rides in all_ride_offers if rides["created_by"] == driver]
return jsonify({(("{} ride offers").format(driver)):my_rides})
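# Hedged sketch (not part of the original project): one way to mount this blueprint on
# an app for local testing; the url_prefix is an assumption.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(driver_route, url_prefix='/api/v1/driver')
    app.run(debug=True)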
| 35.862745
| 83
| 0.718972
|
b230d891805bd7cec875dae1d28fff1ec4770671
| 2,201
|
py
|
Python
|
ITP2/ITP2_10_D.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | 1
|
2020-01-08T16:33:46.000Z
|
2020-01-08T16:33:46.000Z
|
ITP2/ITP2_10_D.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
ITP2/ITP2_10_D.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
import sys
import io
import time
import pprint
input_txt = """
3
3 0 1 3
1 3
3 0 1 2
8
1 0
2 1
3 1
4 2
5 2
6 2
7 2
8 2
"""
sys.stdin = io.StringIO(input_txt);input()
#sys.stdin = open('in.test')
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
class BitFlag:
#ALL_ON = 0xffffffffffffffff
ALL_OFF = 0x0000000000000000
def __init__(self, mask_digits):
self.FLAGS = self.ALL_OFF
self.MaskFor1bit = [1 << i for i in range(64)]
self.Masks = [self.make_mask(digits) for digits in mask_digits]
@staticmethod
def make_mask(digits):
ret = 0
for digit in digits:
ret += 1 << digit
return ret
def _test(self, i):
return self.FLAGS & self.MaskFor1bit[i] != self.ALL_OFF
def _set(self, m):
self.FLAGS |= self.Masks[m]
def _clear(self, m):
self.FLAGS &= ~self.Masks[m]
def _flip(self, m):
self.FLAGS ^= self.Masks[m]
def _all(self, m):
return self.FLAGS & self.Masks[m] == self.Masks[m]
def _any(self, m):
return self.FLAGS & self.Masks[m] != self.ALL_OFF
def _none(self, m):
return self.FLAGS & self.Masks[m] == self.ALL_OFF
def _count(self, m):
return bin(self.FLAGS & self.Masks[m]).count('1')
def _val(self, m):
return self.FLAGS & self.Masks[m]
nm = int(input())
digits = []
for i in range(nm):
k, *arg = map(int, input().split())
digits.append(arg)
bf = BitFlag(digits)
commands = {'0': bf._test,
'1': bf._set,
'2': bf._clear,
'3': bf._flip,
'4': bf._all,
'5': bf._any,
'6': bf._none,
'7': bf._count,
'8': bf._val
}
qn = int(input())
lines = sys.stdin.readlines()
ans = [None] * qn
for i in range(qn):
q, arg = lines[i].split()
ans[i] = commands[q](int(arg))
[print(int(x)) for x in ans if x is not None]
# -----------------------------
print("elapsed:", time.time() - start)
sys.stdin = sys.__stdin__
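The BitFlag class above is self-contained, so it can also be exercised outside the AOJ input harness; a small sketch with arbitrarily chosen mask digits:
# Standalone use of the BitFlag class defined above; the mask digits are
# arbitrary illustration values, not part of the original problem input.
bf_demo = BitFlag([[0, 1, 3], [1, 3], [0, 1, 2]])
bf_demo._set(0)              # turn on bits 0, 1 and 3
print(bf_demo._test(1))      # True: bit 1 is on
print(bf_demo._count(2))     # 2: bits 0 and 1 of mask {0, 1, 2} are on
bf_demo._flip(1)             # toggle bits 1 and 3 off again
print(bf_demo._val(0))       # 1: only bit 0 remains set within mask 0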
| 21.163462
| 72
| 0.524307
|
862fc353c28cda53807147358ed0f087f58f4ca4
| 1,197
|
py
|
Python
|
profiles_api/serializers.py
|
khangnguyen211195/profiles-rest-api
|
b74577159874e100b06e682814b465ef175d7d2f
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
khangnguyen211195/profiles-rest-api
|
b74577159874e100b06e682814b465ef175d7d2f
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
khangnguyen211195/profiles-rest-api
|
b74577159874e100b06e682814b465ef175d7d2f
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length = 10)
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializes a user profile object"""
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password' : {
'write_only': True,
'style': {'input_type' : 'password'}
}
}
def create(self, validated_data):
"""Create and return a new user"""
user = models.UserProfile.objects.create_user(
email = validated_data['email'],
name = validated_data['name'],
password = validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializers profile feed items"""
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwargs = {'user_profile' : {'read_only':True}}
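For context, a hedged sketch of how UserProfileSerializer is typically driven to create a user; the payload values are invented, and a configured Django project with a database is assumed.
# Hypothetical usage of UserProfileSerializer; payload values are invented and
# a configured Django settings module plus database are assumed.
serializer = UserProfileSerializer(data={
    'email': 'test@example.com',
    'name': 'Test User',
    'password': 's3cret-pass',
})
if serializer.is_valid():
    user = serializer.save()   # dispatches to create() above via create_user()
    print(user.id, user.email)
else:
    print(serializer.errors)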
| 29.925
| 68
| 0.621554
|
5f5bad289bafe45d52b9a87b7dd7279ef0ae305c
| 5,948
|
py
|
Python
|
jello/cli.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
jello/cli.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
jello/cli.py
|
roehling/jello
|
1073355e2bbfcd4f92af2584c9e539ce34859fc0
|
[
"MIT"
] | null | null | null |
"""jello - query JSON at the command line with python syntax"""
import os
import sys
import textwrap
import signal
import jello
from jello.lib import opts, load_json, pyquery, Schema, Json
def ctrlc(signum, frame):
"""exit with error on SIGINT"""
sys.exit(1)
def get_stdin():
"""return STDIN data"""
if sys.stdin.isatty():
return None
else:
return sys.stdin.read()
def print_help():
print(textwrap.dedent('''\
jello: query JSON at the command line with python syntax
Usage: cat data.json | jello [OPTIONS] [QUERY]
-c compact JSON output
-i initialize environment with .jelloconf.py in ~ (linux) or %appdata% (Windows)
-l output as lines suitable for assignment to a bash array
-m monochrome output
-n print selected null values
-r raw string output (no quotes)
-s print the JSON schema in grep-able format
-t print type annotations in schema view
-v version info
-h help
Use '_' as the input data and use python dict and list bracket syntax or dot notation.
Examples:
cat data.json | jello _.foo
cat data.json | jello '_["foo"]'
variable=($(cat data.json | jello -l _.foo))
'''))
sys.exit()
def print_error(message):
"""print error messages to STDERR and quit with error code"""
print(message, file=sys.stderr)
sys.exit(1)
def print_exception(e=None, list_dict_data='', query='', response='', ex_type='Runtime'):
list_dict_data = str(list_dict_data).replace('\n', '\\n')
query = str(query).replace('\n', '\\n')
response = str(response).replace('\n', '\\n')
e_text = ''
if hasattr(e, 'text'):
e_text = e.text.replace('\n', '')
if len(list_dict_data) > 70:
list_dict_data = list_dict_data[:34] + ' ... ' + list_dict_data[-34:]
if len(query) > 70:
query = query[:34] + ' ... ' + query[-34:]
if len(response) > 70:
response = response[:34] + ' ... ' + response[-34:]
exception_message = f'jello: {ex_type} Exception: {e.__class__.__name__}\n'
ex_map = {
'query': query,
'data': list_dict_data,
'response': response
}
exception_message += f' {e}\n'
if e_text:
exception_message += f' {e_text}\n'
for item_name, item in ex_map.items():
if item:
exception_message += f' {item_name}: {item}\n'
print(exception_message, file=sys.stderr)
sys.exit(1)
def main(data=None, query='_'):
# break on ctrl-c keyboard interrupt
signal.signal(signal.SIGINT, ctrlc)
# break on pipe error. need try/except for windows compatibility
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
# enable colors for Windows cmd.exe terminal
if sys.platform.startswith('win32'):
os.system('')
if data is None:
data = get_stdin()
options = []
long_options = {}
for arg in sys.argv[1:]:
if arg.startswith('-') and not arg.startswith('--'):
options.extend(arg[1:])
elif arg.startswith('--'):
try:
k, v = arg[2:].split('=')
long_options[k] = int(v)
except Exception:
print_help()
else:
query = arg
opts.compact = opts.compact or 'c' in options
opts.initialize = opts.initialize or 'i' in options
opts.lines = opts.lines or 'l' in options
opts.mono = opts.mono or 'm' in options
opts.nulls = opts.nulls or 'n' in options
opts.raw = opts.raw or 'r' in options
opts.schema = opts.schema or 's' in options
opts.types = opts.types or 't' in options
opts.version_info = opts.version_info or 'v' in options
opts.helpme = opts.helpme or 'h' in options
if opts.helpme:
print_help()
if opts.version_info:
print(textwrap.dedent(f'''\
jello: Version: {jello.__version__}
Author: {jello.AUTHOR}
Website: {jello.WEBSITE}
Copyright: {jello.COPYRIGHT}
License: {jello.LICENSE}
'''))
sys.exit()
if data is None:
print_error('jello: missing piped JSON or JSON Lines data\n')
# only process if there is data
if data and not data.isspace():
# load the JSON or JSON Lines
list_dict_data = None
try:
list_dict_data = load_json(data)
except Exception as e:
msg = f'''JSON Load Exception: Cannot parse the data (Not valid JSON or JSON Lines)
{e}
'''
print_error(f'jello: {msg}')
# Read .jelloconf.py (if it exists) and run the query
response = ''
try:
response = pyquery(list_dict_data, query)
except Exception as e:
print_exception(e, list_dict_data, query, ex_type='Query')
# Create and print schema or JSON/JSON-Lines/Lines
output = ''
try:
if opts.schema:
schema = Schema()
output = schema.create_schema(response)
if not opts.mono and sys.stdout.isatty():
schema.set_colors()
output = schema.color_output(output)
else:
json_out = Json()
output = json_out.create_json(response)
if not opts.mono and not opts.raw and sys.stdout.isatty():
json_out.set_colors()
output = json_out.color_output(output)
print(output)
except Exception as e:
print_exception(e, list_dict_data, query, response, ex_type='Formatting')
if __name__ == '__main__':
main()
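The same helpers main() uses above can also be called programmatically; a hedged sketch (the sample JSON and query string are invented):
# Programmatic use of the helpers main() calls above; the JSON and query are invented.
from jello.lib import load_json, pyquery
sample = '{"foo": {"bar": [1, 2, 3]}}'
parsed = load_json(sample)                     # same parser main() applies to stdin
print(pyquery(parsed, '_["foo"]["bar"][1]'))   # expected output: 2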
| 29.156863
| 98
| 0.561533
|
a9b08e21bf2138a7f53246cef7d893882f651385
| 7,818
|
py
|
Python
|
tests/test_patches_chains.py
|
IzeBerg/python-wgus
|
c457af6ea57c40c7a2552fb69db1dd74bc6c9740
|
[
"MIT"
] | null | null | null |
tests/test_patches_chains.py
|
IzeBerg/python-wgus
|
c457af6ea57c40c7a2552fb69db1dd74bc6c9740
|
[
"MIT"
] | null | null | null |
tests/test_patches_chains.py
|
IzeBerg/python-wgus
|
c457af6ea57c40c7a2552fb69db1dd74bc6c9740
|
[
"MIT"
] | null | null | null |
import pytest
from wgus import PatchesChains, get_patches_chains
def test_parse():
data = """
<protocol name="patches_chain" version="1.9" wgc_publisher_id="wargaming,steam">
<patches_chain type="install">
<patch>
<files>
<file>
<name>wot_1.14.1.3252_ru_vtlj1d/wot_1.14.1.1663850_locale_ru.wgpkg</name>
<size>25326691</size>
<unpacked_size>31281758</unpacked_size>
</file>
</files>
<torrent>
<hash>245ffc7538bf793894491b17090d69e646b70825ba1de074679046da71283be8</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3252_ru_vtlj1d/wot_1.14.1.3252_ru.torrent</url>
</urls>
</torrent>
<part>locale</part>
<version_to>1.14.1.1663850</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_client.wgpkg.001</name>
<size>4194304000</size>
<unpacked_size>12957669925</unpacked_size>
</file>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_client.wgpkg.002</name>
<size>4184272193</size>
<unpacked_size>0</unpacked_size>
</file>
</files>
<torrent>
<hash>105b3d87dd8f08bc012800906ba1a467c5fef4ebaffa04902473677ef3d0c155</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.3186_ru.torrent</url>
</urls>
</torrent>
<part>client</part>
<version_to>1.14.1.21618</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3193_ru_la952m/wot_1.14.1.21624_1.14.1.21618_client.wgpkg</name>
<size>55703224</size>
<unpacked_size>131328708</unpacked_size>
<diffs_size>7941114817</diffs_size>
</file>
</files>
<torrent>
<hash>1ccb7ac1facfbf22b60989bb4a0376c1d0f7ea9fe35a6b9d63ea4b7ca4de87aa</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3193_ru_la952m/wot_1.14.1.3193_ru.torrent</url>
</urls>
</torrent>
<part>client</part>
<version_to>1.14.1.21624</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3239_ru_ib3c5x/wot_1.14.1.21637_1.14.1.21624_client.wgpkg</name>
<size>15395960</size>
<unpacked_size>20030646</unpacked_size>
<diffs_size>3766081307</diffs_size>
</file>
</files>
<torrent>
<hash>cf0bf16818a1e902fc94af4107664334616dca234df1d8960b7ca69fb7fdee4b</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3239_ru_ib3c5x/wot_1.14.1.3239_ru.torrent</url>
</urls>
</torrent>
<part>client</part>
<version_to>1.14.1.21637</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.21661_1.14.1.21637_client.wgpkg</name>
<size>594078157</size>
<unpacked_size>796205967</unpacked_size>
<diffs_size>9090130583</diffs_size>
</file>
</files>
<torrent>
<hash>eb758887e3497b911691fc69c25cf586927f0fe786cf32b66a55823c003a8702</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.3247_ru.torrent</url>
</urls>
</torrent>
<part>client</part>
<version_to>1.14.1.21661</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3266_ru_xb0636/wot_1.14.1.21675_1.14.1.21661_client.wgpkg</name>
<size>48174</size>
<unpacked_size>75218</unpacked_size>
<diffs_size>125343471</diffs_size>
</file>
</files>
<torrent>
<hash>b353aa48e8b46ed7502b25dbb4fc51dcfa380f676c450e0cd579e4e4bb116e73</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3266_ru_xb0636/wot_1.14.1.3266_ru.torrent</url>
</urls>
</torrent>
<part>client</part>
<version_to>1.14.1.21675</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_sdcontent.wgpkg.001</name>
<size>4194304000</size>
<unpacked_size>26552838287</unpacked_size>
</file>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_sdcontent.wgpkg.002</name>
<size>4194304000</size>
<unpacked_size>0</unpacked_size>
</file>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_sdcontent.wgpkg.003</name>
<size>1943655010</size>
<unpacked_size>0</unpacked_size>
</file>
</files>
<torrent>
<hash>105b3d87dd8f08bc012800906ba1a467c5fef4ebaffa04902473677ef3d0c155</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.3186_ru.torrent</url>
</urls>
</torrent>
<part>sdcontent</part>
<version_to>1.14.1.21618</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3193_ru_la952m/wot_1.14.1.21624_1.14.1.21618_sdcontent.wgpkg</name>
<size>135449</size>
<unpacked_size>188905</unpacked_size>
<diffs_size>1366361750</diffs_size>
</file>
</files>
<torrent>
<hash>1ccb7ac1facfbf22b60989bb4a0376c1d0f7ea9fe35a6b9d63ea4b7ca4de87aa</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3193_ru_la952m/wot_1.14.1.3193_ru.torrent</url>
</urls>
</torrent>
<part>sdcontent</part>
<version_to>1.14.1.21624</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.21661_1.14.1.21624_sdcontent.wgpkg</name>
<size>306984314</size>
<unpacked_size>786453093</unpacked_size>
<diffs_size>14466667087</diffs_size>
</file>
</files>
<torrent>
<hash>eb758887e3497b911691fc69c25cf586927f0fe786cf32b66a55823c003a8702</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.3247_ru.torrent</url>
</urls>
</torrent>
<part>sdcontent</part>
<version_to>1.14.1.21661</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_hdcontent.wgpkg.001</name>
<size>4194304000</size>
<unpacked_size>19862853844</unpacked_size>
</file>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_hdcontent.wgpkg.002</name>
<size>4194304000</size>
<unpacked_size>0</unpacked_size>
</file>
<file>
<name>wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.21618_hdcontent.wgpkg.003</name>
<size>3005599718</size>
<unpacked_size>0</unpacked_size>
</file>
</files>
<torrent>
<hash>105b3d87dd8f08bc012800906ba1a467c5fef4ebaffa04902473677ef3d0c155</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3186_ru_aa0jt9/wot_1.14.1.3186_ru.torrent</url>
</urls>
</torrent>
<part>hdcontent</part>
<version_to>1.14.1.21618</version_to>
</patch>
<patch>
<files>
<file>
<name>wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.21661_1.14.1.21618_hdcontent.wgpkg</name>
<size>369097545</size>
<unpacked_size>656771030</unpacked_size>
<diffs_size>13160407318</diffs_size>
</file>
</files>
<torrent>
<hash>eb758887e3497b911691fc69c25cf586927f0fe786cf32b66a55823c003a8702</hash>
<urls>
<url>https://dl-wot-gc.wargaming.net/ru/patches/wot_1.14.1.3247_ru_qd0mnp/wot_1.14.1.3247_ru.torrent</url>
</urls>
</torrent>
<part>hdcontent</part>
<version_to>1.14.1.21661</version_to>
</patch>
<web_seeds>
<url threads="3" name="G-Core">http://dl-wot-gc.wargaming.net/ru/patches/</url>
<url threads="1" name="Cedexis">http://dl-wot-cdx.wargaming.net/ru/patches/</url>
</web_seeds>
<meta_need_update>False</meta_need_update>
<version_name>1.14.1.3252</version_name>
</patches_chain>
</protocol>"""
chains = PatchesChains.parse(data)
assert chains
@pytest.mark.asyncio
async def test_get_patches_chains():
await get_patches_chains("wgus-wotru.wargaming.net", "WOT.RU.PRODUCTION")
| 32.040984
| 110
| 0.680353
|
1d8e9b0c45344da5cc18e06555c10b334404499b
| 3,970
|
py
|
Python
|
tests/test_builder.py
|
georgeyk/simple-model
|
95eaacb0da70364c1ee3aaf5e0d205f8e3255dfe
|
[
"MIT"
] | null | null | null |
tests/test_builder.py
|
georgeyk/simple-model
|
95eaacb0da70364c1ee3aaf5e0d205f8e3255dfe
|
[
"MIT"
] | null | null | null |
tests/test_builder.py
|
georgeyk/simple-model
|
95eaacb0da70364c1ee3aaf5e0d205f8e3255dfe
|
[
"MIT"
] | null | null | null |
import typing
import pytest
from simple_model.builder import model_builder, model_class_builder, model_many_builder
from simple_model import Model, to_dict
def test_model_class_builder():
Birl = model_class_builder('Birl', {'f': 'foo', 'b': 'bar'})
birl = Birl()
assert isinstance(birl, Model)
keys = ('f', 'b')
assert len(Birl._meta.fields) == len(keys)
assert set(Birl._meta.fields) == set(keys)
assert birl.validate(raise_exception=False) is True
assert to_dict(birl) == {'f': None, 'b': None}
def test_model_class_builder_empty_data():
Birl = model_class_builder('Birl', {})
birl = Birl()
assert isinstance(birl, Model)
def test_model_builder():
data = {
'foo': 'foo',
'bar': 'bar',
}
birl = model_builder(data, recurse=False)
assert birl.foo == 'foo'
assert birl.bar == 'bar'
assert type(birl).__name__ == 'MyModel'
def test_model_builder_class_name():
data = {
'foo': 'foo',
'bar': 'bar',
}
birl = model_builder(data, class_name='Birl', recurse=False)
assert birl.foo == 'foo'
assert birl.bar == 'bar'
assert type(birl).__name__ == 'Birl'
def test_model_builder_recurse_false():
my_model = {'baz': 'baz', 'qux': 'qux'}
data = {
'foo': 'foo',
'bar': 'bar',
'my_model': my_model,
}
birl = model_builder(data, recurse=False)
assert birl.foo == 'foo'
assert birl.bar == 'bar'
assert birl.my_model == my_model
def test_model_builder_recurse():
my_model = {'baz': 'baz', 'qux': 'qux'}
data = {
'foo': 'foo',
'bar': 'bar',
'my_model': my_model,
}
birl = model_builder(data)
assert birl.foo == 'foo'
assert birl.bar == 'bar'
assert birl.my_model.baz == 'baz'
assert birl.my_model.qux == 'qux'
assert type(birl.my_model).__name__ == 'MyModel'
assert type(birl.my_model) not in (Model, type(birl))
@pytest.mark.parametrize('iterable_class', (tuple, list))
def test_model_builder_recurse_iterable(iterable_class):
models = iterable_class([{'baz': 'baz', 'qux': 'qux'}, 1, 2])
data = {
'foo': 'foo',
'bar': 'bar',
'models': models,
}
birl = model_builder(data)
assert birl.foo == 'foo'
assert birl.bar == 'bar'
assert birl.models[0].baz == 'baz'
assert birl.models[0].qux == 'qux'
assert birl.models[1] == 1
assert birl.models[2] == 2
assert isinstance(birl.models[0], Model)
assert type(birl.models[0]).__name__ == 'NamelessModel'
def test_model_builder_data_keys_with_special_characters():
data = {
'foo*bar': 'foobar',
'baz/qux': 'bazqux',
}
birl = model_builder(data)
assert birl.foo_bar == 'foobar'
assert birl.baz_qux == 'bazqux'
def test_model_builder_custom_class():
data = {
'foo*bar': 'foobar',
'baz/qux': 'bazqux',
}
cls = model_class_builder('Model', data)
birl = model_builder(data, cls=cls)
assert isinstance(birl, cls)
def test_model_many_builder():
element = {
'foo*bar': 'foobar',
'baz/qux': 'bazqux',
}
model_count = 3
data = [element] * model_count
models = model_many_builder(data)
assert isinstance(models, typing.Generator)
models = list(models)
assert len(models) == model_count
first = models[0]
for model in models[1:]:
assert isinstance(model, type(first))
@pytest.mark.parametrize('iterable', ([], ()))
def test_model_many_builder_empty_iterable(iterable):
models = model_many_builder(iterable)
assert isinstance(models, typing.Generator)
assert len(list(models)) == 0
def test_model_many_builder_custom_cls():
class Foo(Model):
bar: str
def baz(self):
return True
data = [{'bar': 1}] * 3
models = list(model_many_builder(data, cls=Foo))
assert len(models) == 3
assert all(foo.baz() for foo in models)
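As a hedged aside, the builders exercised above compose directly; a tiny sketch mirroring the calls the tests make (the payload dicts are invented):
# Combined use of the builders tested above; the payload dicts are invented.
from simple_model.builder import model_builder, model_many_builder
order = model_builder({'id': 7, 'customer': {'name': 'Ada'}}, class_name='Order')
print(order.id, order.customer.name)   # nested dicts become nested models
rows = list(model_many_builder([{'id': 1}, {'id': 2}]))
print(len(rows), rows[0].id)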
| 24.658385
| 87
| 0.615113
|
272f65947e07bdacc5832ed3acdce26b0e3b04d2
| 3,533
|
py
|
Python
|
vprof/profiler.py
|
ltetrel/vprof
|
4cab77f345be71b8f2841e0af7f32979e17b9b44
|
[
"BSD-2-Clause"
] | 4,222
|
2015-07-19T06:16:39.000Z
|
2022-03-31T13:04:35.000Z
|
vprof/profiler.py
|
ltetrel/vprof
|
4cab77f345be71b8f2841e0af7f32979e17b9b44
|
[
"BSD-2-Clause"
] | 105
|
2015-12-18T10:33:08.000Z
|
2022-01-03T21:15:51.000Z
|
vprof/profiler.py
|
ltetrel/vprof
|
4cab77f345be71b8f2841e0af7f32979e17b9b44
|
[
"BSD-2-Clause"
] | 224
|
2015-12-22T20:54:53.000Z
|
2022-01-05T20:27:25.000Z
|
"""Profiler wrapper module."""
import cProfile
import operator
import pstats
import runpy
import time
from vprof import base_profiler
class Profiler(base_profiler.BaseProfiler):
"""Python profiler wrapper.
    Runs cProfile on the specified program and returns the collected stats.
"""
@staticmethod
def _transform_stats(prof):
"""Processes collected stats for UI."""
records = []
for info, params in prof.stats.items():
filename, lineno, funcname = info
cum_calls, num_calls, time_per_call, cum_time, _ = params
if prof.total_tt == 0:
percentage = 0
else:
percentage = round(100 * (cum_time / prof.total_tt), 4)
cum_time = round(cum_time, 4)
func_name = '%s @ %s' % (funcname, filename)
color_hash = base_profiler.hash_name(func_name)
records.append(
(filename, lineno, funcname, cum_time, percentage, num_calls,
cum_calls, time_per_call, filename, color_hash))
return sorted(records, key=operator.itemgetter(4), reverse=True)
def _profile_package(self):
"""Runs cProfile on a package."""
prof = cProfile.Profile()
prof.enable()
try:
runpy.run_path(self._run_object, run_name='__main__')
except SystemExit:
pass
prof.disable()
prof_stats = pstats.Stats(prof)
prof_stats.calc_callees()
return {
'objectName': self._object_name,
'callStats': self._transform_stats(prof_stats),
'totalTime': prof_stats.total_tt,
'primitiveCalls': prof_stats.prim_calls,
'totalCalls': prof_stats.total_calls,
'timestamp': int(time.time())
}
def profile_package(self):
"""Runs package profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_package)
def _profile_module(self):
"""Runs cProfile on a module."""
prof = cProfile.Profile()
try:
with open(self._run_object, 'rb') as srcfile:
code = compile(srcfile.read(), self._run_object, 'exec')
prof.runctx(code, self._globs, None)
except SystemExit:
pass
prof_stats = pstats.Stats(prof)
prof_stats.calc_callees()
return {
'objectName': self._object_name,
'callStats': self._transform_stats(prof_stats),
'totalTime': prof_stats.total_tt,
'primitiveCalls': prof_stats.prim_calls,
'totalCalls': prof_stats.total_calls,
'timestamp': int(time.time())
}
def profile_module(self):
"""Runs module profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_module)
def profile_function(self):
"""Runs cProfile on a function."""
prof = cProfile.Profile()
prof.enable()
result = self._run_object(*self._run_args, **self._run_kwargs)
prof.disable()
prof_stats = pstats.Stats(prof)
prof_stats.calc_callees()
return {
'objectName': self._object_name,
'callStats': self._transform_stats(prof_stats),
'totalTime': prof_stats.total_tt,
'primitiveCalls': prof_stats.prim_calls,
'totalCalls': prof_stats.total_calls,
'result': result,
'timestamp': int(time.time())
}
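The wrapper above is built on the standard cProfile/pstats pattern; a minimal standalone sketch of that pattern follows (the workload function is a placeholder):
# The bare cProfile/pstats pattern the Profiler class wraps; the workload is a
# placeholder invented for illustration.
import cProfile
import pstats
def _workload():
    return sum(i * i for i in range(100_000))
_prof = cProfile.Profile()
_prof.enable()
_workload()
_prof.disable()
_stats = pstats.Stats(_prof)
_stats.calc_callees()                        # same post-processing step used above
print(_stats.total_tt, _stats.total_calls)   # total time and number of calls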
| 34.980198
| 77
| 0.600906
|
23d7b7136e43be77be11baa7da44e767ad821cb9
| 2,935
|
py
|
Python
|
src/tests/crazy_mesh_test.py
|
Idate96/Mimetic-Fem
|
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
|
[
"MIT"
] | null | null | null |
src/tests/crazy_mesh_test.py
|
Idate96/Mimetic-Fem
|
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
|
[
"MIT"
] | null | null | null |
src/tests/crazy_mesh_test.py
|
Idate96/Mimetic-Fem
|
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
|
[
"MIT"
] | null | null | null |
"""Module to test mapping of compuational domain into an arbitrary one."""
import path_magic
import unittest
import os
import mesh
import numpy as np
import matplotlib.pyplot as plt
import numpy.testing as npt
# os.chdir(os.getcwd() + '/tests')
class TestMappingMesh22(unittest.TestCase):
def setUp(self):
n = 10
self.nx, self.ny = 2, 2
self.crazy_mesh = mesh.CrazyMesh(
2, (self.nx, self.ny), ((-1, 1), (-1, 1)), curvature=0.1)
self.xi = self.eta = np.linspace(-1, 1, n)
self.xi, self.eta = np.meshgrid(self.xi, self.eta)
self.dir = os.getcwd() + '/src/tests/'
def test_deformed_grid_mapping(self):
for i in range(self.nx * self.ny):
x_ref = np.loadtxt(self.dir + 'test_mapping/x_reference_domain_cc1_el' + str(i) + '.dat',
delimiter=',')
y_ref = np.loadtxt(self.dir + 'test_mapping/y_reference_domain_cc1_el' + str(i) + '.dat',
delimiter=',')
x, y = self.crazy_mesh.mapping(self.xi, self.eta, i)
npt.assert_array_almost_equal(x, x_ref, decimal=4)
npt.assert_array_almost_equal(y, y_ref, decimal=4)
def test_dx_dxi(self):
for i in range(self.nx * self.ny):
dx_dxi_ref = np.loadtxt(self.dir + 'test_dx_dxi/dxdxi_ref_domain_cc1_el' +
str(i) + '.dat', delimiter=',')
dx_dxi_crazy = self.crazy_mesh.dx_dxi(self.xi, self.eta, i)
# print('element : ', i)
# print(dx_dxi_crazy, dx_dxi_ref)
npt.assert_array_almost_equal(dx_dxi_ref, dx_dxi_crazy, decimal=4)
def test_dx_deta(self):
for i in range(self.nx * self.ny):
dx_dxi_ref = np.loadtxt(self.dir + 'test_dx_deta/dxdeta_ref_domain_cc1_el' +
str(i) + '.dat', delimiter=',')
dx_dxi_crazy = self.crazy_mesh.dx_deta(self.xi, self.eta, i)
# print('element : ', i)
# print(dx_dxi_crazy, dx_dxi_ref)
npt.assert_array_almost_equal(dx_dxi_ref, dx_dxi_crazy, decimal=4)
def test_dy_dxi(self):
for i in range(self.nx * self.ny):
dx_dxi_ref = np.loadtxt(self.dir + 'test_dy_dxi/dydxi_ref_domain_cc1_el' +
str(i) + '.dat', delimiter=',')
dx_dxi_crazy = self.crazy_mesh.dy_dxi(self.xi, self.eta, i)
npt.assert_array_almost_equal(dx_dxi_ref, dx_dxi_crazy, decimal=4)
def test_dy_deta(self):
for i in range(self.nx * self.ny):
dx_dxi_ref = np.loadtxt(self.dir + 'test_dy_deta/dydeta_ref_domain_cc1_el' +
str(i) + '.dat', delimiter=',')
dx_dxi_crazy = self.crazy_mesh.dy_deta(self.xi, self.eta, i)
npt.assert_array_almost_equal(dx_dxi_ref, dx_dxi_crazy, decimal=4)
if __name__ == '__main__':
unittest.main()
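Outside the unittest harness, the same CrazyMesh API exercised above can be called directly; a hedged sketch that assumes the project's local mesh module is importable, exactly as in setUp():
# Direct use of the CrazyMesh API the tests above exercise; assumes the
# project's local `mesh` module is importable, as in setUp().
import numpy as np
import mesh
crazy = mesh.CrazyMesh(2, (2, 2), ((-1, 1), (-1, 1)), curvature=0.1)
xi = eta = np.linspace(-1, 1, 10)
xi, eta = np.meshgrid(xi, eta)
x, y = crazy.mapping(xi, eta, 0)   # map element 0 into the deformed domain
print(x.shape, y.shape)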
| 41.928571
| 101
| 0.587394
|
ec6ba2fdddb957f8f47695187781177ac2dbd234
| 66
|
py
|
Python
|
npbench/benchmarks/polybench/bicg/bicg_cupy.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 27
|
2021-05-10T11:49:13.000Z
|
2022-03-22T18:07:19.000Z
|
npbench/benchmarks/polybench/bicg/bicg_cupy.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 3
|
2021-12-01T13:03:17.000Z
|
2022-03-17T10:53:00.000Z
|
npbench/benchmarks/polybench/bicg/bicg_cupy.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 7
|
2021-06-24T03:40:25.000Z
|
2022-01-26T09:04:33.000Z
|
import cupy as np
def kernel(A, p, r):
return r @ A, A @ p
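The kernel above is just the two BiCG matrix-vector products; a NumPy sketch with small invented inputs shows the shapes involved (CuPy mirrors the NumPy API, so only the import differs):
# NumPy equivalent of the CuPy kernel above, with small invented inputs.
import numpy as onp
M, N = 4, 3
A = onp.random.rand(M, N)
p = onp.random.rand(N)    # right-multiplied: A @ p has shape (M,)
r = onp.random.rand(M)    # left-multiplied:  r @ A has shape (N,)
s, q = r @ A, A @ p
print(s.shape, q.shape)   # (3,) (4,)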
| 9.428571
| 23
| 0.545455
|
a212c64c25f9aba5d25f36332b608ab569738d40
| 4,045
|
py
|
Python
|
soln_obamacon.py
|
GirlswhocodeKS2017/obamacon-photo
|
3508f20163dc4a4ec1dfd5b3ddb8ae223944f4c9
|
[
"MIT"
] | null | null | null |
soln_obamacon.py
|
GirlswhocodeKS2017/obamacon-photo
|
3508f20163dc4a4ec1dfd5b3ddb8ae223944f4c9
|
[
"MIT"
] | null | null | null |
soln_obamacon.py
|
GirlswhocodeKS2017/obamacon-photo
|
3508f20163dc4a4ec1dfd5b3ddb8ae223944f4c9
|
[
"MIT"
] | null | null | null |
from PIL import Image
def change_pixel_colors(my_image,obamacon_colors):
"""
    Your image is converted to pixels with a certain color (rgb value).
pixel_data_list is a list of tuples. It is a list of pixels, but every pixel
has 3 rgb values.
(1) Print the pixel_data_list. What do you see? Does it make sense?
(2) How many pixels does your image have?
(3) Pixel intensity is defined as the sum of its rgb values. So if the pixel's
rgb values are (0,51,100) the intensity is 151.
Your assignment is to calculate the intensity of each pixel. Based on intensity
make a new list of rgb pixel values with the correct obamacon_color:
darkBlue if intensity < 182
red if intensity between 182 and 364
lightBlue if intensity between 364 and 546
yellow if intensity > 546
return the new pixel list
"""
pixel_data = my_image.getdata() #What happens if you print this?
pixel_data_list = list(pixel_data) #converting image data to a list
new_pixel_data = [] #create new empty list
### YOUR CODE GOES HERE ###
for pixel in pixel_data_list:
intensity = sum(pixel) # Built in function that does the sum
if intensity < 182:
new_pixel_data.append(obamacon_colors['darkBlue'])
elif intensity >= 182 and intensity < 364:
new_pixel_data.append(obamacon_colors['red'])
elif intensity >= 364 and intensity <= 546:
new_pixel_data.append(obamacon_colors['lightBlue'])
elif intensity > 546:
new_pixel_data.append(obamacon_colors['yellow'])
return new_pixel_data
def go_crazy(my_image,obamacon_colors):
"""
    What if you only changed half the pixels? Or made your new image look
    like a checkerboard? Can you do that? How are the pixels laid out, i.e. how
    does the program read in the pixels? Like we read a book (starting at the
    upper left corner and ending at the bottom right)? Try to implement it.
"""
crazy_pixels = []
pixel_data = my_image.getdata()
pixel_data_list = list(pixel_data)
### YOUR CODE GOES HERE ###
""" soln for 1/2 obamacon"""
for i in range(len(pixel_data_list)):
if i < len(pixel_data_list)/2:
crazy_pixels.append(pixel_data_list[i])
else:
intensity = sum(pixel_data_list[i])
if intensity < 182:
crazy_pixels.append(obamacon_colors['darkBlue'])
elif intensity >= 182 and intensity < 364:
crazy_pixels.append(obamacon_colors['red'])
elif intensity >= 364 and intensity <= 546:
crazy_pixels.append(obamacon_colors['lightBlue'])
elif intensity > 546:
crazy_pixels.append(obamacon_colors['yellow'])
return crazy_pixels
def main():
"""
dictionary of color values.
Set rgb values for the standard Obamacon photo
color = (red_value,green_value,blue_value) i.e. rgb values
"""
obamacon_colors = {} #create dictionary of colors: dictionary_name[key] = value
obamacon_colors['darkBlue'] = (0, 51, 76)
obamacon_colors['red'] = (217, 26, 33)
obamacon_colors['lightBlue'] = (112, 150, 158)
obamacon_colors['yellow'] = (252, 227, 166)
"""
Open image file. Replace IMAGENAME with the name of your pic. If the pic
    is not in the same directory (folder) as this program, make sure you include the
path to the image. You can see the path by typing (in bash):
$ pwd
"""
my_image = Image.open("MrFuzzyPants.jpeg")
"""Call the function that changes the pixels"""
new_pixels = change_pixel_colors(my_image,obamacon_colors)
#crazy_pixels = go_crazy(my_image,obamacon_colors)
"""Functions that create a new image based on your new pixels"""
new_image = Image.new("RGB", my_image.size)
new_image.putdata(new_pixels)
new_image.show()
new_image.save("newMrFuzzypants.jpg", "jpeg") ### Change NEW-IMAGENAME
return
"""Call to main()"""
if __name__ == "__main__":
main()
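The intensity thresholds appear twice above; purely as an illustration, the shared mapping could be pulled into one helper (this refactoring sketch is not part of the original exercise):
# Illustrative refactoring only: the shared intensity-to-color mapping as a helper.
def obamacon_color_for(pixel, obamacon_colors):
    """Return the poster color for a pixel based on the sum of its rgb values."""
    intensity = sum(pixel)
    if intensity < 182:
        return obamacon_colors['darkBlue']
    elif intensity < 364:
        return obamacon_colors['red']
    elif intensity <= 546:
        return obamacon_colors['lightBlue']
    return obamacon_colors['yellow']
# e.g. obamacon_color_for((0, 51, 100), obamacon_colors) -> darkBlue (intensity 151)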
| 36.772727
| 83
| 0.663782
|
456430e72bf8eec0aaab4f8256291c238aef71aa
| 526
|
py
|
Python
|
Egzersiz/ali/NoSQLEgzersiz.py.py
|
ibrahimediz/ornekproje
|
c5ebeafc43a9c6d2aa639d0d95eedbce65991576
|
[
"Apache-2.0"
] | null | null | null |
Egzersiz/ali/NoSQLEgzersiz.py.py
|
ibrahimediz/ornekproje
|
c5ebeafc43a9c6d2aa639d0d95eedbce65991576
|
[
"Apache-2.0"
] | null | null | null |
Egzersiz/ali/NoSQLEgzersiz.py.py
|
ibrahimediz/ornekproje
|
c5ebeafc43a9c6d2aa639d0d95eedbce65991576
|
[
"Apache-2.0"
] | null | null | null |
import pymongo
cli = pymongo.MongoClient("mongodb+srv://dbuser:dbUser123@cluster0.6iawp.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
# print(*cli.list_database_names(),sep="\n")
db = cli["patika"]
print(*db.list_collection_names(),sep="\n")
col = db["153_yemeksepeti"]
# using the code block above
row = {"adi": "Ali", "soyadi": "Mansur", "adres": "Akıncılar", "whoAdded": "AliMansur"}  # add these fields to the collection; as an extra, we add one more field (whoAdded)
sonuc = col.insert_one(row)
# print(sonuc.inserted_id)
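A hedged follow-up using only standard pymongo calls to confirm the insert; the filter values come from the row defined above.
# Read the document back to confirm the insert, reusing the collection handle above.
fetched = col.find_one({"adi": "Ali", "soyadi": "Mansur"})
print(fetched)
print(col.count_documents({"whoAdded": "AliMansur"}))  # documents added by this user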
| 47.818182
| 149
| 0.754753
|