| column | dtype | observed range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |

hexsha: a765f6c349621f1e0308c3686c2a549868853c7d | size: 1,854 | ext: py | lang: Python
max_stars:  repo_path: sopel_modules/urban_dictionary/urbandictionary.py | repo_name: capsterx/sopel-urbandictionary | head_hexsha: 188a54badc64c4626b1413dfab93ee685f543cf1 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: sopel_modules/urban_dictionary/urbandictionary.py | repo_name: capsterx/sopel-urbandictionary | head_hexsha: 188a54badc64c4626b1413dfab93ee685f543cf1 | licenses: ["MIT"] | count: 1 | event_min_datetime: 2021-01-10T06:53:49.000Z | event_max_datetime: 2021-01-13T02:03:30.000Z
max_forks:  repo_path: sopel_modules/urban_dictionary/urbandictionary.py | repo_name: capsterx/sopel-urbandictionary | head_hexsha: 188a54badc64c4626b1413dfab93ee685f543cf1 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
from sopel.module import commands, example
from sopel import web
import sopel.module
import socket
import re
import urbandictionary as ud
BOLD=chr(0x02)
ITALICS=chr(0x1D)
UNDERLINE=chr(0x1F)
def ud_conv(s):
italics = re.sub(r"\[([\w' \"_-]*)\]", f"{UNDERLINE}\\1{UNDERLINE}", s.strip())
newlines = re.sub(r"\r?\n(\r?\n)*", f" {BOLD}--{BOLD} ", italics)
return newlines
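# Illustrative example of ud_conv (not part of the original module): bracketed
# cross-references become IRC-underlined words and blank-line paragraph breaks
# collapse into a bold " -- " separator, e.g.
#   ud_conv("A [word]\n\nSecond line") -> "A \x1fword\x1f \x02--\x02 Second line"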
def urbandictionary_lookup(bot, nick, word):
if not word:
bot.say(f"{nick} You must specify something to search")
return
defs = []
try:
defs = ud.define(word)
except Exception as e:
bot.say(f"{nick} {e}")
return
if len(defs) == 0:
bot.say(f"{nick} no results found for {word}")
return
filtered_defs = [x for x in defs if x.word.strip().lower() == word.strip().lower()]
if len(filtered_defs) == 0:
words = ', '.join(set([x.word.lower() for x in defs]))
bot.say(f"{nick} no exact results for '{word}', possible matchs are: {words}")
return
filtered_defs.sort(key = lambda x: x.upvotes, reverse=True)
d = filtered_defs[0]
bot.say(f"{BOLD}{d.word}{BOLD}: {ud_conv(d.definition)} {BOLD}Example{BOLD}: {ud_conv(d.example)}", max_messages=3)
@sopel.module.commands('ud')
@sopel.module.example('.ud netflix and chill')
def urbandictionary_lookup_cmd(bot, trigger):
return urbandictionary_lookup(bot, trigger.nick, trigger.group(2))
@sopel.module.commands('udrw')
@sopel.module.example('.udrw')
def urbandictionary_random_words(bot, trigger):
bot.say(', '.join([x.word for x in ud.random()]))
@sopel.module.commands('udr')
@sopel.module.example('.udr')
def urbandictionary_random(bot, trigger):
defs = ud.random()
defs.sort(key = lambda x: x.upvotes, reverse=True)
return urbandictionary_lookup(bot, trigger.nick, defs[0].word)
avg_line_length: 29.903226 | max_line_length: 119 | alphanum_fraction: 0.651564 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 601 | score_decorators: 0.324164 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 397 | score_documentation: 0.214132
hexsha: a7660deda124d1efd2085f69810453398abdc730 | size: 324 | ext: py | lang: Python
max_stars:  repo_path: Aula01 e exercicios/exercicio_06.py | repo_name: Dorcival/PYTHON | head_hexsha: 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: Aula01 e exercicios/exercicio_06.py | repo_name: Dorcival/PYTHON | head_hexsha: 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: Aula01 e exercicios/exercicio_06.py | repo_name: Dorcival/PYTHON | head_hexsha: 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
# CELSIUS to FAHRENHEIT converter v.0.1
# By Dorcival Leite 202003362174
import time
print("CONVERTER TEMPERATURA DE CELSIUS PARA FAHRENHEIT\n")
c = float(input("Digite a temperatura em CELSIUS: "))
f = float((9 * c)/5)+32
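# Worked example of the formula above: 25 °C -> (9 * 25) / 5 + 32 = 77 °F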
print("\nA temperatura de", c, "graus CELSIUS é igual a", f, "graus FAHRENHEIT")
time.sleep(20)
avg_line_length: 40.5 | max_line_length: 80 | alphanum_fraction: 0.734568 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 228 | score_documentation: 0.701538
hexsha: a7687184494cf93d9f5d684cfc40811e7667b3e4 | size: 772 | ext: py | lang: Python
max_stars:  repo_path: multranslate.py | repo_name: anoidgit/NMTServer | head_hexsha: f608695c4c1f5319fb3c56f218b1d78056861c62 | licenses: ["Apache-2.0"] | count: 3 | event_min_datetime: 2017-08-29T22:56:38.000Z | event_max_datetime: 2017-12-12T06:20:35.000Z
max_issues: repo_path: multranslate.py | repo_name: anoidgit/NMTServer | head_hexsha: f608695c4c1f5319fb3c56f218b1d78056861c62 | licenses: ["Apache-2.0"] | count: 1 | event_min_datetime: 2017-09-10T08:02:24.000Z | event_max_datetime: 2017-09-12T01:03:25.000Z
max_forks:  repo_path: multranslate.py | repo_name: anoidgit/NMTServer | head_hexsha: f608695c4c1f5319fb3c56f218b1d78056861c62 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
#encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import zmq, sys, json
import seg
import detoken
import datautils
from random import sample
serverl=["tcp://127.0.0.1:"+str(port) for port in xrange(5556,5556+4)]
def _translate_core(jsond):
global serverl
sock = zmq.Context().socket(zmq.REQ)
sock.connect(sample(serverl, 1)[0])
sock.send(jsond)
return sock.recv()
def _translate(srctext):
return detoken.detoken(datautils.char2pinyin(datautils.restoreFromBatch(json.loads(_translate_core(json.dumps(datautils.makeBatch(datautils.cutParagraph(seg.segline(srctext)))))))))
def translate(srctext):
tmp=srctext.strip()
if tmp:
return _translate(tmp)
else:
return tmp
def poweron():
seg.poweron()
def poweroff():
seg.poweroff()
avg_line_length: 19.794872 | max_line_length: 182 | alphanum_fraction: 0.75 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 41 | score_documentation: 0.053109
hexsha: a769f370668047fa9ac58fd30c92b5d2a06a8ba0 | size: 10,995 | ext: py | lang: Python
max_stars:  repo_path: face_detector.py | repo_name: duwizerak/Keras_insightface | head_hexsha: dae425d7ef5dfeccb50a8ddca5814a0901b2957a | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: face_detector.py | repo_name: duwizerak/Keras_insightface | head_hexsha: dae425d7ef5dfeccb50a8ddca5814a0901b2957a | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: face_detector.py | repo_name: duwizerak/Keras_insightface | head_hexsha: dae425d7ef5dfeccb50a8ddca5814a0901b2957a | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
#!/usr/bin/env python3
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from glob2 import glob
from skimage import transform
from skimage.io import imread, imsave
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
FILE_HASH = {"yolov5s_face_dynamic": "e7854a5cae48ded05b3b31aa93765f0d"}
DEFAULT_DETECTOR = "https://github.com/leondgarse/Keras_insightface/releases/download/v1.0.0/yolov5s_face_dynamic.h5"
DEFAULT_ANCHORS = np.array(
[
[[0.5, 0.625], [1.0, 1.25], [1.625, 2.0]],
[[1.4375, 1.8125], [2.6875, 3.4375], [4.5625, 6.5625]],
[[4.5625, 6.781199932098389], [7.218800067901611, 9.375], [10.468999862670898, 13.531000137329102]],
],
dtype="float32",
)
DEFAULT_STRIDES = np.array([8, 16, 32], dtype="float32")
class BaseDetector:
def face_align_landmarks(self, img, landmarks, image_size=(112, 112), method="similar"):
tform = transform.AffineTransform() if method == "affine" else transform.SimilarityTransform()
src = np.array(
[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, 92.3655], [70.729904, 92.2041]],
dtype=np.float32,
)
ret = []
landmarks = landmarks if landmarks.shape[1] == 5 else tf.reshape(landmarks, [-1, 5, 2]).numpy()
for landmark in landmarks:
# landmark = np.array(landmark).reshape(2, 5)[::-1].T
tform.estimate(landmark, src)
# M = tform.params[0:2, :]
# ndimage = cv2.warpAffine(img, M, image_size, borderValue=0.0)
ndimage = transform.warp(img, tform.inverse, output_shape=image_size)
if len(ndimage.shape) == 2:
ndimage = np.stack([ndimage, ndimage, ndimage], -1)
ret.append(ndimage)
# return np.array(ret)
return (np.array(ret) * 255).astype(np.uint8)
def detect_in_image(self, image, max_output_size=15, iou_threshold=0.45, score_threshold=0.25, image_format="RGB"):
if isinstance(image, str):
image = imread(image)[:, :, :3]
image_format = "RGB"
bbs, pps, ccs = self.__call__(image, max_output_size, iou_threshold, score_threshold, image_format)
# print(bbs.shape, pps.shape, ccs.shape)
if len(bbs) != 0:
image_RGB = image if image_format == "RGB" else image[:, :, ::-1]
return bbs, pps, ccs, self.face_align_landmarks(image_RGB, pps)
else:
return np.array([]), np.array([]), np.array([]), np.array([])
def detect_in_folder(self, data_path, max_output_size=15, iou_threshold=0.45, score_threshold=0.25):
while data_path.endswith(os.sep):
data_path = data_path[:-1]
imms = glob(os.path.join(data_path, "*", "*"))
use_class = True
if len(imms) == 0:
imms = glob(os.path.join(data_path, "*"))
use_class = False
dest_path = data_path + "_aligned_112_112"
for imm in tqdm(imms, "Detecting"):
_, _, _, nimages = self.detect_in_image(imm, max_output_size, iou_threshold, score_threshold, image_format="RGB")
if nimages.shape[0] != 0:
file_name = os.path.basename(imm)
if use_class:
class_name = os.path.basename(os.path.dirname(imm))
save_dir = os.path.join(dest_path, class_name)
else:
save_dir = dest_path
if not os.path.exists(save_dir):
os.makedirs(save_dir)
imsave(os.path.join(save_dir, file_name), nimages[0]) # Use only the first one
else:
print(">>>> None face detected in image:", imm)
print(">>>> Saved aligned face images in:", dest_path)
return dest_path
def show_result(self, image, bbs, pps=[], ccs=[]):
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(image)
for id, bb in enumerate(bbs):
plt.plot([bb[0], bb[2], bb[2], bb[0], bb[0]], [bb[1], bb[1], bb[3], bb[3], bb[1]])
if len(ccs) != 0:
plt.text(bb[0], bb[1], "{:.4f}".format(ccs[id]))
if len(pps) != 0:
pp = pps[id]
if len(pp.shape) == 2:
plt.scatter(pp[:, 0], pp[:, 1], s=8)
else:
plt.scatter(pp[::2], pp[1::2], s=8)
plt.axis("off")
plt.tight_layout()
plt.show()
class YoloV5FaceDetector(BaseDetector):
def __init__(self, model_path=DEFAULT_DETECTOR, anchors=DEFAULT_ANCHORS, strides=DEFAULT_STRIDES):
if isinstance(model_path, str) and model_path.startswith("http"):
file_name = os.path.basename(model_path)
file_hash = FILE_HASH.get(os.path.splitext(file_name)[0], None)
model_path = tf.keras.utils.get_file(file_name, model_path, cache_subdir="models", file_hash=file_hash)
self.model = tf.keras.models.load_model(model_path)
elif isinstance(model_path, str) and model_path.endswith(".h5"):
self.model = tf.keras.models.load_model(model_path)
else:
self.model = model_path
self.anchors, self.strides = anchors, strides
self.num_anchors = anchors.shape[1]
self.anchor_grids = tf.math.ceil((anchors * strides[:, tf.newaxis, tf.newaxis])[:, tf.newaxis, :, tf.newaxis, :])
def make_grid(self, nx=20, ny=20, dtype=tf.float32):
xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, -1, 2]), dtype=dtype)
def pre_process_32(self, image):
hh, ww, _ = image.shape
pad_hh = (32 - hh % 32) % 32 # int(tf.math.ceil(hh / 32) * 32) - hh
pad_ww = (32 - ww % 32) % 32 # int(tf.math.ceil(ww / 32) * 32) - ww
if pad_ww != 0 or pad_hh != 0:
image = tf.pad(image, [[0, pad_hh], [0, pad_ww], [0, 0]])
return tf.expand_dims(image, 0)
def post_process(self, outputs, image_height, image_width):
post_outputs = []
for output, stride, anchor, anchor_grid in zip(outputs, self.strides, self.anchors, self.anchor_grids):
hh, ww = image_height // stride, image_width // stride
anchor_width = output.shape[-1] // self.num_anchors
output = tf.reshape(output, [-1, output.shape[1] * output.shape[2], self.num_anchors, anchor_width])
output = tf.transpose(output, [0, 2, 1, 3])
cls = tf.sigmoid(output[:, :, :, :5])
cur_grid = self.make_grid(ww, hh, dtype=output.dtype) * stride
xy = cls[:, :, :, 0:2] * (2 * stride) - 0.5 * stride + cur_grid
wh = (cls[:, :, :, 2:4] * 2) ** 2 * anchor_grid
mm = [1, 1, 1, 5]
landmarks = output[:, :, :, 5:15] * tf.tile(anchor_grid, mm) + tf.tile(cur_grid, mm)
# print(output.shape, cls.shape, xy.shape, wh.shape, landmarks.shape)
post_out = tf.concat([xy, wh, landmarks, cls[:, :, :, 4:]], axis=-1)
post_outputs.append(tf.reshape(post_out, [-1, output.shape[1] * output.shape[2], anchor_width - 1]))
return tf.concat(post_outputs, axis=1)
def yolo_nms(self, inputs, max_output_size=15, iou_threshold=0.35, score_threshold=0.25):
inputs = inputs[0][inputs[0, :, -1] > score_threshold]
xy_center, wh, ppt, cct = inputs[:, :2], inputs[:, 2:4], inputs[:, 4:14], inputs[:, 14]
xy_start = xy_center - wh / 2
xy_end = xy_start + wh
bbt = tf.concat([xy_start, xy_end], axis=-1)
rr = tf.image.non_max_suppression(bbt, cct, max_output_size=max_output_size, iou_threshold=iou_threshold, score_threshold=0.0)
bbs, pps, ccs = tf.gather(bbt, rr, axis=0), tf.gather(ppt, rr, axis=0), tf.gather(cct, rr, axis=0)
pps = tf.reshape(pps, [-1, 5, 2])
return bbs.numpy(), pps.numpy(), ccs.numpy()
def __call__(self, image, max_output_size=15, iou_threshold=0.45, score_threshold=0.25, image_format="RGB"):
imm_RGB = image if image_format == "RGB" else image[:, :, ::-1]
imm_RGB = self.pre_process_32(imm_RGB)
outputs = self.model(imm_RGB)
post_outputs = self.post_process(outputs, imm_RGB.shape[1], imm_RGB.shape[2])
return self.yolo_nms(post_outputs, max_output_size, iou_threshold, score_threshold)
class SCRFD(BaseDetector):
def __init__(self, det_shape=640):
self.model = self.download_and_prepare_det()
self.det_shape = (det_shape, det_shape)
def __call__(self, image, max_output_size=15, iou_threshold=0.45, score_threshold=0.25, image_format="RGB"):
imm_BGR = image if image_format == "BGR" else image[:, :, ::-1]
bboxes, pps = self.model.detect(imm_BGR, self.det_shape)
bbs, ccs = bboxes[:, :4], bboxes[:, -1]
return bbs, pps, ccs
def download_and_prepare_det(self):
import insightface
cvd = os.environ.get("CUDA_VISIBLE_DEVICES", "").strip()
ctx = 0 if len(cvd) > 0 and int(cvd) != -1 else -1
model_file = os.path.expanduser("~/.insightface/models/antelope/scrfd_10g_bnkps.onnx")
if not os.path.exists(model_file):
import zipfile
model_url = "http://storage.insightface.ai/files/models/antelope.zip"
zip_file = os.path.expanduser("~/.insightface/models/antelope.zip")
zip_extract_path = os.path.splitext(zip_file)[0]
if not os.path.exists(os.path.dirname(zip_file)):
os.makedirs(os.path.dirname(zip_file))
insightface.utils.storage.download_file(model_url, path=zip_file, overwrite=True)
with zipfile.ZipFile(zip_file) as zf:
zf.extractall(zip_extract_path)
os.remove(zip_file)
model = insightface.model_zoo.SCRFD(model_file=model_file)
model.prepare(ctx)
return model
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"input_path",
type=str,
default=None,
help="Could be: 1. Data path, containing images in class folders; 2. image folder path, containing multiple images; 3. jpg / png image path",
)
parser.add_argument("--use_scrfd", action="store_true", help="Use SCRFD instead of YoloV5FaceDetector")
args = parser.parse_known_args(sys.argv[1:])[0]
det = SCRFD() if args.use_scrfd else YoloV5FaceDetector()
if args.input_path.endswith(".jpg") or args.input_path.endswith(".png"):
print(">>>> Detection in image:", args.input_path)
imm = imread(args.input_path)
bbs, pps, ccs, nimgs = det.detect_in_image(imm)
det.show_result(imm, bbs, pps, ccs)
else:
print(">>>> Detection in folder:", args.input_path)
det.detect_in_folder(args.input_path)
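# Example invocations (paths are illustrative, not from the original repo):
#   python face_detector.py /path/to/images            # detect and align every image in a folder
#   python face_detector.py ./face.jpg --use_scrfd     # single image using the SCRFD detector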
avg_line_length: 46.588983 | max_line_length: 149 | alphanum_fraction: 0.606639 | count_classes: 9,111 | score_classes: 0.828649 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,228 | score_documentation: 0.111687
hexsha: a76a5f631eaf931f6a0d7bb1f2bdb5a30e7ae751 | size: 4,132 | ext: py | lang: Python
max_stars:  repo_path: pyleus/configuration.py | repo_name: earthmine/pyleus | head_hexsha: 4d9c14c9df470be6ff544f2ad82985f37e582d80 | licenses: ["Apache-2.0"] | count: 166 | event_min_datetime: 2015-01-14T16:06:37.000Z | event_max_datetime: 2021-11-15T12:17:11.000Z
max_issues: repo_path: pyleus/configuration.py | repo_name: WenbinTan/pyleus | head_hexsha: 8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce | licenses: ["Apache-2.0"] | count: 105 | event_min_datetime: 2015-01-16T19:59:06.000Z | event_max_datetime: 2016-05-13T19:40:45.000Z
max_forks:  repo_path: pyleus/configuration.py | repo_name: WenbinTan/pyleus | head_hexsha: 8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce | licenses: ["Apache-2.0"] | count: 62 | event_min_datetime: 2015-01-19T07:42:24.000Z | event_max_datetime: 2021-06-05T21:02:09.000Z
content:
"""Configuration defaults and loading functions.
Pyleus will look for configuration files in the following file paths in order
of increasing precedence. Later files override earlier ones.
#. /etc/pyleus.conf
#. ~/.config/pyleus.conf
#. ~/.pyleus.conf
You can always specify a configuration file when running any pyleus CLI command
as follows:
``$ pyleus -c /path/to/config_file CMD``
This will override previous configurations.
Configuration file example
--------------------------
The following file contains all options you can configure for all pyleus
invocations.
.. code-block:: ini
[storm]
# path to Storm executable (pyleus will automatically look in PATH)
storm_cmd_path: /usr/share/storm/bin/storm
# optional: use -n option of pyleus CLI instead
nimbus_host: 10.11.12.13
# optional: use -p option of pyleus CLI instead
nimbus_port: 6628
# java options to pass to Storm CLI
jvm_opts: -Djava.io.tmpdir=/home/myuser/tmp
[build]
# PyPI server to use during the build of your topologies
pypi_index_url: http://pypi.ninjacorp.com/simple/
# always use system-site-packages for pyleus virtualenvs (default: false)
system_site_packages: true
# list of packages to always include in your topologies
include_packages: foo bar<4.0 baz==0.1
"""
from __future__ import absolute_import
import collections
import os
from pyleus import BASE_JAR_PATH
from pyleus.utils import expand_path
from pyleus.exception import ConfigurationError
from pyleus.compat import configparser
# Configuration files paths in order of increasing precedence
# Please keep in sync with module docstring
CONFIG_FILES_PATH = [
"/etc/pyleus.conf",
"~/.config/pyleus.conf",
"~/.pyleus.conf"
]
Configuration = collections.namedtuple(
"Configuration",
"base_jar config_file debug func include_packages output_jar \
pypi_index_url nimbus_host nimbus_port storm_cmd_path \
system_site_packages topology_path topology_jar topology_name verbose \
wait_time jvm_opts"
)
"""Namedtuple containing all pyleus configuration values."""
DEFAULTS = Configuration(
base_jar=BASE_JAR_PATH,
config_file=None,
debug=False,
func=None,
include_packages=None,
output_jar=None,
pypi_index_url=None,
nimbus_host=None,
nimbus_port=None,
storm_cmd_path=None,
system_site_packages=False,
topology_path="pyleus_topology.yaml",
topology_jar=None,
topology_name=None,
verbose=False,
wait_time=None,
jvm_opts=None,
)
def _validate_config_file(config_file):
"""Ensure that config_file exists and is a file."""
if not os.path.exists(config_file):
raise ConfigurationError("Specified configuration file not"
" found: {0}".format(config_file))
if not os.path.isfile(config_file):
raise ConfigurationError("Specified configuration file is not"
" a file: {0}".format(config_file))
def update_configuration(config, update_dict):
"""Update configuration with new values passed as dictionary.
:return: new configuration ``namedtuple``
"""
tmp = config._asdict()
tmp.update(update_dict)
return Configuration(**tmp)
def load_configuration(cmd_line_file):
"""Load configurations from the more generic to the
    more specific configuration file. Later configurations
    override earlier ones.
If a file is specified from command line, it is considered
the most specific.
:return: configuration ``namedtuple``
"""
config_files_hierarchy = [expand_path(c) for c in CONFIG_FILES_PATH]
if cmd_line_file is not None:
_validate_config_file(cmd_line_file)
config_files_hierarchy.append(cmd_line_file)
config = configparser.SafeConfigParser()
config.read(config_files_hierarchy)
configs = update_configuration(
DEFAULTS,
dict(
(config_name, config_value)
for section in config.sections()
for config_name, config_value in config.items(section)
)
)
return configs
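# Usage sketch (hypothetical paths; not part of the original module):
#   config = load_configuration("/path/to/pyleus.conf")   # pass None to rely on CONFIG_FILES_PATH only
#   config = update_configuration(config, {"verbose": True})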
avg_line_length: 28.694444 | max_line_length: 79 | alphanum_fraction: 0.717086 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,353 | score_documentation: 0.569458
hexsha: a76b01dad5f2ae8289af31fef183a815e3bdd1f2 | size: 1,318 | ext: py | lang: Python
max_stars:  repo_path: tests/conftest.py | repo_name: sdrobert/pydrobert-param | head_hexsha: d9f68bbcebfcc5ca909c639b03b959526a8b1631 | licenses: ["Apache-2.0"] | count: 1 | event_min_datetime: 2021-05-14T18:27:13.000Z | event_max_datetime: 2021-05-14T18:27:13.000Z
max_issues: repo_path: tests/conftest.py | repo_name: sdrobert/pydrobert-param | head_hexsha: d9f68bbcebfcc5ca909c639b03b959526a8b1631 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: tests/conftest.py | repo_name: sdrobert/pydrobert-param | head_hexsha: d9f68bbcebfcc5ca909c639b03b959526a8b1631 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
from shutil import rmtree
from tempfile import mkdtemp
import pytest
import param
import pydrobert.param.serialization as serial
param.parameterized.warnings_as_exceptions = True
@pytest.fixture(params=["ruamel_yaml", "pyyaml"])
def yaml_loader(request):
if request.param == "ruamel_yaml":
try:
from ruamel_yaml import YAML # type: ignore
yaml_loader = YAML().load
except ImportError:
from ruamel.yaml import YAML # type: ignore
yaml_loader = YAML().load
module_names = ("ruamel_yaml", "ruamel.yaml")
else:
import yaml # type: ignore
def yaml_loader(x):
return yaml.load(x, Loader=yaml.FullLoader)
module_names = ("pyyaml",)
old_props = serial.YAML_MODULE_PRIORITIES
serial.YAML_MODULE_PRIORITIES = module_names
yield yaml_loader
serial.YAML_MODULE_PRIORITIES = old_props
@pytest.fixture(params=[True, False])
def with_yaml(request):
if request.param:
yield True
else:
old_props = serial.YAML_MODULE_PRIORITIES
serial.YAML_MODULE_PRIORITIES = tuple()
yield False
serial.YAML_MODULE_PRIORITIES = old_props
@pytest.fixture
def temp_dir():
dir_name = mkdtemp()
yield dir_name
rmtree(dir_name, ignore_errors=True)
avg_line_length: 24.867925 | max_line_length: 56 | alphanum_fraction: 0.677542 | count_classes: 0 | score_classes: 0 | count_generators: 1,023 | score_generators: 0.776176 | count_decorators: 1,127 | score_decorators: 0.855083 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 110 | score_documentation: 0.08346
hexsha: a76b06cca3e635c2f5710089e70486f5a0bbb87e | size: 1,942 | ext: py | lang: Python
max_stars:  repo_path: tests/test_npaths.py | repo_name: mtymchenko/npaths | head_hexsha: 5019694784afee9f60ab0b5f0f0ef3051e113077 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: tests/test_npaths.py | repo_name: mtymchenko/npaths | head_hexsha: 5019694784afee9f60ab0b5f0f0ef3051e113077 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: tests/test_npaths.py | repo_name: mtymchenko/npaths | head_hexsha: 5019694784afee9f60ab0b5f0f0ef3051e113077 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
import unittest
import numpy as np
import matplotlib.pyplot as plt
from npaths import NPathNode, Filter, Circulator
__all__ = [
'TestNPathNode',
'TestFilter',
'TestCirculator'
]
GHz = 1e9
ohm = 1
pF = 1e-12
freqs = np.linspace(0.001, 6, 500)*GHz
class TestNPathNode(unittest.TestCase):
def test_sparam(self):
node = NPathNode(
freqs=freqs,
freq_mod=1*GHz,
C=3*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
plt.figure()
plt.plot(freqs/GHz, 10*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S21)))
plt.grid()
plt.show()
class TestFilter(unittest.TestCase):
def test_sparam(self):
node = Filter(
freqs=freqs,
freq_mod=1*GHz,
C=15*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
S21_8 = node.sparam(2, 1, 8)
S21_16 = node.sparam(2, 1, 16)
plt.figure()
plt.plot(freqs/GHz, 20*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21_8)))
plt.plot(freqs/GHz, 20*np.log10(np.abs(S21_16)))
plt.grid()
plt.show()
class TestCirculator(unittest.TestCase):
def test_sparam(self):
node = Circulator(
freqs=freqs,
freq_mod=1*GHz,
n_harmonics=60,
n_harmonics_subset=15,
C=1*pF)
S11 = node.sparam(1, 1)
S21 = node.sparam(2, 1)
S12 = node.sparam(1, 2)
        S31 = node.sparam(3, 1)  # was sparam(2, 1), which duplicated S21
plt.figure()
plt.plot(freqs/GHz, 10*np.log10(np.abs(S11)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S21)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S12)))
plt.plot(freqs/GHz, 10*np.log10(np.abs(S31)))
plt.grid()
plt.show()
if __name__ == '__main__':
unittest.main()
avg_line_length: 21.577778 | max_line_length: 56 | alphanum_fraction: 0.549949 | count_classes: 1,621 | score_classes: 0.834706 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 53 | score_documentation: 0.027291
hexsha: a76bc9e0503e467514bbda7a08ff3433e4b780d7 | size: 2,896 | ext: py | lang: Python
max_stars:  repo_path: python/o80_pam/o80_ball.py | repo_name: intelligent-soft-robots/o80_pam | head_hexsha: 3491dcdace61f58e0cf31149184593da3cd2f017 | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: python/o80_pam/o80_ball.py | repo_name: intelligent-soft-robots/o80_pam | head_hexsha: 3491dcdace61f58e0cf31149184593da3cd2f017 | licenses: ["BSD-3-Clause"] | count: 2 | event_min_datetime: 2021-02-17T12:55:44.000Z | event_max_datetime: 2021-05-27T14:10:57.000Z
max_forks:  repo_path: python/o80_pam/o80_ball.py | repo_name: intelligent-soft-robots/o80_pam | head_hexsha: 3491dcdace61f58e0cf31149184593da3cd2f017 | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
import o80
import o80_pam
import context
class _Data:
def __init__(self, observation):
ball_states = observation.get_observed_states()
self.ball_position = [None] * 3
self.ball_velocity = [None] * 3
for dim in range(3):
self.ball_position[dim] = ball_states.get(2 * dim).get()
self.ball_velocity[dim] = ball_states.get(2 * dim + 1).get()
# convenience class for shooting virtual balls
# via o80, playing pre-recorded trajectories (hosted in context package)
class o80Ball:
def __init__(self, segment_id, frontend=None):
if frontend is None:
self._frontend = o80_pam.MirrorFreeJointFrontEnd(segment_id)
else:
self._frontend = frontend
def burst(self, nb_iterations):
self._frontend.burst(nb_iterations)
def reset(self):
"""
send via the frontend an overwrite command requesting the backend
to set the desired states as the first state it ever observed, i.e.
to reset the object to its initial state.
"""
self._frontend.add_reinit_command()
self._frontend.pulse()
def get_iteration(self):
return self._frontend.pulse().get_iteration()
def play_trajectory(self, trajectory_points, overwrite=False):
if overwrite:
mode = o80.Mode.OVERWRITE
else:
mode = o80.Mode.QUEUE
# sending the full ball trajectory
# duration of 10ms : sampling rate of the trajectory
duration = o80.Duration_us.milliseconds(10)
for traj_point in trajectory_points:
self._frontend.add_command(
traj_point.position, traj_point.velocity, duration, mode
)
mode = o80.Mode.QUEUE
self._frontend.pulse()
def set(self, position, velocity, duration_ms=None, wait=False):
if duration_ms is not None:
duration = o80.Duration_us.milliseconds(duration_ms)
else:
duration = None
if duration is None:
self._frontend.add_command(position, velocity, o80.Mode.OVERWRITE)
else:
self._frontend.add_command(position, velocity, duration, o80.Mode.OVERWRITE)
if wait:
self._frontend.pulse_and_wait()
else:
self._frontend.pulse()
def get(self):
ball_states = self._frontend.pulse().get_observed_states()
ball_position = [None] * 3
ball_velocity = [None] * 3
for dim in range(3):
ball_position[dim] = ball_states.get(2 * dim).get()
ball_velocity[dim] = ball_states.get(2 * dim + 1).get()
return ball_position, ball_velocity
def get_data(self, start_iteration):
observations = self._frontend.get_observations_since(start_iteration)
data = [_Data(obs) for obs in observations]
return data
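# Usage sketch (segment id and values are illustrative, not from the original module):
#   ball = o80Ball("ball_segment")
#   ball.set([0.0, 0.0, 1.0], [0.0, 0.0, 0.0], duration_ms=500, wait=True)
#   position, velocity = ball.get()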
avg_line_length: 31.139785 | max_line_length: 88 | alphanum_fraction: 0.632597 | count_classes: 2,729 | score_classes: 0.942334 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 419 | score_documentation: 0.144682
hexsha: a76c133ddf548f99aff8129ee6e9cbb2e7608901 | size: 5,374 | ext: py | lang: Python
max_stars:  repo_path: pymic/transform/threshold.py | repo_name: HiLab-git/PyMIC | head_hexsha: abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | licenses: ["Apache-2.0"] | count: 147 | event_min_datetime: 2019-12-23T02:52:04.000Z | event_max_datetime: 2022-03-06T16:30:43.000Z
max_issues: repo_path: pymic/transform/threshold.py | repo_name: HiLab-git/PyMIC | head_hexsha: abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | licenses: ["Apache-2.0"] | count: 4 | event_min_datetime: 2020-12-18T12:47:21.000Z | event_max_datetime: 2021-05-21T02:18:01.000Z
max_forks:  repo_path: pymic/transform/threshold.py | repo_name: HiLab-git/PyMIC | head_hexsha: abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | licenses: ["Apache-2.0"] | count: 32 | event_min_datetime: 2020-01-08T13:48:50.000Z | event_max_datetime: 2022-03-12T06:31:13.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import json
import math
import random
import numpy as np
from scipy import ndimage
from pymic.transform.abstract_transform import AbstractTransform
from pymic.util.image_process import *
class ChannelWiseThreshold(AbstractTransform):
"""Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
"""
def __init__(self, params):
"""
channels (tuple/list/None): the list of specified channels for thresholding. Default value
is all the channels.
threshold_lower (tuple/list/None): The lower threshold values for specified channels.
        threshold_upper (tuple/list/None): The upper threshold values for specified channels.
        replace_lower (tuple/list/None): new values for pixels with intensity smaller than
            threshold_lower. Default value is threshold_lower itself.
replace_upper (tuple/list/None): new values for pixels with intensity larger than threshold_upper.
"""
super(ChannelWiseThreshold, self).__init__(params)
self.channlels = params['ChannelWiseThreshold_channels'.lower()]
self.threshold_lower = params['ChannelWiseThreshold_threshold_lower'.lower()]
self.threshold_upper = params['ChannelWiseThreshold_threshold_upper'.lower()]
self.replace_lower = params['ChannelWiseThreshold_replace_lower'.lower()]
self.replace_upper = params['ChannelWiseThreshold_replace_upper'.lower()]
self.inverse = params.get('ChannelWiseThreshold_inverse'.lower(), False)
def __call__(self, sample):
image= sample['image']
channels = range(image.shape[0]) if self.channlels is None else self.channlels
for i in range(len(channels)):
chn = channels[i]
if((self.threshold_lower is not None) and (self.threshold_lower[i] is not None)):
t_lower = self.threshold_lower[i]
r_lower = self.threshold_lower[i]
if((self.replace_lower is not None) and (self.replace_lower[i] is not None)):
r_lower = self.replace_lower[i]
image[chn][image[chn] < t_lower] = r_lower
if((self.threshold_upper is not None) and (self.threshold_upper[i] is not None)):
t_upper = self.threshold_upper[i]
r_upper = self.threshold_upper[i]
if((self.replace_upper is not None) and (self.replace_upper[i] is not None)):
r_upper= self.replace_upper[i]
image[chn][image[chn] > t_upper] = r_upper
sample['image'] = image
return sample
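# Illustrative configuration for ChannelWiseThreshold (values are made up; the
# transform lower-cases its parameter keys, so they are written lower-case here):
#   params = {'channelwisethreshold_channels': [0],
#             'channelwisethreshold_threshold_lower': [0.0],
#             'channelwisethreshold_threshold_upper': [1500.0],
#             'channelwisethreshold_replace_lower': [0.0],
#             'channelwisethreshold_replace_upper': [1500.0]}
#   transform = ChannelWiseThreshold(params)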
class ChannelWiseThresholdWithNormalize(AbstractTransform):
"""
Note that this can be replaced by ChannelWiseThreshold + NormalizeWithMinMax
Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
and then normalize the image based on remaining pixels
"""
def __init__(self, params):
"""
:param threshold_lower: (tuple/list/None) The lower threshold value along each channel.
        :param threshold_upper: (tuple/list/None) The upper threshold value along each channel.
        :param mean_std_mode: (bool) If true, normalize the image based on mean and std values,
            and pixel values outside the threshold range are replaced with random numbers.
If false, use the min and max values for normalization.
"""
super(ChannelWiseThresholdWithNormalize, self).__init__(params)
self.threshold_lower = params['ChannelWiseThresholdWithNormalize_threshold_lower'.lower()]
self.threshold_upper = params['ChannelWiseThresholdWithNormalize_threshold_upper'.lower()]
self.mean_std_mode = params['ChannelWiseThresholdWithNormalize_mean_std_mode'.lower()]
self.inverse = params.get('ChannelWiseThresholdWithNormalize_inverse'.lower(), False)
def __call__(self, sample):
image= sample['image']
for chn in range(image.shape[0]):
v0 = self.threshold_lower[chn]
v1 = self.threshold_upper[chn]
if(self.mean_std_mode == True):
mask = np.ones_like(image[chn])
if(v0 is not None):
mask = mask * np.asarray(image[chn] > v0)
if(v1 is not None):
mask = mask * np.asarray(image[chn] < v1)
pixels = image[chn][mask > 0]
chn_mean = pixels.mean()
chn_std = pixels.std()
chn_norm = (image[chn] - chn_mean)/chn_std
chn_random = np.random.normal(0, 1, size = chn_norm.shape)
chn_norm[mask == 0] = chn_random[mask == 0]
image[chn] = chn_norm
else:
img_chn = image[chn]
if(v0 is not None):
img_chn[img_chn < v0] = v0
min_value = v0
else:
min_value = img_chn.min()
if(v1 is not None):
img_chn[img_chn > v1] = v1
max_value = img_chn.max()
else:
max_value = img_chn.max()
img_chn = (img_chn - min_value) / (max_value - min_value)
image[chn] = img_chn
sample['image'] = image
return sample
avg_line_length: 48.854545 | max_line_length: 106 | alphanum_fraction: 0.619092 | count_classes: 5,097 | score_classes: 0.948456 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,808 | score_documentation: 0.336435
hexsha: a76da74714c837b30e9c8d8f4fbec4b1ea99f85f | size: 211 | ext: py | lang: Python
max_stars:  repo_path: exerc18/18.py | repo_name: WilliamSampaio/ExerciciosPython | head_hexsha: 4317d242d2944b91b5d455da8a4ac3a33e154385 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: exerc18/18.py | repo_name: WilliamSampaio/ExerciciosPython | head_hexsha: 4317d242d2944b91b5d455da8a4ac3a33e154385 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: exerc18/18.py | repo_name: WilliamSampaio/ExerciciosPython | head_hexsha: 4317d242d2944b91b5d455da8a4ac3a33e154385 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
import os
numeros = [0,0]
numeros[0] = float(input('Digite o numero 1: '))
numeros[1] = float(input('Digite o numero 2: '))
print(f'o maior valor entre os dois é: {max(numeros)}')
os.system('pause')
avg_line_length: 21.1 | max_line_length: 56 | alphanum_fraction: 0.630332 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 98 | score_documentation: 0.462264
hexsha: a76e729d78669a3e706e9fdd618185c47c67bee8 | size: 7,958 | ext: py | lang: Python
max_stars:  repo_path: DictionaryOfNewZealandEnglish/headword/citation/views.py | repo_name: eResearchSandpit/DictionaryOfNewZealandEnglish | head_hexsha: cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: DictionaryOfNewZealandEnglish/headword/citation/views.py | repo_name: eResearchSandpit/DictionaryOfNewZealandEnglish | head_hexsha: cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: DictionaryOfNewZealandEnglish/headword/citation/views.py | repo_name: eResearchSandpit/DictionaryOfNewZealandEnglish | head_hexsha: cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
# -*- coding: utf-8 -*-
# Citations
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required, current_user
import logging, sys, re
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from DictionaryOfNewZealandEnglish.database import db
from DictionaryOfNewZealandEnglish.headword.citation.forms import *
from DictionaryOfNewZealandEnglish.headword.citation.models import *
import datetime as dt
blueprint = Blueprint("citations", __name__, url_prefix='/headwords/citations',
static_folder="../static")
@blueprint.route("/edit", methods=["GET", "POST"])
@login_required
def edit():
if not current_user.is_admin:
return redirect(url_for('public.home'))
headword = Headword.query.get( request.args.get('headword_id') )
citation_id = request.args.get('citation_id')
citation = Citation.query.get( citation_id )
form = CitationForm(request.form, obj=citation)
if request.method == "GET":
date = __pretty_print_date(citation)
return render_template("headwords/citations/edit.html", form=form,
citation_id=citation_id,
date=date,
headword=headword)
if request.method == "POST":
data = __set_data_for_citation(citation, form)
citation = Citation.query.get( citation_id )
date = __pretty_print_date(citation)
return render_template("headwords/citations/edit.html", form=form,
citation_id=citation_id,
date=date,
headword=headword)
@blueprint.route("/new", methods=["GET"])
@login_required
def new():
if not current_user.is_admin:
return redirect(url_for('public.home'))
headword = Headword.query.get( request.args.get('headword_id') )
form = CitationForm(request.form)
return render_template("headwords/citations/new.html", form=form,
headword=headword)
@blueprint.route("/create", methods=["POST"])
@login_required
def create():
if not current_user.is_admin:
return redirect(url_for('public.home'))
form = CitationForm(request.form)
headword = Headword.query.get( request.args.get('headword_id') )
try:
citation_id = __create_citation(form, headword)
circa = ""
if form.circa.data:
circa = "circa "
date_obj = __form_date(form)
date = __pretty_print_date(date_obj, form.circa.data)
flash("New citation created: {0} ({1}{2})".format(form.author.data,
circa,
date, 'success'))
return render_template("headwords/citations/edit.html",
form=form,
citation_id=citation_id,
date=date,
headword=headword)
except (IntegrityError) as e:
db.session.rollback()
flash("Input error %s" % e)
return render_template("headwords/citations/new.html",
form=form,
headword=headword)
except (InvalidRequestError):
return render_template("headwords/citations/new.html",
form=form,
headword=headword)
@blueprint.route("/delete", methods=["GET"])
@login_required
def delete():
if not current_user.is_admin:
return redirect(url_for('public.home'))
citation = Citation.query.get( request.args.get('citation_id') )
headword = Headword.query.get( request.args.get('headword_id') )
if citation in headword.citations:
headword.citations.remove(citation)
db.session.add(headword)
db.session.commit()
citations = headword.citations
return render_template("headwords/show.html", headword=headword,
citations=citations)
#############################################################################
### Private
def __create_citation(form, headword):
date = __form_date(form)
citation = Citation.create(
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = False,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
h = Headword.query.get(headword.id)
h.citations.append(citation)
db.session.add(h)
db.session.commit()
return citation.id
def __form_date(form):
if form.date.data == "":
flash("No date entered.", 'warning')
raise InvalidRequestError
form_date = re.split(r'/\s*', form.date.data)
if len(form_date) < 3:
if form.circa.data:
# pad out data to fit into datetime type
if len(form_date) == 2:
y = form_date[1].strip()
m = form_date[0].strip()
d = "1"
if len(form_date) == 1:
y = form_date[0].strip()
m = "1"
d = "1"
else:
flash("Partial date entered, perhaps 'Circa' should be checked.", 'warning')
raise InvalidRequestError
else:
y = form_date[2].strip()
m = form_date[1].strip()
d = form_date[0].strip()
# dt.datetime(y, m, d)
print "### form_date {0} / {1} / {2}".format(y,m,d)
date = dt.datetime(int(y), int(m), int(d))
return date
def __pretty_print_date(obj, circa=False):
print "### citation {0} {1}".format(obj, circa)
if isinstance(obj, Citation):
d = obj.date.day
m = obj.date.month
y = obj.date.year
circa = obj.circa
if isinstance(obj, dt.datetime):
d = obj.day
m = obj.month
y = obj.year
if circa:
if d == 1:
if m == 1:
m = ""
else:
m = "{0} / ".format(m)
d = ""
else:
d = "{0} / ".format(d)
m = "{0} / ".format(m)
print "test 1 {0}{1}{2}".format(d, m, y)
return "{0}{1}{2}".format(d, m, y)
else:
print "test 2 {0} / {1} / {2}".format(d, m, y)
return "{0} / {1} / {2}".format(d, m, y)
def __set_data_for_citation(citation, form):
try:
date = __form_date(form)
Citation.update(citation,
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = form.archived.data,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
flash("Edit of citation is saved.", 'success')
return True
except (IntegrityError, InvalidRequestError):
db.session.rollback()
flash("Edit of citation failed.", 'warning')
return False
avg_line_length: 35.846847 | max_line_length: 86 | alphanum_fraction: 0.518975 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 3,883 | score_decorators: 0.487937 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,037 | score_documentation: 0.130309
hexsha: a76eab46ba07f0fb8885169ad3e849032ee2d76c | size: 81 | ext: py | lang: Python
max_stars:  repo_path: tests/conf.py | repo_name: xncbf/django-dynamodb-cache | head_hexsha: be6d1b4b8e92d581041043bcd694f2a9f00ee386 | licenses: ["MIT"] | count: 21 | event_min_datetime: 2022-02-16T10:18:24.000Z | event_max_datetime: 2022-03-31T23:40:06.000Z
max_issues: repo_path: tests/conf.py | repo_name: xncbf/django-dynamodb-cache | head_hexsha: be6d1b4b8e92d581041043bcd694f2a9f00ee386 | licenses: ["MIT"] | count: 9 | event_min_datetime: 2022-03-01T06:40:59.000Z | event_max_datetime: 2022-03-26T08:12:31.000Z
max_forks:  repo_path: tests/conf.py | repo_name: xncbf/django-dynamodb-cache | head_hexsha: be6d1b4b8e92d581041043bcd694f2a9f00ee386 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
from random import random
TABLE_NAME = f"test-django-dynamodb-cache-{random()}"
avg_line_length: 20.25 | max_line_length: 53 | alphanum_fraction: 0.765432 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 40 | score_documentation: 0.493827
hexsha: a76f70dafa18b95735a41dd028a3dcb5cbf10b66 | size: 1,863 | ext: py | lang: Python
max_stars:  repo_path: dockerfiles/greeting/0.2/database.py | repo_name: scherbertlemon/docker-training | head_hexsha: f94c79b461f78a4d9242a3e838524efb70a2792e | licenses: ["MIT"] | count: 1 | event_min_datetime: 2021-08-06T17:00:53.000Z | event_max_datetime: 2021-08-06T17:00:53.000Z
max_issues: repo_path: dockerfiles/greeting/0.2/database.py | repo_name: scherbertlemon/docker-training | head_hexsha: f94c79b461f78a4d9242a3e838524efb70a2792e | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: dockerfiles/greeting/0.2/database.py | repo_name: scherbertlemon/docker-training | head_hexsha: f94c79b461f78a4d9242a3e838524efb70a2792e | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
import psycopg2 as pg
import os
"""
Database (Postgres) module connecting to the database for the simple greeting
app.
"""
# The hostname where the database is running can be determined via environment
PGHOST = os.getenv("PG_HOST") if os.getenv("PG_HOST") else "localhost"
def get_pgconn():
"""
Connects to the database and also triggers the creation of the single
required table if it does not exist yet. Clearly you would not do
that in a production environment.
Returns
-------
psycopg2 database connection
"""
# database credentials hard-coded except for hostname
CRED = {
"host": PGHOST,
"port": 5432,
"database": "postgres",
"user": "postgres",
"password": "holymoly"
}
conn = pg.connect(**CRED)
create(conn)
return conn
def create(db):
"""
helper function to create the required database table if it does not exist
yet.
Parameters
----------
db: psycopg2 database connection
"""
SQL_CREATE = """
CREATE TABLE IF NOT EXISTS messages (
id SERIAL,
message TEXT,
author TEXT,
received TEXT
)
"""
cursor = db.cursor()
cursor.execute(SQL_CREATE)
db.commit()
def insert(db, dct):
"""
Inserts the entered data for author, message and timestamp into the
database.
Parameters
----------
db: psycopg2 database connection
dct: dict
containing the fields message, author, received. Validity is not
checked, every field is expected to be present and to contain a string
as value.
"""
SQL_INSERT = """
INSERT INTO messages(message, author, received) VALUES (
%(message)s,
%(author)s,
%(received)s
)
"""
cursor = db.cursor()
cursor.execute(SQL_INSERT, dct)
db.commit()
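# Usage sketch (assumes a reachable Postgres with the hard-coded credentials above):
#   conn = get_pgconn()
#   insert(conn, {"message": "hello", "author": "alice", "received": "2021-08-06 17:00"})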
avg_line_length: 23 | max_line_length: 78 | alphanum_fraction: 0.616747 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,368 | score_documentation: 0.7343
hexsha: a76faf50eeea6f4eeb893d3b2fcef43aec0e7eaf | size: 3,277 | ext: py | lang: Python
max_stars:  repo_path: generator/constant_aug.py | repo_name: zhou3968322/pytorch-CycleGAN-and-pix2pix | head_hexsha: 30730fddbc6797c5e421cd49c9fef369011d484d | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: generator/constant_aug.py | repo_name: zhou3968322/pytorch-CycleGAN-and-pix2pix | head_hexsha: 30730fddbc6797c5e421cd49c9fef369011d484d | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: generator/constant_aug.py | repo_name: zhou3968322/pytorch-CycleGAN-and-pix2pix | head_hexsha: 30730fddbc6797c5e421cd49c9fef369011d484d | licenses: ["BSD-3-Clause"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# -*- coding:utf-8 -*-
# email:bingchengzhou@foxmail.com
# create: 2020/11/25
from imgaug import augmenters as iaa
seq_cir = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.4, 0.7), fit_output=True)
],
random_order=True)
seq_cir_big = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.9, 1.5), fit_output=True)
],
random_order=True)
seq_ell = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-20, 20), scale=(0.4, 0.9), fit_output=True)
],
random_order=True)
seq_squ = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.18, 0.35), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.8, 1.4), fit_output=True)
],
random_order=True)
seq_rec = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.15, 0.25), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.2, 0.4), fit_output=True)
],
random_order=True)
seq_doc_noise = iaa.Sequential(
[
iaa.Sometimes(
0.6,
iaa.OneOf(iaa.Sequential([iaa.GaussianBlur(sigma=(0, 1.0))])
# iaa.AverageBlur(k=(2, 5)),
# iaa.MedianBlur(k=(3, 7))])
)
),
iaa.Sometimes(
0.5,
iaa.LinearContrast((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.Multiply((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.WithBrightnessChannels(iaa.Add((-40, 40))),
),
# iaa.Sometimes(
# 0.3,
# iaa.OneOf(iaa.Sequential([
# iaa.AdditiveGaussianNoise(scale=(0, 0.01*255), per_channel=0.5),
# iaa.SaltAndPepper(0.01)]))
# ),
iaa.Sometimes(
0.5,
iaa.Add((-10, 10), per_channel=0.5),
),
# iaa.Sometimes(
# 0.5,
# iaa.Dropout(p=(0, 0.05))
# ),
# iaa.JpegCompression(compression=(80, 99))
],
random_order=True)
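# Usage sketch (illustrative; `images` is assumed to be a list of HxWxC uint8 arrays):
#   images_aug = seq_doc_noise(images=images)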
avg_line_length: 30.915094 | max_line_length: 90 | alphanum_fraction: 0.53494 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,188 | score_documentation: 0.362527
hexsha: a77112792896e19b96e12810cacf0861b725bf41 | size: 3,873 | ext: py | lang: Python
max_stars:  repo_path: ooobuild/lo/packages/x_data_sink_encr_support.py | repo_name: Amourspirit/ooo_uno_tmpl | head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: ooobuild/lo/packages/x_data_sink_encr_support.py | repo_name: Amourspirit/ooo_uno_tmpl | head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: ooobuild/lo/packages/x_data_sink_encr_support.py | repo_name: Amourspirit/ooo_uno_tmpl | head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | licenses: ["Apache-2.0"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.packages
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
class XDataSinkEncrSupport(XInterface_8f010a43):
"""
Allows to get access to the stream of a PackageStream.
See Also:
`API XDataSinkEncrSupport <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1packages_1_1XDataSinkEncrSupport.html>`_
"""
__ooo_ns__: str = 'com.sun.star.packages'
__ooo_full_ns__: str = 'com.sun.star.packages.XDataSinkEncrSupport'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.packages.XDataSinkEncrSupport'
@abstractmethod
def getDataStream(self) -> 'XInputStream_98d40ab4':
"""
Allows to get access to the data of the PackageStream.
In case stream is encrypted one and the key for the stream is not set, an exception must be thrown.
Raises:
com.sun.star.packages.WrongPasswordException: ``WrongPasswordException``
com.sun.star.packages.zip.ZipException: ``ZipException``
com.sun.star.io.IOException: ``IOException``
"""
@abstractmethod
def getPlainRawStream(self) -> 'XInputStream_98d40ab4':
"""
Allows to get access to the raw data of the stream as it is stored in the package.
Raises:
com.sun.star.io.IOException: ``IOException``
com.sun.star.packages.NoEncryptionException: ``NoEncryptionException``
"""
@abstractmethod
def getRawStream(self) -> 'XInputStream_98d40ab4':
"""
Allows to get access to the data of the PackageStream as to raw stream.
In case stream is not encrypted an exception will be thrown.
The difference of raw stream is that it contains header for encrypted data, so an encrypted stream can be copied from one PackageStream to another one without decryption.
Raises:
com.sun.star.packages.NoEncryptionException: ``NoEncryptionException``
com.sun.star.io.IOException: ``IOException``
"""
@abstractmethod
def setDataStream(self, aStream: 'XInputStream_98d40ab4') -> None:
"""
Allows to set a data stream for the PackageStream.
In case PackageStream is marked as encrypted the data stream will be encrypted on storing.
Raises:
com.sun.star.io.IOException: ``IOException``
"""
@abstractmethod
def setRawStream(self, aStream: 'XInputStream_98d40ab4') -> None:
"""
Allows to set raw stream for the PackageStream.
The PackageStream object can not be marked as encrypted one, an exception will be thrown in such case.
Raises:
com.sun.star.packages.EncryptionNotAllowedException: ``EncryptionNotAllowedException``
com.sun.star.packages.NoRawFormatException: ``NoRawFormatException``
com.sun.star.io.IOException: ``IOException``
"""
__all__ = ['XDataSinkEncrSupport']
avg_line_length: 39.520408 | max_line_length: 178 | alphanum_fraction: 0.694036 | count_classes: 2,883 | score_classes: 0.744384 | count_generators: 0 | score_generators: 0 | count_decorators: 2,337 | score_decorators: 0.603408 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,120 | score_documentation: 0.805577
hexsha: a77229f1a130b744660ffd1757e86e6d6dd38d54 | size: 1,074 | ext: py | lang: Python
max_stars:  repo_path: questions/q197_choose_and_swap/code.py | repo_name: aadhityasw/Competitive-Programs | head_hexsha: 901a48d35f024a3a87c32a45b7f4531e8004a203 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: questions/q197_choose_and_swap/code.py | repo_name: aadhityasw/Competitive-Programs | head_hexsha: 901a48d35f024a3a87c32a45b7f4531e8004a203 | licenses: ["MIT"] | count: 1 | event_min_datetime: 2021-05-15T07:56:51.000Z | event_max_datetime: 2021-05-15T07:56:51.000Z
max_forks:  repo_path: questions/q197_choose_and_swap/code.py | repo_name: aadhityasw/Competitive-Programs | head_hexsha: 901a48d35f024a3a87c32a45b7f4531e8004a203 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
class Solution:
def chooseandswap (self, A):
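        # Interpretation (assumed from the folder name "choose_and_swap"): pick two
        # distinct letters and swap every occurrence of both, aiming for the
        # lexicographically smallest string. The scan below walks the increasing
        # chain of prefix characters of A and swaps the first one for which a
        # strictly smaller, not-yet-considered letter still occurs in A.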
opt = 'a'
fir = A[0]
arr = [0]*26
for s in A :
arr[ord(s)-97] += 1
i = 0
while i < len(A) :
if opt > 'z' :
break
while opt < fir :
if opt in A :
ans = ""
for s in A :
if s == opt :
ans += fir
elif s == fir :
ans += opt
else :
ans += s
return ans
opt = chr(ord(opt) + 1)
opt = chr(ord(opt) + 1)
while i < len(A) and A[i] <= fir :
i += 1
if i < len(A) :
fir = A[i]
return A
if __name__ == '__main__':
ob = Solution()
t = int (input ())
for _ in range (t):
A = input()
ans = ob.chooseandswap(A)
print(ans)
avg_line_length: 23.347826 | max_line_length: 46 | alphanum_fraction: 0.286778 | count_classes: 903 | score_classes: 0.840782 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 18 | score_documentation: 0.01676
hexsha: a7730b1c8e64cf80eb7189889ed0d119ac2a5fc8 | size: 10,625 | ext: py | lang: Python
max_stars:  repo_path: assignment4/assignment4.py | repo_name: umamibeef/UBC-EECE-560-Coursework | head_hexsha: 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_issues: repo_path: assignment4/assignment4.py | repo_name: umamibeef/UBC-EECE-560-Coursework | head_hexsha: 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
max_forks:  repo_path: assignment4/assignment4.py | repo_name: umamibeef/UBC-EECE-560-Coursework | head_hexsha: 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | licenses: ["MIT"] | count: null | event_min_datetime: null | event_max_datetime: null
content:
import argparse
import csv
import matplotlib
import matplotlib.ticker as tck
import matplotlib.pyplot as plt
import numpy as np
# Matplotlib export settings
matplotlib.use('pgf')
import matplotlib.pyplot as plt
matplotlib.rcParams.update({
'pgf.texsystem': 'pdflatex',
'font.size': 10,
'font.family': 'serif', # use serif/main font for text elements
'text.usetex': True, # use inline math for ticks
'pgf.rcfonts': False # don't setup fonts from rc parameters
})
# Main function
def main(args):
C_zero = 7.5240e-03 * 1e-6 # Farads/km
C_pos = 1.2027e-02 * 1e-6 # Farads/km
G_zero = 2.0000e-08 # Mhos/km
G_pos = 2.0000e-08 # Mhos/km
length = 300 # km
FREQ_INDEX = 0
R_ZERO_INDEX = 1
L_ZERO_INDEX = 2
R_POS_INDEX = 3
L_POS_INDEX = 4
MAGNITUDE_INDEX = 0
PHASE_INDEX = 1
# prepopulate data with a list of five empty lists
data = [[] for i in range(5)]
# Read in PSCAD .CSV data
print('*** Opening assignment 4 CSV data file...')
with open('data_assign04.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
# Read in row data
for row in csv_reader:
if line_count == 0:
print('Column names are: ' + ', '.join(row))
line_count += 1
else:
data[FREQ_INDEX].append(float(row[0]))
data[R_ZERO_INDEX].append(float(row[1])) # Ohms/km
data[L_ZERO_INDEX].append(float(row[2]) * 1e-3) # Henries/km
data[R_POS_INDEX].append(float(row[3])) # Ohms/km
data[L_POS_INDEX].append(float(row[4]) * 1e-3) # Henries/km
line_count += 1
# Figure out when break switched
print('Processed ' + str(line_count) + ' lines.')
num_data_points = len(data[FREQ_INDEX])
# Prepare values for Z(w) magnitude and phase
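    # Characteristic impedance from the per-km line constants: Zc(w) = sqrt((R + jwL) / (G + jwC))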
impedance_zero = [[],[]]
impedance_pos = [[],[]]
for index in range(num_data_points):
omega = 2*np.pi*data[FREQ_INDEX][index]
impedance_zero_val = np.sqrt((data[R_ZERO_INDEX][index] + (1j*omega*data[L_ZERO_INDEX][index]))/(G_zero + (1j*omega*C_zero)))
impedance_pos_val = np.sqrt((data[R_POS_INDEX][index] + (1j*omega*data[L_POS_INDEX][index])) /(G_pos + (1j*omega*C_pos)))
# print("F: " + str(data[FREQ_INDEX][index]))
# print("Omega: " + str(omega))
# print("R_0: " + str(data[R_ZERO_INDEX][index]))
# print("L_0: " + str(data[L_ZERO_INDEX][index]))
# print("C_0: " + str(C_zero))
# print("G_0: " + str(G_zero))
# print("R_+: " + str(data[R_POS_INDEX][index]))
# print("L_+: " + str(data[L_POS_INDEX][index]))
# print("C_+: " + str(C_pos))
# print("G_+: " + str(G_pos))
# print("Zc_0: " + str(impedance_zero_val))
# print("Zc_0 mag: " + str(np.absolute(impedance_zero_val)))
# print("Zc_+: " + str(impedance_pos_val))
# print("Zc_+ mag: " + str(np.absolute(impedance_pos_val)))
impedance_zero[MAGNITUDE_INDEX].append(np.absolute(impedance_zero_val))
impedance_zero[PHASE_INDEX].append(np.angle(impedance_zero_val))
impedance_pos[MAGNITUDE_INDEX].append(np.absolute(impedance_pos_val))
impedance_pos[PHASE_INDEX].append(np.angle(impedance_pos_val))
print("\r\n")
# Prepare values for propagation function magnitude and phase as well
# Prepare values for attenuation alpha(w) (nepers/km)
# Prepare values for phase displacement beta(w) (radians/km)
# Prepare values for propagation speed a(w) (km/s)
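    # All derived from the propagation constant gamma(w) = sqrt((R + jwL)(G + jwC)):
    # alpha = Re(gamma), beta = Im(gamma), propagation speed a = omega / beta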
propagation_zero = [[],[]]
propagation_pos = [[],[]]
attenuation_zero = []
attenuation_pos = []
phase_zero = []
phase_pos = []
propagation_speed_zero = []
propagation_speed_pos = []
for index in range(num_data_points):
omega = 2*np.pi*data[FREQ_INDEX][index]
gamma_zero = np.sqrt((data[R_ZERO_INDEX][index] + 1j*omega*data[L_ZERO_INDEX][index])*(G_zero + 1j*omega*C_zero))
gamma_pos = np.sqrt((data[R_POS_INDEX][index] + 1j*omega*data[L_POS_INDEX][index])*(G_pos + 1j*omega*C_pos))
# propagation function magnitude and phase
propagation_zero[MAGNITUDE_INDEX].append(np.absolute(np.exp(-1*gamma_zero*length)))
propagation_zero[PHASE_INDEX].append(-np.imag(gamma_zero)*length)
propagation_pos[MAGNITUDE_INDEX].append(np.absolute(np.exp(-1*gamma_pos*length)))
propagation_pos[PHASE_INDEX].append(-np.imag(gamma_pos)*length)
# attenuation (real component of gamma) (nepers/km)
attenuation_zero.append(np.real(gamma_zero))
attenuation_pos.append(np.real(gamma_pos))
# phase displacement (imaginary component of gamma) (radians/km)
phase_zero.append(np.imag(gamma_zero))
phase_pos.append(np.imag(gamma_pos))
# propagation speed (omega/phase_displacement) (km/s)
propagation_speed_zero.append(omega/phase_zero[-1])
propagation_speed_pos.append(omega/phase_pos[-1])
# propagation_speed_zero.append(1/np.sqrt(data[L_ZERO_INDEX][index]*C_zero))
# propagation_speed_pos.append(1/np.sqrt(data[L_POS_INDEX][index]*C_pos))
# Plots for publication
legend_font_size = 6
# Plot Z(w) magnitude and phase
fig, ax = plt.subplots(2)
ax[0].plot(data[FREQ_INDEX], impedance_zero[MAGNITUDE_INDEX], color='b', label='zero sequence')
ax[0].plot(data[FREQ_INDEX], impedance_pos[MAGNITUDE_INDEX], color='g', label='positive sequence')
ax[0].set(xlabel='Frequency $Hz$', ylabel='Magnitude ($\Omega/km$)', title='$Z_c$ - Magnitude vs. Frequency')
ax[0].grid()
ax[0].set_xscale('log')
ax[0].legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
ax[1].plot(data[FREQ_INDEX], impedance_zero[PHASE_INDEX], color='b', label='zero sequence')
ax[1].plot(data[FREQ_INDEX], impedance_pos[PHASE_INDEX], color='g', label='positive sequence')
ax[1].yaxis.set_major_formatter(tck.FormatStrFormatter('%1.1f $\pi$'))
ax[1].yaxis.set_major_locator(tck.MultipleLocator(base=1/5))
ax[1].set(xlabel='Frequency $Hz$', ylabel='Phase ($rad$)', title='$Z_c$ - Phase vs. Frequency')
ax[1].grid()
ax[1].set_xscale('log')
ax[1].legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,8)
fig.tight_layout()
fig.savefig('zc_magnitude_phase_plots.pgf')
fig.savefig('zc_magnitude_phase_plots.png')
# Plot propagation function magnitude and phase
fig, ax = plt.subplots(2)
ax[0].plot(data[FREQ_INDEX], propagation_zero[MAGNITUDE_INDEX], color='b', label='zero sequence')
ax[0].plot(data[FREQ_INDEX], propagation_pos[MAGNITUDE_INDEX], color='g', label='positive sequence')
ax[0].set(xlabel='Frequency $Hz$', ylabel=r'Magnitude $\left|e^{-\gamma{}l}\right|$', title='$e^{-\gamma{}l}$ - Magnitude vs. Frequency')
ax[0].grid()
ax[0].set_xscale('log')
ax[0].legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
ax[1].plot(data[FREQ_INDEX], propagation_zero[PHASE_INDEX], color='b', label='zero sequence')
ax[1].plot(data[FREQ_INDEX], propagation_pos[PHASE_INDEX], color='g', label='positive sequence')
ax[1].set(xlabel='Frequency $Hz$', ylabel=r'Phase $\phi{}=\beta{}l=\omega{}\tau$ ($rad$)', title='$e^{-\gamma{}l}$ - Phase vs. Frequency')
ax[1].grid()
ax[1].set_xscale('log')
ax[1].legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,8)
fig.tight_layout()
fig.savefig('prop_magnitude_phase_plots.pgf')
fig.savefig('prop_magnitude_phase_plots.png')
    # Plot propagation function phase (no log scale for frequency)
fig, ax = plt.subplots(1)
ax.plot(data[FREQ_INDEX], propagation_zero[PHASE_INDEX], color='b', label='zero sequence')
ax.plot(data[FREQ_INDEX], propagation_pos[PHASE_INDEX], color='g', label='positive sequence')
ax.set(xlabel='Frequency $Hz$', ylabel=r'Phase $\phi{}=\beta{}l=\omega{}\tau$ ($rad$)', title='$e^{-\gamma{}l}$ - Phase vs. Frequency')
ax.grid()
ax.legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,3.5)
fig.tight_layout()
fig.savefig('prop_phase_plot_nolog.pgf')
fig.savefig('prop_phase_plot_nolog.png')
# Plot attenuation (real component of gamma) (nepers/km)
fig, ax = plt.subplots()
ax.plot(data[FREQ_INDEX], attenuation_zero, color='b', label='zero sequence')
ax.plot(data[FREQ_INDEX], attenuation_pos, color='g', label='positive sequence')
ax.set(xlabel='Frequency $Hz$', ylabel=r'Attenuation $\alpha{}(\omega)$ $(nepers/km)$', title=r'Attenuation $\alpha{}(\omega)$ vs. Frequency')
ax.grid()
ax.set_xscale('log')
ax.legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,3.5)
fig.tight_layout()
fig.savefig('attenuation_plots.pgf')
fig.savefig('attenuation_plots.png')
# Plot phase displacement beta(w) (radians/km)
fig, ax = plt.subplots()
ax.plot(data[FREQ_INDEX], phase_zero, color='b', label='zero sequence')
ax.plot(data[FREQ_INDEX], phase_pos, color='g', label='positive sequence')
ax.set(xlabel='Frequency $Hz$', ylabel=r'Phase Displacement $\beta{}(\omega)$ $(rad/km)$', title=r'Phase Displacement $\beta{}(\omega)$ vs. Frequency')
ax.grid()
ax.set_xscale('log')
ax.legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,3.5)
fig.tight_layout()
fig.savefig('phase_displacement_plots.pgf')
fig.savefig('phase_displacement_plots.png')
# Plot propagation speed a(w) (km/s)
fig, ax = plt.subplots()
ax.plot(data[FREQ_INDEX], propagation_speed_zero, color='b', label='zero sequence')
ax.plot(data[FREQ_INDEX], propagation_speed_pos, color='g', label='positive sequence')
ax.set(xlabel='Frequency $Hz$', ylabel=r'Propagation Speed $a(\omega)$ $(km/s)$', title=r'Propagation Speed $a(\omega)$ vs. Frequency')
ax.grid()
ax.set_xscale('log')
ax.legend(loc='best', prop={'size':legend_font_size}, fancybox=True, shadow=True)
fig.set_size_inches(6.5,3.5)
fig.tight_layout()
fig.savefig('propagation_speed_plots.pgf')
fig.savefig('propagation_speed_plots.png')
if __name__ == '__main__':
# the following sets up the argument parser for the program
parser = argparse.ArgumentParser(description='Assignment 4 solution generator')
args = parser.parse_args()
main(args)
| 48.076923
| 155
| 0.660706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,781
| 0.355859
|
a774dc8ec0c70281d59955e540db50979da5c0cf
| 4,744
|
py
|
Python
|
src/python/pants/scm/subsystems/changed.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:24.000Z
|
2021-11-11T14:04:24.000Z
|
src/python/pants/scm/subsystems/changed.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/scm/subsystems/changed.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:12.000Z
|
2021-11-11T14:04:12.000Z
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.build_environment import get_scm
from pants.base.exceptions import TaskError
from pants.goal.workspace import ScmWorkspace
from pants.scm.change_calculator import BuildGraphChangeCalculator
from pants.subsystem.subsystem import Subsystem
from pants.util.objects import datatype
# TODO: Remove this in 1.5.0dev0.
class _ChainedOptions(object):
def __init__(self, options_seq):
self._options_seq = options_seq
def __getattr__(self, attr):
for options in self._options_seq:
option_value = getattr(options, attr, None)
if option_value is not None:
return option_value
return None
class ChangedRequest(datatype('ChangedRequest',
['changes_since', 'diffspec', 'include_dependees', 'fast'])):
"""Parameters required to compute a changed file/target set."""
@classmethod
def from_options(cls, options):
"""Given an `Options` object, produce a `ChangedRequest`."""
return cls(options.changes_since,
options.diffspec,
options.include_dependees,
options.fast)
def is_actionable(self):
return bool(self.changes_since or self.diffspec)
class Changed(object):
"""A subsystem for global `changed` functionality.
This supports the "legacy" `changed`, `test-changed` and `compile-changed` goals as well as the
v2 engine style `--changed-*` argument target root replacements which can apply to any goal (e.g.
`./pants --changed-parent=HEAD~3 list` replaces `./pants --changed-parent=HEAD~3 changed`).
"""
class Factory(Subsystem):
options_scope = 'changed'
@classmethod
def register_options(cls, register):
register('--changes-since', '--parent', '--since',
help='Calculate changes since this tree-ish/scm ref (defaults to current HEAD/tip).')
register('--diffspec',
help='Calculate changes contained within given scm spec (commit range/sha/ref/etc).')
register('--include-dependees', choices=['none', 'direct', 'transitive'], default='none',
help='Include direct or transitive dependees of changed targets.')
register('--fast', type=bool,
help='Stop searching for owners once a source is mapped to at least one owning target.')
# TODO: Remove or reduce this in 1.5.0dev0 - we only need the subsystem's options scope going fwd.
@classmethod
def create(cls, alternate_options=None):
"""
:param Options alternate_options: An alternate `Options` object for overrides.
"""
options = cls.global_instance().get_options()
# N.B. This chaining is purely to support the `changed` tests until deprecation.
ordered_options = [option for option in (alternate_options, options) if option is not None]
# TODO: Kill this chaining (in favor of outright options replacement) as part of the `changed`
# task removal (post-deprecation cycle). See https://github.com/pantsbuild/pants/issues/3893
chained_options = _ChainedOptions(ordered_options)
changed_request = ChangedRequest.from_options(chained_options)
return Changed(changed_request)
def __init__(self, changed_request):
self._changed_request = changed_request
# TODO: Remove this in 1.5.0dev0 in favor of `TargetRoots` use of `EngineChangeCalculator`.
def change_calculator(self, build_graph, address_mapper, scm=None, workspace=None,
exclude_target_regexp=None):
"""Constructs and returns a BuildGraphChangeCalculator.
:param BuildGraph build_graph: A BuildGraph instance.
    :param AddressMapper address_mapper: An AddressMapper instance.
:param Scm scm: The SCM instance. Defaults to discovery.
:param ScmWorkspace: The SCM workspace instance.
:param string exclude_target_regexp: The exclude target regexp.
"""
scm = scm or get_scm()
if scm is None:
raise TaskError('A `changed` goal or `--changed` option was specified, '
'but no SCM is available to satisfy the request.')
workspace = workspace or ScmWorkspace(scm)
return BuildGraphChangeCalculator(
scm,
workspace,
address_mapper,
build_graph,
self._changed_request.include_dependees,
fast=self._changed_request.fast,
changes_since=self._changed_request.changes_since,
diffspec=self._changed_request.diffspec,
exclude_target_regexp=exclude_target_regexp
)
| 41.982301
| 103
| 0.706788
| 4,107
| 0.865725
| 0
| 0
| 1,675
| 0.353078
| 0
| 0
| 2,161
| 0.455523
|
a775681f9ac02e296e8b3818c15064c985162dc4
| 1,825
|
py
|
Python
|
SOLID/LSP/GoodLSPCode.py
|
maumneto/DesignPatternCourse
|
eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8
|
[
"MIT"
] | 1
|
2021-06-26T15:32:35.000Z
|
2021-06-26T15:32:35.000Z
|
SOLID/LSP/GoodLSPCode.py
|
maumneto/DesignPatternCourse
|
eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8
|
[
"MIT"
] | null | null | null |
SOLID/LSP/GoodLSPCode.py
|
maumneto/DesignPatternCourse
|
eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8
|
[
"MIT"
] | null | null | null |
class AccountManager(object):
def __init__(self, balance = 0):
self.balance = balance
def getBalance(self):
return self.balance
def withdraw(self, value):
if self.balance >= value:
self.balance = self.balance - value
print('Successful Withdrawal.')
else:
print('Insufficient Funds')
def deposit(self, value):
self.balance = self.balance + value
print('Successful Deposit')
def income(self, rate):
self.balance = self.balance + self.balance*rate
class AccountCommon(AccountManager):
def __init__(self, balance = 0):
super(AccountCommon, self).__init__(balance=balance)
def getBalance(self):
return super().getBalance()
def deposit(self, value):
super().deposit(value)
def withdraw(self, value):
super().withdraw(value)
def income(self, rate):
super().income(rate)
def message(self):
print('Common account balance: %.2f' % self.getBalance())
class AccountSpetial(AccountManager):
def __init__(self, balance = 0):
super(AccountSpetial, self).__init__(balance=balance)
def getBalance(self):
return super().getBalance()
def deposit(self, value):
super().deposit(value)
def withdraw(self, value):
super().withdraw(value)
def message(self):
print('Common account balance: %.2f' % self.getBalance())
if __name__ == '__main__':
commonAccount = AccountCommon(500)
commonAccount.deposit(500)
commonAccount.withdraw(100)
commonAccount.income(0.005)
commonAccount.message()
print(' ------- ')
spetialAccount = AccountSpetial(1000)
spetialAccount.deposit(500)
spetialAccount.withdraw(200)
spetialAccount.message()
| 26.449275
| 65
| 0.629041
| 1,462
| 0.801096
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.079452
|
a7758434e025289995bc94bace734e2f383e3e76
| 818
|
py
|
Python
|
test/test_global_customer_api.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_global_customer_api.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_global_customer_api.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition (Full)
    This API exposes all the functionalities for the eZmax and eZsign applications.  # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import unittest
import eZmaxApi
from eZmaxApi.api.global_customer_api import GlobalCustomerApi # noqa: E501
class TestGlobalCustomerApi(unittest.TestCase):
"""GlobalCustomerApi unit test stubs"""
def setUp(self):
self.api = GlobalCustomerApi() # noqa: E501
def tearDown(self):
pass
def test_global_customer_get_endpoint_v1(self):
"""Test case for global_customer_get_endpoint_v1
Get customer endpoint # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 22.108108
| 97
| 0.689487
| 384
| 0.469438
| 0
| 0
| 0
| 0
| 0
| 0
| 447
| 0.546455
|
a7762ca16d51e6d7fb512c7980d15ee79dbeff30
| 3,930
|
py
|
Python
|
reconstruction_model.py
|
JungahYang/Deep3DFaceReconstruction
|
041b89a781f90ba459f3294c4e568b5c1a3cf7da
|
[
"MIT"
] | 1,424
|
2019-05-07T05:03:12.000Z
|
2022-03-31T08:52:29.000Z
|
reconstruction_model.py
|
zepengF/Deep3DFaceReconstruction
|
5b131a3e67597da67409486e20db50007f48427d
|
[
"MIT"
] | 194
|
2019-05-08T21:11:23.000Z
|
2022-03-30T02:58:25.000Z
|
reconstruction_model.py
|
zepengF/Deep3DFaceReconstruction
|
5b131a3e67597da67409486e20db50007f48427d
|
[
"MIT"
] | 359
|
2019-05-10T11:05:41.000Z
|
2022-03-28T21:57:42.000Z
|
import tensorflow as tf
import face_decoder
import networks
import losses
from utils import *
###############################################################################################
# model for single image face reconstruction
###############################################################################################
class Reconstruction_model():
# initialization
def __init__(self,opt):
self.Face3D = face_decoder.Face3D() #analytic 3D face object
self.opt = opt # training options
self.Optimizer = tf.train.AdamOptimizer(learning_rate = opt.lr) # optimizer
# load input data from queue
def set_input(self,input_iterator):
self.imgs,self.lm_labels,self.attention_masks = input_iterator.get_next()
# forward process of the model
def forward(self,is_train = True):
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
self.coeff = networks.R_Net(self.imgs,is_training=is_train)
self.Face3D.Reconstruction_Block(self.coeff,self.opt)
self.id_labels = networks.Perceptual_Net(self.imgs)
self.id_features = networks.Perceptual_Net(self.Face3D.render_imgs)
self.photo_loss = losses.Photo_loss(self.imgs,self.Face3D.render_imgs,self.Face3D.img_mask_crop*self.attention_masks)
self.landmark_loss = losses.Landmark_loss(self.Face3D.landmark_p,self.lm_labels)
self.perceptual_loss = losses.Perceptual_loss(self.id_features,self.id_labels)
self.reg_loss = losses.Regulation_loss(self.Face3D.id_coeff,self.Face3D.ex_coeff,self.Face3D.tex_coeff,self.opt)
self.reflect_loss = losses.Reflectance_loss(self.Face3D.face_texture,self.Face3D.facemodel)
self.gamma_loss = losses.Gamma_loss(self.Face3D.gamma)
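        # total loss: weighted sum of the individual loss terms, with the weights taken from the training options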
self.loss = self.opt.w_photo*self.photo_loss + self.opt.w_lm*self.landmark_loss + self.opt.w_id*self.perceptual_loss\
+ self.opt.w_reg*self.reg_loss + self.opt.w_ref*self.reflect_loss + self.opt.w_gamma*self.gamma_loss
# backward process
def backward(self,is_train = True):
if is_train:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
update_var_list = [v for v in var_list if 'resnet_v1_50' in v.name or 'fc-' in v.name]
grads = tf.gradients(self.loss,update_var_list)
# get train_op with update_ops to ensure updating for bn parameters
with tf.control_dependencies(update_ops):
self.train_op = self.Optimizer.apply_gradients(zip(grads,update_var_list),global_step = self.opt.global_step)
# if not training stage, avoid updating variables
else:
pass
# forward and backward
def step(self, is_train = True):
with tf.variable_scope(tf.get_variable_scope()) as scope:
self.forward(is_train = is_train)
self.backward(is_train = is_train)
# statistics summarization
def summarize(self):
# scalar and histogram stats
stat = [
tf.summary.scalar('reflect_error',self.reflect_loss),
tf.summary.scalar('gamma_error',self.gamma_loss),
tf.summary.scalar('id_sim_error',self.perceptual_loss),
tf.summary.scalar('lm_error',tf.sqrt(self.landmark_loss)),
tf.summary.scalar('photo_error',self.photo_loss),
tf.summary.scalar('train_error',self.loss),
tf.summary.histogram('id_coeff',self.Face3D.id_coeff),
tf.summary.histogram('ex_coeff',self.Face3D.ex_coeff),
tf.summary.histogram('tex_coeff',self.Face3D.tex_coeff)]
self.summary_stat = tf.summary.merge(stat)
# combine face region of reconstruction images with input images
render_imgs = self.Face3D.render_imgs[:,:,:,::-1]*self.Face3D.img_mask + tf.cast(self.imgs[:,:,:,::-1],tf.float32)*(1-self.Face3D.img_mask)
render_imgs = tf.clip_by_value(render_imgs,0,255)
render_imgs = tf.cast(render_imgs,tf.uint8)
# image stats
img_stat = [tf.summary.image('imgs',tf.concat([tf.cast(self.imgs[:,:,:,::-1],tf.uint8),render_imgs],axis = 2), max_outputs = 8)]
self.summary_img = tf.summary.merge(img_stat)
| 45.697674
| 142
| 0.711705
| 3,590
| 0.913486
| 0
| 0
| 0
| 0
| 0
| 0
| 800
| 0.203562
|
a7779af144a3ba68deaf47c8047f304427889fe5
| 2,002
|
py
|
Python
|
organizational_area/admin.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
organizational_area/admin.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
organizational_area/admin.py
|
mspasiano/uniTicket
|
1e8e4c2274293e751deea5b8b1fb4116136c5641
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
from .admin_inlines import *
class AbstractAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
class Media:
js = ('js/textarea-autosize.js',)
css = {'all': ('css/textarea-small.css',),}
@admin.register(OrganizationalStructureFunction)
class OrganizationalStructureFunctionAdmin(AbstractAdmin):
pass
@admin.register(OrganizationalStructureType)
class OrganizationalStructureTypeAdmin(AbstractAdmin):
pass
@admin.register(OrganizationalStructure)
class OrganizationalStructureAdmin(AbstractAdmin):
prepopulated_fields = {'slug': ('name',)}
list_display = ('name', 'structure_type',
'description', 'is_active')
list_filter = ('structure_type', 'is_active')
list_editable = ('is_active',)
inlines = [OrganizationalStructureLocationInline,
OrganizationalStructureOfficeInline,]
@admin.register(OrganizationalStructureOffice)
class OrganizationalStructureOfficeAdmin(AbstractAdmin):
prepopulated_fields = {'slug': ('name',)}
list_display = ('name', 'organizational_structure', 'is_active')
list_filter = ('organizational_structure',
'is_active')
list_editable = ('is_active',)
inlines = [OrganizationalStructureOfficeEmployeeInline,
OrganizationalStructureOfficeLocationInline,]
#@admin.register(TipoDotazione)
#class TipoDotazioneAdmin(admin.ModelAdmin):
#list_display = ('nome', 'descrizione')
#class Media:
#js = ('js/textarea-autosize.js',)
#css = {'all': ('css/textarea-small.css',),}
#@admin.register(Locazione)
#class LocazioneAdmin(admin.ModelAdmin):
#list_display = ('nome', 'indirizzo', 'descrizione_breve',)
#class Media:
#js = ('js/textarea-autosize.js',)
#css = {'all': ('css/textarea-small.css',),}
# @admin.register(OrganizationalStructureFunction)
# class OrganizationalStructureFunction(AbstractAdmin):
# pass
| 29.014493
| 68
| 0.700799
| 1,114
| 0.556444
| 0
| 0
| 1,103
| 0.550949
| 0
| 0
| 805
| 0.402098
|
a777a11d9cdd73ba24751d88b5b9e8b62e919781
| 2,509
|
py
|
Python
|
tests/test_symgroup.py
|
efrembernuz/symeess
|
d74868bbb8463e0420fcc28e3554fbfa8e6de22f
|
[
"MIT"
] | 1
|
2017-10-25T01:42:14.000Z
|
2017-10-25T01:42:14.000Z
|
tests/test_symgroup.py
|
efrembernuz/symeess
|
d74868bbb8463e0420fcc28e3554fbfa8e6de22f
|
[
"MIT"
] | null | null | null |
tests/test_symgroup.py
|
efrembernuz/symeess
|
d74868bbb8463e0420fcc28e3554fbfa8e6de22f
|
[
"MIT"
] | null | null | null |
import unittest
from cosymlib import file_io
from numpy import testing
from cosymlib.molecule.geometry import Geometry
import os
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class TestSymgroupFchk(unittest.TestCase):
def setUp(self):
self._structure = file_io.read_generic_structure_file(data_dir + '/wfnsym/tih4_5d.fchk')
self._geometry = self._structure.geometry
def test_symmetry_measure(self):
# print(self._structure.geometry)
measure = self._geometry.get_symmetry_measure('C3', central_atom=1)
self.assertAlmostEqual(measure, 0)
class TestSymgroupCycles(unittest.TestCase):
def setUp(self):
self._geometry = Geometry(positions=[[ 0.506643354, -1.227657970, 0.000000000],
[ 1.303068499, 0.000000000, 0.000000000],
[ 0.506643354, 1.227657970, 0.000000000],
[-0.926250976, 0.939345948, 0.000000000],
[-0.926250976, -0.939345948, 0.000000000]],
# name='test',
symbols=['C', 'C', 'C', 'C', 'C'],
connectivity_thresh=1.5,
)
def test_symmetry_measure(self):
measure = self._geometry.get_symmetry_measure('C5')
self.assertAlmostEqual(measure, 0.8247502, places=6)
measure = self._geometry.get_symmetry_measure('C2')
self.assertAlmostEqual(measure, 0.0, places=6)
measure = self._geometry.get_symmetry_measure('C3')
self.assertAlmostEqual(measure, 33.482451, places=6)
#def test_symmetry_measure_permutation(self):
# measure = self._geometry.get_symmetry_measure('C5', fix_permutation=True)
# self.assertAlmostEqual(measure, 0.8247502, places=6)
def test_symmetry_nearest(self):
nearest = self._geometry.get_symmetry_nearest_structure('C5').get_positions()
# print(nearest)
reference = [[ 4.05078542e-01, -1.24670356e+00, 0.00000000e+00],
[ 1.31086170e+00, -1.33226763e-16, 0.00000000e+00],
[ 4.05078542e-01, 1.24670356e+00, 0.00000000e+00],
[-1.06050939e+00, 7.70505174e-01, 0.00000000e+00],
[-1.06050939e+00, -7.70505174e-01, 0.00000000e+00]]
testing.assert_array_almost_equal(nearest, reference, decimal=6)
| 44.017544
| 96
| 0.595058
| 2,313
| 0.921881
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.121961
|
a777d3b6992912736d9d3c1557062ac6df7a8a29
| 5,651
|
py
|
Python
|
mouth_detecting.py
|
nuocheng/Face-detection
|
84375b0c1bacaf572fb04aa6e05751469fe5f9c8
|
[
"MIT"
] | null | null | null |
mouth_detecting.py
|
nuocheng/Face-detection
|
84375b0c1bacaf572fb04aa6e05751469fe5f9c8
|
[
"MIT"
] | null | null | null |
mouth_detecting.py
|
nuocheng/Face-detection
|
84375b0c1bacaf572fb04aa6e05751469fe5f9c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np  # numpy, for numerical data processing
import argparse
import imutils
import time
import dlib
import cv2
def eye_aspect_ratio(eye):
    # vertical eye landmark (x, y) coordinates
    A = dist.euclidean(eye[1], eye[5])  # Euclidean distance between the two sets of points
    B = dist.euclidean(eye[2], eye[4])
    # Euclidean distance between the horizontal
    # eye landmark (x, y) coordinates
    C = dist.euclidean(eye[0], eye[3])
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
return ear
def mouth_aspect_ratio(mouth):
A = np.linalg.norm(mouth[2] - mouth[9]) # 51, 59
B = np.linalg.norm(mouth[4] - mouth[7]) # 53, 57
C = np.linalg.norm(mouth[0] - mouth[6]) # 49, 55
mar = (A + B) / (2.0 * C)
return mar
# define two constants:
# eye aspect ratio threshold
# and the number of consecutive frames for a blink
EYE_AR_THRESH = 0.2
EYE_AR_CONSEC_FRAMES = 3
# yawn (mouth) aspect ratio threshold
# and the number of consecutive frames for a yawn
MAR_THRESH = 0.5
MOUTH_AR_CONSEC_FRAMES = 3
# initialize the frame counter and the total number of blinks
COUNTER = 0
TOTAL = 0
# initialize the frame counter and the total number of yawns
mCOUNTER = 0
mTOTAL = 0
# initialize dlib's face detector (HOG) and then create the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
# step 1: use dlib.get_frontal_face_detector() to obtain the face detector
detector = dlib.get_frontal_face_detector()
# step 2: use dlib.shape_predictor to obtain the facial landmark predictor
predictor = dlib.shape_predictor('./model/shape_predictor_68_face_landmarks.dat')
# step 3: grab the indexes of the facial landmarks for the left eye, right eye and mouth
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# step 4: open the local camera with cv2
cap = cv2.VideoCapture(0)
# loop over frames from the video stream
while True:
    # step 5: read the frame, resize it, and convert it to grayscale
ret, frame = cap.read()
frame = imutils.resize(frame, width=720)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # step 6: detect face positions with detector(gray, 0)
    rects = detector(gray, 0)
    # step 7: loop over the detected faces and get the facial landmarks with predictor(gray, rect)
    for rect in rects:
        shape = predictor(gray, rect)
        # step 8: convert the facial landmarks to a numpy array
        shape = face_utils.shape_to_np(shape)
        # step 9: extract the left and right eye coordinates
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        # mouth coordinates
        mouth = shape[mStart:mEnd]
        # step 10: compute the EAR for both eyes and use the average as the final EAR
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        ear = (leftEAR + rightEAR) / 2.0
        # yawning
        mar = mouth_aspect_ratio(mouth)
        # step 11: compute the convex hulls with cv2.convexHull and draw the contours with drawContours
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
mouthHull = cv2.convexHull(mouth)
cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)
        # step 12: draw a bounding rectangle around the detected face
left = rect.left()
top = rect.top()
right = rect.right()
bottom = rect.bottom()
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)
        '''
        The scores of the left and right eyes are averaged as the final score. If it falls below the
        threshold the counter is incremented; if it stays below the threshold for 3 consecutive
        frames, one blink is counted.
        '''
        # step 13: when the condition is met, increment the blink count
        if ear < EYE_AR_THRESH:  # eye aspect ratio threshold: 0.2
            COUNTER += 1
        else:
            # if the EAR was below the threshold for 3 consecutive frames, count one blink
            if COUNTER >= EYE_AR_CONSEC_FRAMES:  # threshold: 3
                TOTAL += 1
            # reset the eye frame counter
            COUNTER = 0
        # step 14: draw the blink count on the frame with cv2.putText
cv2.putText(frame, "Faces: {}".format(len(rects)), (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Blinks: {}".format(TOTAL), (150, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "COUNTER: {}".format(COUNTER), (300, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "EAR: {:.2f}".format(ear), (450, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        '''
        Compute the mouth-opening score. While it exceeds the threshold the counter is incremented;
        once the mouth closes after at least 3 consecutive frames above the threshold, one yawn is
        counted (a single yawn spans roughly 3 frames).
        '''
        # similarly, check for yawning
        if mar > MAR_THRESH:  # open-mouth threshold: 0.5
            mCOUNTER += 1
            cv2.putText(frame, "Yawning!", (10, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            # if the mouth was open for at least 3 consecutive frames, count one yawn
            if mCOUNTER >= MOUTH_AR_CONSEC_FRAMES:  # threshold: 3
                mTOTAL += 1
            # reset the mouth frame counter
            mCOUNTER = 0
cv2.putText(frame, "Yawning: {}".format(mTOTAL), (150, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "mCOUNTER: {}".format(mCOUNTER), (300, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "MAR: {:.2f}".format(mar), (480, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # step 15: draw the 68 facial landmark points
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
        print('real-time mouth aspect ratio: {:.2f} '.format(mar)+"\tmouth open: "+str([False,True][mar > MAR_THRESH]))
        print('real-time eye aspect ratio: {:.2f} '.format(ear)+"\tblinking: "+str([False,True][COUNTER>=1]))
    # decide on the fatigue warning
    if TOTAL >= 50 or mTOTAL >= 15:
        cv2.putText(frame, "SLEEP!!!", (100, 200),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
    # press 'q' to quit
    cv2.putText(frame, "Press 'q': Quit", (20, 500),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (84, 255, 159), 2)
    # show the frame with opencv
cv2.imshow("Frame", frame)
# if the `q` key was pressed, break from the loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release the camera
cap.release()
# do a bit of cleanup
cv2.destroyAllWindows()
| 32.854651
| 117
| 0.603787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,837
| 0.40465
|
a7780199003eb4084f3a08db621a30c4ac94b9d2
| 2,894
|
py
|
Python
|
Scripts/Ros/Identifica_cor.py
|
pcliquet/robotic_resumo
|
3d1d8705820cae39d5be956836a94c7884ab490d
|
[
"MIT"
] | 1
|
2022-03-26T22:50:26.000Z
|
2022-03-26T22:50:26.000Z
|
Scripts/Ros/Identifica_cor.py
|
pcliquet/robotic_resumo
|
3d1d8705820cae39d5be956836a94c7884ab490d
|
[
"MIT"
] | null | null | null |
Scripts/Ros/Identifica_cor.py
|
pcliquet/robotic_resumo
|
3d1d8705820cae39d5be956836a94c7884ab490d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
import rospy
import numpy as np
import tf
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import smach
import smach_ros
def identifica_cor(frame):
    '''
    Segments the largest object whose color is close to cor_h (the hue of the color, in HSV space).
    '''
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cor_menor = np.array([0, 50, 100])
cor_maior = np.array([6, 255, 255])
segmentado_cor = cv2.inRange(frame_hsv, cor_menor, cor_maior)
cor_menor = np.array([174, 50, 100])
cor_maior = np.array([180, 255, 255])
mask = cv2.inRange(frame_hsv, cor_menor, cor_maior)
kernel = np.ones((5, 5), np.uint8)
morpho = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
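    # morphological opening (erosion followed by dilation) removes small specks of noise from the mask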
#segmentado_cor += cv2.inRange(frame_hsv, cor_menor, cor_maior)
    # Note that numpy treats images as matrices, so indexing is
    # row, column, i.e. (y, x).
    # That is why the tuple with the center has to be built in inverted order.
centro = (frame.shape[1]//2, frame.shape[0]//2)
def cross(img_rgb, point, color, width,length):
cv2.line(img_rgb, (int( point[0] - length/2 ), point[1] ), (int( point[0] + length/2 ), point[1]), color ,width, length)
cv2.line(img_rgb, (point[0], int(point[1] - length/2) ), (point[0], int( point[1] + length/2 ) ),color ,width, length)
segmentado_cor = cv2.morphologyEx(morpho,cv2.MORPH_CLOSE,np.ones((7, 7)))
contornos, arvore = cv2.findContours(morpho.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maior_contorno = None
maior_contorno_area = 0
for cnt in contornos:
area = cv2.contourArea(cnt)
if area > maior_contorno_area:
maior_contorno = cnt
maior_contorno_area = area
    # Find the center of the contour by taking the mean of all of its points.
if not maior_contorno is None :
cv2.drawContours(frame, [maior_contorno], -1, [0, 0, 255], 5)
maior_contorno = np.reshape(maior_contorno, (maior_contorno.shape[0], 2))
media = maior_contorno.mean(axis=0)
media = media.astype(np.int32)
cv2.circle(frame, (media[0], media[1]), 5, [0, 255, 0])
cross(frame, centro, [255,0,0], 1, 17)
else:
media = (0, 0)
    # Display the area and the center of the largest contour on the frame
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
cv2.putText(frame,"{:d} {:d}".format(*media),(20,100), 1, 4,(255,255,255),2,cv2.LINE_AA)
cv2.putText(frame,"{:0.1f}".format(maior_contorno_area),(20,50), 1, 4,(255,255,255),2,cv2.LINE_AA)
# cv2.imshow('video', frame)
cv2.imshow('seg', segmentado_cor)
cv2.waitKey(1)
return media, centro, maior_contorno_area
| 33.651163
| 129
| 0.664824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 582
| 0.200759
|
a77866394277674aa9998582e0c75620917bdb48
| 2,041
|
py
|
Python
|
integration_tests/test_test_oracle_tax.py
|
weblucas/mseg-semantic
|
ec3d179003bb26dd0f1336853b719319721757a4
|
[
"MIT"
] | 391
|
2020-06-05T17:30:44.000Z
|
2022-03-31T12:01:30.000Z
|
integration_tests/test_test_oracle_tax.py
|
weblucas/mseg-semantic
|
ec3d179003bb26dd0f1336853b719319721757a4
|
[
"MIT"
] | 27
|
2020-06-06T15:08:37.000Z
|
2022-02-28T07:57:57.000Z
|
integration_tests/test_test_oracle_tax.py
|
weblucas/mseg-semantic
|
ec3d179003bb26dd0f1336853b719319721757a4
|
[
"MIT"
] | 57
|
2020-06-09T06:05:30.000Z
|
2022-03-28T15:49:36.000Z
|
#!/usr/bin/python3
from pathlib import Path
from types import SimpleNamespace
from mseg_semantic.scripts.collect_results import parse_result_file
from mseg_semantic.tool.test_oracle_tax import test_oracle_taxonomy_model
REPO_ROOT_ = Path(__file__).resolve().parent.parent
# Replace these variables with your own paths to run integration tests.
INTEGRATION_TEST_OUTPUT_DIR = '/srv/scratch/jlambert30/MSeg/mseg-semantic/integration_test_data'
# Copy the mseg-3m-1080p model there
CAMVID_MODEL_PATH = f'{INTEGRATION_TEST_OUTPUT_DIR}/camvid-11-1m.pth'
def test_evaluate_oracle_tax_model():
"""
Ensure oracle model testing script works correctly.
base_sizes=(
#360
720
#1080
python -u mseg_semantic/tool/test_oracle_tax.py --config=${config_fpath}
dataset ${dataset_name} model_path ${model_fpath} model_name ${model_name}
"""
base_size = 1080
d = {
'dataset': 'camvid-11',
        'config': f'{REPO_ROOT_}/mseg_semantic/config/test/default_config_{base_size}_ss.yaml',
'model_path': CAMVID_MODEL_PATH,
'model_name': 'mseg-3m-1080p',
'input_file': 'default',
'base_size': base_size,
'test_h': 713,
'test_w': 713,
'scales': [1.0],
'save_folder': 'default',
'arch': 'hrnet',
'index_start': 0,
'index_step': 0,
'workers': 16,
'has_prediction': False,
'split': 'val',
'vis_freq': 20
}
args = SimpleNamespace(**d)
use_gpu = True
test_oracle_taxonomy_model(args, use_gpu)
# Ensure that results match paper
result_file_path = INTEGRATION_TEST_OUTPUT_DIR
result_file_path += f'/camvid-11-1m/camvid-11/{base_size}/ss/results.txt'
assert Path(result_file_path).exists()
mIoU = parse_result_file(result_file_path)
print(f"mIoU: {mIoU}")
# single-scale result
assert mIoU == 78.79
OKGREEN = '\033[92m'
ENDC = '\033[0m'
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
print(OKGREEN + 'Oracle model evalution passed successfully' + ENDC)
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
if __name__ == '__main__':
test_evaluate_oracle_tax_model()
| 28.347222
| 96
| 0.708476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,071
| 0.524743
|
a778a8a2d428dbcafebc55dfd31568980b3b8abe
| 375
|
py
|
Python
|
app/env/lib/python3.7/site-packages/twilio/http/response.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 30
|
2018-06-12T12:00:53.000Z
|
2021-05-02T01:27:16.000Z
|
app/env/lib/python3.7/site-packages/twilio/http/response.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 11
|
2019-12-26T17:21:03.000Z
|
2022-03-21T22:17:07.000Z
|
bot/lib/python3.7/site-packages/twilio/http/response.py
|
carlosrh18/DavinciBot
|
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
|
[
"MIT"
] | 4
|
2019-03-28T18:20:48.000Z
|
2019-11-18T18:52:04.000Z
|
class Response(object):
    """Wraps the status code and content of an HTTP response."""
def __init__(self, status_code, text):
self.content = text
self.cached = False
self.status_code = status_code
self.ok = self.status_code < 400
@property
def text(self):
return self.content
def __repr__(self):
return 'HTTP {} {}'.format(self.status_code, self.content)
| 22.058824
| 66
| 0.592
| 374
| 0.997333
| 0
| 0
| 57
| 0.152
| 0
| 0
| 24
| 0.064
|
a7799a223cdf2e189549e42fb31de6f6391c2873
| 1,911
|
py
|
Python
|
sports_manager/models/gymnasium.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/models/gymnasium.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/models/gymnasium.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Gymnasium implementation."""
# Django
from django.core.validators import RegexValidator
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _ # noqa
class Gymnasium(models.Model):
"""Gymnasium model for the website."""
slug = models.SlugField(_('slug'), unique=True, max_length=128)
name = models.CharField(_('name'), max_length=128)
address = models.CharField(_('address'), max_length=255)
city = models.CharField(_('city'), max_length=255)
zip_code = models.IntegerField(_('zip code'))
phone = models.CharField(
_('phone number'),
max_length=10,
blank=True,
validators=[
# ^
# (?:(?:\+|00)33|0) # Dialing code
# \s*[1-9] # First number (from 1 to 9)
# (?:[\s.-]*\d{2}){4} # End of the phone number
# $
RegexValidator(regex=r"^(?:(?:\+|00)33|0)\s*[1-7,9](?:[\s.-]*\d{2}){4}$",
message=_("This is not a correct phone number"))
]
)
surface = models.SmallIntegerField(_('surface'), blank=True, null=True)
capacity = models.SmallIntegerField(_('capacity'), blank=True, null=True)
def __str__(self):
"""Representation of a Gymnasium as a string."""
return "Gymnasium {}".format(self.name)
class Meta:
verbose_name = _("gymnasium")
verbose_name_plural = _("gymnasiums")
ordering = ("name", "city")
def save(self, *args, **kwargs):
"""Override the save method in order to rewrite the slug field each time we save the object."""
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def get_time_slots(self):
"""Return a list of all the time slots in the gymnasium."""
return self.time_slot_set.all()
| 36.75
| 103
| 0.588697
| 1,662
| 0.869702
| 0
| 0
| 0
| 0
| 0
| 0
| 671
| 0.351125
|
a779d1a47d9473c22bbee36fab9477af4aad4943
| 228
|
py
|
Python
|
01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py
|
rafaelbarretomg/Uninter
|
1f84b0103263177122663e991db3a8aeb106a959
|
[
"MIT"
] | null | null | null |
01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py
|
rafaelbarretomg/Uninter
|
1f84b0103263177122663e991db3a8aeb106a959
|
[
"MIT"
] | null | null | null |
01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py
|
rafaelbarretomg/Uninter
|
1f84b0103263177122663e991db3a8aeb106a959
|
[
"MIT"
] | null | null | null |
# Exercise 01 - Tuples
x = int(input('Enter the first number: '))
y = int(input('Enter the second number: '))
cont = 1
soma = x
while cont < y:
soma = soma + x
cont = cont + 1
print('The result is: {}'.format(soma))
| 20.727273
| 44
| 0.618421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.421053
|
a77a0a8078a541187f7e349449f50c15dd027ebe
| 832
|
py
|
Python
|
docs/OOPS/Accessing_pvt_var2.py
|
munyumunyu/Python-for-beginners
|
335d001d4b8f13af71f660beed0b7f5fe313aa3b
|
[
"MIT"
] | 158
|
2018-10-03T23:36:48.000Z
|
2022-03-25T00:16:00.000Z
|
docs/OOPS/Accessing_pvt_var2.py
|
munyumunyu/Python-for-beginners
|
335d001d4b8f13af71f660beed0b7f5fe313aa3b
|
[
"MIT"
] | 10
|
2018-10-11T03:52:28.000Z
|
2019-12-04T02:51:28.000Z
|
docs/OOPS/Accessing_pvt_var2.py
|
munyumunyu/Python-for-beginners
|
335d001d4b8f13af71f660beed0b7f5fe313aa3b
|
[
"MIT"
] | 40
|
2018-10-03T10:47:28.000Z
|
2022-02-22T19:55:46.000Z
|
'''
To have an error-free way of accessing and updating private variables, we create specific methods for this.
Methods meant to set a value on a private variable are called setter methods, and methods
meant to access private variable values are called getter methods.
The below code is an example of getter and setter methods:
'''
class Customer:
def __init__(self, id, name, age, wallet_balance):
self.id = id
self.name = name
self.age = age
self.__wallet_balance = wallet_balance
def set_wallet_balance(self, amount):
        if amount < 1000 and amount > 0:
self.__wallet_balance = amount
def get_wallet_balance(self):
return self.__wallet_balance
c1=Customer(100, "Gopal", 24, 1000)
c1.set_wallet_balance(120)
print(c1.get_wallet_balance())
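# A short sketch, not part of the original lesson: the same guarded access is often written with
# Python's built-in property, which keeps plain attribute syntax while still validating writes.
# The 0-1000 range below simply mirrors the setter above.
class CustomerWithProperty:
    def __init__(self, id, name, age, wallet_balance):
        self.id = id
        self.name = name
        self.age = age
        self.__wallet_balance = wallet_balance
    @property
    def wallet_balance(self):
        # getter: read access to the private balance
        return self.__wallet_balance
    @wallet_balance.setter
    def wallet_balance(self, amount):
        # setter: accept only amounts in the allowed range
        if amount < 1000 and amount > 0:
            self.__wallet_balance = amount
c3 = CustomerWithProperty(101, "Gopal", 24, 1000)
c3.wallet_balance = 120
print(c3.wallet_balance)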
| 32
| 107
| 0.71274
| 385
| 0.46274
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.427885
|
a77b4550c67262bf40db6267243d9f55a2869fd2
| 21,013
|
py
|
Python
|
src/runmanager/runinstance.py
|
scherma/antfarm
|
ad4d1d564eb79bdc7e00780b97ca10594c75cd5c
|
[
"MIT"
] | 6
|
2018-08-26T10:15:29.000Z
|
2022-03-03T21:12:37.000Z
|
src/runmanager/runinstance.py
|
scherma/antfarm
|
ad4d1d564eb79bdc7e00780b97ca10594c75cd5c
|
[
"MIT"
] | 10
|
2018-03-09T18:18:28.000Z
|
2021-05-06T21:37:53.000Z
|
src/runmanager/runinstance.py
|
scherma/antfarm
|
ad4d1d564eb79bdc7e00780b97ca10594c75cd5c
|
[
"MIT"
] | 3
|
2018-11-29T07:47:30.000Z
|
2020-05-24T09:58:57.000Z
|
#!/usr/bin/env python3
# coding: utf-8
# MIT License © https://github.com/scherma
# contact http_error_418 @ unsafehex.com
import logging, os, configparser, libvirt, json, arrow, pyvnc, shutil, time, victimfiles, glob, websockify, multiprocessing, signal
import tempfile, evtx_dates, db_calls, psycopg2, psycopg2.extras, sys, pcap_parser, yarahandler, magic, case_postprocess
import scapy.all as scapy
from lxml import etree
from io import StringIO, BytesIO
from PIL import Image
logger = logging.getLogger("antfarm.worker")
# Manages connection to VM and issuing of commands
class RunInstance():
def __init__( self,
cursor,
dbconn,
domuuid,
conf,
fname,
uuid,
submittime,
hashes,
victim_params,
ttl=240,
interactive=False,
reboots=0,
web=True,
banking=False,
collect_registries=False,
filespath = "suspects",
outdir = "output"
):
self.conf = conf
self.rootdir = os.path.join(conf.get('General', 'basedir'), self.conf.get('General', 'instancename'))
self.uuid = uuid
self.starttime = arrow.utcnow().timestamp
self.endtime = None
self.submittime = submittime
self.hashes = hashes
self.ttl = ttl
self.interactive = interactive
self.reboots = reboots
self.banking = banking
self.collect_registries = collect_registries
self.web = web
self.filespath = os.path.join(self.rootdir, filespath)
self.fname = self._suspect_exists(fname)
self.victim_params = victim_params
self.runcmds = []
self.rundir = self._make_outputdir(outdir)
self.imgdir = self._make_imgdir()
self.runlog = self._register_logger()
self.pcap_file = os.path.join(self.rundir, "capture.pcap")
#self.stop_capture = False
self.imgsequence = 0
self.cursor = cursor
self.dbconn = dbconn
self.domuuid = domuuid
self.yara_test()
self.vf = None
self.websockserver = None
def __del__(self):
self._unregister_logger()
self.remove_vnc()
@property
def rawfile(self):
return os.path.join(self.filespath, self.hashes["sha256"][0:2], self.hashes["sha256"])
@property
def downloadfile(self):
return os.path.join(self.filespath, 'downloads', str(self.domuuid), self.fname)
@property
def banking(self):
return int(self._banking)
@banking.setter
def banking(self, value):
self._banking = bool(value)
@property
def web(self):
return int(self._web)
    @web.setter
def web(self, value):
self._web = bool(value)
@property
def interactive(self):
return int(self._interactive)
@interactive.setter
def interactive(self, value):
self._interactive = bool(value)
def _dump_dict(self):
tformat = 'YYYY-MM-DD HH:mm:ss.SSSZ'
selfdict = {
"rootdir": self.rootdir,
"uuid": self.uuid,
"starttime": self.starttime,
"endtime": self.endtime,
"submittime": self.submittime,
"hashes": self.hashes,
"ttl": self.ttl,
"interactive": self.interactive,
"reboots": self.reboots,
"banking": self.banking,
"web": self.web,
"filespath": self.filespath,
"fname": self.fname,
"victim_params": self.victim_params,
"runcmds": self.runcmds,
"rundir": self.rundir,
"pcap_file": self.pcap_file
}
return selfdict
def _make_outputdir(self, outdir):
short = self.hashes["sha256"][0:2]
bdir = os.path.join(self.rootdir, outdir, short)
if not os.path.exists(bdir):
os.mkdir(bdir)
fdir = os.path.join(bdir, self.hashes["sha256"])
if not os.path.exists(fdir):
os.mkdir(fdir)
# rundir should not exist before the run - if it does, UUID is broken somehow!
rundir = os.path.join(fdir, self.uuid)
if not os.path.exists(rundir):
os.mkdir(rundir)
logger.debug("Created run instance directory {0}".format(rundir))
return rundir
def _make_imgdir(self):
imgshort = os.path.join(self.rootdir, 'www', 'public', 'images', 'cases', self.uuid[0:2])
if not os.path.exists(imgshort):
os.mkdir(imgshort)
logger.debug("Made images base dir {0}".format(imgshort))
imgdir = os.path.join(imgshort, self.uuid)
if not os.path.exists(imgdir):
os.mkdir(imgdir)
logger.debug("Made images final dir {0}".format(imgdir))
return imgdir
def _register_logger(self):
formatter = logging.Formatter(fmt='[%(asctime)s] %(levelname)s\t%(message)s', datefmt='%Y%m%d %H:%M:%S')
runlog = logging.FileHandler(os.path.join(self.rundir, 'run.log'))
RUN_NUM_LEVEL = getattr(logging, self.conf.get('General', 'runloglevel'))
runlog.setLevel(RUN_NUM_LEVEL)
runlog.setFormatter(formatter)
log_modules = [__name__, "pyvnc", "vmworker", "runinstance", "db_calls", "victimfiles", "yarahandler"]
for module in log_modules:
logging.getLogger(module).setLevel(RUN_NUM_LEVEL)
logger.addHandler(runlog)
return runlog
def _unregister_logger(self):
logger.removeHandler(self.runlog)
def _suspect_exists(self, fname):
open(self.rawfile).close()
logger.debug("Confirmed file '{0}' exists with sha256 '{1}'".format(fname, self.hashes["sha256"]))
return fname
def yara_test(self):
matches = yarahandler.testyara(self.conf, self.rawfile)
if matches:
logger.info("Found yara matches: {}".format(matches))
db_calls.yara_detection(matches, self.hashes["sha256"], self.cursor)
else:
logger.info("No yara matches found")
# make a screenshot
# https://www.linuxvoice.com/issues/003/LV3libvirt.pdf
def screenshot(self, dom, lv_conn):
imgpath = os.path.join(self.imgdir, "{0}.png".format(self.imgsequence))
thumbpath = os.path.join(self.imgdir, "{0}-thumb.png".format(self.imgsequence))
i = get_screen_image(dom, lv_conn)
i.save(imgpath)
i.thumbnail((400, 400))
i.save(thumbpath)
logger.debug("Took screenshot {0}".format(imgpath))
self.imgsequence += 1
def _sc_writer(self, stream, data, b):
b.write(data)
def case_update(self, status):
self.cursor.execute("""UPDATE "cases" SET status = %s WHERE uuid=%s""", (status, self.uuid))
self.dbconn.commit()
def get_pcap(self):
try:
folder = "/usr/local/unsafehex/{}/pcaps".format(self.conf.get("General", "instancename"))
def getmtime(name):
path = os.path.join(folder, name)
return os.path.getmtime(path)
pcaps = sorted(os.listdir(folder), key=getmtime, reverse=True)
to_read = []
start = arrow.get(self.starttime)
end = arrow.get(self.endtime)
hours_list = arrow.Arrow.range("hour", start, end)
for pcap_file in pcaps:
pf = os.path.join(folder, pcap_file)
if arrow.get(os.path.getmtime(pf)) > start:
to_read.append(pf)
logger.debug("Reading from pcaps: {}".format(to_read))
#for hour in hours_list:
# pcap_file = "{}.pcap".format(hour.format("HH"))
# to_read.append(pcap_file)
fl = "host {0} and not (host {1} and port 28080)".format(self.victim_params["ip"], self.conf.get("General", "gateway_ip"))
logger.debug("Reading pcapring with filter {}".format(fl))
logger.debug("Time parameters: {} :: {}".format(start, end))
written = 0
for pcap in to_read:
logger.debug("Reading {}".format(pcap))
packets = scapy.sniff(offline=pcap, filter=fl)
for packet in packets:
ptime = arrow.get(packet.time)
if ptime >= start and ptime <= end:
scapy.wrpcap(self.pcap_file, packet, append=True)
written += 1
logger.info("Wrote {} packets to file {}".format(written, self.pcap_file))
conversations = pcap_parser.conversations(self.pcap_file)
db_calls.insert_pcap_streams(conversations, self.uuid, self.cursor)
except Exception:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
logger.error("Exception {0} {1} in {2}, line {3} while processing pcap".format(ex_type, ex, fname, lineno))
def events_to_store(self, searchfiles, startdate, enddate):
events = {}
logger.debug("Searching suricata log files: {}".format(searchfiles))
for searchfile in searchfiles:
evctr = 0
if os.path.exists(searchfile):
with open(searchfile) as f:
for line in f:
d = json.loads(line.rstrip(' \t\r\n\0').lstrip(' \t\r\n\0'))
t = arrow.get(d["timestamp"])
# ensure only event types we can handle safely get looked at
if d["event_type"] in ["tls", "http", "dns", "alert"]:
# include everything from selected host
if ((d["src_ip"] == self.victim_params["ip"] or d["dest_ip"] == self.victim_params["ip"]) and
# that falls within the run time
(t >= startdate and t <= enddate) and not
# except where the target is the API service
(d["dest_ip"] == self.conf.get("General", "gateway_ip") and d["dest_port"] == 28080)):
if d["event_type"] != "alert" or (d["event_type"] == "alert" and d["alert"]["category"] != "Generic Protocol Command Decode"):
if d["event_type"] not in events:
events[d["event_type"]] = [d]
else:
events[d["event_type"]].append(d)
evctr += 1
logger.info("Identified {0} events to include from {1}".format(evctr, searchfile))
return events
def behaviour(self, dom, lv_conn):
try:
cstr = "{0}::{1}".format(self.victim_params["vnc"]["address"], self.victim_params["vnc"]["port"])
vncconn = pyvnc.Connector(cstr, self.victim_params["password"], (self.victim_params["display_x"], self.victim_params["display_y"]))
logger.debug("Initialised VNC connection")
click_after = arrow.now().format("YYYY-MM-DD HH:mm:ss")
for i in range(0,5):
vncconn.run_sample(self.victim_params["malware_pos_x"], self.victim_params["malware_pos_y"])
time.sleep(6)
if self.sample_has_run(click_after):
self.screenshot(dom, lv_conn)
break
logger.error("Didn't see a process creation. That's odd...")
#logger.info("VM prepped for suspect execution, starting behaviour sequence")
if self.interactive:
logger.info("Passing control to user")
else:
vncconn.basic()
logger.info("Basic behaviour complete")
self.screenshot(dom, lv_conn)
if self.banking:
vncconn.bank()
self.screenshot(dom, lv_conn)
logger.info("Banking happened")
if self.reboots:
vncconn.restart()
logger.info("System rebooted")
if self.web:
vncconn.web()
self.screenshot(dom, lv_conn)
logger.info("Web activity happened")
if self.reboots > 1:
vncconn.restart()
logger.info("System rebooted")
logger.info("Behaviour sequence complete")
vncconn.disconnect()
logger.debug("VNC disconnect issued")
except Exception as e:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
raise RuntimeError("Exception {0} {1} in {2}, line {3} while processing job, run not completed. Aborting.".format(ex_type, ex, fname, lineno))
def sample_has_run(self, click_after):
# for now let's just assume it did
return True
self.cursor.execute("""SELECT * FROM sysmon_evts WHERE uuid=%s AND eventid=1 AND timestamp > %s""", (self.uuid, click_after))
rows = self.cursor.fetchall()
# check if any processes have been started from Explorer
for row in rows:
if row["eventid"] == 1:
if row["eventdata"]["ParentImage"] == "C:\\Windows\\explorer.exe":
return True
return False
def do_run(self, dom, lv_conn):
logger.info("Started run sequence")
# prep the file for run
shutil.copy(self.rawfile, self.downloadfile)
logger.debug("File copied ready for download")
try:
dom.resume()
logger.debug("Resumed VM in preparation for run")
case_obtained = False
while not case_obtained:
self.cursor.execute("""SELECT status FROM cases WHERE uuid=%s""", (self.uuid,))
rows = self.cursor.fetchall()
if rows and rows[0]["status"] == "obtained":
break
else:
time.sleep(5)
logger.info("Suspect was delivered, starting behaviour sequence")
self.behaviour(dom, lv_conn)
# except block for debugging purposes - clean this up for production
except Exception as e:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
raise RuntimeError("Exception {0} {1} in {2}, line {3} while processing job, run not completed. Aborting.".format(ex_type, ex, fname, lineno))
finally:
#ssh.close()
os.remove(self.downloadfile)
logger.debug("Removed download file")
def targeted_files_list(self):
targeted_files = []
targeted_files.extend(db_calls.timestomped_files(self.uuid, self.cursor))
return targeted_files
def construct_record(self, victim_params):
dtstart = arrow.get(self.starttime)
dtend = arrow.get(self.endtime)
try:
logger.info("Obtaining new files from guest filesystem")
self.vf = victimfiles.VictimFiles(self.conf, self.victim_params["diskfile"], '/dev/sda2')
filesdict = self.vf.download_new_files(dtstart, self.rundir)
registriesdict = self.vf.download_modified_registries(dtstart, self.rundir, self.victim_params["username"], self.collect_registries)
targetedfilesdict = self.vf.download_specific_files(self.targeted_files_list(), self.rundir)
compileddict = {**filesdict, **registriesdict, **targetedfilesdict}
db_calls.insert_files(compileddict, self.uuid, self.cursor)
except Exception:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
logger.error("Exception {0} {1} in {2}, line {3} while processing filesystem output".format(ex_type, ex, fname, lineno))
finally:
try:
                del self.vf
except Exception:
pass
try:
# record suricata events
eventlog = os.path.join(self.rundir, "eve.json")
with open(eventlog, 'w') as e:
files = self._suricata_logfiles
events = self.events_to_store(files, dtstart, dtend)
#e.write(json.dumps(events))
qty = {}
for evtype in events:
qty[evtype] = len(events[evtype])
if evtype == "dns":
db_calls.insert_dns(events[evtype], self.uuid, self.cursor)
if evtype == "http":
db_calls.insert_http(events[evtype], self.uuid, self.cursor)
if evtype == "alert":
db_calls.insert_alert(events[evtype], self.uuid, self.cursor)
if evtype == "tls":
db_calls.insert_tls(events[evtype], self.uuid, self.cursor)
logger.info("Wrote events to {0}: {1}".format(eventlog, str(qty)))
except Exception:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
logger.error("Exception {0} {1} in {2}, line {3} while processing job, Suricata data not written".format(ex_type, ex, fname, lineno))
try:
self.get_pcap()
except Exception:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
logger.error("Exception {0} {1} in {2}, line {3} while processing job, pcap processing failed".format(ex_type, ex, fname, lineno))
try:
pp = case_postprocess.Postprocessor(self.uuid, self.cursor)
pp.update_events()
except Exception:
ex_type, ex, tb = sys.exc_info()
fname = os.path.split(tb.tb_frame.f_code.co_filename)[1]
lineno = tb.tb_lineno
logger.error("Exception {0} {1} in {2}, line {3} while processing job, case postprocessing failed".format(ex_type, ex, fname, lineno))
@property
def _suricata_logfiles(self):
evefiles = sorted(glob.glob("/var/log/suricata/eve-*.json"), key=os.path.getmtime, reverse=True)
to_read = []
start = arrow.get(self.starttime)
end = arrow.get(self.endtime)
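        # walk the eve files from newest to oldest; keep every file newer than the run start, plus the
        # first file that predates it (it may still contain events from the beginning of the run)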
for evefile in evefiles:
evefiletime = arrow.get(evefile.split("-")[1].split(".")[0], "YYYYMMDDHHmmss")
if evefiletime < start:
to_read.insert(0, evefile)
break
else:
to_read.insert(0, evefile)
return to_read
def present_vnc(self):
lport = 6800 + (int(self.victim_params["vnc"]["port"]) - 5900)
dport = self.victim_params["vnc"]["port"]
self.vncthread = multiprocessing.Process(target=vncsocket, args=("127.0.0.1", lport, dport))
self.vncthread.start()
logger.info("Started websockify server on {} -> {}".format(lport, dport))
def remove_vnc(self):
        if getattr(self, "vncthread", None) and isinstance(self.vncthread, multiprocessing.Process):
self.vncthread.terminate()
logger.info("Stopped websockify server")
def vncsocket(host, lport, dport):
logger.debug("Spinning up websocket process...")
server = websockify.WebSocketProxy(**{"target_host": host, "target_port": dport, "listen_port": lport})
server.start_server()
def get_screen_image(dom, lv_conn):
s = lv_conn.newStream()
# cause libvirt to take the screenshot
dom.screenshot(s, 0)
# copy the data into a buffer
buf = BytesIO()
s.recvAll(sc_writer, buf)
s.finish()
# write the buffer to file
buf.seek(0)
i = Image.open(buf)
return i
def sc_writer(stream, data, b):
b.write(data)
class StopCaptureException(RuntimeError):
def __init__(self, message, errors):
super(RuntimeError, self).__init__(message)
self.errors = errors
def __init__(self, message):
super(RuntimeError, self).__init__(message)
| 42.279678
| 158
| 0.555942
| 19,804
| 0.942419
| 0
| 0
| 1,304
| 0.062054
| 0
| 0
| 4,080
| 0.194156
|
a77c6d836bc31836353a31c25d2a780968623e8a
| 4,104
|
py
|
Python
|
test-framework/test-suites/integration/tests/list/test_list_repo.py
|
sammeidinger/stack
|
a8085dce179dbe903f65f136f4b63bcc076cc057
|
[
"BSD-3-Clause"
] | 123
|
2015-05-12T23:36:45.000Z
|
2017-07-05T23:26:57.000Z
|
test-framework/test-suites/integration/tests/list/test_list_repo.py
|
sammeidinger/stack
|
a8085dce179dbe903f65f136f4b63bcc076cc057
|
[
"BSD-3-Clause"
] | 177
|
2015-06-05T19:17:47.000Z
|
2017-07-07T17:57:24.000Z
|
test-framework/test-suites/integration/tests/list/test_list_repo.py
|
sammeidinger/stack
|
a8085dce179dbe903f65f136f4b63bcc076cc057
|
[
"BSD-3-Clause"
] | 32
|
2015-06-07T02:25:03.000Z
|
2017-06-23T07:35:35.000Z
|
import json
class TestListRepo:
def test_invalid(self, host):
result = host.run('stack list repo test')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_args(self, host, add_repo):
# Add a second repo so we can make sure it is skipped
add_repo('test2', 'test2url')
# Run list repo with just the test box
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
# Make sure we got data only for the test box
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
# now get all of them
# assert both repos are in the list data
result = host.run('stack list repo output-format=json')
repo_data = json.loads(result.stdout)
assert len(repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in repo_data}
# now get all of them, by explicitly asking for them
# assert both repos are in the list data
result = host.run('stack list repo test test2 output-format=json')
new_repo_data = json.loads(result.stdout)
assert len(new_repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in new_repo_data}
def test_removed_not_listed(self, host, add_repo, revert_etc):
# Run list repo with just the test box
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
# Make sure we got data only for the test box
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
result = host.run('stack remove repo test')
assert result.rc == 0
# Run list repo again
result = host.run('stack list repo test output-format=json')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_expanded_columns(self, host, host_os, add_repo):
# Run list repo with just the test box
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": None
}
]
def test_add_repo_with_pallet(self, host, host_os, add_repo, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks, revert_etc):
result = host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso')
#result = host.run(f'stack add pallet /root/minimal-1.0-sles12.x86_64.disk1.iso')
assert result.rc == 0
result = host.run('stack list pallet minimal output-format=json')
assert result.rc == 0
pallet_data = json.loads(result.stdout)
assert len(pallet_data) == 1
# get pallet id, as well as the -'d name in the correct order
from stack.commands import DatabaseConnection, get_mysql_connection, Command
from stack.argument_processors.pallet import PalletArgProcessor
from operator import attrgetter
p = PalletArgProcessor()
p.db = DatabaseConnection(get_mysql_connection())
minimal_pallet = p.get_pallets(args=['minimal'], params=pallet_data[0])[0]
pallet_name = '-'.join(attrgetter('name', 'version', 'rel', 'os', 'arch')(minimal_pallet))
# now attach the test repo to the pallet
result = host.run(f'stack set repo test pallet={minimal_pallet.id}')
assert result.rc == 0
# now verify it is attached to that pallet
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": pallet_name
}
]
# now verify that removing that pallet removes the repo as well
result = host.run('stack remove pallet minimal')
assert result.rc == 0
result = host.run('stack list repo')
assert result.rc == 0
assert result.stdout == ''
| 33.096774
| 144
| 0.690058
| 4,090
| 0.996589
| 0
| 0
| 0
| 0
| 0
| 0
| 1,696
| 0.413255
|
a780f3f7149ef35270e3ab77e079fdeb31c39092
| 769
|
py
|
Python
|
LeetCode/0044_Wildcard_Matching.py
|
scott-au/PythonAlgorithms
|
fcd5662a247440bd16a7d0f9b5e50259f5d1c063
|
[
"MIT"
] | null | null | null |
LeetCode/0044_Wildcard_Matching.py
|
scott-au/PythonAlgorithms
|
fcd5662a247440bd16a7d0f9b5e50259f5d1c063
|
[
"MIT"
] | null | null | null |
LeetCode/0044_Wildcard_Matching.py
|
scott-au/PythonAlgorithms
|
fcd5662a247440bd16a7d0f9b5e50259f5d1c063
|
[
"MIT"
] | null | null | null |
class Solution:
def isMatch(self, s: str, p: str) -> bool:
# this is a dynamic programming solution for this problem
matrix = [[False for x in range(len(p) + 1)] for x in range(len(s) + 1)]
matrix[0][0] = True
for i in range(1, len(matrix[0])):
if p[i - 1] == "*":
matrix[0][i] = matrix[0][i - 1]
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
if s[i - 1] == p[j - 1] or p[j - 1] == "?":
matrix[i][j] = matrix[i - 1][j - 1]
elif p[j - 1] == "*":
matrix[i][j] = matrix[i][j - 1] or matrix[i - 1][j]
else:
matrix[i][j] = False
return matrix[len(s)][len(p)]
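# Illustrative usage sketch (not part of the original submission): a couple of
# LeetCode-style checks against the DP matcher above.
if __name__ == "__main__":
    solver = Solution()
    assert solver.isMatch("adceb", "*a*b")   # '*' absorbs "adc" and "e"
    assert not solver.isMatch("cb", "?a")    # '?' matches 'c', but 'a' != 'b'
    print("wildcard matcher checks passed")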
| 38.45
| 80
| 0.421326
| 768
| 0.9987
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.075423
|
a78199b06e3d85a0cae2dc6b22fe2403a2e45cd5
| 405
|
py
|
Python
|
Python/luhnchecksum.py
|
JaredLGillespie/OpenKattis
|
71d26883cb5b8a4a1d63a072587de5575d7c29af
|
[
"MIT"
] | null | null | null |
Python/luhnchecksum.py
|
JaredLGillespie/OpenKattis
|
71d26883cb5b8a4a1d63a072587de5575d7c29af
|
[
"MIT"
] | null | null | null |
Python/luhnchecksum.py
|
JaredLGillespie/OpenKattis
|
71d26883cb5b8a4a1d63a072587de5575d7c29af
|
[
"MIT"
] | null | null | null |
# https://open.kattis.com/problems/luhnchecksum
for _ in range(int(input())):
count = 0
for i, d in enumerate(reversed(input())):
if i % 2 == 0:
count += int(d)
continue
x = 2 * int(d)
if x < 10:
count += x
else:
x = str(x)
count += int(x[0]) + int(x[1])
print('PASS' if count % 10 == 0 else 'FAIL')
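# Illustrative sketch (not part of the original submission): the same Luhn
# doubling rule as a hypothetical standalone helper, so the check can be
# exercised without stdin.
def luhn_passes(number: str) -> bool:
    total = 0
    for i, d in enumerate(reversed(number)):
        x = int(d) if i % 2 == 0 else 2 * int(d)
        total += x if x < 10 else x - 9  # summing the digits of 10..18 equals x - 9
    return total % 10 == 0
# e.g. luhn_passes('79927398713') is True for the classic valid test number.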
| 25.3125
| 48
| 0.449383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.145679
|
a7822d26e1d72928c623eabd7ce7c6e586e1f9ee
| 2,531
|
py
|
Python
|
dart_board/plotting.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 8
|
2017-12-04T22:32:25.000Z
|
2021-10-01T11:45:09.000Z
|
dart_board/plotting.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 2
|
2018-03-14T00:10:43.000Z
|
2021-05-02T18:51:11.000Z
|
dart_board/plotting.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 2
|
2018-07-17T23:00:01.000Z
|
2021-08-25T15:46:38.000Z
|
import matplotlib.pyplot as plt
import numpy as np
def plot_chains(chain, fileout=None, tracers=0, labels=None, delay=0, ymax=200000, thin=100, num_xticks=7, truths=None):
if chain.ndim < 3:
print("You must include a multiple chains")
return
n_chains, length, n_var = chain.shape
print(n_chains, length, n_var)
if (labels is not None) and (len(labels) != n_var):
print("You must provide the correct number of variable labels.")
return
if (truths is not None) and (len(truths) != n_var):
print("You must provide the correct number of truths.")
return
fig, ax = plt.subplots(int(n_var/2) + n_var%2, 2, figsize=(8, 0.8*n_var))
plt.subplots_adjust(left=0.09, bottom=0.07, right=0.96, top=0.96, hspace=0)
color = np.empty(n_chains, dtype=str)
color[:] = 'k'
alpha = 0.01 * np.ones(n_chains)
zorder = np.ones(n_chains)
if tracers > 0:
idx = np.random.choice(n_chains, tracers, replace=False)
color[idx] = 'r'
alpha[idx] = 1.0
zorder[idx] = 2.0
for i in range(n_var):
ix = int(i/2)
iy = i%2
for j in range(n_chains):
xvals = (np.arange(length)*thin - delay) / 1000.0
ax[ix,iy].plot(xvals, chain[j,:,i], color=color[j], alpha=alpha[j], rasterized=True, zorder=zorder[j])
if ymax is None: ymax = (length*thin-delay)
ax[ix,iy].set_xlim(-delay/1000.0, ymax/1000.0)
ax[ix,iy].set_xticks(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks))
ax[ix,iy].set_xticklabels([])
# Add y-axis labels if provided by the user
if labels is not None: ax[ix,iy].set_ylabel(labels[i])
if delay != 0: ax[ix,iy].axvline(0, color='k', linestyle='dashed', linewidth=2.0, zorder=9)
if truths is not None: ax[ix,iy].axhline(truths[i], color='C0', linestyle='dashed', linewidth=2.0, zorder=10)
# plt.tight_layout()
ax[-1,0].set_xticklabels(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks).astype('i8').astype('U'))
ax[-1,1].set_xticklabels(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks).astype('i8').astype('U'))
ax[-1,0].set_xlabel(r'Steps ($\times$1000)')
ax[-1,1].set_xlabel(r'Steps ($\times$1000)')
if fileout is None:
plt.show()
else:
plt.savefig(fileout, rasterized=True)
return
# from dart_board import plotting
# import numpy as np
# import pickle
# chains = pickle.load(open("../data/HMXB_chain.obj", "rb"))
# plotting.plot_chains(chains)
| 30.130952
| 120
| 0.621889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.176215
|
a78349abf743773098268654aaf64c037f2be3f7
| 2,063
|
py
|
Python
|
challenge/eval.py
|
CodeCrawl/deep_learning
|
3f9c208bba5ee17b4b68be74dc10e43839b4f6d0
|
[
"Apache-2.0"
] | 8
|
2018-11-03T16:32:35.000Z
|
2020-05-18T23:03:17.000Z
|
challenge/eval.py
|
CodeCrawl/deep_learning
|
3f9c208bba5ee17b4b68be74dc10e43839b4f6d0
|
[
"Apache-2.0"
] | null | null | null |
challenge/eval.py
|
CodeCrawl/deep_learning
|
3f9c208bba5ee17b4b68be74dc10e43839b4f6d0
|
[
"Apache-2.0"
] | 7
|
2018-11-07T14:39:20.000Z
|
2020-04-19T23:54:20.000Z
|
##
## Evaluation Script
##
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
def evaluate(label_indices = {'brick': 0, 'ball': 1, 'cylinder': 2},
channel_means = np.array([147.12697, 160.21092, 167.70029]),
data_path = '../data',
minibatch_size = 32,
num_batches_to_test = 10,
checkpoint_dir = 'tf_data/sample_model'):
print("1. Loading data")
data = data_loader(label_indices = label_indices,
channel_means = channel_means,
train_test_split = 0.5,
data_path = data_path)
print("2. Instantiating the model")
M = Model(mode = 'test')
#Evaluate on test images:
GT = Generator(data.test.X, data.test.y, minibatch_size = minibatch_size)
num_correct = 0
num_total = 0
print("3. Evaluating on test images")
for i in range(num_batches_to_test):
GT.generate()
yhat = M.predict(X = GT.X, checkpoint_dir = checkpoint_dir)
correct_predictions = (np.argmax(yhat, axis = 1) == np.argmax(GT.y, axis = 1))
num_correct += np.sum(correct_predictions)
num_total += len(correct_predictions)
accuracy = round(num_correct/num_total,4)
return accuracy
def calculate_score(accuracy):
score = 0
if accuracy >= 0.92:
score = 10
elif accuracy >= 0.9:
score = 9
elif accuracy >= 0.85:
score = 8
elif accuracy >= 0.8:
score = 7
elif accuracy >= 0.75:
score = 6
elif accuracy >= 0.70:
score = 5
else:
score = 4
return score
if __name__ == '__main__':
program_start = time.time()
accuracy = evaluate()
score = calculate_score(accuracy)
program_end = time.time()
total_time = round(program_end - program_start,2)
print()
print("Execution time (seconds) = ", total_time)
print('Accuracy = ' + str(accuracy))
print("Score = ", score)
print()
| 26.792208
| 86
| 0.600582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 246
| 0.119244
|
a78364d0cdf1ba12f5219bbb941cde9ada297c73
| 7,793
|
py
|
Python
|
PaddleRec/tdm/tdm_demo/infer_network.py
|
danleifeng/models
|
b87761f8100a545e0015046dd55d886ce90c190e
|
[
"Apache-2.0"
] | 2
|
2020-03-12T13:35:02.000Z
|
2020-03-12T14:54:23.000Z
|
PaddleRec/tdm/tdm_demo/infer_network.py
|
danleifeng/models
|
b87761f8100a545e0015046dd55d886ce90c190e
|
[
"Apache-2.0"
] | 1
|
2020-07-02T03:05:00.000Z
|
2020-07-02T03:05:00.000Z
|
PaddleRec/tdm/tdm_demo/infer_network.py
|
danleifeng/models
|
b87761f8100a545e0015046dd55d886ce90c190e
|
[
"Apache-2.0"
] | 1
|
2020-09-09T16:53:01.000Z
|
2020-09-09T16:53:01.000Z
|
# -*- coding=utf-8 -*-
"""
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import math
import argparse
import numpy as np
import paddle.fluid as fluid
from utils import tdm_sampler_prepare, tdm_child_prepare, trace_var
from train_network import DnnLayerClassifierNet, InputTransNet
class TdmInferNet(object):
def __init__(self, args):
self.input_embed_size = args.query_emb_size
self.node_embed_size = args.node_emb_size
self.label_nums = 2 # labels fall into two classes: positive and negative
self.node_nums = args.node_nums
self.max_layers = args.layer_size
self.batch_size = args.batch_size
self.topK = args.topK # how many items to recall in the end
self.child_nums = args.child_nums # if the tree is binary, child_nums=2
self.layer_list = self.get_layer_list(args)
self.first_layer_idx = 0
self.first_layer_node = self.create_first_layer(args)
self.layer_classifier = DnnLayerClassifierNet(args)
self.input_trans_net = InputTransNet(args)
def input_data(self):
input_emb = fluid.data(
name="input_emb",
shape=[None, self.input_embed_size],
dtype="float32", )
# first_layer and first_layer_mask hold the nodes of the layer where inference starts
first_layer = fluid.data(
name="first_layer_node",
shape=[None, 1],
dtype="int64",
lod_level=1, )
first_layer_mask = fluid.data(
name="first_layer_node_mask",
shape=[None, 1],
dtype="int64",
lod_level=1, )
inputs = [input_emb] + [first_layer] + [first_layer_mask]
return inputs
def get_layer_list(self, args):
"""get layer list from layer_list.txt"""
layer_list = []
with open(args.tree_layer_init_path, 'r') as fin:
for line in fin.readlines():
l = []
layer = (line.split('\n'))[0].split(',')
for node in layer:
if node:
l.append(node)
layer_list.append(l)
return layer_list
def create_first_layer(self, args):
"""decide which layer to start infer"""
first_layer_id = 0
for idx, layer_node in enumerate(args.layer_node_num_list):
if layer_node >= self.topK:
first_layer_id = idx
break
first_layer_node = self.layer_list[first_layer_id]
self.first_layer_idx = first_layer_id
return first_layer_node
def infer_net(self, inputs):
"""
infer的主要流程
infer的基本逻辑是:从上层开始(具体层idx由树结构及TopK值决定)
1、依次通过每一层分类器,得到当前层输入的指定节点的prob
2、根据prob值大小,取topK的节点,取这些节点的孩子节点作为下一层的输入
3、循环1、2步骤,遍历完所有层,得到每一层筛选结果的集合
4、将筛选结果集合中的叶子节点,拿出来再做一次topK,得到最终的召回输出
"""
input_emb = inputs[0]
current_layer_node = inputs[1]
current_layer_child_mask = inputs[2]
node_score = []
node_list = []
input_trans_emb = self.input_trans_net.input_fc_infer(input_emb)
for layer_idx in range(self.first_layer_idx, self.max_layers):
# determine how many nodes need to be scored at the current layer
if layer_idx == self.first_layer_idx:
current_layer_node_num = len(self.first_layer_node)
else:
current_layer_node_num = current_layer_node.shape[1] * \
current_layer_node.shape[2]
current_layer_node = fluid.layers.reshape(
current_layer_node, [-1, current_layer_node_num])
current_layer_child_mask = fluid.layers.reshape(
current_layer_child_mask, [-1, current_layer_node_num])
node_emb = fluid.embedding(
input=current_layer_node,
size=[self.node_nums, self.node_embed_size],
param_attr=fluid.ParamAttr(name="TDM_Tree_Emb"))
input_fc_out = self.input_trans_net.layer_fc_infer(input_trans_emb,
layer_idx)
# run the layer-specific classifier
layer_classifier_res = self.layer_classifier.classifier_layer_infer(
input_fc_out, node_emb, layer_idx)
# run the final discriminative classifier
tdm_fc = fluid.layers.fc(
input=layer_classifier_res,
size=self.label_nums,
act=None,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(name="tdm.cls_fc.weight"),
bias_attr=fluid.ParamAttr(name="tdm.cls_fc.bias"))
prob = fluid.layers.softmax(tdm_fc)
positive_prob = fluid.layers.slice(
prob, axes=[2], starts=[1], ends=[2])
prob_re = fluid.layers.reshape(positive_prob,
[-1, current_layer_node_num])
# filter out invalid nodes produced by padding (node_id=0)
node_zero_mask = fluid.layers.cast(current_layer_node, 'bool')
node_zero_mask = fluid.layers.cast(node_zero_mask, 'float')
prob_re = prob_re * node_zero_mask
# take topK from the current layer's scores and keep the corresponding score and node_id
k = self.topK
if current_layer_node_num < self.topK:
k = current_layer_node_num
_, topk_i = fluid.layers.topk(prob_re, k)
# the index_sample op gathers tensor values at the given indices
# for Paddle versions > 2.0 the call is paddle.index_sample
top_node = fluid.contrib.layers.index_sample(current_layer_node,
topk_i)
prob_re_mask = prob_re * current_layer_child_mask # mask out non-leaf nodes
topk_value = fluid.contrib.layers.index_sample(prob_re_mask, topk_i)
node_score.append(topk_value)
node_list.append(top_node)
# take the children of this layer's topK nodes as the input of the next layer
if layer_idx < self.max_layers - 1:
# the tdm_child op returns each input node's child and child_mask
# child_mask=1 if the child is a leaf node, otherwise 0
current_layer_node, current_layer_child_mask = \
fluid.contrib.layers.tdm_child(x=top_node,
node_nums=self.node_nums,
child_nums=self.child_nums,
param_attr=fluid.ParamAttr(
name="TDM_Tree_Info"),
dtype='int64')
total_node_score = fluid.layers.concat(node_score, axis=1)
total_node = fluid.layers.concat(node_list, axis=1)
# since the tree may be unbalanced, take topK over the leaf nodes collected from all layers
res_score, res_i = fluid.layers.topk(total_node_score, self.topK)
res_layer_node = fluid.contrib.layers.index_sample(total_node, res_i)
res_node = fluid.layers.reshape(res_layer_node, [-1, self.topK, 1])
# use the Tree_info table to convert node_id into item_id
tree_info = fluid.default_main_program().global_block().var(
"TDM_Tree_Info")
res_node_emb = fluid.layers.gather_nd(tree_info, res_node)
res_item = fluid.layers.slice(
res_node_emb, axes=[2], starts=[0], ends=[1])
res_item_re = fluid.layers.reshape(res_item, [-1, self.topK])
return res_item_re
| 39.760204
| 80
| 0.597203
| 7,617
| 0.899185
| 0
| 0
| 0
| 0
| 0
| 0
| 2,282
| 0.26939
|
a7855fa0e107181fe9f7c866727366717fbbb9d3
| 727
|
py
|
Python
|
fixtures/requests.py
|
AzatAza/december-api-tests
|
dd120fd0c479b035dbe84ccd1fb1dd687d84af5d
|
[
"Apache-2.0"
] | null | null | null |
fixtures/requests.py
|
AzatAza/december-api-tests
|
dd120fd0c479b035dbe84ccd1fb1dd687d84af5d
|
[
"Apache-2.0"
] | null | null | null |
fixtures/requests.py
|
AzatAza/december-api-tests
|
dd120fd0c479b035dbe84ccd1fb1dd687d84af5d
|
[
"Apache-2.0"
] | null | null | null |
import requests
from requests import Response
class Client:
@staticmethod
def request(method: str, url: str, **kwargs) -> Response:
"""
Request method
method: method for the new Request object: GET, OPTIONS, HEAD, POST, PUT, PATCH, or DELETE. # noqa
url – URL for the new Request object.
**kwargs:
params – (optional) Dictionary, list of tuples or bytes to send in the query string for the Request. # noqa
json – (optional) A JSON serializable Python object to send in the body of the Request. # noqa
headers – (optional) Dictionary of HTTP Headers to send with the Request.
"""
return requests.request(method, url, **kwargs)
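# Illustrative usage sketch (not part of the fixture): the URL below is a
# placeholder and the call requires network access.
if __name__ == '__main__':
    response = Client.request('GET', 'https://example.com/health', timeout=5)
    print(response.status_code)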
| 42.764706
| 119
| 0.645117
| 687
| 0.934694
| 0
| 0
| 669
| 0.910204
| 0
| 0
| 530
| 0.721088
|
a7857bc199ab6450358c23073cebf9f0bd31bb0d
| 352
|
py
|
Python
|
rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py
|
MLH-Fellowship/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 1
|
2021-09-17T06:11:02.000Z
|
2021-09-17T06:11:02.000Z
|
rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py
|
soma2000-lang/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 5
|
2021-02-03T05:29:41.000Z
|
2021-02-08T01:14:11.000Z
|
rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py
|
soma2000-lang/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 4
|
2021-02-03T05:05:00.000Z
|
2021-07-14T06:21:10.000Z
|
from castervoice.lib.ctrl.mgr.errors.base_class_error import DontUseBaseClassError
class BaseGrammarContainer(object):
def set_non_ccr(self, rcn, grammar):
raise DontUseBaseClassError(self)
def set_ccr(self, ccr_grammars):
raise DontUseBaseClassError(self)
def wipe_ccr(self):
raise DontUseBaseClassError(self)
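# Illustrative sketch (not part of the original module): a hypothetical
# concrete container is expected to override every method instead of raising.
class DictGrammarContainer(BaseGrammarContainer):
    def __init__(self):
        self._non_ccr = {}
        self._ccr = []
    def set_non_ccr(self, rcn, grammar):
        self._non_ccr[rcn] = grammar
    def set_ccr(self, ccr_grammars):
        self._ccr = list(ccr_grammars)
    def wipe_ccr(self):
        self._ccr = []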
| 25.142857
| 82
| 0.75
| 266
| 0.755682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a7871a31d1f892b28ff5af9f08dffdc9caf09213
| 262
|
py
|
Python
|
main/urls.py
|
homata/snow_removing
|
c02585b8ceab3da107b932d6066c8b8344af1ff7
|
[
"Apache-2.0"
] | 2
|
2018-12-05T01:03:10.000Z
|
2019-03-16T04:27:03.000Z
|
main/urls.py
|
homata/snow_removing
|
c02585b8ceab3da107b932d6066c8b8344af1ff7
|
[
"Apache-2.0"
] | null | null | null |
main/urls.py
|
homata/snow_removing
|
c02585b8ceab3da107b932d6066c8b8344af1ff7
|
[
"Apache-2.0"
] | 1
|
2018-12-04T14:18:08.000Z
|
2018-12-04T14:18:08.000Z
|
from django.urls import include, path
from . import views
from django.views.generic.base import RedirectView
# Application namespace
# https://docs.djangoproject.com/ja/2.0/intro/tutorial03/
app_name = 'main'
urlpatterns = [
path('', views.index, name='index'),
]
| 21.833333
| 57
| 0.736641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.392361
|
a7882585c7ab1245006e29c8a68efd228a0cc9dc
| 1,114
|
py
|
Python
|
server/server/urls.py
|
oSoc17/lopeningent_backend
|
3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd
|
[
"MIT"
] | 4
|
2017-07-04T15:18:59.000Z
|
2017-07-08T10:48:37.000Z
|
server/server/urls.py
|
oSoc17/lopeningent_backend
|
3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd
|
[
"MIT"
] | 16
|
2017-07-04T15:36:41.000Z
|
2017-10-18T07:47:45.000Z
|
server/server/urls.py
|
oSoc17/lopeningent_backend
|
3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd
|
[
"MIT"
] | null | null | null |
"""server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
import interface.stats as stats
import interface.routes as route
import interface.pois as pois
urlpatterns = [
url(r'^stats/check/', stats.get_stats_from_id ),
url(r'^stats/update/', stats.post_stats_from_id),
url(r'^route/generate/', route.generate),
url(r'^route/return/', route.return_home),
url(r'^route/rate/', route.rate_route),
url(r'^poi/coords/', pois.get_coords),
url(r'^poi/types/', pois.get_types)
]
| 37.133333
| 79
| 0.701975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 752
| 0.675045
|
a7884b84cf2835ce8244b051ecf8f0adaa14e7d4
| 9,507
|
py
|
Python
|
app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py
|
e-lubrini/fake-news-detector
|
f2464e4cac73d9203e7483ac0aa5cd47ddfba811
|
[
"MIT"
] | null | null | null |
app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py
|
e-lubrini/fake-news-detector
|
f2464e4cac73d9203e7483ac0aa5cd47ddfba811
|
[
"MIT"
] | 1
|
2021-11-24T12:23:49.000Z
|
2021-11-24T12:23:49.000Z
|
app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py
|
e-lubrini/fake-news-detector
|
f2464e4cac73d9203e7483ac0aa5cd47ddfba811
|
[
"MIT"
] | 1
|
2021-11-24T18:07:44.000Z
|
2021-11-24T18:07:44.000Z
|
# coding: utf-8
"""
FRED API
FRED is a tool for automatically producing RDF/OWL ontologies and linked data from natural language sentences. The method is based on Combinatory Categorial Grammar, Discourse Representation Theory, Linguistic Frames, and Ontology Design Patterns. Results are enriched with NER and WSD. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DefaultApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def stlab_tools_fred_get(self, authorization, text, **kwargs): # noqa: E501
"""stlab_tools_fred_get # noqa: E501
Generate RDF from natural language text. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stlab_tools_fred_get(authorization, text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: The authorization bearer. Type \"Bearer xxx-yyy-zzz\", where xxx-yyy-zzz is your secret token. (required)
:param str text: The input natural language text. (required)
:param str prefix: The prefix used for the namespace of terms introduced by FRED in the output. If not specified fred: is used as default.
:param str namespace: The namespace used for the terms introduced by FRED in the output. If not specified http://www.ontologydesignpatterns.org/ont/fred/domain.owl# is used as default.
:param bool wsd: Perform Word Sense Disambiguation on input terms. By default it is set to false.
:param bool wfd: Perform Word Frame Disambiguation on input terms in order to provide alignments to WordNet synsets, WordNet Super-senses and Dolce classes. By default it is set to false.
:param str wfd_profile: The profile associated with the Word Frame Disambiguation
:param bool tense: Include temporal relations between events according to their grammatical tense. By default it is set to false.
:param bool roles: Use FrameNet roles into the resulting ontology. By default it is set to false.
:param str textannotation: The vocabulary used for annotating the text in RDF. Two possible alternatives are available, i.e. EARMARK and NIF.
:param bool semantic_subgraph: Generate a RDF which only expresses the semantics of a sentence without additional RDF triples, such as those containing text spans, part-of-speeches, etc. By default it is set to false.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.stlab_tools_fred_get_with_http_info(authorization, text, **kwargs) # noqa: E501
else:
(data) = self.stlab_tools_fred_get_with_http_info(authorization, text, **kwargs) # noqa: E501
return data
def stlab_tools_fred_get_with_http_info(self, authorization, text, **kwargs): # noqa: E501
"""stlab_tools_fred_get # noqa: E501
Generate RDF from natural language text. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stlab_tools_fred_get_with_http_info(authorization, text, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: The authorization bearer. Type \"Bearer xxx-yyy-zzz\", where xxx-yyy-zzz is your secret token. (required)
:param str text: The input natural language text. (required)
:param str prefix: The prefix used for the namespace of terms introduced by FRED in the output. If not specified fred: is used as default.
:param str namespace: The namespace used for the terms introduced by FRED in the output. If not specified http://www.ontologydesignpatterns.org/ont/fred/domain.owl# is used as default.
:param bool wsd: Perform Word Sense Disambiguation on input terms. By default it is set to false.
:param bool wfd: Perform Word Frame Disambiguation on input terms in order to provide alignments to WordNet synsets, WordNet Super-senses and Dolce classes. By default it is set to false.
:param str wfd_profile: The profile associated with the Word Frame Disambiguation
:param bool tense: Include temporal relations between events according to their grammatical tense. By default it is set to false.
:param bool roles: Use FrameNet roles into the resulting ontology. By default it is set to false.
:param str textannotation: The vocabulary used for annotating the text in RDF. Two possible alternatives are available, i.e. EARMARK and NIF.
:param bool semantic_subgraph: Generate a RDF which only expresses the semantics of a sentence without additional RDF triples, such as those containing text spans, part-of-speeches, etc. By default it is set to false.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization', 'text', 'prefix', 'namespace', 'wsd', 'wfd', 'wfd_profile', 'tense', 'roles', 'textannotation', 'semantic_subgraph'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method stlab_tools_fred_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'authorization' is set
if self.api_client.client_side_validation and ('authorization' not in params or
params['authorization'] is None): # noqa: E501
raise ValueError("Missing the required parameter `authorization` when calling `stlab_tools_fred_get`") # noqa: E501
# verify the required parameter 'text' is set
if self.api_client.client_side_validation and ('text' not in params or
params['text'] is None): # noqa: E501
raise ValueError("Missing the required parameter `text` when calling `stlab_tools_fred_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'text' in params:
query_params.append(('text', params['text'])) # noqa: E501
if 'prefix' in params:
query_params.append(('prefix', params['prefix'])) # noqa: E501
if 'namespace' in params:
query_params.append(('namespace', params['namespace'])) # noqa: E501
if 'wsd' in params:
query_params.append(('wsd', params['wsd'])) # noqa: E501
if 'wfd' in params:
query_params.append(('wfd', params['wfd'])) # noqa: E501
if 'wfd_profile' in params:
query_params.append(('wfd_profile', params['wfd_profile'])) # noqa: E501
if 'tense' in params:
query_params.append(('tense', params['tense'])) # noqa: E501
if 'roles' in params:
query_params.append(('roles', params['roles'])) # noqa: E501
if 'textannotation' in params:
query_params.append(('textannotation', params['textannotation'])) # noqa: E501
if 'semantic_subgraph' in params:
query_params.append(('semantic-subgraph', params['semantic_subgraph'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/rdf+xml', 'text/turtle', 'application/rdf+json', 'text/rdf+n3', 'text/rdf+nt', 'image/png']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/stlab-tools/fred', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 54.637931
| 305
| 0.664458
| 8,882
| 0.934259
| 0
| 0
| 0
| 0
| 0
| 0
| 6,303
| 0.662985
|
a788aafcac15ec56bc56e7dbc0349b85a1880056
| 1,496
|
py
|
Python
|
geesedb/interpreter/metadata.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | 12
|
2021-07-05T12:33:20.000Z
|
2021-10-11T20:44:12.000Z
|
geesedb/interpreter/metadata.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | 7
|
2021-07-28T20:40:36.000Z
|
2021-10-12T12:31:51.000Z
|
geesedb/interpreter/metadata.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | null | null | null |
import json
from ..connection import get_connection
class Metadata:
def __init__(self, database):
self.connection = get_connection(database).connection
# first list is default if nothing is specified (should be extended)
# list is ordered as [edge_name, node1_id, edge_node1_id, edge_node2_id, node2_id]
def get_metadata(self):
self.connection.execute("SELECT metadata FROM _meta")
metadata = json.loads(self.connection.fetchone()[0])
return metadata
def update_metadata(self, data):
self.connection.execute(f"UPDATE _meta SET metadata='{json.dumps(data)}'")
def get_default_join_info(self, node1, node2):
return self.get_metadata()[node1][node2][0]
def get_all_join_info(self, node1, node2):
return self.get_metadata()[node1][node2]
# {
# 'term_dict': {
# 'docs': [['term_doc', 'term_id', 'term_id', 'doc_id', 'doc_id']]
# },
# 'docs': {
# 'term_dict': [['term_doc', 'doc_id', 'doc_id', 'term_id', 'term_id']],
# 'entities': [['entity_doc', 'collection_id', 'doc_id', 'entity', 'entity']],
# 'authors': [['doc_author', 'collection_id', 'doc', 'author', 'author']]
# },
# 'entities': {
# 'docs': [['entity_doc', 'entity', 'entity', 'doc_id', 'collection_id']]
# },
# 'authors': {
# 'docs': [['doc_author', 'author', 'author', 'doc', 'collection_id']]
# }
# }
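# Illustrative usage sketch (not part of the original module): 'example_index'
# is a placeholder path to an existing GeeseDB database.
if __name__ == '__main__':
    md = Metadata('example_index')
    # Per the commented layout above this returns
    # ['term_doc', 'term_id', 'term_id', 'doc_id', 'doc_id'].
    print(md.get_default_join_info('term_dict', 'docs'))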
| 34.790698
| 90
| 0.584225
| 1,440
| 0.962567
| 0
| 0
| 0
| 0
| 0
| 0
| 816
| 0.545455
|
a78ab7709a2fb033bbbef0c592de69b2eb89f7f4
| 2,381
|
py
|
Python
|
content_feeders/in.py
|
Giapa/ContentAggregator
|
978c552406a770791cff435d41eb2bf135b5454d
|
[
"MIT"
] | null | null | null |
content_feeders/in.py
|
Giapa/ContentAggregator
|
978c552406a770791cff435d41eb2bf135b5454d
|
[
"MIT"
] | 2
|
2020-04-15T09:16:50.000Z
|
2020-04-15T09:22:06.000Z
|
content_feeders/in.py
|
IEEEdiots/ContentAggregator
|
978c552406a770791cff435d41eb2bf135b5454d
|
[
"MIT"
] | 1
|
2021-03-25T17:58:16.000Z
|
2021-03-25T17:58:16.000Z
|
import requests
from bs4 import BeautifulSoup
def getSummary(link):
#Get page response
response = requests.get(link)
#Parse the page
soup = BeautifulSoup(response.content,'html.parser')
#Find first paragraph
summary_p = soup.find('p')
#Get the first text
summary = summary_p.text[:40] + ' ...'
#Return the text
return summary
def crawl_page():
#Available urls
urls = ['https://www.in.gr/world/','https://www.in.gr/politics/','https://www.in.gr/economy/','https://www.in.gr/sports/','https://www.in.gr/culture/','https://www.in.gr/entertainment/','https://www.in.gr/tech/']
#Types of articles
content = ['global','politics','economy','sports','culture','entertainment','technology']
#Adding headers
headers = {
'User-agent':'Mozilla/5.0'
}
#Complete list of article data
data_list = list()
#For given urls
for index,url in enumerate(urls):
#User report
print(f'---- Scraping from url:{url}')
#Get page response
response = requests.get(url, headers=headers)
#Parse the page
soup = BeautifulSoup(response.content,'html.parser')
#Find the article element
articles = soup.find_all('article')
#Loop through all articles
for article in articles:
#Title and url
info = article.find('a',{'href':True})
link = info['href']
title = info['title']
#Image
image_element = article.find('div',{'class':'absg'})
image = image_element['data-src']
#Summary
try:
sum_span = article.find('span',{'class':'article-dd'})
summary = sum_span.text
except AttributeError:
summary = getSummary(link)
#Time
time_elem = article.find('time')
time = time_elem['datetime']
#Complete data
article_data = {
'site':'in',
'type':content[index],
'title':title,
'link':link,
'image':image,
'summary':summary,
'date':time
}
#Add the article data to the complete list
data_list.append(article_data)
print(len(data_list))
if __name__ == '__main__':
crawl_page()
| 34.014286
| 216
| 0.554389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 890
| 0.373793
|
a78b53e7326a1d9b30856a88ddc123ec056f3a2a
| 18,573
|
py
|
Python
|
resources/lib/database_tv.py
|
bradyemerson/plugin.video.showtimeanytime
|
65e7f130c14c8ef963cb3669638b8cf14860ec82
|
[
"Apache-2.0"
] | null | null | null |
resources/lib/database_tv.py
|
bradyemerson/plugin.video.showtimeanytime
|
65e7f130c14c8ef963cb3669638b8cf14860ec82
|
[
"Apache-2.0"
] | null | null | null |
resources/lib/database_tv.py
|
bradyemerson/plugin.video.showtimeanytime
|
65e7f130c14c8ef963cb3669638b8cf14860ec82
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
from datetime import date, datetime
from sqlite3 import dbapi2 as sqlite
from bs4 import BeautifulSoup
import simplejson as json
import xbmcvfs
import xbmcgui
import common
import connection
import database_common as db_common
def create():
c = _database.cursor()
c.execute('''CREATE TABLE series
(series_id INTEGER PRIMARY KEY,
title TEXT,
title_sort TEXT,
plot TEXT,
directors TEXT,
actors TEXT,
thumb TEXT,
total_seasons INTEGER,
total_episodes INTEGER,
favor BOOLEAN DEFAULT 0,
in_last_update BOOLEAN DEFAULT 1,
last_updated timestamp);''')
c.execute('''CREATE TABLE season
(season_id INTEGER PRIMARY KEY,
season_no INTEGER,
series_id INTEGER,
plot TEXT,
FOREIGN KEY(series_id) REFERENCES series(series_id) ON DELETE CASCADE);''')
c.execute('''CREATE TABLE episode
(episode_id INTEGER PRIMARY KEY,
season_id INTEGER,
episode_no INTEGER,
title TEXT,
title_sort TEXT,
plot TEXT,
duration INTEGER,
year INTEGER,
studio TEXT,
mpaa TEXT,
advisories TEXT,
aired_date timestamp,
thumb TEXT,
play_count INTEGER DEFAULT 0,
FOREIGN KEY(season_id) REFERENCES season(season_id) ON DELETE CASCADE);''')
_database.commit()
c.close()
def insert_series(series_id, title=None, title_sort=None, plot=None, directors=None, actors=None, thumb=None,
total_seasons=None, total_episodes=None):
c = _database.cursor()
c.execute('''INSERT OR REPLACE INTO series (
series_id,
title,
title_sort,
plot,
directors,
actors,
thumb,
total_seasons,
total_episodes,
favor,
in_last_update,
last_updated) VALUES (
:series_id,
:title,
:title_sort,
:plot,
:directors,
:actors,
:thumb,
:total_seasons,
:total_episodes,
(SELECT favor FROM series WHERE series_id = :series_id),
:in_last_update,
(SELECT last_updated FROM series WHERE series_id = :series_id))''', {
'series_id': series_id,
'title': title,
'title_sort': title_sort,
'plot': plot,
'directors': directors,
'actors': actors,
'thumb': thumb,
'total_seasons': total_seasons,
'total_episodes': total_episodes,
'in_last_update': True
})
_database.commit()
c.close()
def insert_season(series_id, season_no, plot=None):
c = _database.cursor()
row = lookup_season(series_id=series_id, season_no=season_no, fields='season_id').fetchone()
if row:
c.execute('''UPDATE season SET plot = :plot WHERE season_id = :season_id''', {
'season_id': row['season_id'],
'plot': plot
})
else:
c.execute('''INSERT INTO season (series_id, season_no, plot) VALUES (
:series_id,
:season_no,
:plot
)''', {
'series_id': series_id,
'season_no': season_no,
'plot': plot
})
_database.commit()
c.close()
def insert_episode(episode_id, season_id, episode_no=None, title=None, title_sort=None, plot=None,
duration=None, year=None, studio=None, mpaa=None, advisories=None, aired_date=None, thumb=None):
c = _database.cursor()
c.execute('''INSERT OR REPLACE INTO episode (
episode_id,
season_id,
episode_no,
title,
title_sort,
plot,
duration,
year,
studio,
mpaa,
advisories,
aired_date,
thumb,
play_count) VALUES (
:episode_id,
:season_id,
:episode_no,
:title,
:title_sort,
:plot,
:duration,
:year,
:studio,
:mpaa,
:advisories,
:aired_date,
:thumb,
COALESCE((SELECT play_count FROM episode WHERE episode_id = :episode_id), 0))''', {
'episode_id': episode_id,
'season_id': season_id,
'episode_no': episode_no,
'title': title,
'title_sort': title_sort,
'plot': plot,
'duration': duration,
'year': year,
'studio': studio,
'mpaa': mpaa,
'advisories': advisories,
'aired_date': aired_date,
'thumb': thumb
})
_database.commit()
c.close()
def lookup_series(content_id, fields='*'):
c = _database.cursor()
return c.execute('SELECT DISTINCT {0} FROM series WHERE series_id = (?)'.format(fields), (content_id,))
def lookup_season(season_id=None, series_id=None, season_no=None, fields='*'):
c = _database.cursor()
if season_id:
return c.execute('SELECT {0} FROM season WHERE season_id = (?)'.format(fields), (season_id,))
elif series_id and season_no:
return c.execute('SELECT {0} FROM season WHERE series_id = (?) AND season_no = (?)'.format(fields),
(series_id, season_no))
def lookup_episode(content_id):
c = _database.cursor()
return c.execute('SELECT DISTINCT * FROM episode WHERE episode_id = (?)', (content_id,))
def delete_series(content_id):
c = _database.cursor()
c.execute('DELETE FROM series WHERE series_id = (?)', (content_id,))
c.close()
def watch_episode(content_id):
# TODO make this actually increment
c = _database.cursor()
c.execute("UPDATE episode SET play_count = play_count + 1 WHERE episode_id = (?)", (content_id,))
_database.commit()
c.close()
return c.rowcount
def unwatch_episode(content_id):
c = _database.cursor()
c.execute("UPDATE episode SET play_count=? WHERE episode_id = (?)", (0, content_id))
_database.commit()
c.close()
return c.rowcount
def favor_series(content_id):
c = _database.cursor()
c.execute("UPDATE series SET favor=? WHERE series_id=?", (True, content_id))
_database.commit()
c.close()
return c.rowcount
def unfavor_series(content_id):
c = _database.cursor()
c.execute("UPDATE series SET favor=? WHERE series_id=?", (False, content_id))
_database.commit()
c.close()
return c.rowcount
def get_series(directorfilter=False, watchedfilter=False, favorfilter=False, actorfilter=False,
alphafilter=False, studiofilter=False):
c = _database.cursor()
if actorfilter:
actorfilter = '%' + actorfilter + '%'
return c.execute('SELECT DISTINCT * FROM series WHERE actors LIKE (?)',
(actorfilter,))
elif directorfilter:
return c.execute('SELECT DISTINCT * FROM series WHERE directors LIKE (?)',
(directorfilter,))
elif studiofilter:
return c.execute('SELECT DISTINCT * FROM series WHERE studio = (?)', (studiofilter,))
elif watchedfilter:
return c.execute('SELECT DISTINCT * FROM series WHERE playcount > 0')
elif favorfilter:
return c.execute('SELECT DISTINCT * FROM series WHERE favor = 1')
elif alphafilter:
return c.execute('SELECT DISTINCT * FROM series WHERE title REGEXP (?)',
(alphafilter + '*',))
else:
return c.execute('SELECT DISTINCT * FROM series')
def get_series_season_count(series_id):
c = _database.cursor()
row = c.execute('''SELECT COUNT(sea.season_id) AS total_seasons
FROM season AS sea
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE ser.series_id = (?)
GROUP BY ser.series_id''', (series_id,)).fetchone()
c.close()
if row:
return row['total_seasons']
else:
return 0
def get_series_episode_count(series_id, filter=None):
c = _database.cursor()
if filter == 'watched':
row = c.execute('''SELECT COUNT(e.episode_id) AS total_episodes
FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE ser.series_id = (?) AND e.play_count > 0
GROUP BY ser.series_id''', (series_id,)).fetchone()
else:
row = c.execute('''SELECT COUNT(e.episode_id) AS total_episodes
FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE ser.series_id = (?)
GROUP BY ser.series_id''', (series_id,)).fetchone()
c.close()
if row:
return row['total_episodes']
else:
return 0
def get_series_year(series_id):
c = _database.cursor()
row = c.execute('''SELECT e.year FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE ser.series_id = (?)
ORDER BY e.year ASC LIMIT 1''', (series_id,)).fetchone()
c.close()
if row:
return row['year']
else:
return None
def _update_series_last_update(series_id, time=None):
# evaluate the default at call time, not at import time
time = time or datetime.now()
c = _database.cursor()
c.execute('UPDATE series SET last_updated = :last_update WHERE series_id = :series_id', {
'last_update': time,
'series_id': series_id
})
c.close()
def get_seasons(series_id):
c = _database.cursor()
return c.execute('''SELECT DISTINCT sea.*,ser.title AS series_title
FROM season AS sea
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE ser.series_id = (?)''', (series_id,))
def get_season_episode_count(season_id, filter=None):
c = _database.cursor()
if filter == 'watched':
row = c.execute('''SELECT COUNT(e.episode_id) AS total_episodes
FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
WHERE sea.season_id = (?) AND e.play_count > 0
GROUP BY sea.season_id''', (season_id,)).fetchone()
else:
row = c.execute('''SELECT COUNT(e.episode_id) AS total_episodes
FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
WHERE sea.season_id = (?)
GROUP BY sea.season_id''', (season_id,)).fetchone()
c.close()
if row:
return row['total_episodes']
else:
return 0
def get_season_year(season_id):
c = _database.cursor()
row = c.execute('''SELECT e.year FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
WHERE sea.season_id = (?)
ORDER BY e.year ASC LIMIT 1''', (season_id,)).fetchone()
c.close()
if row:
return row['year']
else:
return None
def get_episodes(season_id):
c = _database.cursor()
return c.execute('''SELECT DISTINCT e.*, sea.season_no AS season_no, ser.title AS series_title, ser.series_id AS series_id
FROM episode AS e
JOIN season AS sea ON sea.season_id = e.season_id
JOIN series AS ser ON ser.series_id = sea.series_id
WHERE e.season_id = (?)''', (season_id,))
def get_types(col):
c = _database.cursor()
items = c.execute('select distinct %s from series' % col)
list = []
for data in items:
data = data[0]
if type(data) == type(str()):
if 'Rated' in data:
item = data.split('for')[0]
if item not in list and item != '' and item != 0 and item != 'Inc.' and item != 'LLC.':
list.append(item)
else:
data = data.decode('utf-8').encode('utf-8').split(',')
for item in data:
item = item.replace('& ', '').strip()
if item not in list and item != '' and item != 0 and item != 'Inc.' and item != 'LLC.':
list.append(item)
elif data != 0:
if data is not None:
list.append(str(data))
c.close()
return list
def update_tv(force=False):
# Check if we've recently updated and skip
if not force and not _needs_update():
return
dialog = xbmcgui.DialogProgress()
dialog.create('Refreshing TV Database')
dialog.update(0, 'Initializing TV Scan')
xml_series_url = '{0}/tve/xml/category?categoryid=101'.format(db_common.API_DOMAIN)
data = connection.get_url(xml_series_url)
soup = BeautifulSoup(data)
series_list = soup.find('subcategory', recursive=False).find('series', recursive=False).find_all('series', recursive=False)
# Mark all series as unfound. This will be updated as we go through
c = _database.cursor()
c.execute("UPDATE series SET in_last_update = 0")
_database.commit()
c.close()
total = len(series_list)
count = 0
for series in series_list:
count += 1
dialog.update(0, 'Scanned {0} of {1} TV series'.format(count, total))
print 'series: '
print series
series_json_url = '{0}/api/series/{1}'.format(db_common.API_DOMAIN, series['seriesid'])
json_data = json.loads(connection.get_url(series_json_url))
series_id = series['seriesid']
title = common.string_unicode(json_data['name'])
title_sort = common.string_unicode(json_data['sortName'])
plot = common.string_unicode(json_data['description']['long'])
total_seasons = json_data['totalSeasons']
total_episodes = json_data['totalEpisodes']
thumb = None
for image in series.find_all('Image'):
if image['width'] == '1920' and image['height'] == '1080':
thumb = image.find('url').string
break
insert_series(series_id, title, title_sort, plot, None, None, thumb, total_seasons, total_episodes)
# Season Children
if 'seasons' in json_data:
_json_process_seasons(json_data['seasons'], series_id)
_set_last_update()
# Remove unfound movies
c = _database.cursor()
c.execute("DELETE FROM series WHERE in_last_update = 0")
c.close()
def _json_process_seasons(season_data, series_id):
for season in season_data:
insert_season(series_id, season['seasonNum'], season['description']['long'])
def update_series(series_id, force=False):
# Check for new episodes every 12 hours
row = lookup_series(series_id, 'last_updated').fetchone()
if force is False and row['last_updated']:
last_update = common.parse_date(row['last_updated'], '%Y-%m-%d %H:%M:%S.%f')
if (datetime.now() - last_update).seconds < 43200:
# No update needed
return
xml_series_url = '{0}/tve/xml/series?seriesid={1}'.format(db_common.API_DOMAIN, series_id)
data = connection.get_url(xml_series_url)
series = BeautifulSoup(data).find('series', recursive=False)
for episode in series.find_all('title', attrs={'type': 'Episode'}):
episode_id = episode['titleid']
title = common.string_unicode(episode.find('title', recursive=False).string)
title_sort = common.string_unicode(episode.find('sorttitle', recursive=False).string)
plot = common.string_unicode(episode.find('description', recursive=False).string)
year = episode.find('releaseyear', recursive=False).string
duration = episode.find('duration', recursive=False).string
mpaa = episode.find('rating', recursive=False).string
advisories = episode.find('advisories', recursive=False).string
air_date = None
try:
air_date = common.parse_date(episode.find('originalairdate', recursive=False).string, '%m/%d/%Y %I:%M%p')
except:
pass
thumb = None
for image in episode.find_all('image'):
if image['width'] == '866' and image['height'] == '487':
thumb = image.find('url').string
break
series_tag = episode.find('series', recursive=False)
episode_no = series_tag['episode']
season_no = series_tag['season']
season = lookup_season(series_id=series_id, season_no=season_no, fields='season_id').fetchone()
if not season:
insert_season(series_tag['seriesid'], season_no)
season = lookup_season(series_id=series_id, season_no=season_no, fields='season_id').fetchone()
season_id = season['season_id']
insert_episode(episode_id, season_id, episode_no, title, title_sort, plot, duration, year, None,
mpaa, advisories, air_date, thumb)
_update_series_last_update(series_id)
def _needs_update():
# Update every 15 days
if 'last_update' in _database_meta:
last_update = common.parse_date(_database_meta['last_update'], '%Y-%m-%d')
return (date.today() - last_update.date()).days > 15
return True
def _set_last_update():
_database_meta['last_update'] = date.today().strftime('%Y-%m-%d')
_write_meta_file()
def _write_meta_file():
f = open(DB_META_FILE, 'w')
json.dump(_database_meta, f)
f.close()
DB_META_FILE = os.path.join(common.__addonprofile__, 'tv.meta')
_database_meta = False
if xbmcvfs.exists(DB_META_FILE):
f = open(DB_META_FILE, 'r')
_database_meta = json.load(f)
f.close()
else:
_database_meta = {}
DB_FILE = os.path.join(common.__addonprofile__, 'tv.db')
if not xbmcvfs.exists(DB_FILE):
_database = sqlite.connect(DB_FILE)
_database.text_factory = str
_database.row_factory = sqlite.Row
create()
else:
_database = sqlite.connect(DB_FILE)
_database.text_factory = str
_database.row_factory = sqlite.Row
| 33.769091
| 127
| 0.586658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,009
| 0.431217
|
a78c1c68d2605e5b65a1772b489da024f926a771
| 16,450
|
py
|
Python
|
apps/configuration/editions/base.py
|
sotkonstantinidis/testcircle
|
448aa2148fbc2c969e60f0b33ce112d4740a8861
|
[
"Apache-2.0"
] | 3
|
2019-02-24T14:24:43.000Z
|
2019-10-24T18:51:32.000Z
|
apps/configuration/editions/base.py
|
sotkonstantinidis/testcircle
|
448aa2148fbc2c969e60f0b33ce112d4740a8861
|
[
"Apache-2.0"
] | 17
|
2017-03-14T10:55:56.000Z
|
2022-03-11T23:20:19.000Z
|
apps/configuration/editions/base.py
|
sotkonstantinidis/testcircle
|
448aa2148fbc2c969e60f0b33ce112d4740a8861
|
[
"Apache-2.0"
] | 2
|
2016-02-01T06:32:40.000Z
|
2019-09-06T04:33:50.000Z
|
import copy
from configuration.configuration import QuestionnaireConfiguration
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import F
from django.template.loader import render_to_string
from configuration.models import Configuration, Key, Value, Translation, \
Questiongroup, Category
class Edition:
"""
Base class for a new edition of a questionnaire configuration, providing a central interface for:
- Simple, explicit definition for changes in configuration
- Re-use of operations, with possibility of customizing them (callables)
- Changes can be tracked per question between editions
- Verbose display of changes before application
- Generic helper methods for help texts, release notes and such
"""
code = ''
edition = ''
hierarchy = [
'sections',
'categories',
'subcategories',
'questiongroups',
'questions'
]
hierarchy_modules = 'modules'
def __str__(self):
return f'{self.code}: Edition {self.edition}'
@property
def operations(self):
raise NotImplementedError('A list of operations is required.')
def __init__(
self, key: Key, value: Value, questiongroup: Questiongroup,
category: Category, configuration: Configuration,
translation: Translation):
"""
Load operations, and validate the required instance variables.
"""
self.key = key
self.value = value
self.questiongroup = questiongroup
self.category = category
self.configuration = configuration
self.translation = translation
self.validate_instance_variables()
def validate_instance_variables(self):
for required_variable in ['code', 'edition']:
if not getattr(self, required_variable):
raise NotImplementedError('Instance variable "%s" is required' % required_variable)
if self.code not in [code[0] for code in Configuration.CODE_CHOICES]:
raise AttributeError('Code %s is not a valid configuration code choice' % self.code)
def run_operations(self):
"""
Apply operations, as defined by self.operations
"""
data = self.get_base_configuration_data()
for _operation in self.operations:
data = _operation.migrate(**data)
self.save_object(**data)
def get_base_configuration_data(self):
"""
Get configuration data from the 'previous' version, which is the base for this edition.
"""
return self.configuration.objects.filter(
code=self.code
).exclude(
code=self.code, edition=self.edition
).latest(
'created'
).data
def save_object(self, **data) -> Configuration:
"""
Create or update the configuration with the modified data.
"""
try:
obj = self.configuration.objects.get(
edition=self.edition, code=self.code)
except self.configuration.DoesNotExist:
obj = self.configuration(edition=self.edition, code=self.code)
obj.data = data
# Validate the data before saving.
questionnaire_configuration = QuestionnaireConfiguration(
keyword=self.code, configuration_object=obj)
if questionnaire_configuration.configuration_error:
raise Exception('Configuration error: %s' %
questionnaire_configuration.configuration_error)
obj.save()
return obj
def get_release_notes(self):
for _operation in self.operations:
yield _operation.render()
def update_questionnaire_data(self, **data) -> dict:
"""
Gets called when creating a new version of a questionnaire in a new
edition. Calls each operation's "transform_questionnaire" method.
"""
for _operation in self.operations:
data = _operation.update_questionnaire_data(**data)
return data
@classmethod
def run_migration(cls, apps, schema_editor):
"""
Callable for the django migration file. Create an empty migration with:
```python manage.py makemigrations configuration --empty```
Add this to the operations list:
operations = [
migrations.RunPython(<Subclass>.run_migration)
]
"""
if settings.IS_TEST_RUN:
# This needs discussion! What is expected of this migration in test mode?
return
# Models are loaded here, so they are available in the context of a migration.
model_names = ['Configuration', 'Key', 'Value', 'Translation']
kwargs = {}
for model in model_names:
kwargs[model.lower()] = apps.get_model('configuration', model)
cls(**kwargs).run_operations()
@property
def translation_key(self) -> str:
"""
Name of the current configuration, used as dict-key in the translation-data
(see Translation.get_translation)
"""
return f'{self.code}_{self.edition}'
def update_translation(self, update_pk: int, **data):
"""
Helper to replace texts (for choices, checkboxes, labels, etc.).
Create a new translation for this edition. Adds this configuration with
edition as a new key to the given (update_pk) translation object.
"""
obj = self.translation.objects.get(pk=update_pk)
obj.data.update({self.translation_key: data})
obj.save()
def create_new_translation(
self, translation_type, translation_keys: list=None,
**data) -> Translation:
"""
Create and return a new translation entry.
"""
if translation_keys:
data = {t: data for t in translation_keys}
else:
data = {self.translation_key: data}
translation, __ = self.translation.objects.get_or_create(
translation_type=translation_type, data=data)
return translation
def create_new_category(
self, keyword: str, translation: dict or int or None) -> Category:
if isinstance(translation, dict):
translation_obj = self.create_new_translation(
translation_type='category', **translation)
elif isinstance(translation, int):
translation_obj = self.translation.objects.get(pk=translation)
else:
translation_obj = None
category, __ = self.category.objects.get_or_create(
keyword=keyword, translation=translation_obj)
return category
def create_new_questiongroup(
self, keyword: str, translation: dict or int or None) -> Questiongroup:
if isinstance(translation, dict):
translation_obj = self.create_new_translation(
translation_type='questiongroup', **translation)
elif isinstance(translation, int):
translation_obj = self.translation.objects.get(pk=translation)
else:
translation_obj = None
configuration = {}
questiongroup, __ = self.questiongroup.objects.get_or_create(
keyword=keyword, translation=translation_obj,
configuration=configuration)
return questiongroup
def create_new_question(
self, keyword: str, translation: dict or int, question_type: str,
values: list=None, configuration: dict=None) -> Key:
"""
Create and return a new question (actually, in DB terms, a key), with a
translation.
"""
if isinstance(translation, dict):
translation_obj = self.create_new_translation(
translation_type='key', **translation)
else:
translation_obj = self.translation.objects.get(pk=translation)
configuration_data = configuration if configuration is not None else {}
configuration_data.update({'type': question_type})
try:
key = self.key.objects.get(keyword=keyword)
key.translation = translation_obj
key.configuration = configuration_data
key.save()
except ObjectDoesNotExist:
key = self.key.objects.create(
keyword=keyword,
translation=translation_obj,
configuration=configuration_data
)
if values is not None:
existing_values = key.values.all()
for new_value in values:
if new_value not in existing_values:
key.values.add(new_value)
return key
def create_new_value(
self, keyword: str, translation: dict or int, order_value: int=None,
configuration: dict=None, configuration_editions: list=None) -> Value:
"""
Create and return a new value, with a translation.
"""
if isinstance(translation, dict):
translation_obj = self.create_new_translation(
translation_type='value',
translation_keys=configuration_editions, **translation)
else:
translation_obj = self.translation.objects.get(pk=translation)
try:
value = self.value.objects.get(keyword=keyword)
value.translation = translation_obj
value.order_value = order_value
value.configuration = configuration
value.save()
except ObjectDoesNotExist:
value = self.value.objects.create(
keyword=keyword, translation=translation_obj,
order_value=order_value, configuration=configuration)
return value
def create_new_values_list(self, values_list: list) -> list:
"""Create and return a list of simple values."""
return [
self.create_new_value(
keyword=k,
translation={
'label': {
'en': l
}
})
for k, l in values_list
]
def add_new_value(
self, question_keyword: str, value: Value, order_value: int=None):
"""
Add a new value to an existing question.
"""
key = self.key.objects.get(keyword=question_keyword)
if order_value and not key.values.filter(pk=value.pk).exists():
# If order_value is provided and the value was not yet added to the
# question, update the ordering of the existing values.
key.values.filter(
order_value__gte=order_value
).update(
order_value=F('order_value') + 1
)
key.values.add(value)
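    # Illustrative sketch (not part of the original code; keywords are
    # hypothetical): inserting a value at position 2 shifts every existing
    # value of the question with order_value >= 2 up by one before adding it.
    #
    # >>> maize = self.create_new_value(
    # ...     keyword='value_maize',
    # ...     translation={'label': {'en': 'Maize'}},
    # ...     order_value=2)
    # >>> self.add_new_value(
    # ...     question_keyword='question_crops', value=maize, order_value=2)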
def get_value(self, keyword: str) -> Value:
return self.value.objects.get(keyword=keyword)
def get_question(self, keyword: str) -> Key:
return self.key.objects.get(keyword=keyword)
def get_questiongroup(self, keyword: str) -> Questiongroup:
return self.questiongroup.objects.get(keyword=keyword)
def find_in_data(self, path: tuple, **data: dict) -> dict:
"""
Helper to find and return an element inside a configuration data dict.
Provide a path with keywords pointing to the desired element.
Drills down to the element assuming the following hierarchy of
configuration data:
"data": {
"sections": [
{
"keyword": "<section_keyword>",
"categories": [
{
"keyword": "<category_keyword>",
"subcategories": [
{
"keyword": "<subcategory_keyword>"
"questiongroups": [
{
"keyword": "<questiongroup_keyword>",
"questions": [
{
"keyword": "<question_keyword>"
}
]
}
]
}
]
}
]
}
],
"modules": [
"cca"
]
}
"""
for hierarchy_level, path_keyword in enumerate(path):
# Get the list of elements at the current hierarchy.
element_list = data[self.hierarchy[hierarchy_level]]
# Find the element by its keyword.
data = next((item for item in element_list
if item['keyword'] == path_keyword), None)
if data is None:
raise KeyError(
'No element with keyword %s found in list of %s' % (
path_keyword, self.hierarchy[hierarchy_level]))
return data
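    # Illustrative usage sketch (not part of the original code); the keywords
    # are hypothetical and the data follows the hierarchy documented above:
    #
    # >>> self.find_in_data(
    # ...     path=('section_general', 'cat_land_use', 'subcat_crops'),
    # ...     **configuration_data)
    # {'keyword': 'subcat_crops', 'questiongroups': [...]}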
def update_config_data(self, path: tuple, updated, level=0, **data):
"""
Helper to update a portion of the nested configuration data dict.
"""
current_hierarchy = self.hierarchy[level]
# Make a copy of the current data, but reset the children.
new_data = copy.deepcopy(data)
new_data[current_hierarchy] = []
for element in data[current_hierarchy]:
if element['keyword'] != path[0]:
new_element = element
elif len(path) > 1:
new_element = self.update_config_data(
path=path[1:], updated=updated, level=level+1, **element)
else:
new_element = updated
new_data[current_hierarchy].append(new_element)
return new_data
def update_data(self, qg_keyword, q_keyword, updated, **data: dict) -> dict:
"""
Helper to update a question of the questionnaire data dict.
"""
questiongroup_data = data.get(qg_keyword, [])
if not questiongroup_data:
return data
updated_questiongroup_data = []
for qg_data in questiongroup_data:
if q_keyword in qg_data:
qg_data[q_keyword] = updated
updated_questiongroup_data.append(qg_data)
data[qg_keyword] = updated_questiongroup_data
return data
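    # Illustrative sketch (not part of the original code; keywords are
    # hypothetical): questionnaire data maps each questiongroup keyword to a
    # list of dicts, so updating a question rewrites it in every list entry.
    #
    # >>> self.update_data(
    # ...     qg_keyword='qg_crops', q_keyword='question_crop_type',
    # ...     updated=['value_maize'],
    # ...     **{'qg_crops': [{'question_crop_type': ['value_wheat']}]})
    # {'qg_crops': [{'question_crop_type': ['value_maize']}]}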
def add_new_module(self, updated, **data: dict) -> dict:
"""
Helper to add a module to the configuration
"""
# Modules data is fetched
module_data = data.get(self.hierarchy_modules, [])
if not module_data:
return data
# New module is appended
module_data.append(updated)
# Questionnaire configuration is updated with new module and returned
data[self.hierarchy_modules] = module_data
return data
def append_translation(self, update_pk: int, **data):
"""
Helper to append texts (for choices, checkboxes, labels, etc.).
"""
obj = self.translation.objects.get(pk=update_pk)
obj.data.update(data)
obj.save()
class Operation:
"""
Data structure for an 'operation' method.
Centralized wrapper for all operations, so they can be extended / modified
in a single class.
"""
default_template = 'configuration/partials/release_note.html'
def __init__(self, transform_configuration: callable, release_note: str, **kwargs):
"""
Args:
transform_configuration: callable for the update on the configuration data
release_note: string with release note
**kwargs:
transform_questionnaire: callable. Used to transform the
questionnaire data, e.g. for deleted/moved questions.
"""
self.transform_configuration = transform_configuration
self.release_note = release_note
self.template_name = kwargs.get('template_name', self.default_template)
self.transform_questionnaire = kwargs.get('transform_questionnaire')
def migrate(self, **data) -> dict:
return self.transform_configuration(**data)
def render(self) -> str:
return render_to_string(
template_name=self.template_name,
context={'note': self.release_note}
)
def update_questionnaire_data(self, **data):
if self.transform_questionnaire:
return self.transform_questionnaire(**data)
return data
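    # Illustrative sketch (not part of the original code): an edition would
    # typically declare its changes as Operation instances, where
    # ``add_soil_question`` is a hypothetical configuration-transforming
    # method defined on the edition class.
    #
    # >>> op = Operation(
    # ...     transform_configuration=self.add_soil_question,
    # ...     release_note='Added a new question about soil type.')
    # >>> new_config_data = op.migrate(**config_data)
    # >>> note_html = op.render()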
| 35.376344
| 101
| 0.599392
| 16,090
| 0.978116
| 109
| 0.006626
| 1,186
| 0.072097
| 0
| 0
| 5,109
| 0.310578
|
a78ce58146e32ab5bc583a0b5ea144d7df99f985
| 10,152
|
py
|
Python
|
EyePatterns/main_test_all_clusters.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | 1
|
2021-12-07T08:02:30.000Z
|
2021-12-07T08:02:30.000Z
|
EyePatterns/main_test_all_clusters.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | null | null | null |
EyePatterns/main_test_all_clusters.py
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import distance
from matplotlib import style
from clustering_algorithms.affinity_propagation import AffinityPropagation
from clustering_algorithms.custom_k_means import KMeans
from clustering_algorithms.custom_mean_shift import MeanShift
from clustering_algorithms.custom_mean_shift_string_edition import MeanShiftStringEdition
from clustering_algorithms.dbscan import DbScan
from prepare_data.format_sequences import format_sequences_from_student
from utils.e_mine import e_mine_find_common_scanpath
from utils.string_compare_algorithm import levenstein_sequence_similarity, is_string_similar, needleman_wunsch, \
needleman_wunsch_with_penalty
import numpy as np
# def initialize_2D_number_data_and_plot_them():
# number_data = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11], [8, 2], [10, 2], [9, 3]])
# # plot data
# plt.scatter(number_data[:, 0], number_data[:, 1])
# plt.show()
# return number_data
#
#
# def test_k_means_with_numbers_then_plot_results():
# clf = KMeans(k=3)
# clf.fit(number_data)
#
# for centroid in clf.centroids:
# plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],
# marker="o", color="k", s=150, linewidths=5)
#
# for classification in clf.classifications:
# color = colors[classification]
# for featureset in clf.classifications[classification]:
# plt.scatter(featureset[0], featureset[1], marker="x", color=color,
# s=150, linewidths=5)
# plt.show()
#
#
# def test_mean_shift_with_numbers_then_plot_results():
# clf_ms = MeanShift()
# clf_ms.fit(number_data)
# plt.scatter(number_data[:, 0], number_data[:, 1], s=150)
# centroids = clf_ms.centroids
# for c in centroids:
# plt.scatter(centroids[c][0], centroids[c][1], color='k', marker="*", s=150)
# plt.show()
def initialize_string_sequences(student_name):
# print(format_sequences_from_student(student_name))
return format_sequences_from_student(student_name)
# return ["ACCAEF", "ACCEF", "AACF", "CCCEF", "CCAACCF", "CCACF"]
def print_description():
print("***************************************************")
print("NAME OF ALGORITHM")
print("- *CLUSTER REPRESENTER* [CLUSTER MEMBER, CLUSTER MEMBER, CLUSTER MEMBER]")
print("***************************************************")
def test_and_print_results_string_k_means_with_levenshtein_distance():
kmeans_alg = KMeans(k=3, distance_function=distance.levenshtein, find_average_function=e_mine_find_common_scanpath,
check_is_optimized_function=is_string_similar)
kmeans_alg.fit(string_data)
print_k_means_results(kmeans_alg, "Levenshtein")
def test_and_print_results_string_k_means_with_needleman_wunsch_distance():
kmeans_alg = KMeans(k=3, distance_function=needleman_wunsch, find_average_function=e_mine_find_common_scanpath,
check_is_optimized_function=is_string_similar)
kmeans_alg.fit(string_data)
print_k_means_results(kmeans_alg, "Needleman-Wunsch")
def test_and_print_results_string_k_means_with_needleman_wunsch_distance_with_extra_penalty_points():
kmeans_alg = KMeans(k=3, distance_function=needleman_wunsch_with_penalty,
find_average_function=e_mine_find_common_scanpath,
check_is_optimized_function=is_string_similar)
kmeans_alg.fit(string_data)
print_k_means_results(kmeans_alg, "Needleman-Wunsch with additional penalty")
def print_k_means_results(kmeans_alg, distance_algorithm):
centroid_cluster_map_kmeans = {}
for i in range(0, len(kmeans_alg.centroids)):
centroid_cluster_map_kmeans[kmeans_alg.centroids[i]] = kmeans_alg.classifications[i]
print()
print("K Means string edition with %s distance algorithm" % distance_algorithm)
for centroid in centroid_cluster_map_kmeans:
print(" - *%s* %s" % (centroid, centroid_cluster_map_kmeans[centroid]))
def test_and_print_results_string_mean_shift_with_levenshtein_distance():
mean_shift_string_edition = MeanShiftStringEdition()
mean_shift_string_edition.fit(string_data)
print_mean_shift_results(mean_shift_string_edition, "Levenshtein")
def test_and_print_results_string_mean_shift_with_needleman_wunsch_distance():
mean_shift_string_edition = MeanShiftStringEdition(distance_function=needleman_wunsch)
mean_shift_string_edition.fit(string_data)
print_mean_shift_results(mean_shift_string_edition, "Needleman-Wunsch")
def test_and_print_results_string_mean_shift_with_needleman_wunsch_distance_with_extra_penalty_points():
mean_shift_string_edition = MeanShiftStringEdition(distance_function=needleman_wunsch_with_penalty)
mean_shift_string_edition.fit(string_data)
print_mean_shift_results(mean_shift_string_edition, "Needleman-Wunsch with additional penalty")
def print_mean_shift_results(mean_shift_string_edition, distance_algorithm):
print()
print("Mean Shift string edition with %s distance algorithm" % distance_algorithm)
for centroid in mean_shift_string_edition.centroids:
print(" - *%s*" % mean_shift_string_edition.centroids[centroid])
def test_and_print_results_string_affinity_propagation_with_levenstein_distance():
data_as_array = np.asarray(string_data)
lev_similarity_scores = -1 * np.array(
[[distance.levenshtein(w1, w2) for w1 in data_as_array] for w2 in data_as_array])
affinity_propagation_alg = AffinityPropagation()
affinity_propagation_alg.fit(lev_similarity_scores)
print_affinity_propagation_results(affinity_propagation_alg, data_as_array, "Levenshtein")
def test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance():
data_as_array = np.asarray(string_data)
lev_similarity_scores = -1 * np.array(
[[needleman_wunsch(w1, w2) for w1 in data_as_array] for w2 in data_as_array])
affinity_propagation_alg = AffinityPropagation()
affinity_propagation_alg.fit(lev_similarity_scores)
print_affinity_propagation_results(affinity_propagation_alg, data_as_array, "Needleman-Wunsch")
def test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance_with_extra_penalty_points():
data_as_array = np.asarray(string_data)
lev_similarity_scores = -1 * np.array(
[[needleman_wunsch_with_penalty(w1, w2) for w1 in data_as_array] for w2 in data_as_array])
affinity_propagation_alg = AffinityPropagation()
affinity_propagation_alg.fit(lev_similarity_scores)
print_affinity_propagation_results(affinity_propagation_alg, data_as_array, "Needleman-Wunsch with additional penalty")
def print_affinity_propagation_results(affinity_propagation_alg, data_as_array, distance_algorithm):
print()
print('Affinity Propagation with %s distance algorithm' % distance_algorithm)
exemplar_features_map = affinity_propagation_alg.get_exemplars_and_their_features(data_as_array)
for exemplar in exemplar_features_map:
print(" - *%s* %s" % (exemplar, exemplar_features_map[exemplar]))
def test_and_print_results_string_db_scan_with_levenstein_distance():
def lev_metric(x, y):
i, j = int(x[0]), int(y[0]) # extract indices
return distance.levenshtein(string_data[i], string_data[j])
db_scan = DbScan()
db_scan.fit(lev_metric, string_data)
print_db_scan_results(db_scan, "Levenshtein")
def test_and_print_results_string_db_scan_with_needleman_wunsch_distance():
def lev_metric(x, y):
i, j = int(x[0]), int(y[0]) # extract indices
return needleman_wunsch(string_data[i], string_data[j])
db_scan = DbScan()
db_scan.fit(lev_metric, string_data)
print_db_scan_results(db_scan, "Needleman-Wunsch")
def test_and_print_results_string_db_scan_with_needleman_wunsch_distance_with_extra_penalty_points():
def lev_metric(x, y):
i, j = int(x[0]), int(y[0]) # extract indices
return needleman_wunsch_with_penalty(string_data[i], string_data[j])
db_scan = DbScan()
db_scan.fit(lev_metric, string_data)
print_db_scan_results(db_scan, "Needleman-Wunsch with additional penalty")
def print_db_scan_results(db_scan, distance_algorithm):
print()
print('DB Scan with %s distance algorithm' % distance_algorithm)
for cluster in db_scan.get_clusters():
cluster_representer = e_mine_find_common_scanpath(db_scan.get_clusters()[cluster])
print(" - *%s* %s" % (cluster_representer, db_scan.get_clusters()[cluster]))
'''
1# Initialize number collection and plot style
'''
# style.use('ggplot')
# number_data = initialize_2D_number_data_and_plot_them()
# colors = 10 * ["g", "r", "c", "b", "k"]
'''
Test classification algorithms with numbers
'''
# test_k_means_with_numbers_then_plot_results()
# test_mean_shift_with_numbers_then_plot_results()
'''
2# Initialize string collection and print description on printed form
'''
student_name = "student_1"
string_data = initialize_string_sequences(student_name)
print_description()
'''
Test classification algorithms with strings
'''
test_and_print_results_string_k_means_with_levenshtein_distance()
test_and_print_results_string_k_means_with_needleman_wunsch_distance()
test_and_print_results_string_k_means_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_mean_shift_with_levenshtein_distance()
test_and_print_results_string_mean_shift_with_needleman_wunsch_distance()
test_and_print_results_string_mean_shift_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_affinity_propagation_with_levenstein_distance()
test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance()
test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_db_scan_with_levenstein_distance()
test_and_print_results_string_db_scan_with_needleman_wunsch_distance()
test_and_print_results_string_db_scan_with_needleman_wunsch_distance_with_extra_penalty_points()
| 42.476987
| 123
| 0.775611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,551
| 0.251281
|
a78d7f529a85265c767d731a1463e302ccbc27fe
| 2,381
|
py
|
Python
|
src/onevision/cv/imgproc/color/integer.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | 2
|
2022-03-28T09:46:38.000Z
|
2022-03-28T14:12:32.000Z
|
src/onevision/cv/imgproc/color/integer.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
src/onevision/cv/imgproc/color/integer.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Conversion between single-channel integer value to 3-channels color image.
Mostly used for semantic segmentation.
"""
from __future__ import annotations
import numpy as np
import torch
from multipledispatch import dispatch
from torch import Tensor
from onevision.cv.core import get_num_channels
from onevision.cv.core import to_channel_first
from onevision.type import TensorOrArray
__all__ = [
"integer_to_color",
"is_color_image",
]
# MARK: - Functional
def _integer_to_color(image: np.ndarray, colors: list) -> np.ndarray:
"""Convert the integer-encoded image to color image. Fill an image with
labels' colors.
Args:
image (np.ndarray):
An image in either one-hot or integer.
colors (list):
List of all colors.
Returns:
color (np.ndarray):
Colored image.
"""
if len(colors) <= 0:
        raise ValueError("No colors are provided.")
# NOTE: Convert to channel-first
image = to_channel_first(image)
# NOTE: Squeeze dims to 2
if image.ndim == 3:
image = np.squeeze(image)
# NOTE: Draw color
r = np.zeros_like(image).astype(np.uint8)
g = np.zeros_like(image).astype(np.uint8)
b = np.zeros_like(image).astype(np.uint8)
for l in range(0, len(colors)):
idx = image == l
r[idx] = colors[l][0]
g[idx] = colors[l][1]
b[idx] = colors[l][2]
rgb = np.stack([r, g, b], axis=0)
return rgb
@dispatch(Tensor, list)
def integer_to_color(image: Tensor, colors: list) -> Tensor:
mask_np = image.numpy()
mask_np = integer_to_color(mask_np, colors)
color = torch.from_numpy(mask_np)
return color
@dispatch(np.ndarray, list)
def integer_to_color(image: np.ndarray, colors: list) -> np.ndarray:
# [C, H, W]
if image.ndim == 3:
return _integer_to_color(image, colors)
# [B, C, H, W]
if image.ndim == 4:
colors = [_integer_to_color(i, colors) for i in image]
colors = np.stack(colors).astype(np.uint8)
return colors
raise ValueError(f"`image.ndim` must be 3 or 4. But got: {image.ndim}.")
def is_color_image(image: TensorOrArray) -> bool:
"""Check if the given image is color encoded."""
if get_num_channels(image) in [3, 4]:
return True
return False
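# Illustrative usage sketch (not part of the original module); the mask and
# color list below are made-up values:
#
# >>> mask   = np.zeros((1, 4, 4), dtype=np.uint8)     # integer-encoded labels
# >>> colors = [(0, 0, 0), (255, 0, 0), (0, 255, 0)]   # one RGB triple per label value
# >>> rgb    = integer_to_color(mask, colors)          # 3-channel color image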
| 25.602151
| 77
| 0.640067
| 0
| 0
| 0
| 0
| 667
| 0.280134
| 0
| 0
| 760
| 0.319194
|
a78db64f92c9f41c5d84dd1c53250b84b8159383
| 5,932
|
py
|
Python
|
doepy/problem_instance.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | 1
|
2020-04-23T13:43:35.000Z
|
2020-04-23T13:43:35.000Z
|
doepy/problem_instance.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | null | null | null |
doepy/problem_instance.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | 1
|
2021-06-13T14:38:32.000Z
|
2021-06-13T14:38:32.000Z
|
"""
MIT License
Copyright (c) 2019 Simon Olofsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
class ProblemInstance:
def __init__ (self, models, num_steps, div_criterion, u_bounds,
u_constraints=[], z_constraints=[]):
self.models = models
self.num_models = len( models )
self.num_meas = models[0].num_meas
self.num_steps = num_steps
self.divergence = div_criterion
self.u_bounds = u_bounds.copy()
self.num_control = u_bounds.shape[0]
# Control input bounds at every step
self.bounds = np.array([ self.u_bounds ] * self.num_steps )
self.bounds = self.bounds.reshape((self.num_steps*self.num_control, 2))
# Lists of constraints
self.u_constraints = u_constraints
if not isinstance(self.u_constraints, list):
self.u_constraints = [ self.u_constraints ]
self.z_constraints = z_constraints
if not isinstance(self.z_constraints, list):
self.z_constraints = [ self.z_constraints ]
# Compute the number of constraints
self.num_constraints = 0
for const in self.u_constraints:
# Control constraints
self.num_constraints += const.num_constraints( self.num_steps )
for const in self.z_constraints:
# Observed state constraints
self.num_constraints += self.num_steps * self.num_models \
* const.num_constraints()
for model in self.models:
# Latent state constraints - for data-driven surrogate models
self.num_constraints += self.num_steps * model.num_x_constraints()
def sample_U (self, check_constraints=False):
# U : sampled control signal
# C : (boolean) checks if U satisfies control constraints
ul, uu = self.u_bounds[:,0], self.u_bounds[:,1]
U = ul + (uu - ul) * np.random.rand(self.num_steps, self.num_control)
if not check_constraints:
return U
C = [ np.all( const(U) >= 0.) for const in self.u_constraints ]
return U, np.all( C )
def __call__ (self, u_flat):
E = self.num_meas
N = self.num_steps
M = self.num_models
D = self.num_control
# Unflatten
U = u_flat.reshape(( N, D ))
# Objective
f = 0.
dfdU = np.zeros( U.shape )
# Constraints
C = np.zeros( self.num_constraints )
dCdU = np.zeros((self.num_constraints,) + U.shape )
# Constraint counter
i_c = 0
# Control constraints
for const in self.u_constraints:
c, dc = const( U, grad=True )
L = const.num_constraints(self.num_steps)
C[ i_c: i_c+L ] = c
dCdU[ i_c: i_c+L ] = dc
i_c += L
# Initial states
x, s, dxdU, dsdU = [], [], [], []
for model in self.models:
x.append( model.x0 )
s.append( model.x0_covar )
dxdU.append( np.zeros(( N, model.num_states, D)) )
dsdU.append( np.zeros(( N, model.num_states, model.num_states, D)) )
model.initialise_x_constraints()
Z = np.zeros(( M, E ))
S = np.zeros(( M, E, E ))
dZdU = np.zeros(( M, E, N, D))
dSdU = np.zeros(( M, E, E, N, D))
def gradchain (do, dxdU, dsdU, i, j):
dMdU = np.matmul( do.dMdx, dxdU[i][j] ) \
+ np.einsum( 'ijk,jkn->in', do.dMds, dsdU[i][j] )
dSdU = np.matmul( do.dSdx, dxdU[i][j] ) \
+ np.einsum( 'imjk,jkn->imn', do.dSds, dsdU[i][j] )
return dMdU, dSdU
# Iterate over control sequence
for n, u in enumerate( U ):
dZdU.fill(0.)
dSdU.fill(0.)
# Predictive distributions at time n for model i
for i, model in enumerate( self.models ):
x[i], s[i], dox = model.predict_x_dist(x[i], s[i], u, grad=True)
Z[i], S[i], doy = model.predict_z_dist(x[i], s[i], grad=True)
for j in range( n+1 ):
dxdU[i][j], dsdU[i][j] = gradchain(dox, dxdU, dsdU, i, j)
if j == n:
dxdU[i][j] += dox.dMdu
dsdU[i][j] += dox.dSdu
dZdU[i,:,j], dSdU[i,:,:,j] = gradchain(doy, dxdU, dsdU, i, j)
# Update latent state constraints
model.update_x_constraints(x[i], s[i], dxdU[i], dsdU[i])
# State constraint for model i at time n
for const in self.z_constraints:
c, dcdZ, dcdS = const(Z[i], S[i], step=n, grad=True)
L = const.num_constraints()
C[ i_c: i_c+L ] = c
dCdU[ i_c: i_c+L ] = np.einsum('ij,jnk->ink',dcdZ,dZdU[i]) \
+ np.einsum('ijk,jknd->ind',dcdS,dSdU[i])
i_c += L
# Divergence between predictive distributions at time n
for i, model in enumerate( self.models ):
# Add measurement noise covariance
S[i] += model.y_covar
ftmp, dDdY, dDdS = self.divergence(Z, S, grad=True)
f -= ftmp ## Minimisation -> negative maximisation
for j in range( n+1 ):
dfdU[j] -= np.einsum('ij,ijk->k', dDdY, dZdU[:,:,j] ) \
+ np.einsum('ijk,ijkl->l', dDdS, dSdU[:,:,:,j])
# latent state constraints
for i, model in enumerate( self.models ):
res = model.get_x_constraints()
            if res is not None:
L = res[0].shape[0]
C[ i_c: i_c+L ] = res[0]
dCdU[ i_c: i_c+L ] = res[1]
i_c += L
# flatten
dfdU = dfdU.reshape(u_flat.shape)
dCdU = dCdU.reshape((-1,) + u_flat.shape)
return f, C, dfdU, dCdU
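    # Illustrative usage sketch (not part of the original module); ``models``,
    # ``divergence`` and ``u_bounds`` are assumed to be set up elsewhere:
    #
    # >>> pi = ProblemInstance(models, num_steps=20,
    # ...                      div_criterion=divergence, u_bounds=u_bounds)
    # >>> U = pi.sample_U()
    # >>> f, C, dfdU, dCdU = pi(U.flatten())   # objective, constraints and their gradients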
| 34.289017
| 78
| 0.658968
| 4,831
| 0.814396
| 0
| 0
| 0
| 0
| 0
| 0
| 1,854
| 0.312542
|
a78f4a33fda334438866cc5eacb65a1aca2c29e8
| 1,831
|
py
|
Python
|
snuba/datasets/dataset_schemas.py
|
Appva/snuba
|
988a4312fc9c107bc735fb2295e269b01ef2dea4
|
[
"Apache-2.0"
] | null | null | null |
snuba/datasets/dataset_schemas.py
|
Appva/snuba
|
988a4312fc9c107bc735fb2295e269b01ef2dea4
|
[
"Apache-2.0"
] | null | null | null |
snuba/datasets/dataset_schemas.py
|
Appva/snuba
|
988a4312fc9c107bc735fb2295e269b01ef2dea4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, List, Sequence, Union
from snuba.datasets.schemas import Schema
from snuba.datasets.schemas.tables import TableSchema, WritableTableSchema
class DatasetSchemas(object):
"""
A collection of schemas associated with a dataset, providing access to schemas and DDL functions
"""
def __init__(
self,
read_schema: Schema,
write_schema: Union[WritableTableSchema, None],
intermediary_schemas: Optional[List[Schema]] = None
) -> None:
if intermediary_schemas is None:
intermediary_schemas = []
self.__read_schema = read_schema
self.__write_schema = write_schema
self.__intermediary_schemas = intermediary_schemas
def get_read_schema(self) -> Schema:
return self.__read_schema
def __get_unique_schemas(self) -> Sequence[Schema]:
unique_schemas: List[Schema] = []
all_schemas_with_possible_duplicates = [self.__read_schema]
if self.__write_schema:
all_schemas_with_possible_duplicates.append(self.__write_schema)
all_schemas_with_possible_duplicates.extend(self.__intermediary_schemas)
for schema in all_schemas_with_possible_duplicates:
if schema not in unique_schemas:
unique_schemas.append(schema)
return unique_schemas
def get_create_statements(self) -> Sequence[str]:
return [
schema.get_local_table_definition()
for schema in self.__get_unique_schemas()
if isinstance(schema, TableSchema)
]
def get_drop_statements(self) -> Sequence[str]:
return [
schema.get_local_drop_table_statement()
for schema in self.__get_unique_schemas()
if isinstance(schema, TableSchema)
]
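# Illustrative usage sketch (not part of the original module); assumes
# ``events_read_schema`` and ``events_write_schema`` are TableSchema /
# WritableTableSchema instances defined by a dataset:
#
# >>> schemas = DatasetSchemas(
# ...     read_schema=events_read_schema,
# ...     write_schema=events_write_schema)
# >>> schemas.get_create_statements()   # one CREATE statement per unique TableSchema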
| 33.290909
| 100
| 0.677226
| 1,659
| 0.906062
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.061169
|
a790c9288954c501a2b40dde1e0f624366ddda8c
| 3,039
|
py
|
Python
|
Benchmarking/benchmark_alphabet_increase.py
|
icezyclon/AALpy
|
3c2f05fdbbcdc99b47ba6b918540239568fca17f
|
[
"MIT"
] | 61
|
2021-04-01T10:38:52.000Z
|
2022-03-28T13:44:23.000Z
|
Benchmarking/benchmark_alphabet_increase.py
|
icezyclon/AALpy
|
3c2f05fdbbcdc99b47ba6b918540239568fca17f
|
[
"MIT"
] | 16
|
2021-04-03T20:14:08.000Z
|
2022-02-16T10:21:48.000Z
|
Benchmarking/benchmark_alphabet_increase.py
|
haubitzer/AALpy
|
e5b51742d886d5c5c72ab3e9c20eb349c56e2469
|
[
"MIT"
] | 9
|
2021-04-05T13:43:17.000Z
|
2022-03-09T14:06:17.000Z
|
from statistics import mean
import csv
from aalpy.SULs import DfaSUL, MealySUL, MooreSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine
num_states = 1000
alph_size = 5
repeat = 10
num_increases = 20
states = ['alph_size', alph_size]
times_dfa = ['dfa_pypy_rs']
times_mealy = ['mealy_pypy_rs']
times_moore = ['moore_pypy_rs']
cex_processing = 'rs'
for i in range(num_increases):
print(i)
total_time_dfa = []
total_time_mealy = []
total_time_moore = []
for _ in range(repeat):
alphabet = list(range(alph_size))
dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)
sul = DfaSUL(dfa)
# eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False,
return_data=True, automaton_type='dfa')
total_time_dfa.append(data['learning_time'])
del dfa
del sul
del eq_oracle
mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
sul_mealy = MealySUL(mealy)
# eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,
cache_and_non_det_check=False,
return_data=True, automaton_type='mealy')
total_time_mealy.append(data['learning_time'])
del mealy
del sul_mealy
del eq_oracle
moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
moore_sul = MooreSUL(moore)
# eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40)
eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09)
_, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,
cache_and_non_det_check=False,
return_data=True, automaton_type='moore')
total_time_moore.append(data['learning_time'])
alph_size += 5
states.append(alph_size)
# save data and keep averages
times_dfa.append(round(mean(total_time_dfa), 4))
times_mealy.append(round(mean(total_time_mealy), 4))
times_moore.append(round(mean(total_time_moore), 4))
with open('increasing_alphabet_experiments.csv', 'w') as f:
wr = csv.writer(f, dialect='excel')
wr.writerow(states)
wr.writerow(times_dfa)
wr.writerow(times_mealy)
wr.writerow(times_moore)
| 35.337209
| 115
| 0.699901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 449
| 0.147746
|
a7910f32dd12a019dc980eaf9b89d7426fb179b4
| 2,505
|
py
|
Python
|
makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py
|
Radiian-Arts-Main/Radiian-Arts-BioSource
|
51e08da0b3171fe96badc68780fd0f3381d49738
|
[
"MIT"
] | 1
|
2022-03-12T03:52:55.000Z
|
2022-03-12T03:52:55.000Z
|
makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py
|
Phantori/Radiian-Arts-BioSource
|
51e08da0b3171fe96badc68780fd0f3381d49738
|
[
"MIT"
] | null | null | null |
makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py
|
Phantori/Radiian-Arts-BioSource
|
51e08da0b3171fe96badc68780fd0f3381d49738
|
[
"MIT"
] | 3
|
2020-05-10T16:11:23.000Z
|
2021-05-30T02:11:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** Thomas Larsson, Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Exports proxy mesh to obj
"""
import wavefront
import os
from progress import Progress
import numpy as np
#
# exportObj(human, filepath, config):
#
def exportObj(filepath, config=None):
progress = Progress(0, None)
human = config.human
config.setupTexFolder(filepath)
filename = os.path.basename(filepath)
name = config.goodName(os.path.splitext(filename)[0])
progress(0, 0.3, "Collecting Objects")
objects = human.getObjects(excludeZeroFaceObjs=not config.hiddenGeom)
meshes = [o.mesh for o in objects]
if config.hiddenGeom:
# Disable the face masking on copies of the input meshes
meshes = [m.clone(filterMaskedVerts=False) for m in meshes]
for m in meshes:
# Would be faster if we could tell clone() to do this, but it would
# make the interface more complex.
# We could also let the wavefront module do this, but this would
# introduce unwanted "magic" behaviour into the export function.
face_mask = np.ones(m.face_mask.shape, dtype=bool)
m.changeFaceMask(face_mask)
m.calcNormals()
m.updateIndexBuffer()
progress(0.3, 0.99, "Writing Objects")
wavefront.writeObjFile(filepath, meshes, True, config, filterMaskedFaces=not config.hiddenGeom)
progress(1.0, None, "OBJ Export finished. Output file: %s" % filepath)
| 32.960526
| 99
| 0.686228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,539
| 0.614371
|
a79274aaddcc40eb1292cf7717dedc453646ab72
| 3,245
|
py
|
Python
|
util/plot_model.py
|
libccy/inv.cu
|
bab31a704b24888a99e07148b60266ff703f0968
|
[
"MIT"
] | null | null | null |
util/plot_model.py
|
libccy/inv.cu
|
bab31a704b24888a99e07148b60266ff703f0968
|
[
"MIT"
] | null | null | null |
util/plot_model.py
|
libccy/inv.cu
|
bab31a704b24888a99e07148b60266ff703f0968
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
from os.path import exists
import numpy as np
import pylab
import scipy.interpolate
def read_fortran(filename):
""" Reads Fortran style binary data and returns a numpy array.
"""
with open(filename, 'rb') as f:
# read size of record
f.seek(0)
n = np.fromfile(f, dtype='int32', count=1)[0]
# read contents of record
f.seek(4)
v = np.fromfile(f, dtype='float32')
return v[:-1]
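# Illustrative sketch (not part of the original script): each SPECFEM2D binary
# starts with a 4-byte record-length marker followed by float32 data, which is
# why the first 4 bytes are skipped and the trailing marker is dropped above.
#
# >>> x = read_fortran('proc000000_x.bin')   # 1-D float32 array of mesh x coordinates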
def mesh2grid(v, x, z):
""" Interpolates from an unstructured coordinates (mesh) to a structured
coordinates (grid)
"""
lx = x.max() - x.min()
lz = z.max() - z.min()
nn = v.size
mesh = _stack(x, z)
    nx = int(np.around(np.sqrt(nn*lx/lz)))
    nz = int(np.around(np.sqrt(nn*lz/lx)))
dx = lx/nx
dz = lz/nz
# construct structured grid
x = np.linspace(x.min(), x.max(), nx)
z = np.linspace(z.min(), z.max(), nz)
X, Z = np.meshgrid(x, z)
grid = _stack(X.flatten(), Z.flatten())
# interpolate to structured grid
V = scipy.interpolate.griddata(mesh, v, grid, 'linear')
# workaround edge issues
if np.any(np.isnan(V)):
W = scipy.interpolate.griddata(mesh, v, grid, 'nearest')
for i in np.where(np.isnan(V)):
V[i] = W[i]
return np.reshape(V, (int(nz), int(nx))), x, z
def _stack(*args):
return np.column_stack(args)
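# Illustrative sketch (not part of the original script): interpolate a toy
# unstructured field onto the regular grid; the coordinates are made up.
#
# >>> x = np.random.rand(1000) * 2000.0           # mesh x coordinates
# >>> z = np.random.rand(1000) * 1000.0           # mesh z coordinates
# >>> v = np.sin(x / 300.0) * np.cos(z / 200.0)   # values on the mesh
# >>> V, X, Z = mesh2grid(v, x, z)                # gridded values and grid axes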
if __name__ == '__main__':
""" Plots data on 2-D unstructured mesh
Modified from a script for specfem2d:
http://tigress-web.princeton.edu/~rmodrak/visualize/plot2d
Can be used to plot models or kernels created by inv.cu
SYNTAX
plot_model.py folder_name component_name||file_name (time_step)
e.g. ./plot_model.py output vx 1000
./plot_model.py output proc001000_vx.bin
./plot_model.py example/model/checker vs
"""
istr = ''
if len(sys.argv) > 3:
istr = str(sys.argv[3])
while len(istr) < 6:
istr = '0' + istr
else:
istr = '000000'
# parse command line arguments
x_coords_file = '%s/proc000000_x.bin' % sys.argv[1]
z_coords_file = '%s/proc000000_z.bin' % sys.argv[1]
# check that files actually exist
assert exists(x_coords_file)
assert exists(z_coords_file)
database_file = "%s/%s" % (sys.argv[1], sys.argv[2])
if not exists(database_file):
database_file = "%s/%s.bin" % (sys.argv[1], sys.argv[2])
if not exists(database_file):
database_file = "%s/proc%s_%s.bin" % (sys.argv[1], istr, sys.argv[2])
assert exists(database_file)
# read mesh coordinates
#try:
if True:
x = read_fortran(x_coords_file)
z = read_fortran(z_coords_file)
#except:
# raise Exception('Error reading mesh coordinates.')
# read database file
try:
v = read_fortran(database_file)
except:
raise Exception('Error reading database file: %s' % database_file)
# check mesh dimensions
assert x.shape == z.shape == v.shape, 'Inconsistent mesh dimensions.'
# interpolate to uniform rectangular grid
V, X, Z = mesh2grid(v, x, z)
# display figure
pylab.pcolor(X, Z, V)
locs = np.arange(X.min(), X.max() + 1, (X.max() - X.min()) / 5)
    pylab.xticks(locs, ["%g" % x for x in locs / 1e3])
    locs = np.arange(Z.min(), Z.max() + 1, (Z.max() - Z.min()) / 5)
    pylab.yticks(locs, ["%g" % x for x in locs / 1e3])
pylab.colorbar()
pylab.xlabel('x / km')
pylab.ylabel('z / km')
pylab.gca().invert_yaxis()
pylab.show()
| 24.216418
| 73
| 0.659476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,218
| 0.375347
|
a7955298b73d5f2579d3f326020afc0190964952
| 6,150
|
py
|
Python
|
python/plugins/db_manager/db_plugins/oracle/data_model.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
python/plugins/db_manager/db_plugins/oracle/data_model.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
python/plugins/db_manager/db_plugins/oracle/data_model.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | 1
|
2021-12-25T08:40:30.000Z
|
2021-12-25T08:40:30.000Z
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : mederic.ribreux@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <brush.tyler@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import QTime
from qgis.core import QgsMessageLog
from ..data_model import (TableDataModel,
SqlResultModel,
SqlResultModelAsync,
SqlResultModelTask,
BaseTableModel)
from ..plugin import DbError
from ..plugin import BaseError
class ORTableDataModel(TableDataModel):
def __init__(self, table, parent=None):
self.cursor = None
TableDataModel.__init__(self, table, parent)
if not self.table.rowCount:
self.table.refreshRowCount()
self.table.aboutToChange.connect(self._deleteCursor)
self._createCursor()
def _createCursor(self):
fields_txt = u", ".join(self.fields)
table_txt = self.db.quoteId(
(self.table.schemaName(), self.table.name))
self.cursor = self.db._get_cursor()
sql = u"SELECT {0} FROM {1}".format(fields_txt, table_txt)
self.db._execute(self.cursor, sql)
def _sanitizeTableField(self, field):
# get fields, ignore geometry columns
if field.dataType.upper() == u"SDO_GEOMETRY":
            return (u"CASE WHEN {0} IS NULL THEN NULL ELSE 'GEOMETRY' "
                    u"END AS {0}".format(
                        self.db.quoteId(field.name)))
if field.dataType.upper() == u"DATE":
return u"CAST({} AS VARCHAR2(8))".format(
self.db.quoteId(field.name))
if u"TIMESTAMP" in field.dataType.upper():
return u"TO_CHAR({}, 'YYYY-MM-DD HH:MI:SS.FF')".format(
self.db.quoteId(field.name))
if field.dataType.upper() == u"NUMBER":
if not field.charMaxLen:
return u"CAST({} AS VARCHAR2(135))".format(
self.db.quoteId(field.name))
elif field.modifier:
nbChars = 2 + int(field.charMaxLen) + \
int(field.modifier)
return u"CAST({} AS VARCHAR2({}))".format(
self.db.quoteId(field.name),
str(nbChars))
return u"CAST({0} As VARCHAR2({1}))".format(
self.db.quoteId(field.name), field.charMaxLen)
def _deleteCursor(self):
self.db._close_cursor(self.cursor)
self.cursor = None
def __del__(self):
self.table.aboutToChange.disconnect(self._deleteCursor)
self._deleteCursor()
def getData(self, row, col):
if (row < self.fetchedFrom or
row >= self.fetchedFrom + self.fetchedCount):
margin = self.fetchedCount / 2
if row + margin >= self.rowCount():
start = int(self.rowCount() - margin)
else:
start = int(row - margin)
if start < 0:
start = 0
self.fetchMoreData(start)
# For some improbable cases
if row - self.fetchedFrom >= len(self.resdata):
return None
return self.resdata[row - self.fetchedFrom][col]
def fetchMoreData(self, row_start):
if not self.cursor:
self._createCursor()
self.cursor.scroll(row_start - 1)
self.resdata = self.cursor.fetchmany(self.fetchedCount)
self.fetchedFrom = row_start
class ORSqlResultModelTask(SqlResultModelTask):
def __init__(self, db, sql, parent):
super().__init__(db, sql, parent)
def run(self):
try:
self.model = ORSqlResultModel(self.db, self.sql, None)
except BaseError as e:
self.error = e
QgsMessageLog.logMessage(e.msg)
return False
return True
def cancel(self):
self.db.connector.cancel()
SqlResultModelTask.cancel(self)
class ORSqlResultModelAsync(SqlResultModelAsync):
def __init__(self, db, sql, parent):
super().__init__()
self.task = ORSqlResultModelTask(db, sql, parent)
self.task.taskCompleted.connect(self.modelDone)
self.task.taskTerminated.connect(self.modelDone)
class ORSqlResultModel(SqlResultModel):
def __init__(self, db, sql, parent=None):
self.db = db.connector
t = QTime()
t.start()
c = self.db._execute(None, str(sql))
self._affectedRows = 0
data = []
header = self.db._get_cursor_columns(c)
if not header:
header = []
try:
if len(header) > 0:
data = self.db._fetchall(c)
self._affectedRows = len(data)
except DbError:
# nothing to fetch!
data = []
header = []
self._secs = t.elapsed() / 1000.0
del t
BaseTableModel.__init__(self, header, data, parent)
# commit before closing the cursor to make sure that the
# changes are stored
self.db._commit()
c.close()
del c
| 33.064516
| 77
| 0.532195
| 4,541
| 0.738134
| 0
| 0
| 0
| 0
| 0
| 0
| 1,662
| 0.270156
|
a795b31c14f03e20f3936fc4c94e68f3ce593493
| 1,711
|
py
|
Python
|
Opencv1/prayog4.py
|
priyanshgupta1998/Image_Processing
|
d1848838ff1acd6dfcf551b99380a8bbf9c879fa
|
[
"MIT"
] | null | null | null |
Opencv1/prayog4.py
|
priyanshgupta1998/Image_Processing
|
d1848838ff1acd6dfcf551b99380a8bbf9c879fa
|
[
"MIT"
] | null | null | null |
Opencv1/prayog4.py
|
priyanshgupta1998/Image_Processing
|
d1848838ff1acd6dfcf551b99380a8bbf9c879fa
|
[
"MIT"
] | null | null | null |
# Using the TRACKBAR GUI component:
# we can perform an action by moving the slider with the cursor
import cv2
import numpy as np
def funk():  # callback function for the trackbars; no action is needed here, so it just passes
pass
def main():
    img1 = np.zeros((512,512,3) , np.uint8)  # create a black image of size 512 x 512
    windowName = 'openCV BGR color Palette'  # name of the window that will appear when this program runs
    cv2.namedWindow(windowName)  # create the named window
    # Create one trackbar (scroll label) per colour channel; each can be moved manually over the range 0-255
    cv2.createTrackbar('B',windowName , 0 , 255 , funk)
    cv2.createTrackbar('G',windowName , 0 , 255 , funk)
    cv2.createTrackbar('R',windowName , 0 , 255 , funk)
while(True):
        cv2.imshow(windowName , img1)  # display the image in the window we created earlier
        if cv2.waitKey(20)>0:  # press any key to close the window
            break
        # Read the current position of each trackbar; whenever a slider moves,
        # the values below change and the image colour is updated accordingly.
        blue = cv2.getTrackbarPos('B' , windowName)
        green = cv2.getTrackbarPos('G' , windowName)
        red = cv2.getTrackbarPos('R' , windowName)
        img1[:] = [blue, green, red]  # fill the whole image with the selected BGR colour
        print(blue , green , red)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 39.790698
| 157
| 0.64173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 889
| 0.519579
|
a795b8548eef5888745b44763ff2a163b2d8f0d7
| 9,814
|
py
|
Python
|
qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 2
|
2018-03-29T08:26:17.000Z
|
2019-06-17T10:56:19.000Z
|
qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2022-03-22T20:26:08.000Z
|
2022-03-22T20:26:08.000Z
|
qiushaoyi/study_practices/qiushaoyi_study_path/L2_Module.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2019-02-18T10:44:20.000Z
|
2019-02-18T10:44:20.000Z
|
import sys
from PIL import Image
from enum import Enum, unique
# Modules: cd into the folder path, then ls, and read the file! From the parent level the path is: ./
im = Image.open('test.png')
print(im.format, im.size, im.mode)
# Procedural programming: everything as functions!!!
# Object Oriented Programming (OOP): everything as objects (containing data and functions)!!! Highly encapsulated
std1 = {'name': 'Machileg', 'score': 89}
std2 = {'name': 'mahfljg', 'score': 67}
def print_score(std):
print('name:%s,score:%s' % (std['name'], std['score']))
print_score(std1)
class Student(object):
"""docstring for Student"""
def __init__(self, name, score):
super(Student, self).__init__()
self.name = name
self.score = score
def print_score(self):
print('name:%s,score:%s' % (self.name, self.score))
def get_grade(self):
if self.score >= 90:
return 'A'
elif self.score >= 60:
return 'B'
else:
return 'C'
std666 = Student(std2['name'], std2['score'])
std666.print_score()
print(std666.get_grade())
# Starting with classes: an instance variable whose name begins with __ becomes a private variable, not directly accessible from outside; use get and set methods instead
# Variables: 1. normal variable self.name  2. private variable self.__name  3. special variable self.__name__
class Human(object):
"""docstring for Human"""
def __init__(self, name, sex, height):
super(Human, self).__init__()
self.__name = name
self.__sex = sex
self.height = height
def print_hunman(self):
print('%s:%s:%s' % (self.__name, self.__sex, self.height))
def get_name(self):
return self.__name
def set_name(self, name):
self.__name = name
def get_sex(self):
return self.__sex
def set_sex(self, sex):
self.__sex = sex
def get_height(self):
return self.height
def set_height(self, height):
self.height = height
human1 = Human('Borjglj', '男', 160)
human1.print_hunman()
print(human1.set_name('改名就是这么简单'), \
human1.get_name(), human1.get_sex(), human1.get_height())
# Inheritance: Python supports multiple inheritance (behaviour is "inherited"). Polymorphism: many forms
'''
The benefit of polymorphism: any subclass can call the base-class methods, and the base-class method acts
as a bridge connecting all subclasses. By taking the base class as a parameter, a func created in any
subclass can call the methods of any other subclass through that func.
'''
class RunJump(object):
print('能跑和跳的都来这里')
class Animal(object):
def run(self):
print('Animal is running...')
class Dog(Animal, RunJump):
def run(self):
print('Dog is running...')
def eat(self):
print('Dog Eating meat...')
def invokeAnimalSubclass(self, animal):
animal.run()
class Cat(Animal, RunJump):
def run(self, t):
print('Cat is running...')
def eat(self):
print('Cat Eating meat...')
def invokeAnimalSubclass(self, animal):
animal.run()
dog = Dog()
cat = Cat()
dog.run()
dog.eat()
# Polymorphism: not only can subclasses call and override base-class methods; from within one subclass you can, through the base class, invoke the base-class method of any other subclass
dog.invokeAnimalSubclass(Cat())
cat.run()
cat.eat()
cat.invokeAnimalSubclass(Dog())
# Checking whether an object is of a given class: type() only matches the exact class (not base classes); use isinstance() and issubclass()
if issubclass(Dog, Animal):
print('Dog 是Animal的子类')
if isinstance(dog, Dog):
print('dog 是Dog对象')
if isinstance(dog, Animal):
print('dog 是Animal对象')
if type(dog) == Dog:
print('type来判断dog 是Dog')
if type(dog) == Animal:
print('type不能用来判断dog 是Animal对象')
if type(cat) == Cat:
print('type来判断cat 是Cat对象,获取属性和变量', dir(cat))
class MyDog(object):
    # __slots__ = ('typeName','name','score','height')  would restrict which attributes instances of this class can add, e.g. __slots__ = ('name','age')
typeName = '我是变量'
@property
def score(self):
return self._score
@score.setter
def score(self, value):
if not isinstance(value, str):
            raise ValueError('score must be a string')
self._score = value
def __init__(self, height):
super(MyDog, self).__init__()
self.height = height
        self.a, self.b = 0, 1  # initialize two default counters a and b
def set_name(self, name):
self.name = name
def __len__(self):
return 100
def __str__(self):
return 'MyDog object (name:%s)' % self.name
def __str__(self):
return 'MyDog object (typeName:%s name:%s height:%s score:%s)' \
% (self.typeName, self.name, self.height, self.score)
__repr__ = __str__
    # produce list-like data via iteration
def __iter__(self):
return self
def __next__(self):
self.a, self.b = self.b, self.a + self.b
if self.a > 100000:
raise StopIteration
return self.a
    # slice out the corresponding list data
def __getitem__(self, n):
        if isinstance(n, int):  # n is an index
a, b = 1, 1
for x in range(n):
a, b = b, a + b
return a
        if isinstance(n, slice):  # n is a slice
start = n.start
stop = n.stop
if start is None:
start = 0
a, b = 1, 1
L = []
for x in range(stop):
if x >= start:
L.append(a)
a, b = b, a + b
return L
    def __getattr__(self, attr):  # fallback invoked when an attribute is not defined
if attr == 'qsy':
return lambda: 25
raise AttributeError('Dog object has no attribute %s' % attr)
def __call__(self):
        print('通过对象自身调用,即可调用该方法:My name is %s' % self.name)
myDog = MyDog(150)
myDog.typeName = '我就改你的type类型怎么了'
myDog.score = '100分'
myDog.set_name('我类个去')
print(myDog, hasattr(myDog, 'name'), setattr(myDog, 'name', '设置下新的name'), getattr(myDog, 'name'))
for n in myDog:
print(n)
print(myDog[4])
print(myDog[0:5])
print(myDog.qsy()) # myDog.qsyNo()
myDog()
print('某对象是否可作为函数,被调用', callable(myDog), callable(dog))
# To write an SDK you would otherwise write one method per URL-based API; with a fully dynamic __getattr__ we can implement chained calls instead
class Chain(object):
"""docstring for Chain"""
def __init__(self, path=''):
super(Chain, self).__init__()
self._path = path
def __getattr__(self, path):
return Chain('%s/%s' % (self._path, path))
def __str__(self):
return self._path
__repr__ = __str__
chain = Chain()
print(chain.status.user.timeline.list)
# Enumerations
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
@unique  # this decorator checks that no values are duplicated
class Weekday(Enum):
Sun = 0
Mon = 1
Tue = 2
Wed = 3
Thu = 4
Fri = 5
Sat = 6
day1 = Weekday.Mon
day2 = Weekday.Sun
print(day1.value, day2)
# 1. Define a class, then create instances
# Method 1: via the class statement
# Hello is a class whose type is type, while h is an instance whose type is the class Hello
class Hello(object):
def hello(self, name='world'):
print('Hello,%s' % name)
h = Hello()
h.hello()
# Method 2: create the class via type()
def fn(self, name='world'):
print('Hello 1,%s.' % name)
Hello1 = type('Hello', (object,), dict(hello=fn, sex=vars))
h3 = Hello1()
h3.hello()
# 2. Define a metaclass, use it to create classes (a class is an instance of its metaclass), then create instances
class ListMetaclass(type):  # metaclass names conventionally end with "Metaclass"
def __new__(cls, name, bases, attrs):
attrs['add'] = lambda self, value: self.append(value)
return type.__new__(cls, name, bases, attrs)
class MyList(list, metaclass=ListMetaclass):
pass
L = MyList()
L.add(12)
L.add(150)
print(L)
# Sooner or later you will need to modify class definitions through a metaclass; ORM is a typical example:
# an ORM framework ("Object Relational Mapping") maps one class to one table
class Field(object):
"""docstring for Field"""
def __init__(self, name, column_type):
super(Field, self).__init__()
self.name = name
self.column_type = column_type
def __str__(self):
return ('<%s:%s>' % (self.__class__.__name__, self.name))
class StringField(Field):
"""docstring for StringField"""
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
class ModelMetaclass(type):  # define a metaclass
def __new__(cls, name, bases, attrs):
if name == 'Model':
return type.__new__(cls, name, bases, attrs)
print('Found model:%s' % name)
mappings = dict()
for k, v in attrs.items():
if isinstance(v, Field):
print('Found mapping:%s==>%s' % (k, v))
mappings[k] = v
for k in mappings.keys():
attrs.pop(k)
        attrs['__mappings__'] = mappings  # save the mapping between attributes and columns
        attrs['__table__'] = name  # assume the table name is identical to the class name
print('test查看attrs:%s' % attrs)
return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):  # create a class named Model via the metaclass
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
    # create / delete / update / query (CRUD)
def add(self):
fields = []
params = []
args = []
for k, v in self.__mappings__.items():
fields.append(v.name)
params.append('?')
args.append(getattr(self, k, None))
sql = 'insert into %s(%s) values(%s)' % (self.__table__, ','.join(fields), ','.join(params))
print('SQL:%s' % sql) # SQL:insert into User(id1,username1,email1,password1) values(?,?,?,?)
print('ARGS:%s' % str(
args)) # ARGS:[123456, 'Michael', '1129331905@qq.com', 'my-pwdTest'] 或 [78910, 'Jack', '1279487948@qq.com', 'you-pwdTest']
def dele(self):
pass
def update(self):
pass
def find(self):
pass
# testing code: the User class maps to one table
class User(Model):
id = IntegerField('id1')
name = StringField('username1')
email = StringField('email1')
password = StringField('password1')
u1 = User(id=123456, name='Michael', email='1129331905@qq.com', password='my-pwdTest')
u2 = User(id=78910, name='Jack', email='1279487948@qq.com', password='you-pwdTest')
u1.add()
u2.add()
| 23.99511
| 135
| 0.598431
| 7,047
| 0.631678
| 0
| 0
| 392
| 0.035138
| 0
| 0
| 3,859
| 0.345913
|
a795e8692fbeff0ef48524f2158106ba51c6886a
| 1,404
|
py
|
Python
|
2019/day-03/3.py
|
Valokoodari/advent-of-code
|
c664987f739e0b07ddad34bad87d56768556a5a5
|
[
"MIT"
] | 2
|
2021-12-27T18:59:11.000Z
|
2022-01-10T02:31:36.000Z
|
2019/day-03/3.py
|
Valokoodari/advent-of-code-2019
|
c664987f739e0b07ddad34bad87d56768556a5a5
|
[
"MIT"
] | null | null | null |
2019/day-03/3.py
|
Valokoodari/advent-of-code-2019
|
c664987f739e0b07ddad34bad87d56768556a5a5
|
[
"MIT"
] | 2
|
2021-12-23T17:29:10.000Z
|
2021-12-24T03:21:49.000Z
|
inputFile = "3-input"
outputFile = "3-output"
dir = {'L': [-1,0],'R': [1,0],'U': [0,1],'D': [0,-1]}
def readFile():
file = open(inputFile, "r")
A,B = file.readlines()
A,B = [line.split(",") for line in [A,B]]
file.close()
return A,B
def writeFile(a, b):
file = open(outputFile, "w+")
file.write("Part 1: " + a + "\n")
file.write("Part 2: " + b)
file.close()
def mapCommands(A):
cx, cy, step = 0, 0, 0
mapped = [[0]*20000 for _ in range(20000)]
for cmd in A:
ax,ay = dir[cmd[0]][0],dir[cmd[0]][1]
for _ in range(int(cmd[1:])):
cx += ax
cy += ay
step += 1
mapped[cx+10000][cy+10000] = step
return mapped
def findIntersects(A, B):
    mapped = mapCommands(A)
cx, cy, step = 0, 0, 0
dist = 10000000
steps = 10000000
for cmd in B:
for _ in range(int(cmd[1:])):
cx += dir[cmd[0]][0]
cy += dir[cmd[0]][1]
step += 1
aStep = mapped[cx+10000][cy+10000]
aDist = abs(cx)+abs(cy)
if aStep != 0:
if (dist > aDist): dist = aDist
if (steps > aStep + step): steps = aStep + step
return dist, steps
def main():
A,B = readFile()
solA, solB = findIntersects(A, B)
print(solA, solB)
#writeFile(str(solA), str(solB))
if __name__ == '__main__': main()
| 24.206897
| 63
| 0.491453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.076211
|
a795f6ac4554f8ba46a00dda59b825a3e846ebb5
| 942
|
py
|
Python
|
notebook/3_tst_3dep.py
|
ACWI-SSWD/nldi_el_serv
|
35276014728662b1a9bdbbbf4790c562c1df760c
|
[
"CC0-1.0"
] | null | null | null |
notebook/3_tst_3dep.py
|
ACWI-SSWD/nldi_el_serv
|
35276014728662b1a9bdbbbf4790c562c1df760c
|
[
"CC0-1.0"
] | 1
|
2021-03-04T18:50:19.000Z
|
2021-03-04T18:50:19.000Z
|
notebook/3_tst_3dep.py
|
ACWI-SSWD/nldi_el_serv
|
35276014728662b1a9bdbbbf4790c562c1df760c
|
[
"CC0-1.0"
] | null | null | null |
from nldi_el_serv.XSGen import XSGen
from nldi_el_serv.dem_query import query_dems_shape
import py3dep
from pynhd import NLDI
gagebasin = NLDI().get_basins("06721000").to_crs('epsg:3857')
gageloc = NLDI().getfeature_byid("nwissite", "USGS-06721000").to_crs('epsg:3857')
cid = gageloc.comid.values.astype(str)
print(cid, gageloc.comid.values.astype(int)[0])
# strmseg_basin = NLDI().getfeature_byid("comid", cid[0], basin=True).to_crs('epsg:3857')
strmseg_loc = NLDI().getfeature_byid("comid", cid[0]).to_crs('epsg:3857')
xs = XSGen(point=gageloc, cl_geom=strmseg_loc, ny=101, width=1000)
xs_line = xs.get_xs()
xs_line_geom = xs_line.to_crs('epsg:4326')
print(xs_line_geom)
bbox = xs_line_geom.geometry[0].envelope.bounds
print(bbox)
query = query_dems_shape(bbox)
print(query)
t1 = (xs_line.total_bounds) + ((-100., -100., 100., 100.))
dem = py3dep.get_map("DEM", tuple(t1), resolution=10, geo_crs="EPSG:3857", crs="epsg:3857")
tmp = 0
| 33.642857
| 91
| 0.740977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 202
| 0.214437
|
a7970ccf14ddf7d03195daa316d4ddd0d08e404f
| 3,437
|
py
|
Python
|
Python/Product/Pyvot/Pyvot/setup.py
|
mikiec84/PTVS
|
6cbeadd70a4438d6e6ea4d22a465d678eacf5eb2
|
[
"Apache-2.0"
] | 3
|
2015-04-09T03:57:26.000Z
|
2016-07-25T10:00:34.000Z
|
Python/Product/Pyvot/Pyvot/setup.py
|
anstkosh/PTVS
|
1854ab3d6397e66765697bb63d898aec16d0406e
|
[
"Apache-2.0"
] | 2
|
2019-06-05T18:13:41.000Z
|
2019-06-05T20:13:16.000Z
|
Python/Product/Pyvot/Pyvot/setup.py
|
RaymonGulati1/PTVS
|
ee1d09f2a94be4e21016f7579205bb65ec82c616
|
[
"Apache-2.0"
] | 2
|
2018-03-02T19:55:14.000Z
|
2019-02-14T22:37:28.000Z
|
# PyVot
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
# This file must work in both Python 2 and 3 as-is (without applying 2to3)
#
import sys
# If setuptools available, we normally want to install dependencies. The --no-downloads flag
# allows the PTVS installer to prevent this, to avoid network-related failure cases
allow_downloads = True
no_downloads_flag = '--no-downloads'
if no_downloads_flag in sys.argv:
sys.argv.remove(no_downloads_flag)
allow_downloads = False
try:
from setuptools import setup, Distribution
use_setuptools = True
except ImportError:
from distutils.core import setup, Distribution
use_setuptools = False
running_python3 = sys.version_info.major > 2
# Sets __version__ as a global without importing xl's __init__. We might not have pywin32 yet.
with open(r'.\xl\version.py') as version_file:
exec(version_file.read(), globals())
class PyvotDistribution(Distribution):
def find_config_files(self):
configs = Distribution.find_config_files(self)
configs.append("setup.py3.cfg" if running_python3 else "setup.py2.cfg")
return configs
long_description = \
"""Pyvot connects familiar data-exploration and visualization tools in Excel with the powerful data analysis
and transformation capabilities of Python, with an emphasis on tabular data. It provides a minimal and Pythonic
interface to Excel, smoothing over the pain points in using the existing Excel object model as exposed via COM."""
setup_options = dict(
name="Pyvot",
version=__version__,
author="Microsoft Corporation",
author_email="ptvshelp@microsoft.com",
license="Apache License 2.0",
description="Pythonic interface for data exploration in Excel",
long_description=long_description,
download_url="http://pypi.python.org/pypi/Pyvot",
url="http://pytools.codeplex.com/wikipage?title=Pyvot",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Win32 (MS Windows)',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Office/Business :: Financial :: Spreadsheet',
'License :: OSI Approved :: Apache Software License'],
packages=['xl', 'xl._impl'],
distclass=PyvotDistribution
)
if running_python3:
use_2to3 = True
from distutils.command.build_py import build_py_2to3
setup_options.update(dict(
cmdclass={'build_py': build_py_2to3}
))
if use_setuptools:
setup_options.update(dict(
zip_safe=True
))
if use_setuptools and allow_downloads:
setup_options.update(dict(
setup_requires=["Sphinx"],
))
setup(**setup_options)
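# Illustrative invocations (hypothetical; not taken from the original project docs):
#   python setup.py install                 -> with setuptools present, setup_requires=["Sphinx"] applies
#   python setup.py install --no-downloads  -> the flag is stripped from sys.argv and the
#                                              network-dependent setup_requires is never added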
| 35.802083
| 118
| 0.719814
| 229
| 0.066628
| 0
| 0
| 0
| 0
| 0
| 0
| 1,984
| 0.577248
|
a797c1cadcd8bbf1b052c2b8f77c7e0d396cdcfb
| 271
|
py
|
Python
|
src/ralph/api/__init__.py
|
DoNnMyTh/ralph
|
97b91639fa68965ad3fd9d0d2652a6545a2a5b72
|
[
"Apache-2.0"
] | 1,668
|
2015-01-01T12:51:20.000Z
|
2022-03-29T09:05:35.000Z
|
src/ralph/api/__init__.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 2,314
|
2015-01-02T13:26:26.000Z
|
2022-03-29T04:06:03.000Z
|
src/ralph/api/__init__.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 534
|
2015-01-05T12:40:28.000Z
|
2022-03-29T21:10:12.000Z
|
from ralph.api.serializers import RalphAPISerializer
from ralph.api.viewsets import RalphAPIViewSet, RalphReadOnlyAPIViewSet
from ralph.api.routers import router
__all__ = [
'RalphAPISerializer',
'RalphAPIViewSet',
'RalphReadOnlyAPIViewSet',
'router',
]
| 24.636364
| 71
| 0.778598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.258303
|
a799e70dc24ceb42c8e876b81ace1c8f5d0f6ceb
| 727
|
py
|
Python
|
demo_odoo_tutorial_wizard/models/models.py
|
digitalsatori/odoo-demo-addons-tutorial
|
8eb56156ac55f317f90bca089886c392556759c2
|
[
"MIT"
] | 57
|
2020-06-22T05:28:11.000Z
|
2022-03-25T08:15:08.000Z
|
demo_odoo_tutorial_wizard/models/models.py
|
digitalsatori/odoo-demo-addons-tutorial
|
8eb56156ac55f317f90bca089886c392556759c2
|
[
"MIT"
] | 2
|
2020-11-20T07:11:27.000Z
|
2022-03-30T00:20:29.000Z
|
demo_odoo_tutorial_wizard/models/models.py
|
digitalsatori/odoo-demo-addons-tutorial
|
8eb56156ac55f317f90bca089886c392556759c2
|
[
"MIT"
] | 29
|
2020-07-04T15:24:01.000Z
|
2022-03-28T01:29:03.000Z
|
from odoo import models, fields, api
from odoo.exceptions import ValidationError
class DemoOdooWizardTutorial(models.Model):
_name = 'demo.odoo.wizard.tutorial'
_description = 'Demo Odoo Wizard Tutorial'
name = fields.Char('Description', required=True)
partner_id = fields.Many2one('res.partner', string='Partner')
@api.multi
def action_context_demo(self):
# if self._context.get('context_data', False):
if self.env.context.get('context_data'):
raise ValidationError('have context data')
raise ValidationError('hello')
@api.multi
def action_button(self):
for record in self:
record.with_context(context_data=True).action_context_demo()
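# Illustrative call flow (hypothetical; not part of the original module): action_button()
# re-invokes action_context_demo() via with_context(context_data=True), so
# self.env.context.get('context_data') is truthy in the called method and the
# 'have context data' ValidationError is raised instead of the plain 'hello' one.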
| 34.619048
| 72
| 0.696011
| 645
| 0.887208
| 0
| 0
| 383
| 0.526823
| 0
| 0
| 175
| 0.240715
|
a79daf8941b0f06f1e88f279de06585e5430d9d8
| 659
|
py
|
Python
|
eaa_donations/donations/models/partner_charity.py
|
andrewbird2/eaa_donations
|
40a2cb2431130b330130f101c89bd3f8c503d2e2
|
[
"MIT"
] | null | null | null |
eaa_donations/donations/models/partner_charity.py
|
andrewbird2/eaa_donations
|
40a2cb2431130b330130f101c89bd3f8c503d2e2
|
[
"MIT"
] | 13
|
2020-06-05T19:27:58.000Z
|
2022-02-26T13:40:54.000Z
|
eaa_donations/donations/models/partner_charity.py
|
andrewbird2/eaa_donations
|
40a2cb2431130b330130f101c89bd3f8c503d2e2
|
[
"MIT"
] | null | null | null |
from django.db import models
class PartnerCharity(models.Model):
slug_id = models.CharField(max_length=30, unique=True)
name = models.TextField(unique=True, verbose_name='Name (human readable)')
email = models.EmailField(help_text='Used to cc the charity on receipts')
xero_account_name = models.TextField(help_text='Exact text of incoming donation account in xero')
active = models.BooleanField(default=True)
thumbnail = models.FileField(blank=True, null=True)
order = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Partner charities'
| 36.611111
| 101
| 0.732929
| 627
| 0.951442
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.192716
|
a7a100722ec7bb48d5749ae19f7101fc66740935
| 2,931
|
py
|
Python
|
python_data_utils/spark/ml/lightgbm.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | 4
|
2019-01-06T00:09:21.000Z
|
2022-01-28T06:03:13.000Z
|
python_data_utils/spark/ml/lightgbm.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | null | null | null |
python_data_utils/spark/ml/lightgbm.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | null | null | null |
__all__ = ['LightGBMRegressorModel']
from mmlspark.lightgbm.LightGBMRegressor import LightGBMRegressor, LightGBMRegressionModel
from mmlspark.train import ComputeModelStatistics
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import DataFrame
import pyspark.sql.functions as F
from python_data_utils.spark.ml.base import BinaryClassCVModel, Metrics, RegressionCVModel
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
class LightGBMRegressorModel(RegressionCVModel):
def __init__(
self, *, estimator=None, evaluator=None,
label_col: str = 'label', params_map=None):
estimator = LightGBMRegressor(
objective='regression_l1',
# earlyStoppingRound=3,
# validationIndicatorCol='is_val',
labelCol=label_col)\
if not estimator else estimator
assert isinstance(estimator, LightGBMRegressor)
evaluator = RegressionEvaluator(metricName='mae')\
if not evaluator else evaluator
super().__init__(estimator, evaluator)
self.params_map = {
'baggingFraction': [.8, .9, 1.],
'featureFraction': [.5, .75, 1.],
'lambdaL1': [.1, .2, .3],
'learningRate': [0.01, 0.1],
'maxDepth': [-1, 3, 12],
'numIterations': [200],
'numLeaves': [31]
} if not params_map else params_map
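        # The default grid above spans 3 * 3 * 3 * 2 * 3 * 1 * 1 = 162 hyper-parameter combinations.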
@Metrics.register('regression_metrics')
def regression_metrics(self, predictions: DataFrame):
return ComputeModelStatistics(
evaluationMetric='regression',
labelCol=self.estimator.getLabelCol(),
scoresCol=self.estimator.getPredictionCol())\
.transform(predictions)\
.toPandas().to_dict(orient='list')
@Metrics.register('feature_importances')
def feature_importances(self, predictions: DataFrame):
feat_importances = pd.DataFrame(sorted(zip(
self.best_model.stages[-1].getFeatureImportances()
, self.features)), columns=['Value', 'Feature'])
# plot feature importance
_, ax = plt.subplots(figsize=(20, 10))
ax = sns.barplot(
x="Value",
y="Feature",
ax=ax,
data=feat_importances.sort_values(
by="Value", ascending=False))
ax.set_title('LightGBM Features (avg over folds)')
plt.tight_layout()
return {'data': feat_importances, 'plot': ax}
@Metrics.register('residuals_plot')
def residuals_plot(self, predictions: DataFrame):
# plot residuals
predictions = predictions.withColumn(
'_resid', F.col(self.estimator.getPredictionCol())\
- F.col(self.estimator.getLabelCol()))
return predictions.select('_resid').toPandas().hist()
| 37.101266
| 90
| 0.643466
| 2,380
| 0.81201
| 0
| 0
| 1,397
| 0.476629
| 0
| 0
| 429
| 0.146366
|
a7a176d1745c365cc7c57002a3194eb70a8c838f
| 80
|
py
|
Python
|
UDTherapy/__init__.py
|
JonSn0w/Urban-Dictionary-Therapy
|
8257cd3883bcef31207c2b089197ee9b0788727f
|
[
"MIT"
] | 3
|
2017-05-08T11:59:51.000Z
|
2017-06-20T22:36:07.000Z
|
UDTherapy/__init__.py
|
JonSn0w/Urban-Dictionary-Therapy
|
8257cd3883bcef31207c2b089197ee9b0788727f
|
[
"MIT"
] | null | null | null |
UDTherapy/__init__.py
|
JonSn0w/Urban-Dictionary-Therapy
|
8257cd3883bcef31207c2b089197ee9b0788727f
|
[
"MIT"
] | null | null | null |
name = 'Urban Dictionary Therapy'
__all__ = ['UDTherapy',
'helper']
| 16
| 33
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.5625
|
a7a1b1b2f4da0731134b12b8f90d117d800a2c6f
| 2,453
|
py
|
Python
|
torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py
|
alchemy315/NoPFS
|
f3901e963e2301e8a6f1c7aac0511d0cf9a1889d
|
[
"BSD-3-Clause"
] | null | null | null |
torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py
|
alchemy315/NoPFS
|
f3901e963e2301e8a6f1c7aac0511d0cf9a1889d
|
[
"BSD-3-Clause"
] | null | null | null |
torch-test/mpich-3.4.3/modules/libfabric/contrib/intel/jenkins/runtests.py
|
alchemy315/NoPFS
|
f3901e963e2301e8a6f1c7aac0511d0cf9a1889d
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import os
import sys
sys.path.append(os.environ['CI_SITE_CONFIG'])
import ci_site_config
import run
import common
parser = argparse.ArgumentParser()
parser.add_argument("--prov", help="core provider", choices=["psm2", "verbs", \
"tcp", "udp", "sockets", "shm"])
parser.add_argument("--util", help="utility provider", choices=["rxd", "rxm"])
parser.add_argument("--ofi_build_mode", help="specify the build configuration", \
choices = ["dbg", "dl"])
args = parser.parse_args()
args_core = args.prov
args_util = args.util
if (args.ofi_build_mode):
ofi_build_mode = args.ofi_build_mode
else:
ofi_build_mode='reg'
node = (os.environ['NODE_NAME']).split('-')[0]
hosts = [node]
# Note: Temporarily disabling all mpich testing
# due to mpich option issues which are causing
# multiple tests to fail.
#mpilist = ['impi', 'mpich', 'ompi']
mpilist = ['impi', 'ompi']
#this script is executed from /tmp
#this is done since some mpi tests
#look for a valid location before running
# the test on the secondary host(client)
# but jenkins only creates a valid path on
# the primary host (server/test node)
os.chdir('/tmp/')
if(args_core):
for host in ci_site_config.node_map[node]:
hosts.append(host)
if (args_util == None):
run.fi_info_test(args_core, hosts, ofi_build_mode)
run.fabtests(args_core, hosts, ofi_build_mode)
run.shmemtest(args_core, hosts, ofi_build_mode)
for mpi in mpilist:
run.intel_mpi_benchmark(args_core, hosts, mpi, ofi_build_mode)
run.mpistress_benchmark(args_core, hosts, mpi, ofi_build_mode)
run.osu_benchmark(args_core, hosts, mpi, ofi_build_mode)
else:
run.fi_info_test(args_core, hosts, ofi_build_mode, util=args_util)
run.fabtests(args_core, hosts, ofi_build_mode, util=args_util)
run.shmemtest(args_core, hosts, ofi_build_mode, util=args_util)
for mpi in mpilist:
run.intel_mpi_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util,)
run.mpistress_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util)
run.osu_benchmark(args_core, hosts, mpi, ofi_build_mode, \
util=args_util)
else:
print("Error : Specify a core provider to run tests")
| 35.042857
| 81
| 0.653078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 640
| 0.260905
|
a7a23de4cab3d9668dde589a3d12beefa58de55c
| 664
|
py
|
Python
|
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | 2
|
2020-12-18T21:42:05.000Z
|
2020-12-21T06:07:33.000Z
|
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | null | null | null |
interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0075_sort_colors.py
|
mrinalini-m/data_structures_and_algorithms
|
f9bebcca8002064e26ba5b46e47b8abedac39c3e
|
[
"MIT"
] | 2
|
2020-07-04T20:30:19.000Z
|
2021-08-31T08:32:36.000Z
|
from typing import List
def swap(arr, i, j):
temp = arr[i]
arr[i] = arr[j]
arr[j] = temp
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
low = 0
high = len(nums) - 1
i = 0
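        # Invariants maintained by the loop below: nums[:low] are all 0s, nums[low:i] are all 1s,
        # nums[high+1:] are all 2s, and nums[i:high+1] is the still-unscanned region.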
while i <= high:
if nums[i] == 0:
swap(nums, i, low)
i += 1
low += 1
elif nums[i] == 2:
swap(nums, i, high)
high -= 1
else:
i += 1
list = [1, 2, 0, 1, 0, 2, 0, 0]
Solution().sortColors(list)
print(list)
| 18.971429
| 61
| 0.414157
| 482
| 0.725904
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.115964
|
a7a370eb2ff4da9ac60e7150eb94a68c2fab78e2
| 14,452
|
py
|
Python
|
bnpy/data/GroupXData.py
|
raphael-group/bnpy
|
b11dc6f5689b06fc967bab6dffe7e01551d84667
|
[
"BSD-3-Clause"
] | 184
|
2016-12-13T21:05:48.000Z
|
2022-02-28T11:47:23.000Z
|
bnpy/data/GroupXData.py
|
raphael-group/bnpy
|
b11dc6f5689b06fc967bab6dffe7e01551d84667
|
[
"BSD-3-Clause"
] | 37
|
2016-12-18T14:07:53.000Z
|
2022-03-13T10:58:14.000Z
|
bnpy/data/GroupXData.py
|
raphael-group/bnpy
|
b11dc6f5689b06fc967bab6dffe7e01551d84667
|
[
"BSD-3-Clause"
] | 50
|
2017-01-25T19:44:34.000Z
|
2022-03-15T10:22:01.000Z
|
'''
Classes
-----
GroupXData
Data object for holding a dense matrix X of real 64-bit floats,
organized contiguously based on provided group structure.
'''
import numpy as np
from collections import namedtuple
from bnpy.data.XData import XData
from bnpy.util import as1D, as2D, as3D, toCArray
from bnpy.util import numpyToSharedMemArray, sharedMemToNumpyArray
class GroupXData(XData):
""" Dataset object for dense real vectors organized in groups.
GroupXData can represent situations like:
    * observed image patches, across many images
        group=image, observation=patch
    * observed test results for patients, across many hospitals
        group=hospital, observation=patient test result
Attributes
------
X : 2D array, size N x D
each row is a single dense observation vector
Xprev : 2D array, size N x D, optional
"previous" observations for auto-regressive likelihoods
dim : int
the dimension of each observation
nObs : int
the number of in-memory observations for this instance
TrueParams : dict
key/value pairs represent names and arrays of true parameters
    doc_range : 1D array, size nDoc+1
        boundaries of each document: rows doc_range[d]:doc_range[d+1] of X belong to document d
nDoc : int
the number of in-memory documents for this instance
nDocTotal : int
total number of documents in entire dataset
Example
--------
# Create 1000 observations, each one a 3D vector
>>> X = np.random.randn(1000, 3)
# Assign items 0-499 to doc 1, 500-1000 to doc 2
>>> doc_range = [0, 500, 1000]
>>> myData = GroupXData(X, doc_range)
>>> print (myData.nObs)
1000
>>> print (myData.X.shape)
(1000, 3)
>>> print (myData.nDoc)
2
"""
@classmethod
def LoadFromFile(cls, filepath, nDocTotal=None, **kwargs):
''' Constructor for loading data from disk into XData instance
'''
if filepath.endswith('.mat'):
return cls.read_mat(filepath, nDocTotal=nDocTotal, **kwargs)
        raise NotImplementedError('Only .mat file supported.')
def save_to_mat(self, matfilepath):
''' Save contents of current object to disk
'''
import scipy.io
SaveVars = dict(X=self.X, nDoc=self.nDoc, doc_range=self.doc_range)
if hasattr(self, 'Xprev'):
SaveVars['Xprev'] = self.Xprev
if hasattr(self, 'TrueParams') and 'Z' in self.TrueParams:
SaveVars['TrueZ'] = self.TrueParams['Z']
scipy.io.savemat(matfilepath, SaveVars, oned_as='row')
@classmethod
def read_npz(cls, npzfilepath, nDocTotal=None, **kwargs):
''' Constructor for building an instance of GroupXData from npz
'''
var_dict = dict(**np.load(npzfilepath, allow_pickle=True))
if 'X' not in var_dict:
raise KeyError(
'Stored npz file needs to have data in field named X')
if 'doc_range' not in var_dict:
raise KeyError(
'Stored npz file needs to have field named doc_range')
if nDocTotal is not None:
var_dict['nDocTotal'] = nDocTotal
return cls(**var_dict)
@classmethod
def read_mat(cls, matfilepath, nDocTotal=None, **kwargs):
''' Constructor for building an instance of GroupXData from disk
'''
import scipy.io
InDict = scipy.io.loadmat(matfilepath)
if 'X' not in InDict:
raise KeyError(
'Stored matfile needs to have data in field named X')
if 'doc_range' not in InDict:
raise KeyError(
'Stored matfile needs to have field named doc_range')
if nDocTotal is not None:
InDict['nDocTotal'] = nDocTotal
return cls(**InDict)
def __init__(self, X=None, doc_range=None, nDocTotal=None,
Xprev=None, TrueZ=None,
TrueParams=None, fileNames=None, summary=None, **kwargs):
''' Create an instance of GroupXData for provided array X
Post Condition
---------
self.X : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.Xprev : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.doc_range : 1D array, size nDoc+1
'''
self.X = as2D(toCArray(X, dtype=np.float64))
self.doc_range = as1D(toCArray(doc_range, dtype=np.int32))
if summary is not None:
self.summary = summary
if Xprev is not None:
self.Xprev = as2D(toCArray(Xprev, dtype=np.float64))
# Verify attributes are consistent
self._set_dependent_params(doc_range, nDocTotal)
self._check_dims()
# Add optional true parameters / true hard labels
if TrueParams is not None:
self.TrueParams = dict()
for key, arr in list(TrueParams.items()):
self.TrueParams[key] = toCArray(arr)
if TrueZ is not None:
if not hasattr(self, 'TrueParams'):
self.TrueParams = dict()
self.TrueParams['Z'] = as1D(toCArray(TrueZ))
self.TrueParams['K'] = np.unique(self.TrueParams['Z']).size
# Add optional source files for each group/sequence
if fileNames is not None:
if hasattr(fileNames, 'shape') and fileNames.shape == (1, 1):
fileNames = fileNames[0, 0]
if len(fileNames) > 1:
self.fileNames = [str(x).strip()
for x in np.squeeze(fileNames)]
else:
self.fileNames = [str(fileNames[0])]
# Add extra data attributes custom for the dataset
for key in kwargs:
if hasattr(self, key):
continue
if not key.startswith("__"):
arr = np.squeeze(as1D(kwargs[key]))
if arr.shape == ():
try:
arr = float(arr)
except TypeError:
continue
setattr(self, key, arr)
def _set_dependent_params(self, doc_range, nDocTotal=None):
self.nObs = self.X.shape[0]
self.dim = self.X.shape[1]
self.nDoc = self.doc_range.size - 1
if nDocTotal is None:
self.nDocTotal = self.nDoc
else:
self.nDocTotal = int(nDocTotal)
def _check_dims(self):
assert self.X.ndim == 2
assert self.X.flags.c_contiguous
assert self.X.flags.owndata
assert self.X.flags.aligned
assert self.X.flags.writeable
assert self.doc_range.ndim == 1
assert self.doc_range.size == self.nDoc + 1
assert self.doc_range[0] == 0
assert self.doc_range[-1] == self.nObs
assert np.all(self.doc_range[1:] - self.doc_range[:-1] >= 0)
def get_size(self):
return self.nDoc
def get_total_size(self):
return self.nDocTotal
def get_dim(self):
return self.dim
def get_text_summary(self):
''' Returns human-readable description of this dataset
'''
if hasattr(self, 'summary'):
s = self.summary
else:
s = 'GroupXData'
return s
def get_stats_summary(self):
''' Returns human-readable summary of this dataset's basic properties
'''
s = ' size: %d units (documents)\n' % (self.get_size())
s += ' dimension: %d' % (self.get_dim())
return s
def toXData(self):
''' Return simplified XData instance, losing group structure
'''
if hasattr(self, 'TrueParams'):
TParams = self.TrueParams
else:
TParams=None
if hasattr(self, 'Xprev'):
return XData(self.X, Xprev=self.Xprev, TrueParams=TParams)
else:
return XData(self.X, TrueParams=TParams)
# Create Subset
#########################################################
def make_subset(self,
docMask=None,
atomMask=None,
doTrackTruth=False,
doTrackFullSize=True):
""" Get subset of this dataset identified by provided unit IDs.
Parameters
-------
docMask : 1D array_like of ints
Identifies units (documents) to use to build subset.
doTrackFullSize : boolean, optional
default=True
If True, return DataObj with same nDocTotal value as this
dataset. If False, returned DataObj has smaller size.
atomMask : 1D array_like of ints, optional
default=None
If present, identifies rows of X to return as XData
Returns
-------
Dchunk : bnpy.data.GroupXData instance
"""
if atomMask is not None:
return self.toXData().select_subset_by_mask(atomMask)
if len(docMask) < 1:
raise ValueError('Cannot select empty subset')
newXList = list()
newXPrevList = list()
newDocRange = np.zeros(len(docMask) + 1)
newPos = 1
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newXList.append(self.X[start:stop])
if hasattr(self, 'Xprev'):
newXPrevList.append(self.Xprev[start:stop])
newDocRange[newPos] = newDocRange[newPos - 1] + stop - start
newPos += 1
newX = np.vstack(newXList)
if hasattr(self, 'Xprev'):
newXprev = np.vstack(newXPrevList)
else:
newXprev = None
if doTrackFullSize:
nDocTotal = self.nDocTotal
else:
nDocTotal = None
if hasattr(self, 'alwaysTrackTruth'):
doTrackTruth = doTrackTruth or self.alwaysTrackTruth
hasTrueZ = hasattr(self, 'TrueParams') and 'Z' in self.TrueParams
if doTrackTruth and hasTrueZ:
TrueZ = self.TrueParams['Z']
newTrueZList = list()
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newTrueZList.append(TrueZ[start:stop])
newTrueZ = np.hstack(newTrueZList)
assert newTrueZ.size == newDocRange[-1]
else:
newTrueZ = None
return GroupXData(newX, newDocRange,
Xprev=newXprev,
nDocTotal=nDocTotal,
TrueZ=newTrueZ)
def add_data(self, XDataObj):
""" Appends (in-place) provided dataset to this dataset.
Post Condition
-------
self.Data grows by adding all units from provided DataObj.
"""
if not self.dim == XDataObj.dim:
raise ValueError("Dimensions must match!")
self.nObs += XDataObj.nObs
self.nDocTotal += XDataObj.nDocTotal
self.nDoc += XDataObj.nDoc
self.X = np.vstack([self.X, XDataObj.X])
if hasattr(self, 'Xprev'):
self.Xprev = np.vstack([self.Xprev, XDataObj.Xprev])
new_doc_range = XDataObj.doc_range[1:] + self.doc_range[-1]
self.doc_range = np.hstack([self.doc_range, new_doc_range])
self._check_dims()
def get_random_sample(self, nDoc, randstate=np.random):
nDoc = np.minimum(nDoc, self.nDoc)
mask = randstate.permutation(self.nDoc)[:nDoc]
Data = self.select_subset_by_mask(mask, doTrackFullSize=False)
return Data
def __str__(self):
return self.X.__str__()
def getRawDataAsSharedMemDict(self):
''' Create dict with copies of raw data as shared memory arrays
'''
dataShMemDict = dict()
dataShMemDict['X'] = numpyToSharedMemArray(self.X)
dataShMemDict['doc_range'] = numpyToSharedMemArray(self.doc_range)
dataShMemDict['nDocTotal'] = self.nDocTotal
if hasattr(self, 'Xprev'):
dataShMemDict['Xprev'] = numpyToSharedMemArray(self.Xprev)
return dataShMemDict
def getDataSliceFunctionHandle(self):
""" Return function handle that can make data slice objects.
Useful with parallelized algorithms,
when we need to use shared memory.
Returns
-------
f : function handle
"""
return makeDataSliceFromSharedMem
def makeDataSliceFromSharedMem(dataShMemDict,
cslice=(0, None),
batchID=None):
""" Create data slice from provided raw arrays and slice indicators.
Returns
-------
Dslice : namedtuple with same fields as GroupXData object
* X
* nObs
* nObsTotal
* dim
Represents subset of documents identified by cslice tuple.
Example
-------
>>> Data = GroupXData(np.random.rand(25,2), doc_range=[0,4,12,25])
>>> shMemDict = Data.getRawDataAsSharedMemDict()
>>> Dslice = makeDataSliceFromSharedMem(shMemDict)
>>> np.allclose(Data.X, Dslice.X)
True
>>> np.allclose(Data.nObs, Dslice.nObs)
True
>>> Data.dim == Dslice.dim
True
>>> Aslice = makeDataSliceFromSharedMem(shMemDict, (0, 2))
>>> Aslice.nDoc
2
>>> np.allclose(Aslice.doc_range, Dslice.doc_range[0:(2+1)])
True
"""
if batchID is not None and batchID in dataShMemDict:
dataShMemDict = dataShMemDict[batchID]
# Make local views (NOT copies) to shared mem arrays
doc_range = sharedMemToNumpyArray(dataShMemDict['doc_range'])
X = sharedMemToNumpyArray(dataShMemDict['X'])
nDocTotal = int(dataShMemDict['nDocTotal'])
dim = X.shape[1]
if cslice is None:
cslice = (0, doc_range.size - 1)
elif cslice[1] is None:
cslice = (0, doc_range.size - 1)
tstart = doc_range[cslice[0]]
tstop = doc_range[cslice[1]]
keys = ['X', 'Xprev', 'doc_range', 'nDoc', 'nObs', 'dim', 'nDocTotal']
if 'Xprev' in dataShMemDict:
Xprev = sharedMemToNumpyArray(dataShMemDict['Xprev'])[tstart:tstop]
else:
Xprev = None
Dslice = namedtuple("GroupXDataTuple", keys)(
X=X[tstart:tstop],
Xprev=Xprev,
doc_range=doc_range[cslice[0]:cslice[1] + 1] - doc_range[cslice[0]],
nDoc=cslice[1] - cslice[0],
nObs=tstop - tstart,
dim=dim,
nDocTotal=nDocTotal,
)
return Dslice
| 34.657074
| 77
| 0.588846
| 12,063
| 0.834694
| 0
| 0
| 1,534
| 0.106144
| 0
| 0
| 5,320
| 0.368115
|
a7a3c07297bdc5a9d9dc9e8e2723b1d3e587876e
| 915
|
py
|
Python
|
sghymnal/users/models.py
|
shortnd/sghymnal
|
c10d9a7e2fda803dcb5046b9f7bc099f32b6c603
|
[
"MIT"
] | null | null | null |
sghymnal/users/models.py
|
shortnd/sghymnal
|
c10d9a7e2fda803dcb5046b9f7bc099f32b6c603
|
[
"MIT"
] | null | null | null |
sghymnal/users/models.py
|
shortnd/sghymnal
|
c10d9a7e2fda803dcb5046b9f7bc099f32b6c603
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.db.models import BooleanField, CharField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_("Name of User"), blank=True, max_length=255)
foes_allowed = BooleanField("Foes Allowed", default=False)
push_notifications_allowed = BooleanField(
"Push Notifications Allowed", default=False
)
roster_allowed = BooleanField("Rosters Allowed", default=False)
songbook_allowed = BooleanField("Songbook Allowed", default=False)
users_allowed = BooleanField("Users Allowed", default=False)
feed_allowed = BooleanField("Feed Allowed", default=False)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| 39.782609
| 74
| 0.748634
| 719
| 0.785792
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.236066
|
a7a3c324c55d54b727b474911571c79dbd56bbdd
| 3,285
|
py
|
Python
|
GNetLMM/pycore/mtSet/linalg/linalg_matrix.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 4
|
2016-02-25T18:40:36.000Z
|
2019-05-06T06:15:47.000Z
|
GNetLMM/pycore/mtSet/linalg/linalg_matrix.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 6
|
2016-03-29T02:55:17.000Z
|
2017-11-27T19:30:04.000Z
|
GNetLMM/pycore/mtSet/linalg/linalg_matrix.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 2
|
2017-05-09T05:23:50.000Z
|
2019-07-27T13:19:22.000Z
|
"""Matrix linear algebra routines needed for GP models"""
import scipy as SP
import scipy.linalg as linalg
import logging
def solve_chol(A,B):
"""
Solve cholesky decomposition::
return A\(A'\B)
"""
# X = linalg.solve(A,linalg.solve(A.transpose(),B))
# much faster version
X = linalg.cho_solve((A, True), B)
return X
def jitChol(A, maxTries=10, warning=True):
"""Do a Cholesky decomposition with jitter.
Description:
U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
decomposition on the given matrix, if matrix isn't positive
definite the function adds 'jitter' and tries again. Thereafter
the amount of jitter is multiplied by 10 each time it is added
again. This is continued for a maximum of 10 times. The amount of
jitter added is returned.
Returns:
U - the Cholesky decomposition for the matrix.
jitter - the amount of jitter that was added to the matrix.
Arguments:
A - the matrix for which the Cholesky decomposition is required.
maxTries - the maximum number of times that jitter is added before
giving up (default 10).
warning - whether to give a warning for adding jitter (default is True)
See also
CHOL, PDINV, LOGDET
Copyright (c) 2005, 2006 Neil D. Lawrence
"""
jitter = 0
i = 0
while(True):
try:
# Try --- need to check A is positive definite
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
LC = linalg.cholesky(A, lower=True)
return LC.T, 0.0
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitChol()." % jitter)
LC = linalg.cholesky(A+jitter*SP.eye(A.shape[0]), lower=True)
return LC.T, jitter
except linalg.LinAlgError:
# Seems to have been non-positive definite.
if i<maxTries:
jitter = jitter*10
else:
raise linalg.LinAlgError, "Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials."
i += 1
return LC
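# Minimal usage sketch (illustrative only; not part of the original module):
#   K = SP.random.randn(5, 3)
#   A = SP.dot(K, K.T)          # rank-deficient, so a plain Cholesky may fail
#   U, jitter = jitChol(A)      # upper-triangular factor of A (+ jitter*I when needed)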
def jitEigh(A,maxTries=10,warning=True):
"""
Do a Eigenvalue Decompsition with Jitter,
works as jitChol
"""
warning = True
jitter = 0
i = 0
while(True):
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
S,U = linalg.eigh(A)
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitEigh()." % jitter)
S,U = linalg.eigh(A+jitter*SP.eye(A.shape[0]))
if S.min()>1E-10:
return S,U
if i<maxTries:
jitter = jitter*10
i += 1
raise linalg.LinAlgError, "Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials."
| 26.92623
| 150
| 0.565297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,744
| 0.530898
|
a7a51a41fbb112ee2ecd860311e6db4a6211d1fb
| 12,504
|
py
|
Python
|
WebDev/Task 3/we-poll/flaskapp/main.py
|
vigneshd332/delta-inductions-2021-master
|
70d4e7ecd92d69a2521df72ca99ac0ef2f135a23
|
[
"MIT"
] | null | null | null |
WebDev/Task 3/we-poll/flaskapp/main.py
|
vigneshd332/delta-inductions-2021-master
|
70d4e7ecd92d69a2521df72ca99ac0ef2f135a23
|
[
"MIT"
] | null | null | null |
WebDev/Task 3/we-poll/flaskapp/main.py
|
vigneshd332/delta-inductions-2021-master
|
70d4e7ecd92d69a2521df72ca99ac0ef2f135a23
|
[
"MIT"
] | null | null | null |
import ast
from flask import Flask, request
from flaskext.mysql import MySQL
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
mysql=MySQL()
app.config['MYSQL_DATABASE_USER'] = 'admin'
app.config['MYSQL_DATABASE_PASSWORD'] = 'noobmaster69'
app.config['MYSQL_DATABASE_DB'] = 'wepoll'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
@app.route('/api/index')
def index():
return {'token' :'lmaoo hi'}
@app.route('/api/auth', methods = ['POST'])
@cross_origin()
def auth():
if request.method == 'POST':
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
username = mydata["userName"]
password = mydata["password"]
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM users WHERE username = '" + username + "';")
data = cursor.fetchone()
print(data)
cursor.close()
if data is None:
return {'username': None, 'password': password,'message': 'User Doesnt Exist', 'doLogin': False}
else:
if data[1] == password:
return {'username': username, 'password': password, 'message': 'Login Successful', 'doLogin': True}
else:
return {'username': None, 'password': password, 'message': 'Incorrect Password', 'doLogin': False}
return "Error, GET methods are not supported"
@app.route('/api/create-team', methods =['POST'])
@cross_origin()
def create_team():
if request.method == 'POST':
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
teamname = mydata["teamName"]
username = mydata['userName']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM teams WHERE teamname = '" + teamname + "';")
teams = cursor.fetchone()
if(teams is None):
if(teamname ==''):
return {'teamname': None, 'username': None ,'message': 'Team Name cannot be Empty'}
cursor.execute("INSERT INTO teams (teamname,member1) VALUES ('" + teamname + "','"+username+"');")
conn.commit()
cursor.close()
return {'teamname': teamname, 'username': username ,'message': 'Team Created'}
else:
cursor.close()
return {'teamname': None, 'message': 'Team Already Exists'}
@app.route('/api/get-teams', methods =['POST'])
def get_teams():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
username = mydata['userName']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM teams WHERE '"+username+"' IN (member1,member2,member3,member4);")
teams = cursor.fetchall()
cursor.execute("SELECT * FROM teams WHERE '"+username+"' IN (member1);")
ownerStates = cursor.fetchall()
cursor.close()
isOwner =[]
for x in ownerStates:
isOwner.append(x[0])
ownerStates=[]
payload=''
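    # The response payload is a flat string: fields within a row are joined with '%20' and each
    # row is terminated with '%40', presumably for the frontend to split on those markers.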
for x in teams:
if x[0] in isOwner:
ownerStates.append('true')
else:
ownerStates.append('false')
x='%20'.join(['None' if v is None else v for v in x])
payload = payload + x + '%40'
ownerStates='%20'.join(ownerStates)
return{'payload':payload,'ownerStates':ownerStates}
@app.route('/api/get-polls', methods =['POST'])
def get_polls():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
username = mydata['userName']
isvote = mydata['isVote']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT teamname FROM teams WHERE '"+username+"' IN (member1,member2,member3,member4);")
teams = cursor.fetchall()
teams = [ele for entry in teams for ele in entry]
cursor.execute("SELECT * FROM polls WHERE teamname IN ('"+"','".join(teams)+"');")
polls = cursor.fetchall()
if isvote == 'true':
cursor.execute("SELECT * FROM polls WHERE assigned like '%"+username+"%';")
polls = polls + cursor.fetchall()
cursor.close()
payload=''
for x in polls:
x='%20'.join(x)
payload = payload + x + '%40'
return{'payload':payload}
@app.route('/api/create-poll', methods =['POST'])
@cross_origin()
def create_poll():
if request.method == 'POST':
print('data = ',request.data)
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
teamname = mydata["teamName"]
pollname = mydata["pollName"]
options = mydata["options"]
votes=[]
for x in options.split(' '):
votes.append('0')
votes=' '.join(votes)
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM polls WHERE pollname = '" + pollname + "';")
polls = cursor.fetchone()
if(polls is None):
if(pollname ==''):
return {'pollname': None, 'message': 'Poll Name cannot be Empty'}
cursor.execute("INSERT INTO polls (pollname,teamname,options,state,votes,voted,assigned) VALUES ('" + pollname + "','"+teamname+"','"+options+"','true','"+votes+"','none','none');")
conn.commit()
cursor.close()
return {'pollname': pollname, 'message': 'Poll Created'}
else:
cursor.close()
return {'pollname': None, 'message': 'Poll Already Exists'}
@app.route('/api/add-member', methods=['POST'])
@cross_origin()
def add_member():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
membername = mydata["memberName"]
membercol = mydata["membercol"]
teammod = mydata["teammod"]
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM users WHERE username = '"+membername+"';")
data = cursor.fetchone()
if data is None:
cursor.close()
return {'membername': None, 'message': 'User Doesnt Exist'}
else:
cursor.execute("UPDATE teams SET "+membercol+" = '"+membername+"' WHERE teamname = '"+teammod+"';")
conn.commit()
cursor.close()
return {'membername': membername, 'message': 'Member Added', 'teamname' : teammod}
@app.route('/api/remove-member', methods=['POST'])
@cross_origin()
def remove_member():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
membermod = mydata["membermod"]
teammod = mydata["teammod"]
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("UPDATE teams SET "+membermod+" = 'None' WHERE teamname = '"+teammod+"';")
conn.commit()
cursor.close()
return {'message': 'success', 'membername': membermod, 'teammod': teammod}
@app.route('/api/delete-poll', methods=['POST'])
@cross_origin()
def delete_poll():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
poll = mydata['pollName']
conn=mysql.connect()
cursor=conn.cursor()
cursor.execute("SELECT * FROM polls WHERE pollname='"+poll+"';")
data=cursor.fetchone()
if data is None:
cursor.close()
return {'message': 'Poll does not exist', 'pollName':None}
else:
cursor.execute("DELETE FROM polls WHERE pollname='"+poll+"';")
conn.commit()
cursor.close()
return {'message': 'Poll Deleted', 'pollName': poll}
@app.route('/api/change-poll-status', methods=['POST'])
@cross_origin()
def changepollstatus():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
poll = mydata['pollName']
conn=mysql.connect()
cursor=conn.cursor()
cursor.execute("SELECT state FROM polls WHERE pollname='"+poll+"';")
data=cursor.fetchone()
if data[0]=='true':
cursor.execute("UPDATE polls SET state='false' WHERE pollname='"+poll+"';")
message = 'State set to false'
else:
cursor.execute("UPDATE polls SET state='true' WHERE pollname='"+poll+"';")
message = 'State set to true'
conn.commit()
cursor.close()
return{'message': message, 'pollName':poll}
@app.route('/api/register', methods=['POST'])
@cross_origin()
def register():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
username = mydata['userName']
password = mydata['password']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM users WHERE username = '"+username+"';")
data = cursor.fetchone()
if data is None:
cursor.execute("INSERT INTO users (username,password) VALUES ('"+username+"','"+password+"');")
conn.commit()
cursor.close()
return {'message': 'success', 'username': username}
else:
cursor.close()
return {'message': 'Username already exists', 'username': None}
@app.route('/api/vote', methods=['POST'])
@cross_origin()
def vote():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
pollname = mydata['pollName']
voteIndex = int(mydata['voteIndex'])
username = mydata['userName']
conn=mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT votes FROM polls WHERE pollname='"+pollname+"';")
votes = cursor.fetchone()
votes=votes[0].split(' ')
votes[voteIndex] = str(int(votes[voteIndex])+1)
votes=' '.join(votes)
cursor.execute("UPDATE polls SET votes='"+votes+"' WHERE pollname='"+pollname+"';")
cursor.execute("SELECT voted FROM polls WHERE pollname='"+pollname+"';")
voted = cursor.fetchone()
voted = voted[0].split(' ')
voted.append(username)
voted=' '.join(voted)
cursor.execute("UPDATE polls SET voted='"+voted+"' WHERE pollname='"+pollname+"';")
conn.commit()
cursor.close()
return {'message' : 'success'}
@app.route('/api/delete-team', methods=['POST'])
@cross_origin()
def delete_team():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
teamname = mydata['teamName']
conn=mysql.connect()
cursor=conn.cursor()
cursor.execute("SELECT * FROM teams WHERE teamname='"+teamname+"';")
data=cursor.fetchone()
if data is None:
cursor.close()
return {'message': 'Team does not exist', 'teamName':None}
else:
cursor.execute("DELETE FROM teams WHERE teamname='"+teamname+"';")
cursor.execute("DELETE FROM polls WHERE teamname='"+teamname+"';")
conn.commit()
cursor.close()
return {'message': 'Team Deleted', 'teamName': teamname}
@app.route('/api/poll-groups', methods=['POST'])
@cross_origin()
def poll_groups():
dict_str = request.data.decode("UTF-8")
mydata = ast.literal_eval(dict_str)
pollName = mydata['pollName']
userName = mydata['userName']
action = mydata['action']
conn=mysql.connect()
cursor=conn.cursor()
if action=='GET':
cursor.execute("SELECT assigned FROM polls WHERE pollname='"+pollName+"';")
assigned = cursor.fetchone()[0].split(' ')
cursor.execute("SELECT teamname FROM polls WHERE pollname='"+pollName+"';")
teamname = cursor.fetchone()[0]
cursor.execute("SELECT member1, member2, member3, member4 FROM teams where teamname='"+teamname+"';")
teammembers = cursor.fetchone()
cursor.execute("SELECT username FROM users")
users = cursor.fetchall()
allusers=[]
unassigned=[]
for user in users:
allusers.append(user[0])
for user in allusers:
if user in assigned:
continue
else:
if user not in teammembers:
unassigned.append(user)
assigned=' '.join(assigned)
unassigned=' '.join(unassigned)
cursor.close()
return {'message':'success', 'assigned':assigned, 'unassigned' : unassigned}
elif action=='POST':
cursor.execute("SELECT assigned FROM polls WHERE pollname='"+pollName+"';")
res = cursor.fetchone()[0].split(' ')
if userName in res:
res.remove(userName)
else:
res.append(userName)
res=' '.join(res)
cursor.execute("UPDATE polls SET assigned='"+res+"' WHERE pollname='"+pollName+"';")
conn.commit()
cursor.close()
return {'message': 'done', 'username' : userName, 'pollname': pollName}
if __name__ == "__main__":
app.run(debug=True)
| 37.437126
| 193
| 0.610125
| 0
| 0
| 0
| 0
| 11,996
| 0.959373
| 0
| 0
| 3,680
| 0.294306
|
a7a53397912424502a2174602b255501283824ca
| 3,514
|
py
|
Python
|
src/models/participants/participants.py
|
jfblg/Tracktime-UZE
|
f43666ac62d6e6450d0fb7082b5e68bae360547f
|
[
"MIT"
] | null | null | null |
src/models/participants/participants.py
|
jfblg/Tracktime-UZE
|
f43666ac62d6e6450d0fb7082b5e68bae360547f
|
[
"MIT"
] | null | null | null |
src/models/participants/participants.py
|
jfblg/Tracktime-UZE
|
f43666ac62d6e6450d0fb7082b5e68bae360547f
|
[
"MIT"
] | null | null | null |
from wtforms import Form, BooleanField, IntegerField, StringField, PasswordField, validators
from wtforms.fields.html5 import EmailField
from src.common.database import db
from sqlalchemy import exc
class RunnerRegistrationForm(Form):
first_name = StringField('First name', [
validators.Length(min=2, max=25),
validators.DataRequired(message="Required")])
last_name = StringField('Last name', [
validators.Length(min=2, max=25)])
gender = StringField('Gender', [
validators.Length(min=2, max=6),
validators.data_required(message="Required. 'boy' or 'girl'")])
year = IntegerField('Year of birth', [
validators.NumberRange(min=1917, max=2017),
validators.data_required(message="Required. Please specify number between 1917 and 2017.")])
class ParticipantModel(db.Model):
__tablename__ = "participants"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(80), nullable=False)
last_name = db.Column(db.String(80), nullable=False)
gender = db.Column(db.String(6), nullable=False)
year = db.Column(db.Integer, nullable=False)
startlist = db.relationship("StartlistModel",
back_populates='participants',
cascade="all, delete, delete-orphan")
__table_args__ = (db.UniqueConstraint('first_name', 'last_name', 'year'),)
def __init__(self, first_name, last_name, gender, year):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.year = int(year)
def json(self):
return {
"first_name": self.first_name,
"last_name": self.last_name,
"gender": self.gender,
"year": self.year,
}
@classmethod
def find_by_year(cls, year):
        # 'query' is a SQLAlchemy query builder
# SELECT FROM items WHERE name=name LIMIT 1
# returned data gets converted into ItemModel object
return cls.query.filter_by(year=int(year))
@classmethod
def find_by_gender_and_year(cls, gender, year):
return cls.query.filter_by(gender=gender, year=year)
def save_to_db(self):
        ''' Insert the object into the DB; on a uniqueness (integrity) violation the session is rolled back.
'''
# SQLAlchemy can translate object into the row
try:
db.session.add(self)
db.session.commit()
except exc.IntegrityError as e:
db.session().rollback()
@classmethod
def get_participants_ordered(cls):
return db.session.query(ParticipantModel.id,
ParticipantModel.last_name,
ParticipantModel.first_name,
ParticipantModel.gender,
ParticipantModel.year).\
order_by(ParticipantModel.last_name).\
order_by(ParticipantModel.first_name).\
all()
@classmethod
def get_by_id(cls, participant_id):
return db.session.query(cls).filter_by(id=participant_id).one()
@staticmethod
def drop_table():
db.drop_all()
@classmethod
def list_all(cls):
return cls.query.all()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
@classmethod
def delete_all_rows(cls):
all_rows = cls.list_all()
for row in all_rows:
row.delete_from_db()
| 31.945455
| 100
| 0.615822
| 3,308
| 0.941377
| 0
| 0
| 1,239
| 0.35259
| 0
| 0
| 526
| 0.149687
|
a7a5cb2262b40e9f0b4ed01f3aebd41690c8929f
| 901
|
py
|
Python
|
demo.py
|
ruixuantan/FourParts
|
d28f7fa87830406a08e618bdfcb25d5d87e3e2a1
|
[
"MIT"
] | null | null | null |
demo.py
|
ruixuantan/FourParts
|
d28f7fa87830406a08e618bdfcb25d5d87e3e2a1
|
[
"MIT"
] | 6
|
2020-10-09T04:25:07.000Z
|
2021-04-20T20:29:03.000Z
|
demo.py
|
ruixuantan/FourParts
|
d28f7fa87830406a08e618bdfcb25d5d87e3e2a1
|
[
"MIT"
] | null | null | null |
import fourparts as fp
import pandas as pd
file_name = 'chorale_F'
df = fp.midi_to_df('samples/' + file_name + '.mid', save=True)
chords = fp.PreProcessor(4).get_progression(df)
chord_progression = fp.ChordProgression(chords)
# gets pitch class sets
pitch_class_sets = chord_progression.get_pitch_class_sets()
pd.DataFrame(pitch_class_sets).to_csv(file_name + '_pitch_class_sets.csv')
# check parallels
result = chord_progression.check_parallels()
pd.DataFrame(result).to_csv(file_name + '_parallel_results.csv')
# demonstration for 2 parts
file_name = 'chorale_G_2parts'
df = fp.midi_to_df('samples/' + file_name + '.mid', save=True)
dyads = fp.PreProcessor(2).get_progression(df)
dyad_progression = fp.DyadProgression(dyads)
# gets intervals between each dyad
dyad_intervals = dyad_progression.get_harmonic_intervals()
pd.DataFrame(dyad_intervals).to_csv(file_name + '_dyad_intervals.csv')
| 30.033333
| 74
| 0.789123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.254162
|
a7a5eea8dc1da0b751835f89feabfe81cc8149c8
| 14,677
|
py
|
Python
|
jenkins_status.py
|
tektronix/obsidian
|
d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706
|
[
"Apache-2.0"
] | 2
|
2019-05-03T19:45:22.000Z
|
2019-08-01T18:33:49.000Z
|
jenkins_status.py
|
tektronix/obsidian
|
d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706
|
[
"Apache-2.0"
] | 5
|
2019-09-05T15:09:24.000Z
|
2019-10-02T20:36:35.000Z
|
jenkins_status.py
|
tektronix/obsidian
|
d7ee7cd1a8511ecea8d3b475c2b308bcd81c7706
|
[
"Apache-2.0"
] | 1
|
2019-05-13T20:13:00.000Z
|
2019-05-13T20:13:00.000Z
|
#!/usr/bin/env python3
# Display a Jenkins build job status and progress
# Re-use animation functions from https://github.com/jgarff/rpi_ws281x/blob/master/python/examples/strandtest.py
import argparse
import random
import sys
import datetime
import time
from rpi_ws281x import Adafruit_NeoPixel, Color
from requests import get
MAX_LED_COUNT = 10000
POLL_PERIOD_SECONDS = 10
CHANNEL_1_PWM_PINS = (13, 19, 41, 45, 53)
# LED strip default configuration:
LED_COUNT = 144 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
# LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 55 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
JENKINS_FAILURE = 'FAILURE'
JENKINS_SUCCESS = 'SUCCESS'
JENKINS_ABORTED = 'ABORTED'
JENKINS_NO_RESULT = None
COLOR_RED = Color(255, 0, 0)
COLOR_GREEN = Color(0, 255, 0)
COLOR_BLUE = Color(0, 0, 255)
COLOR_WHITE = Color(255, 255, 255)
COLOR_BLACK = Color(0, 0, 0)
def color_wipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms / 1000.0)
def theater_chase(strip, color, wait_ms=50, iterations=10):
"""Movie theater light style chaser animation."""
for j in range(iterations):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i + q, color)
strip.show()
time.sleep(wait_ms / 1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i + q, 0)
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=1):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256 * iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i + j) & 255))
strip.show()
time.sleep(wait_ms / 1000.0)
def rainbow_cycle(strip, wait_ms=20, iterations=5):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256 * iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
strip.show()
time.sleep(wait_ms / 1000.0)
def theater_chase_rainbow(strip, wait_ms=50):
"""Rainbow movie theater light style chaser animation."""
for j in range(256):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i + q, wheel((i + j) % 255))
strip.show()
time.sleep(wait_ms / 1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i + q, 0)
def color_shuffle(strip, color, wait_ms=50):
"""Shuffle color onto display a pixel at a time."""
indexes = [i for i in range(strip.numPixels())]
random.shuffle(indexes)
for i in indexes:
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms / 1000.0)
def tail_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
"""Animate a pixel enter from the tail of the strip to the specified pixel"""
if strip.numPixels() > pixel:
wait_ms = travel_time_ms / float((strip.numPixels() - pixel))
else:
wait_ms = travel_time_ms
for j in range(strip.numPixels(), pixel - 1, -1):
strip.setPixelColor(j, color)
for k in range(j + 1, strip.numPixels()):
strip.setPixelColor(k, bg_color)
strip.show()
time.sleep(wait_ms / 1000.0)
def head_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=1000):
"""Animate a pixel enter from the head of the strip to the specified pixel"""
wait_ms = travel_time_ms / pixel
for j in range(pixel):
strip.setPixelColor(j, color)
for i in range(j):
strip.setPixelColor(i, bg_color)
strip.show()
time.sleep(wait_ms / 1000.0)
def solid(strip, color):
head_solid(strip, strip.numPixels(), color=color)
def head_solid(strip, pixel, color):
"""Set solid color from the head of the strip to the specified pixel"""
for i in range(pixel):
strip.setPixelColor(i, color)
strip.show()
def tail_solid(strip, pixel, color):
"""Set solid color from the specified pixel to the end of the strip"""
    for i in range(pixel, strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
def tail_fill(strip, color, bg_color=COLOR_BLACK, travel_time_ms=100):
"""Tail fill the entire strip"""
for i in range(strip.numPixels()):
tail_entry(strip, i, color=color, bg_color=bg_color, travel_time_ms=travel_time_ms)
head_solid(strip, i, color=color)
def progress_bar_tail_entry(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
"""Animate the last fill from tail end up to the specified pixel"""
head_solid(strip, pixel, color=color)
tail_entry(strip, pixel, color=color, bg_color=bg_color, travel_time_ms=travel_time_ms)
def progress_bar_tail_fill(strip, pixel, color, bg_color=COLOR_BLACK, travel_time_ms=100):
"""Animate progress bar fill from tail end from start up to the specified pixel"""
for i in range(pixel):
head_solid(strip, i, color=color)
tail_entry(strip, i, color=color, bg_color=bg_color, travel_time_ms=travel_time_ms)
def progress_bar(strip, percentage, progressColor, remainColor=COLOR_BLACK, wait_ms=10):
"""Animate progress bar"""
    finishedProgress = strip.numPixels() * percentage // 100  # integer division so the range() calls below accept it
for index in range(0, finishedProgress):
strip.setPixelColor(index, progressColor)
for index in range(finishedProgress, strip.numPixels()):
strip.setPixelColor(index, remainColor)
strip.show()
rainbow_pixel(strip, finishedProgress, wait_ms=wait_ms)
def rainbow_pixel(strip, pixel, wait_ms=100):
"""Cycle all colors for a given pixel"""
for j in range(256):
strip.setPixelColor(pixel, wheel(j))
strip.show()
time.sleep(wait_ms / 1000.0)
def rainbow_pixel_duration(strip, pixel, duration=10):
"""Animate rainbow for a fixed duration in seconds"""
et = datetime.datetime.now() + datetime.timedelta(0, duration)
while (et > datetime.datetime.now()):
rainbow_pixel(strip, pixel, wait_ms=10)
def show_success(strip):
"""Animate build result success"""
color_wipe(strip, COLOR_GREEN, 10)
def show_failure(strip):
"""Animate build result failure"""
color_wipe(strip, COLOR_RED, 10)
def show_aborted(strip):
"""Animate build result aborted"""
color_wipe(strip, Color(200, 200, 200), 10)
def show_build_started(strip):
"""Animate build started"""
color_shuffle(strip, color=COLOR_BLACK, wait_ms=10)
def show_build_in_progress(strip, progress, travel_time_s=POLL_PERIOD_SECONDS):
"""
Animate build in progress
"""
pixel = int(progress * strip.numPixels() / 100)
print("progress=%s%% => pixel=%s" % (progress, pixel))
if pixel == strip.numPixels():
travel_time_ms = 1000
else:
travel_time_ms = travel_time_s * 1000
progress_bar_tail_entry(strip, pixel, color=COLOR_BLUE, travel_time_ms=travel_time_ms)
def show_build_finished(strip):
"""Animate build is finished"""
theater_chase(strip, COLOR_WHITE, iterations=20)
def light_check(strip):
"""Check each RGB pixel"""
travel_time = 100
solid(strip, color=COLOR_BLACK)
head_entry(strip, strip.numPixels(), color=COLOR_RED, travel_time_ms=travel_time)
tail_entry(strip, 0, color=COLOR_RED, travel_time_ms=travel_time)
head_entry(strip, strip.numPixels(), color=COLOR_GREEN, travel_time_ms=travel_time)
tail_entry(strip, 0, color=COLOR_GREEN, travel_time_ms=travel_time)
head_entry(strip, strip.numPixels(), color=COLOR_BLUE, travel_time_ms=travel_time)
tail_entry(strip, 0, color=COLOR_BLUE, travel_time_ms=travel_time)
head_entry(strip, strip.numPixels(), color=COLOR_WHITE, travel_time_ms=travel_time)
tail_entry(strip, 0, color=COLOR_WHITE, travel_time_ms=travel_time)
color_shuffle(strip, color=COLOR_RED)
time.sleep(1)
color_shuffle(strip, color=COLOR_BLACK)
time.sleep(1)
color_shuffle(strip, color=COLOR_GREEN)
time.sleep(1)
color_shuffle(strip, color=COLOR_BLACK)
time.sleep(1)
color_shuffle(strip, color=COLOR_BLUE)
time.sleep(1)
color_shuffle(strip, color=COLOR_BLACK)
time.sleep(1)
color_shuffle(strip, color=COLOR_WHITE)
time.sleep(1)
color_shuffle(strip, color=COLOR_BLACK)
time.sleep(1)
solid(strip, color=COLOR_BLACK)
def validate_brightness_value(value):
"""Validate the brightness value"""
error_message = "The value of brightness must be between %d and %d."
return validate_range(value, 0, 255, error_message)
def validate_range(value, min_value, max_value, error_message):
"""Validate a value is between a given range (inclusive)"""
x = int(value)
if min_value <= x <= max_value:
return x
raise argparse.ArgumentTypeError(error_message % (min_value, max_value))
def validate_led_count(value):
"""Validate the LED Count"""
error_message = "The number of LED on a single strip should be between %d and %d"
return validate_range(value, 1, MAX_LED_COUNT, error_message)
def validate_poll_period(value):
"""Validate the period to poll for status"""
seconds_per_day = 60 * 60 * 24
error_message = "The period to poll for status change should be between between %d and %d"
return validate_range(value, 1, seconds_per_day, error_message)
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--check', action='store_true', help='Run a few light patterns to check the LED pixels.')
parser.add_argument('-b', '--brightness', action='store', type=validate_brightness_value,
help='The brightness level of the LEDs, where 0 is darkest and 255 is brightest',
default=LED_BRIGHTNESS)
parser.add_argument('-d', '--donotclear', action='store_true',
help='Leave the display as is without clearing it on exit')
parser.add_argument('-j', '--job', action='store',
help='Jenkins job URL. Example: http://somejenkins.com/job/job_name/')
parser.add_argument('-s', '--verifyssl', action='store_true',
help='Verify SSL when provided HTTPS URL')
parser.add_argument('-p', '--pin', action='store', type=int, choices=[12, 18, 13, 19],
                        help='The PWM-capable GPIO pin used to drive the LEDs. '
                             'On the Raspberry Pi 3, pins 12 and 18 are on PWM channel 0 and pins 13 and 19 are on PWM channel 1.',
default=LED_PIN)
parser.add_argument('-l', '--length', action='store', type=validate_led_count,
help='The number of LEDs in the LED strip', default=LED_COUNT)
parser.add_argument('-f', '--pollperiod', action='store', type=validate_poll_period,
help='The number of seconds to wait between polling for status.', default=POLL_PERIOD_SECONDS)
return parser.parse_args()
if __name__ == '__main__':
args = process_args()
if args.length <= 0:
print("Not enough LED to work with!")
sys.exit()
print("Pin used %d" % args.pin)
pwm_channel = 0
if args.pin in CHANNEL_1_PWM_PINS:
pwm_channel = 1
strip = Adafruit_NeoPixel(args.length, args.pin, LED_FREQ_HZ, LED_DMA, LED_INVERT, args.brightness, pwm_channel)
strip.begin()
if args.check:
try:
light_check(strip)
except KeyboardInterrupt:
print("\nKeyboard Interrupt signal received.")
if not args.donotclear:
print("Clearing all LEDs...")
color_wipe(strip, COLOR_BLACK, wait_ms=5)
finally:
sys.exit()
if not args.job:
print("A Jenkins Job URL is required to query for its status. "
"Run this command again with the -h or --help on how to specify them.")
sys.exit()
job_url = args.job + "/lastBuild/api/json"
progress_url = job_url + "?tree=executor[progress]"
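# Note: both URLs use Jenkins' remote-access JSON API. <job>/lastBuild/api/json
# returns the last build's JSON, whose "result" field stays null while the build
# is still running; the ?tree=executor[progress] variant narrows the reply to the
# running executor's progress percentage, which drives the LED animation below.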
print('Monitor job: %s' % job_url)
print("")
print('Press Ctrl-C to quit.')
is_building = True
while True:
try:
while True:
response = get(job_url, verify=args.verifyssl)
job_status = response.json()
if job_status["result"] == JENKINS_NO_RESULT:
if not is_building:
show_build_started(strip)
is_building = True
response = get(progress_url, verify=args.verifyssl)
progress = int(response.json()["executor"]["progress"])
show_build_in_progress(strip, progress, travel_time_s=args.pollperiod)
else:
if is_building:
show_build_in_progress(strip, 100, travel_time_s=1)
show_build_finished(strip)
print("Done with status: %s" % job_status["result"])
if job_status["result"] == JENKINS_FAILURE:
show_failure(strip)
elif job_status["result"] == JENKINS_SUCCESS:
show_success(strip)
elif job_status["result"] == JENKINS_ABORTED:
show_aborted(strip)
is_building = False
time.sleep(5)
except (KeyboardInterrupt, SystemExit):
print("\nKeyboard Interrupt signal received.")
if not args.donotclear:
print("Clearing all LEDs...")
color_wipe(strip, COLOR_BLACK, wait_ms=5)
sys.exit()
except Exception as e:
print(e)
print("\nSleep 1 minutes and will try again")
rainbow_pixel_duration(strip, 1, 60)
| 36.239506
| 119
| 0.644614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,413
| 0.232541
|
a7a6463865702f264ac1badc4373232d010a238b
| 3,649
|
py
|
Python
|
RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py
|
dgumenyuk/Environment_generation
|
092fbecdc208f84aa58f2ccd3522262984e79cda
|
[
"MIT"
] | null | null | null |
RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py
|
dgumenyuk/Environment_generation
|
092fbecdc208f84aa58f2ccd3522262984e79cda
|
[
"MIT"
] | null | null | null |
RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/MyTcMutation.py
|
dgumenyuk/Environment_generation
|
092fbecdc208f84aa58f2ccd3522262984e79cda
|
[
"MIT"
] | null | null | null |
import numpy as np
from pymoo.model.mutation import Mutation
import copy
import config as cf
import random as rm
class MyTcMutation(Mutation):
def __init__(self):
super().__init__()
def _do(self, problem, X, **kwargs):
# print("X mutate", X.shape)
# for each individual
for i in range(len(X)):
r = np.random.random()
s = X[i, 0]
# with probability cf.ga["mut_rate"], mutate this individual
if r < cf.ga["mut_rate"]:
#
# For some reason it seems we must make a deep copy
# and replace the original object: pymoo keeps its own copy of
# the best individual, so mutating the object in place would not
# change pymoo's best individual and we would end up
# with an inconsistency in the evaluated fitness
sn = copy.deepcopy(s)
wr = np.random.random()
child = sn.states
child_init = child
if wr < 0.5:
# exchange operator
candidates = list(np.random.randint(1, high=len(child), size=2))
temp = child["st" + str(candidates[0])]
child["st" + str(candidates[0])] = child["st" + str(candidates[1])]
child["st" + str(candidates[1])] = temp
sn.states = child
else:
# change of variable operator
num = int(np.random.randint(1, high=len(child), size=1))
value = np.random.choice(["duration", "temp", "model"])
if value == "duration":
duration_list = []
for m in range(
cf.model["duration_min"], cf.model["duration_max"], 5
):
duration_list.append(m)
child["st" + str(num)][value] = int(
np.random.choice(duration_list)
)
sn.states = child
elif value == "temp":
maximum = cf.model["temp_max"]
minimum = cf.model["temp_min"]
jump = cf.model["jump"]
action = np.random.choice(["inc", "dec"])
temp = child["st" + str(num - 1)][value]
if action == "inc":
if temp + jump > maximum:
result = rm.randint(temp, maximum)
else:
result = rm.randint(temp, temp + jump)
child["st" + str(num)][value] = result
sn.states = child
elif action == "dec":
if temp - jump < minimum:
result = rm.randint(minimum, temp)
else:
result = rm.randint(temp - jump, temp)
child["st" + str(num)][value] = result
sn.states = child
elif value == "model":
model = rm.randint(0, cf.model["model_num"] - 1)
child["st" + str(num)][value] = model
sn.states = child
sn.states = sn.check_states()
sn.novelty = sn.calc_novelty(child_init, sn.states)
X[i, 0] = sn
return X
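# A minimal wiring sketch, assuming the pre-0.5 pymoo API imported above
# (pymoo.model.*). The problem/sampling/crossover classes and the
# cf.ga["pop_size"] / cf.ga["n_gen"] keys are assumptions standing in for the
# study's other GA components, not definitions from this file:
#
#     from pymoo.algorithms.so_genetic_algorithm import GA
#     from pymoo.optimize import minimize
#
#     algorithm = GA(pop_size=cf.ga["pop_size"],
#                    sampling=MyTcSampling(),
#                    crossover=MyTcCrossover(),
#                    mutation=MyTcMutation(),
#                    eliminate_duplicates=False)
#     res = minimize(MyTcProblem(), algorithm, ("n_gen", cf.ga["n_gen"]), seed=1)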
| 34.424528
| 87
| 0.417923
| 3,533
| 0.96821
| 0
| 0
| 0
| 0
| 0
| 0
| 599
| 0.164155
|
a7a7a93a99b79c8510b21680dab63b937c097a0a
| 6,609
|
py
|
Python
|
yufeng_code/models.py
|
BrandonThaiTran/stressed_emotion
|
72a24ae66a41a9cdf811145ada5f4d5ecbe3c680
|
[
"MIT"
] | null | null | null |
yufeng_code/models.py
|
BrandonThaiTran/stressed_emotion
|
72a24ae66a41a9cdf811145ada5f4d5ecbe3c680
|
[
"MIT"
] | null | null | null |
yufeng_code/models.py
|
BrandonThaiTran/stressed_emotion
|
72a24ae66a41a9cdf811145ada5f4d5ecbe3c680
|
[
"MIT"
] | null | null | null |
"""
Defines models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Model(nn.Module):
def __init__(self, opt):
super(Model, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.acoustic_feature_dim = opt.acoustic_feature_dim
self.visual_feature_dim = opt.visual_feature_dim
self.lexical_feature_dim = opt.lexical_feature_dim
self.conv_width_v = opt.conv_width_v
self.conv_width_a = opt.conv_width_a
self.kernel_size_v = opt.kernel_size_v
self.kernel_size_a = opt.kernel_size_a
self.max_pool_width = opt.max_pool_width
self.rnn_layer_num_v = opt.rnn_layer_num_v
self.rnn_layer_num_a = opt.rnn_layer_num_a
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.dropout_rate = opt.dropout_rate
self.conv1d_v1 = nn.Conv1d( in_channels=opt.visual_feature_dim,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v2 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v3 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_a1 = nn.Conv1d( in_channels=opt.acoustic_feature_dim,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a2 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a3 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.maxpool = nn.MaxPool1d(self.max_pool_width)
self.gru_v = nn.GRU(input_size=self.conv_width_v,
num_layers=self.rnn_layer_num_v,
hidden_size=self.rnn_width,
batch_first=True)
self.gru_a = nn.GRU(input_size=self.conv_width_a,
num_layers=self.rnn_layer_num_a,
hidden_size=self.rnn_width,
batch_first=True)
self.linear_l = nn.Linear(self.lexical_feature_dim, self.linear_width_l)
self.batchnorm_v = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_a = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_l = nn.BatchNorm1d(self.linear_width_l)
self.dropout = nn.Dropout(self.dropout_rate)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 3)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward_v(self, x_v):
x = x_v
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_v1(x)))
x = self.relu(self.maxpool(self.conv1d_v2(x)))
x = self.relu(self.maxpool(self.conv1d_v3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_v(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_v(self.dropout(x))
return x
def forward_a(self, x_a):
x = x_a
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_a1(x)))
x = self.relu(self.maxpool(self.conv1d_a2(x)))
x = self.relu(self.maxpool(self.conv1d_a3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_a(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_a(self.dropout(x))
return x
def forward_l(self, x_l):
x = x_l
x = self.relu(self.linear_l(x))
x = self.batchnorm_l(self.dropout(x))
return x
def encoder(self, x_v, x_a, x_l):
if self.visual_modality:
x_v = self.forward_v(x_v)
if self.acoustic_modality:
x_a = self.forward_a(x_a)
if self.lexical_modality:
x_l = self.forward_l(x_l)
if self.visual_modality:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_v, x_a, x_l), 1)
else:
x = torch.cat((x_v, x_a), 1)
else:
if self.lexical_modality:
x = torch.cat((x_v, x_l), 1)
else:
x = x_v
else:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_a, x_l), 1)
else:
x = x_a
else:
x = x_l
return x
def recognizer(self, x):
x = self.relu(self.linear_1(x))
x = self.linear_2(x)
return x
def forward(self, x_v, x_a, x_l):
x = self.encoder(x_v, x_a, x_l)
x = self.recognizer(x)
return x
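# A minimal smoke-test sketch: the hyper-parameters below are assumptions chosen
# only so the three conv/max-pool stages leave a non-empty sequence for the GRUs
# (they are not the values used in the experiments). It builds the model from a
# plain namespace and runs one forward pass on random tensors.
if __name__ == '__main__':
    from types import SimpleNamespace

    opt = SimpleNamespace(
        acoustic_modality=True, visual_modality=True, lexical_modality=True,
        acoustic_feature_dim=40, visual_feature_dim=35, lexical_feature_dim=300,
        conv_width_v=32, conv_width_a=32, kernel_size_v=3, kernel_size_a=3,
        max_pool_width=2, rnn_layer_num_v=1, rnn_layer_num_a=1, rnn_width=64,
        linear_width_l=64, linear_width=64, dropout_rate=0.2)

    model = Model(opt)
    model.apply(init_weights)
    x_v = torch.randn(4, 64, opt.visual_feature_dim)    # (batch, frames, visual features)
    x_a = torch.randn(4, 64, opt.acoustic_feature_dim)  # (batch, frames, acoustic features)
    x_l = torch.randn(4, opt.lexical_feature_dim)       # (batch, lexical features)
    print(model(x_v, x_a, x_l).shape)                   # -> torch.Size([4, 3])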
| 35.342246
| 80
| 0.55606
| 6,148
| 0.930247
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.003329
|
a7a883b149b66dd30bd8e6cfd41ec11cd9ae06f6
| 1,007
|
py
|
Python
|
intercom/api_operations/find.py
|
orikalinski/python-intercom
|
7acd881ac9fa042e88a31f540040be7027edafa1
|
[
"MIT"
] | null | null | null |
intercom/api_operations/find.py
|
orikalinski/python-intercom
|
7acd881ac9fa042e88a31f540040be7027edafa1
|
[
"MIT"
] | null | null | null |
intercom/api_operations/find.py
|
orikalinski/python-intercom
|
7acd881ac9fa042e88a31f540040be7027edafa1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Operation to find an instance of a particular resource."""
from intercom import HttpError
from intercom import utils
class Find(object):
"""A mixin that provides `find` functionality."""
def find(self, **params):
"""Find the instance of the resource based on the supplied parameters."""
collection = utils.resource_class_to_collection_name(
self.collection_class)
if 'id' in params:
object_data = self.client.get(
"/%s/%s" % (collection, params['id']), {})
else:
response = self.client.post("/%s/search" % collection, params)
data_list = response["data"]
if len(data_list) > 1:
raise Exception("There is more than 1 result (%s)" % len(data_list))
object_data = data_list[0]
if object_data is None:
raise HttpError('Http Error - No response entity returned')
return self.collection_class(**object_data)
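# A usage sketch, assuming a collection proxy (e.g. a hypothetical `client.contacts`)
# mixes Find in and supplies `client` and `collection_class`: a lookup by id maps to
# GET /<collection>/<id>, while any other parameters go to POST /<collection>/search
# and must resolve to exactly one object.
#
#     contact = client.contacts.find(id="530370b477ad7120001d")
#     contact = client.contacts.find(email="somebody@example.com")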
| 34.724138
| 84
| 0.606753
| 859
| 0.853029
| 0
| 0
| 0
| 0
| 0
| 0
| 316
| 0.313803
|
a7aa730f8abab95fb0a5ea5e1812ce4926111dc2
| 4,743
|
py
|
Python
|
Python/maximum-number-of-occurrences-of-a-substring.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/maximum-number-of-occurrences-of-a-substring.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/maximum-number-of-occurrences-of-a-substring.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(n)
# Space: O(n)
# 1297 weekly contest 168 12/21/2019
# Given a string s, return the maximum number of occurrences of any substring under the following rules:
#
# The number of unique characters in the substring must be less than or equal to maxLetters.
# The substring size must be between minSize and maxSize inclusive.
# Constraints:
#
# 1 <= s.length <= 10^5
# 1 <= maxLetters <= 26
# 1 <= minSize <= maxSize <= min(26, s.length)
import collections
# rolling hash (Rabin-Karp Algorithm)
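# The hash of a fixed-size window s[l..r] with m = minSize is
#   H = (ord(s[l])*p^(m-1) + ord(s[l+1])*p^(m-2) + ... + ord(s[r])) mod M
# so sliding the window right is two O(1) updates:
#   drop the leftmost char:  H -= ord(s[l]) * p^(m-1)   (p^(m-1) is precomputed as `power`)
#   append the new char:     H  = H * p + ord(s[r])
# Only windows of length exactly minSize are counted: any longer valid substring
# contains a minSize-length substring occurring at least as often, so minSize
# windows alone are enough to find the maximum.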
class Solution(object):
def maxFreq(self, s, maxLetters, minSize, maxSize):
"""
:type s: str
:type maxLetters: int
:type minSize: int
:type maxSize: int
:rtype: int
"""
M, p = 10**9+7, 113
power, rolling_hash = pow(p, minSize-1, M), 0
left = 0
lookup, count = collections.defaultdict(int), collections.defaultdict(int)
for right in range(len(s)):
count[s[right]] += 1
if right-left+1 > minSize:
count[s[left]] -= 1
rolling_hash = (rolling_hash - ord(s[left])*power) % M
if count[s[left]] == 0:
count.pop(s[left])
left += 1
rolling_hash = (rolling_hash*p + ord(s[right])) % M
if right-left+1 == minSize and len(count) <= maxLetters:
lookup[rolling_hash] += 1
return max(lookup.values() or [0])
# Time: O(m * n), m = 26
# Space: O(m * n)
class Solution2(object):
def maxFreq2(self, s, maxLetters, minSize, maxSize):
"""
:type s: str
:type maxLetters: int
:type minSize: int
:type maxSize: int
:rtype: int
"""
lookup = {}
for right in range(minSize-1, len(s)):
word = s[right-minSize+1:right+1]
if word in lookup:
lookup[word] += 1
elif len(collections.Counter(word)) <= maxLetters:
lookup[word] = 1
return max(lookup.values() or [0])
# O(n*size) where size = maxSize-minSize+1
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
cnt=collections.defaultdict(int)
n=len(s)
st=0
ed=st+minSize
while st<n:
letters=set(s[st:ed])
while len(letters)<=maxLetters and ed-st<=maxSize and ed<=n:
cnt[s[st:ed]]+=1
ed+=1
if ed<n:
letters.add(s[ed])
st+=1
ed=st+minSize
return max(cnt.values()) if cnt else 0
def maxFreq_506140166(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
cnt=collections.defaultdict(int)
n=len(s)
st=ed=0
while st<n:
letters=set()
while len(letters)<=maxLetters and ed-st<=maxSize and ed<=n:
if ed-st>=minSize:
cnt[s[st:ed]]+=1
if ed<n:
letters.add(s[ed])
ed+=1
st+=1
ed=st
return max(cnt.values()) if cnt else 0
# TLE: O(n*size) where size = maxSize-minSize+1
# TLE because there is no pruning: when letterCnts > maxLetters we should stop. The problem is we
# cannot maintain the count of unique letters if we exit the current iteration early.
def maxFreq_ming(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
sizes = maxSize-minSize+1
dp = [[0] * 26 for _ in range(sizes)] # letters for all valid sizes
letterCnts = [0]*(sizes) # count of unique letters for all valid sizes
subsCounter = collections.defaultdict(int)
ans = 0
for j, c in enumerate(s):
for i in range(len(dp)):
dp[i][ord(c)-ord('a')] += 1
if dp[i][ord(c)-ord('a')] == 1:
letterCnts[i] += 1
if sum(dp[i]) == minSize+i:
pos_to_discard = j+1-minSize-i
if letterCnts[i] <= maxLetters:
subs = s[pos_to_discard:j+1]
subsCounter[subs] += 1
ans = max(ans, subsCounter[subs])
dp[i][ord(s[pos_to_discard])-ord('a')] -= 1
if dp[i][ord(s[pos_to_discard])-ord('a')] == 0:
letterCnts[i] -= 1
return ans
print(Solution().maxFreq("babcbceccaaacddbdaedbadcddcbdbcbaaddbcabcccbacebda",1,1,1)) # 13
print(Solution().maxFreq("aababcaab", 2,3,4)) # 2
print(Solution().maxFreq("aaaa", 1,3,3)) #2
print(Solution().maxFreq("aabcabcab",2,2,3)) # 3
print(Solution().maxFreq("abcde",2,3,3)) # 0
| 35.395522
| 103
| 0.527093
| 3,907
| 0.82374
| 0
| 0
| 0
| 0
| 0
| 0
| 1,243
| 0.26207
|
a7ab53160a52a5d1c5533255c3c66a5f638a75da
| 2,899
|
py
|
Python
|
source/protocol/image_0203_protocol.py
|
chopin1993/protocolmaster-20210731
|
e23e235ee00b940a4161c606415574d2a52c701c
|
[
"Apache-2.0"
] | null | null | null |
source/protocol/image_0203_protocol.py
|
chopin1993/protocolmaster-20210731
|
e23e235ee00b940a4161c606415574d2a52c701c
|
[
"Apache-2.0"
] | null | null | null |
source/protocol/image_0203_protocol.py
|
chopin1993/protocolmaster-20210731
|
e23e235ee00b940a4161c606415574d2a52c701c
|
[
"Apache-2.0"
] | null | null | null |
# encoding:utf-8
from .protocol import Protocol
from .protocol import find_head
from .codec import BinaryEncoder
from tools.converter import hexstr2bytes, str2hexstr
from .data_container import *
import time
import struct
from protocol.data_container import DataStruct
IMG0203_HEAD = bytes([0x54,0x17,0xfe,0x02])
IMG0203_TAIL = 0x03
class ThremalImageData(DataStruct):
def __init__(self, width, height, data):
self.width = width
self.height = height
self.data = data
def get_data(self):
return self.data
def __str__(self):
return "img data"
# 'u8:STC=0x02 u8:CMD u32:Length byte[Length]:Data u8:CS u8:END=0x03'
class ImageProtocol0203(Protocol):
def __init__(self):
super(ImageProtocol0203, self).__init__()
self.image_data = None
self.did_unit = None
@staticmethod
def create_frame(*args, **kwargs):
protocol = ImageProtocol0203()
protocol.did_unit = args[0]
return protocol
def __str__(self):
if self.did_unit:
return str2hexstr(self.did_unit)
if self.image_data is not None:
return str(self.image_data)
return "not handle data"
def encode(self, encoder):
encoder.encode_bytes(self.did_unit)
return encoder.encode_char(5)
def decode(self, decoder):
decoder.decode_bytes(1) # skip start
cmd = decoder.decode_bytes(1)
self.length = decoder.decode_uint()
self.width = decoder.decode_u16()
self.height = decoder.decode_u16()
self.image_data = decoder.decode_bytes(self.width * self.height * 2)
return ThremalImageData(self.width, self.height, self.image_data)
@staticmethod
def find_frame_in_buff(data):
start_pos = 0
total_len = len(data)
show_time = False
start = time.time()
assert not isinstance(data, str)
while start_pos < (len(data) - 11):
start_pos = find_head(data, start_pos, IMG0203_HEAD)
if start_pos == -1:
break
start_pos += 3  # skip the frame head
frame_data = data[start_pos:]
if len(frame_data) < 8:
break
data_len = struct.unpack("I", frame_data[2:6])[0]
if data_len + 8 > len(frame_data):
start_pos += 1
continue
if frame_data[6 + data_len] != checksum(frame_data[1:data_len + 6]):
print("check error")
show_time = True
if frame_data[7 + data_len] != IMG0203_TAIL:
start_pos += 1
show_time = True
print("tail error")
else:
return True, start_pos, data_len + 8
if show_time:
print("time const:" ,time.time()-start,"data length",total_len)
return False, 0, 0
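# A receive-loop sketch: scan the accumulated byte buffer for one complete
# 0x54 0x17 0xFE 0x02 ... 0x03 frame, then decode the thermal payload.
# `BinaryDecoder` is assumed here as the decoding counterpart of the
# BinaryEncoder imported above; the actual decoder class lives in .codec.
#
#     found, start, length = ImageProtocol0203.find_frame_in_buff(buffer)
#     if found:
#         image = ImageProtocol0203().decode(BinaryDecoder(buffer[start:start + length]))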
| 30.840426
| 80
| 0.600552
| 2,487
| 0.857882
| 0
| 0
| 1,331
| 0.459124
| 0
| 0
| 191
| 0.065885
|
a7ac0fd2c72d5a1ba2a439615b6e2a8a3fe255b0
| 936
|
py
|
Python
|
ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/methodmeta/ConfigUCEstimateImpactMeta.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the meta information of ConfigUCEstimateImpact ExternalMethod."""
from ..ucsccoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ConfigUCEstimateImpact", "configUCEstimateImpact", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_configs": MethodPropertyMeta("InConfigs", "inConfigs", "ConfigMap", "Version142b", "Input", True),
"in_impact_analyzer_id": MethodPropertyMeta("InImpactAnalyzerId", "inImpactAnalyzerId", "Xs:unsignedLong", "Version142b", "Input", False),
"out_impact_analyzer_dn": MethodPropertyMeta("OutImpactAnalyzerDn", "outImpactAnalyzerDn", "ReferenceObject", "Version142b", "Output", False),
}
prop_map = {
"cookie": "cookie",
"inConfigs": "in_configs",
"inImpactAnalyzerId": "in_impact_analyzer_id",
"outImpactAnalyzerDn": "out_impact_analyzer_dn",
}
| 44.571429
| 146
| 0.746795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 607
| 0.648504
|
a7ad127304af82024c33b51ab7d7e16625068796
| 2,315
|
py
|
Python
|
src/midiutil/midiosc.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | 7
|
2015-01-05T06:32:49.000Z
|
2020-10-30T19:29:07.000Z
|
src/midiutil/midiosc.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | null | null | null |
src/midiutil/midiosc.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | 4
|
2016-03-09T22:29:26.000Z
|
2021-04-07T13:52:28.000Z
|
"""
This module provides an interface to MIDI things for OSC
"""
import sys
import time
import traceback
import thread
import threading
import copy
import string
import re
from threading import Thread,Lock
from math import sqrt
from ctypes import *
from time import sleep
from traceback import format_exc
from array import array
from nosuch.midiutil import *
from nosuch.oscutil import *
class MidiOscHardware(MidiBaseHardware):
def __init__(self,input_name=None,output_name=None):
if input_name == None:
input_name = "9998@127.0.0.1"
self.input_name = input_name
if output_name == None:
output_name = "9999@127.0.0.1"
self.output_name = output_name
def input_devices(self):
return [self.input_name]
def output_devices(self):
return [self.output_name]
def get_input(self,input_name=None):
if input_name == None:
input_name = self.input_name
port = re.compile(".*@").search(input_name).group()[:-1]
host = re.compile("@.*").search(input_name).group()[1:]
return MidiOscHardwareInput(host,port)
def get_output(self,output_name=None):
if output_name == None:
output_name = self.output_name
port = re.compile(".*@").search(output_name).group()[:-1]
host = re.compile("@.*").search(output_name).group()[1:]
return MidiOscHardwareOutput(host,port)
class MidiOscHardwareInput(MidiBaseHardwareInput):
def __init__(self,inhost,inport):
raise Exception, "MidiOscHardwareInput isn't finished"
def open(self):
if Midi.oneThread:
Midi.oneThread._add_midiin(self)
def close(self):
if Midi.oneThread:
Midi.oneThread._remove_midiin(self)
def __str__(self):
return 'MidiInput(name="debug")'
def to_xml(self):
return '<midi_input name="debug"/>'
class MidiOscHardwareOutput(MidiBaseHardwareOutput):
def __init__(self,outhost,outport):
self.recipient = OscRecipient(outhost,outport)
def is_open(self):
return True
def open(self):
pass
def close(self):
pass
def write_msg(self,m):
o = m.to_osc()
b = createBinaryMsg(o[0],o[1])
r = self.recipient
r.osc_socket.sendto(b,(r.osc_addr,r.osc_port))
def schedule(self,msg,time=None):
Midi.schedule(self,msg,time)
def __str__(self):
return 'MidiOutput(name="debug")'
def to_xml(self):
return '<midi_output name="debug"/>'
"""
This is executed when module is loaded
"""
| 21.238532
| 59
| 0.72959
| 1,868
| 0.806911
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.132613
|
a7ae12f206db8056cd85668443dc8c96311de3a2
| 7,128
|
py
|
Python
|
argentum-api/api/tests/test_guest_view.py
|
devium/argentum
|
2bbb0f663fe9be78d106b1afa409b094da449519
|
[
"MIT"
] | 1
|
2019-10-07T09:47:08.000Z
|
2019-10-07T09:47:08.000Z
|
argentum-api/api/tests/test_guest_view.py
|
devium/argentum
|
2bbb0f663fe9be78d106b1afa409b094da449519
|
[
"MIT"
] | null | null | null |
argentum-api/api/tests/test_guest_view.py
|
devium/argentum
|
2bbb0f663fe9be78d106b1afa409b094da449519
|
[
"MIT"
] | null | null | null |
import copy
import logging
from api.models import Transaction, BonusTransaction, Order, Tag, OrderItem
from api.models.guest import Guest
from api.models.label import Label
from api.tests.data.guests import TestGuests
from api.tests.data.statuses import TestStatuses
from api.tests.data.users import TestUsers
from api.tests.utils.combined_test_case import CombinedTestCase
LOG = logging.getLogger(__name__)
class GuestViewTestCase(CombinedTestCase):
REFRESH_OBJECTS = [TestGuests]
def test_list(self):
self.login(TestUsers.RECEPTION_EXT)
self.perform_list_test('/guests', TestGuests.SAVED)
def test_list_ordered(self):
self.login(TestUsers.RECEPTION_EXT)
response = self.client.get('/guests?ordering=-code')
self.assertPksEqual(response.data, [TestGuests.SHEELAH, TestGuests.ROBY])
response = self.client.get('/guests?ordering=balance')
self.assertPksEqual(response.data, [TestGuests.SHEELAH, TestGuests.ROBY])
def test_list_search(self):
self.login(TestUsers.RECEPTION_EXT)
response = self.client.get('/guests?code=001')
self.assertEqual(response.status_code, 200)
self.assertPksEqual(response.data, [TestGuests.ROBY])
response = self.client.get('/guests?name=roby')
self.assertPksEqual(response.data, [TestGuests.ROBY])
response = self.client.get('/guests?mail=rbrush')
self.assertPksEqual(response.data, [TestGuests.ROBY])
response = self.client.get(f'/guests?status={TestStatuses.PAID.id}')
self.assertPksEqual(response.data, [TestGuests.ROBY])
response = self.client.get('/guests?status=null')
self.assertPksEqual(response.data, [])
url = '/guests?code=DEMO&name=el&mail=sohu.com'
expected_response = copy.deepcopy(self.RESPONSES[f'GET{url}'])
self.patch_json_ids(expected_response)
response = self.client.get(url)
self.assertPksEqual(response.data, [TestGuests.ROBY])
self.assertJSONEqual(response.content, expected_response)
url = f'/guests?card={TestGuests.ROBY.card}'
expected_response = copy.deepcopy(self.RESPONSES[f'GET{url}'])
self.patch_json_ids(expected_response)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content, expected_response)
response = self.client.get('/guests?card=notfound')
self.assertEqual(response.status_code, 404)
def test_create_min(self):
self.login(TestUsers.RECEPTION_EXT)
self.perform_create_test('/guests', TestGuests, '#min', '#min')
def test_create_max(self):
self.login(TestUsers.RECEPTION_EXT)
self.perform_create_test('/guests', TestGuests, '#max', '#max')
def test_update(self):
self.login(TestUsers.RECEPTION_EXT)
self.perform_update_test('/guests', TestGuests)
def test_patch_readonly(self):
self.login(TestUsers.ADMIN_EXT)
mutable_fields = {
'checked_in': "2019-12-31T22:01:00Z",
'code': '123',
'name': 'Jimmy',
'mail': 'jimmy@cherpcherp.org',
'status': TestStatuses.PAID.id,
'card': '1212',
}
immutable_fields = {
'balance': '5.00',
'bonus': '3.00'
}
self.assertPatchReadonly(f'/guests/{TestGuests.ROBY.id}', mutable_fields, immutable_fields)
def test_list_create_update(self):
self.login(TestUsers.RECEPTION_EXT)
identifier = 'PATCH/guests/list_update'
expected_response = copy.deepcopy(self.RESPONSES[identifier])
request = self.REQUESTS[identifier]
response = self.client.patch('/guests/list_update', request)
self.assertEqual(response.status_code, 201)
self.patch_object_ids(expected_response, response.data)
self.assertValueEqual(
Guest.objects.all(),
[TestGuests.ROBY_LIST_PATCHED, TestGuests.SHEELAH, TestGuests.JOHANNA_MIN]
)
self.patch_json_ids(expected_response)
self.assertJSONEqual(response.content, expected_response)
def test_delete_all(self):
self.login(TestUsers.ADMIN_EXT)
response = self.client.delete('/guests/delete_all')
self.assertEqual(response.status_code, 204)
self.assertValueEqual(Guest.objects.all(), [])
self.assertValueEqual(Transaction.objects.all(), [])
self.assertValueEqual(BonusTransaction.objects.all(), [])
self.assertValueEqual(Order.objects.all(), [])
self.assertValueEqual(OrderItem.objects.all(), [])
self.assertValueEqual(Tag.objects.all(), [])
self.assertValueEqual(Label.objects.all(), [])
def test_permissions(self):
self.perform_permission_test(
'/guests',
list_users=[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT],
list_by_card_users=[TestUsers.ADMIN_EXT, TestUsers.TERMINAL_EXT, TestUsers.RECEPTION_EXT],
retrieve_users=[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT],
create_users=[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT],
update_users=[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT],
delete_users=[],
card_parameter='card',
card=TestGuests.ROBY.card,
detail_id=TestGuests.ROBY.id,
create_suffix='#max'
)
self.assertPermissions(
lambda: self.client.get(f'/guests?mail={TestGuests.ROBY.mail}'),
[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT]
)
self.assertPermissions(
lambda: self.client.patch('/guests/list_update', self.REQUESTS['PATCH/guests/list_update']),
[TestUsers.ADMIN_EXT, TestUsers.RECEPTION_EXT]
)
self.assertPermissions(
lambda: self.client.delete('/guests/delete_all'),
[TestUsers.ADMIN_EXT]
)
def test_constraints(self):
self.login(TestUsers.ADMIN_EXT)
# Empty card is allowed.
body = {**self.REQUESTS['POST/guests#max'], **{'card': None}}
response = self.client.post('/guests', body)
self.assertEqual(response.status_code, 201)
# Code has to be unique.
body = {**self.REQUESTS['POST/guests#max'], **{'code': TestGuests.ROBY.code, 'card': 'CARD1'}}
response = self.client.post('/guests', body)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()['code'][0], 'guest with this code already exists.')
# Card has to be unique.
body = {**self.REQUESTS['POST/guests#max'], **{'code': 'CODE1', 'card': TestGuests.ROBY.card}}
response = self.client.post('/guests', body)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json()['card'][0], 'guest with this card already exists.')
def test_str(self):
LOG.debug(TestGuests.ROBY)
self.assertEqual(
str(TestGuests.ROBY),
f'Guest(id={TestGuests.ROBY.id},name="Roby Brushfield",code="DEMO-00001")'
)
| 40.271186
| 104
| 0.658951
| 6,715
| 0.942059
| 0
| 0
| 0
| 0
| 0
| 0
| 1,068
| 0.149832
|
a7ae40689370a8b6c40572fb34aab6f86b6f10fd
| 5,106
|
py
|
Python
|
nonebot/adapters/qqguild/message.py
|
nonebot/adapter-qqguild
|
a3e4d353bfdaafb296743bc0f15ed5d643c64d85
|
[
"MIT"
] | 39
|
2021-12-23T14:26:41.000Z
|
2022-03-22T14:11:19.000Z
|
nonebot/adapters/qqguild/message.py
|
nonebot/adapter-qqguild
|
a3e4d353bfdaafb296743bc0f15ed5d643c64d85
|
[
"MIT"
] | 4
|
2022-01-22T17:59:50.000Z
|
2022-03-22T12:40:10.000Z
|
nonebot/adapters/qqguild/message.py
|
nonebot/adapter-qqguild
|
a3e4d353bfdaafb296743bc0f15ed5d643c64d85
|
[
"MIT"
] | 2
|
2022-01-16T02:38:51.000Z
|
2022-03-01T15:48:36.000Z
|
import re
from typing import Any, Type, Tuple, Union, Iterable
from nonebot.typing import overrides
from nonebot.adapters import Message as BaseMessage
from nonebot.adapters import MessageSegment as BaseMessageSegment
from .utils import escape, unescape
from .api import Message as GuildMessage
from .api import MessageArk, MessageEmbed
class MessageSegment(BaseMessageSegment["Message"]):
@classmethod
@overrides(BaseMessageSegment)
def get_message_class(cls) -> Type["Message"]:
return Message
@staticmethod
def ark(ark: MessageArk) -> "Ark":
return Ark("ark", data={"ark": ark})
@staticmethod
def embed(embed: MessageEmbed) -> "Embed":
return Embed("embed", data={"embed": embed})
@staticmethod
def emoji(id: str) -> "Emoji":
return Emoji("emoji", data={"id": id})
@staticmethod
def image(url: str) -> "Attachment":
return Attachment("attachment", data={"url": url})
@staticmethod
def mention_user(user_id: int) -> "MentionUser":
return MentionUser("mention_user", {"user_id": str(user_id)})
@staticmethod
def mention_channel(channel_id: int) -> "MentionChannel":
return MentionChannel("mention_channel", {"channel_id": str(channel_id)})
@staticmethod
def text(content: str) -> "Text":
return Text("text", {"text": content})
@overrides(BaseMessageSegment)
def is_text(self) -> bool:
return self.type == "text"
class Text(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return escape(self.data["text"])
class Emoji(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<emoji:{self.data['id']}>"
class MentionUser(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<@{self.data['user_id']}>"
class MentionEveryone(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return "@everyone"
class MentionChannel(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<#{self.data['channel_id']}>"
class Attachment(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<attachment:{self.data['url']}>"
class Embed(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<embed:{self.data['embed']}>"
class Ark(MessageSegment):
@overrides(MessageSegment)
def __str__(self) -> str:
return f"<ark:{self.data['ark']}>"
class Message(BaseMessage[MessageSegment]):
@classmethod
@overrides(BaseMessage)
def get_segment_class(cls) -> Type[MessageSegment]:
return MessageSegment
@overrides(BaseMessage)
def __add__(
self, other: Union[str, MessageSegment, Iterable[MessageSegment]]
) -> "Message":
return super(Message, self).__add__(
MessageSegment.text(other) if isinstance(other, str) else other
)
@overrides(BaseMessage)
def __radd__(
self, other: Union[str, MessageSegment, Iterable[MessageSegment]]
) -> "Message":
return super(Message, self).__radd__(
MessageSegment.text(other) if isinstance(other, str) else other
)
@staticmethod
@overrides(BaseMessage)
def _construct(msg: str) -> Iterable[MessageSegment]:
text_begin = 0
for embed in re.finditer(
r"\<(?P<type>(?:@|#|emoji:))!?(?P<id>\w+?)\>",
msg,
):
content = msg[text_begin : embed.pos + embed.start()]
if content:
yield Text("text", {"text": unescape(content)})
text_begin = embed.pos + embed.end()
if embed.group("type") == "@":
yield MentionUser("mention_user", {"user_id": embed.group("id")})
elif embed.group("type") == "#":
yield MentionChannel(
"mention_channel", {"channel_id": embed.group("id")}
)
else:
yield Emoji("emoji", {"id": embed.group("id")})
content = msg[text_begin:]
if content:
yield Text("text", {"text": unescape(msg[text_begin:])})
@classmethod
def from_guild_message(cls, message: GuildMessage) -> "Message":
msg = Message()
if message.content:
msg.extend(Message(message.content))
if message.attachments:
msg.extend(
Attachment("attachment", data={"url": seg.url})
for seg in message.attachments
if seg.url
)
if message.embeds:
msg.extend(Embed("embed", data={"embed": seg}) for seg in message.embeds)
if message.ark:
msg.append(Ark("ark", data={"ark": message.ark}))
return msg
def extract_content(self) -> str:
return "".join(
str(seg)
for seg in self
if seg.type
in ("text", "emoji", "mention_user", "mention_everyone", "mention_channel")
)
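# A short illustration of how the pieces fit together (not taken from the
# adapter's docs): plain strings are parsed by _construct, so guild markup for
# mentions and emojis round-trips into typed segments, and strings/segments
# concatenate through __add__/__radd__.
#
#     msg = Message("hi <@123456> <emoji:4>")
#     msg += MessageSegment.mention_channel(42)
#     msg.extract_content()   # -> 'hi <@123456> <emoji:4><#42>'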
| 29.859649
| 87
| 0.607521
| 4,736
| 0.927536
| 905
| 0.177242
| 4,034
| 0.790051
| 0
| 0
| 696
| 0.13631
|
a7ae54fa73c46e74acbf26edef75a93f9daec057
| 2,689
|
py
|
Python
|
creme/metrics/__init__.py
|
Raul9595/creme
|
39cec7ac27ccd40ff0a7bdd6bceaf7ce25c1a8da
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T03:06:46.000Z
|
2020-07-27T03:06:46.000Z
|
creme/metrics/__init__.py
|
2torus/creme
|
bcc5e2a0155663a1f0ba779c68f23456695bcb54
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T06:24:42.000Z
|
2022-02-10T06:24:42.000Z
|
creme/metrics/__init__.py
|
igorol/creme
|
60977c4accfdca08cfd76a162095ff738ef87281
|
[
"BSD-3-Clause"
] | 1
|
2021-04-16T08:27:14.000Z
|
2021-04-16T08:27:14.000Z
|
"""
A set of metrics used in machine learning that can be computed in a streaming fashion, without any
loss in precision.
"""
from .accuracy import Accuracy
from .accuracy import RollingAccuracy
from .confusion import ConfusionMatrix
from .confusion import RollingConfusionMatrix
from .cross_entropy import CrossEntropy
from .cross_entropy import RollingCrossEntropy
from .fbeta import F1
from .fbeta import FBeta
from .fbeta import MacroF1
from .fbeta import MacroFBeta
from .fbeta import MicroF1
from .fbeta import MicroFBeta
from .fbeta import MultiFBeta
from .fbeta import RollingF1
from .fbeta import RollingFBeta
from .fbeta import RollingMacroF1
from .fbeta import RollingMacroFBeta
from .fbeta import RollingMicroF1
from .fbeta import RollingMicroFBeta
from .fbeta import RollingMultiFBeta
from .jaccard import Jaccard
from .log_loss import LogLoss
from .log_loss import RollingLogLoss
from .mae import MAE
from .mae import RollingMAE
from .mcc import MCC
from .mcc import RollingMCC
from .mse import MSE
from .mse import RollingMSE
from .multioutput import RegressionMultiOutput
from .precision import MacroPrecision
from .precision import MicroPrecision
from .precision import Precision
from .precision import RollingMacroPrecision
from .precision import RollingMicroPrecision
from .precision import RollingPrecision
from .recall import MacroRecall
from .recall import MicroRecall
from .recall import Recall
from .recall import RollingMacroRecall
from .recall import RollingMicroRecall
from .recall import RollingRecall
from .rmse import RMSE
from .rmse import RollingRMSE
from .rmsle import RMSLE
from .rmsle import RollingRMSLE
from .roc_auc import ROCAUC
from .smape import RollingSMAPE
from .smape import SMAPE
__all__ = [
'Accuracy',
'ConfusionMatrix',
'CrossEntropy',
'F1',
'FBeta',
'Jaccard',
'LogLoss',
'MAE',
'MacroF1',
'MacroFBeta',
'MacroPrecision',
'MacroRecall',
'MCC',
'MicroF1',
'MicroFBeta',
'MicroPrecision',
'MicroRecall',
'MSE',
'MultiFBeta',
'Precision',
'Recall',
'RegressionMultiOutput',
'RMSE',
'RMSLE',
'ROCAUC',
'RollingAccuracy',
'RollingConfusionMatrix',
'RollingCrossEntropy',
'RollingF1',
'RollingFBeta',
'RollingLogLoss',
'RollingMAE',
'RollingMacroF1',
'RollingMacroFBeta',
'RollingMacroPrecision',
'RollingMacroRecall',
'RollingMCC',
'RollingMicroF1',
'RollingMicroFBeta',
'RollingMicroPrecision',
'RollingMicroRecall',
'RollingMSE',
'RollingMultiFBeta',
'RollingPrecision',
'RollingRecall',
'RollingRMSE',
'RollingRMSLE',
'RollingSMAPE',
'SMAPE'
]
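# A short streaming example (illustrative, not a doctest): each metric folds in
# one (y_true, y_pred) pair at a time via update() and can be read at any point
# with get().
#
#     >>> from creme import metrics
#     >>> metric = metrics.Accuracy()
#     >>> for y_true, y_pred in [(True, True), (True, False), (False, False)]:
#     ...     metric = metric.update(y_true, y_pred)
#     >>> metric.get()
#     0.666...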
| 25.130841
| 98
| 0.748605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 780
| 0.290071
|
a7aefa18f5080501a0e0759b55fb0c060403255f
| 628
|
py
|
Python
|
cd/checks/is_player_connected.py
|
Axelware/CD-bot
|
8f923c09c3c8cfcff48813c6dd11ac50c410af72
|
[
"MIT"
] | 2
|
2021-12-10T00:36:59.000Z
|
2021-12-11T09:11:46.000Z
|
cd/checks/is_player_connected.py
|
Axelware/CD-bot
|
8f923c09c3c8cfcff48813c6dd11ac50c410af72
|
[
"MIT"
] | 2
|
2021-12-10T01:53:10.000Z
|
2021-12-10T09:06:01.000Z
|
cd/checks/is_player_connected.py
|
Axelware/CD-bot
|
8f923c09c3c8cfcff48813c6dd11ac50c410af72
|
[
"MIT"
] | 1
|
2021-12-10T00:37:07.000Z
|
2021-12-10T00:37:07.000Z
|
# Future
from __future__ import annotations
# Standard Library
from collections.abc import Callable
from typing import Literal, TypeVar
# Packages
from discord.ext import commands
# Local
from cd import custom, exceptions
__all__ = (
"is_player_connected",
)
T = TypeVar("T")
def is_player_connected() -> Callable[[T], T]:
async def predicate(ctx: custom.Context) -> Literal[True]:
if not ctx.voice_client or not ctx.voice_client.is_connected():
raise exceptions.EmbedError(description="I'm not connected to any voice channels.")
return True
return commands.check(predicate)
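# A usage sketch: the factory returns a commands.check, so it stacks on top of a
# command callback. The `pause` command and the voice-client call below are
# hypothetical, standing in for the bot's real commands.
#
#     @commands.command()
#     @is_player_connected()
#     async def pause(ctx: custom.Context) -> None:
#         await ctx.voice_client.set_pause(True)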
| 19.030303
| 95
| 0.719745
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.394904
| 109
| 0.173567
|
a7af30cf8c221d4921a6b97b3773efaa600307b2
| 986
|
py
|
Python
|
src/test/base.py
|
vincent-lg/levantine
|
21d5296eec2161d9fae404bbfd9d538ac7970f2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/base.py
|
vincent-lg/levantine
|
21d5296eec2161d9fae404bbfd9d538ac7970f2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/base.py
|
vincent-lg/levantine
|
21d5296eec2161d9fae404bbfd9d538ac7970f2c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Base test for TalisMUD tests.
It creates an in-memory database for each test, so they run in independent
environments.
"""
import unittest
from pony.orm import db_session
from data.base import db
from data.properties import LazyPropertyDescriptor
# Bind to a temporary database
db.bind(provider="sqlite", filename=":memory:")
db.generate_mapping(create_tables=True)
class BaseTest(unittest.TestCase):
"""Base class for TalisMUD unittests."""
def setUp(self):
"""Called before each test method."""
db.create_tables()
db_session._enter()
def tearDown(self):
"""Called after the test method."""
# Reset lazy properties
for entity in db.entities.values():
for key in dir(entity):
value = getattr(entity, key)
if isinstance(value, LazyPropertyDescriptor):
value.memory.clear()
db_session.__exit__()
db.drop_all_tables(with_all_data=True)
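# A usage sketch: concrete TalisMUD tests subclass BaseTest so every test method
# runs inside its own db_session against the in-memory database bound above.
# The entity and its fields below are hypothetical placeholders.
#
#     class TestAccount(BaseTest):
#         def test_create(self):
#             account = db.Account(username="kredh", password=b"secret")
#             self.assertIsNotNone(account)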
| 25.947368
| 74
| 0.662272
| 609
| 0.617647
| 0
| 0
| 0
| 0
| 0
| 0
| 310
| 0.314402
|
a7b03e4ffb4bbb414e62639fa7c3f4af65273269
| 4,074
|
py
|
Python
|
setup.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | null | null | null |
setup.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | 12
|
2018-09-12T03:56:04.000Z
|
2019-02-15T04:12:53.000Z
|
setup.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | null | null | null |
'''
This setup.py file sets up our package to be installable on any computer,
so that folks can `import henrietta` from within any directory.
Thanks to this file, you can...
...tell python to look for `henrietta` in the current directory (which you
can continue to edit), by typing *one* of the following commands:
`pip install -e .`
or
`python setup.py develop`
...move a copy of this code to your site-packages directory, where python will
be able to find it (but you won't be able to keep editing it), by typing *one*
of the following commands:
`pip install .`
or
`python setup.py install`
...upload the entire package to the Python Package Index, so that other folks
will be able to install your package via the simple `pip install henrietta`, by
running the following command:
`python setup.py release`
The template for this setup.py came was pieced together with help from
barentsen, christinahedges, timothydmorton, and dfm. Check them out on github
for more neat tricks!
[`python-packaging`](https://python-packaging.readthedocs.io/en/latest/index.html)
is a pretty useful resource too!
'''
# import our basic setup ingredients
from setuptools import setup, find_packages
import os,sys
# running `python setup.py release` from the command line will post to PyPI
if "release" in sys.argv[-1]:
os.system("python setup.py sdist")
# uncomment the next line to test out on test.pypi.com/project/tess-zap
#os.system("twine upload --repository-url https://test.pypi.org/legacy/ dist/*")
os.system("twine upload dist/*")
os.system("rm -rf dist/henrietta*")
sys.exit()
# a little kludge to get the version number from __version__
exec(open('henrietta/version.py').read())
# run the setup function
setup(
# people can type `import henrietta` to access this package
name = "henrietta",
# this package will only be installed if the current version doesn't exist
version = __version__,
# what's a short description of the package?
description = "Python toolkit playing with stellar brightness measurements, for ASTR3400 at CU Boulder.",
# what's a more detailed description?
long_description = open('README.md').read(),
# who's the main author?
author = "Zach Berta-Thompson",
# what's the main author's email?
author_email = "zach.bertathompson@colorado.edu",
# what's the URL for the repository?
url = "https://github.com/zkbt/henrietta",
# this figures out what subdirectories to include
packages = find_packages(),
# are the directories of data that should be accessible when installed?
include_package_data=False,
# where are those data directories?
package_data = {'henrietta':[]},
# any scripts will be copied into your $PATH, so that can run from the command line
scripts = [],
# some descriptions about this package (for searchability?)
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Astronomy'
],
# what other packages are required. these must be pip-installable
install_requires=['numpy',
'astropy',
'scipy',
'ipython',
'matplotlib',
'lightkurve>=1.0b26',
'tqdm',
'thefriendlystars>=0.0.2',
'illumination>=0.0.3',
'ipywidgets',
'jupyter',
'photutils',
'ipympl',
'scikit-image',
'emcee',
'corner'],
# the packages in `key` will be installed if folks run `pip install henrietta[key]`
extras_require={'models':['batman-package', ],
'docs':['sphinx', 'nbsphinx', 'sphinx_rtd_theme', 'numpydoc']},
# (I think just leave this set to False)
zip_safe=False,
# under what license is this code released?
license='MIT')
| 37.376147
| 109
| 0.650957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,015
| 0.740059
|
a7b07918e04c601018fdac4669f2078472d113b2
| 18,739
|
py
|
Python
|
zedenv/plugins/systemdboot.py
|
slicer69/zedenv
|
415397b48fdc8ec137cc8f99d2d9a70e8c8981f1
|
[
"BSD-3-Clause"
] | null | null | null |
zedenv/plugins/systemdboot.py
|
slicer69/zedenv
|
415397b48fdc8ec137cc8f99d2d9a70e8c8981f1
|
[
"BSD-3-Clause"
] | null | null | null |
zedenv/plugins/systemdboot.py
|
slicer69/zedenv
|
415397b48fdc8ec137cc8f99d2d9a70e8c8981f1
|
[
"BSD-3-Clause"
] | null | null | null |
import shutil
import os
import re
import tempfile
import click
import zedenv.lib.be
import zedenv.plugins.configuration as plugin_config
import zedenv.lib.system
from zedenv.lib.logger import ZELogger
class SystemdBoot:
systems_allowed = ["linux"]
bootloader = "systemdboot"
def __init__(self, zedenv_data: dict):
for k in zedenv_data:
if k not in plugin_config.allowed_keys:
raise ValueError(f"Type {k} is not in allowed keys")
self.boot_environment = zedenv_data['boot_environment']
self.old_boot_environment = zedenv_data['old_boot_environment']
self.bootloader = zedenv_data['bootloader']
self.verbose = zedenv_data['verbose']
self.noconfirm = zedenv_data['noconfirm']
self.noop = zedenv_data['noop']
self.be_root = zedenv_data['boot_environment_root']
self.env_dir = "env"
self.boot_mountpoint = "/boot"
self.entry_prefix = "zedenv"
self.old_entry = f"{self.entry_prefix}-{self.old_boot_environment}"
self.new_entry = f"{self.entry_prefix}-{self.boot_environment}"
esp = zedenv.lib.be.get_property(
"/".join([self.be_root, self.boot_environment]), "org.zedenv:esp")
if esp is None or esp == "-":
self.esp = "/mnt/efi"
else:
self.esp = esp
ZELogger.verbose_log({
"level": "INFO",
"message": f"esp set to {esp}\n"
}, self.verbose)
if not os.path.isdir(self.esp):
ZELogger.log({
"level": "EXCEPTION",
"message": ("To use the systemdboot plugin, an 'esp' must be mounted at the "
"default location of `/mnt/esp`, or at another location, with the "
"property 'org.zedenv:esp' set on the dataset. To set it use the "
"command (replacing with your pool and dataset)\n'"
"zfs set org.zedenv:esp='/mnt/efi' zpool/ROOT/default\n")
}, exit_on_error=True)
def modify_fstab(self, be_mountpoint: str):
be_fstab = os.path.join(be_mountpoint, "etc/fstab")
temp_fstab = os.path.join(be_mountpoint, "fstab.zedenv.new")
try:
shutil.copy(be_fstab, temp_fstab)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {temp_fstab}\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to {temp_fstab}\n{e}"
}, exit_on_error=True)
replace_pattern = r'(^{esp}/{env}/?)(.*)(\s.*{boot}\s.*$)'.format(
esp=self.esp, env=self.env_dir, boot=self.boot_mountpoint)
target = re.compile(replace_pattern)
"""
Find match for: $esp/$env_dir/$boot_environment $boot_location <fstab stuff>
eg: /mnt/efi/env/default-3 /boot none rw,defaults,bind 0 0
"""
with open(temp_fstab) as in_f:
lines = in_f.readlines()
match = next(
((i, target.search(m)) for i, m in enumerate(lines) if target.search(m)), None)
"""
Replace BE name with new one
"""
if match:
old_fstab_entry = lines[match[0]]
new_fstab_entry = re.sub(
replace_pattern, r"\1" + self.new_entry + r"\3", lines[match[0]])
lines[match[0]] = new_fstab_entry
with open(temp_fstab, 'w') as out_f:
out_f.writelines(lines)
else:
ZELogger.log({
"level": "INFO",
"message": (f"Couldn't find bindmounted directory to replace, your system "
"may not be configured for boot environments with systemdboot.")
})
if not self.noop:
try:
shutil.copy(be_fstab, f"{be_fstab}.bak")
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {be_fstab}.bak\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to {be_fstab}.bak\n{e}"
}, exit_on_error=True)
if not self.noconfirm:
if click.confirm(
"Would you like to edit the generated 'fstab'?", default=True):
click.edit(filename=temp_fstab)
try:
shutil.copy(temp_fstab, be_fstab)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {be_fstab}\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to {be_fstab}\n{e}"
}, exit_on_error=True)
ZELogger.log({
"level": "INFO",
"message": (f"Replaced fstab entry:\n{old_fstab_entry}\nWith new entry:\n"
f"{new_fstab_entry}\nIn the boot environment's "
f"'/etc/fstab'. A copy of the original "
"'/etc/fstab' can be found at '/etc/fstab.bak'.\n")
})
def edit_bootloader_entry(self, temp_esp: str):
temp_entries_dir = os.path.join(temp_esp, "loader/entries")
temp_bootloader_file = os.path.join(temp_entries_dir,
f"{self.new_entry}.conf")
real_entries_dir = os.path.join(self.esp, "loader/entries")
real_bootloader_file = os.path.join(
real_entries_dir, f"{self.old_entry}.conf")
try:
os.makedirs(temp_entries_dir)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {temp_entries_dir}\n{e}"
}, exit_on_error=True)
except OSError as os_err:
ZELogger.log({
"level": "EXCEPTION",
"message": os_err
}, exit_on_error=True)
config_entries = os.listdir(real_entries_dir)
entry_guess_list = [
f"title Boot Environment [{self.boot_environment}]\n",
f"linux /env/{self.new_entry}/vmlinuz-linux\n",
f"initrd /env/{self.new_entry}/initramfs-linux.img\n",
f"options zfs={self.be_root}/{self.boot_environment}\n"
]
config_matches = [en.split(".conf")[0] for en in config_entries
if en.split(".conf")[0] == (
self.old_entry or self.new_entry)]
old_conf = True if self.old_entry in config_matches else False
new_conf = True if self.new_entry in config_matches else False
if old_conf and (self.old_boot_environment == self.boot_environment):
ZELogger.log({
"level": "INFO",
"message": (f"Attempting to activate same boot environment while config "
f"'{self.old_entry}.conf' "
"already exists. Will not modify old configuration.\n")
})
elif new_conf:
ZELogger.log({
"level": "INFO",
"message": (f"Attempting to activate boot environment while config for "
f"'{self.new_entry}.conf' already exists. "
"Will not modify old configuration.\n")
})
else:
if old_conf:
ZELogger.log({
"level": "INFO",
"message": (f"Using existing entry {self.old_entry} as template "
f"taking best guess at creating one at "
f"{self.new_entry}.conf\n")
})
with open(real_bootloader_file, "r") as old_conf:
old_conf_list = old_conf.readlines()
new_entry_list = [l.replace(self.old_boot_environment, self.boot_environment)
for l in old_conf_list]
else:
entry_guess_full = '\n'.join(entry_guess_list)
ZELogger.log({
"level": "INFO",
"message": (f"You have no matching bootloader entries in {real_entries_dir}, "
f"taking best guess at creating one at {real_bootloader_file}:\n"
f"{entry_guess_full}.\n")
})
new_entry_list = entry_guess_list
if not self.noop:
with open(temp_bootloader_file, "w") as boot_conf:
boot_conf.writelines(new_entry_list)
if not self.noconfirm:
if click.confirm(
"Would you like to edit the generated bootloader config?",
default=True):
click.edit(filename=temp_bootloader_file)
def modify_bootloader(self, temp_esp: str):
real_kernel_dir = os.path.join(self.esp, "env")
temp_kernel_dir = os.path.join(temp_esp, "env")
real_old_dataset_kernel = os.path.join(real_kernel_dir, self.old_entry)
temp_new_dataset_kernel = os.path.join(temp_kernel_dir, self.new_entry)
if not os.path.isdir(real_old_dataset_kernel):
ZELogger.log({
"level": "INFO",
"message": (f"No directory for Boot environments kernels found at "
f"'{real_old_dataset_kernel}', creating empty directory."
f"Don't forget to add your kernel to "
f"{real_kernel_dir}/{self.boot_environment}.")
})
if not self.noop:
try:
os.makedirs(temp_new_dataset_kernel)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {temp_new_dataset_kernel}\n{e}"
}, exit_on_error=True)
except OSError as os_err:
ZELogger.log({
"level": "EXCEPTION",
"message": os_err
}, exit_on_error=True)
else:
if not self.noop:
try:
shutil.copytree(real_old_dataset_kernel, temp_new_dataset_kernel)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {temp_new_dataset_kernel}\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to {temp_new_dataset_kernel}\n{e}"
}, exit_on_error=True)
def edit_bootloader_default(self, temp_esp: str, overwrite: bool):
real_loader_dir_path = os.path.join(self.esp, "loader")
temp_loader_dir_path = os.path.join(temp_esp, "loader")
real_loader_conf_path = os.path.join(real_loader_dir_path, "loader.conf")
temp_loader_conf_path = os.path.join(temp_loader_dir_path, "loader.conf")
ZELogger.verbose_log({
"level": "INFO",
"message": f"Updating {real_loader_conf_path}\n"
}, self.verbose)
if not os.path.isdir(temp_loader_dir_path):
try:
os.makedirs(temp_loader_dir_path)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {temp_loader_dir_path}\n{e}"
}, exit_on_error=True)
except OSError as os_err:
ZELogger.log({
"level": "EXCEPTION",
"message": os_err
}, exit_on_error=True)
if not os.path.isfile(real_loader_conf_path):
ZELogger.log({
"level": "EXCEPTION",
"message": f"Missing file: {real_loader_conf_path}\n"
}, exit_on_error=True)
try:
shutil.copy(real_loader_conf_path, temp_loader_conf_path)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to '{temp_loader_conf_path}'\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to '{temp_loader_conf_path}'\n{e}"
}, exit_on_error=True)
with open(temp_loader_conf_path, "r") as loader_conf:
conf_list = loader_conf.readlines()
line_num = next((l for l, val in enumerate(conf_list)
if val.split(' ', 1)[0] == "default"), None)
if line_num is not None:
conf_list[line_num] = f"default {self.new_entry}\n"
if not self.noop:
if os.path.isfile(real_loader_conf_path):
ZELogger.verbose_log({
"level": "INFO",
"message": (f"File {real_loader_conf_path} already exists, backed up to "
f"'{real_loader_conf_path}.bak' and replaced.\n")
}, self.verbose)
if os.path.isfile(f"{real_loader_conf_path}.bak"):
try:
os.remove(f"{real_loader_conf_path}.bak")
except PermissionError:
ZELogger.log({
"level": "EXCEPTION",
"message": (f"Require Privileges to remove "
f"'{real_loader_conf_path}.bak'\n")
}, exit_on_error=True)
try:
shutil.move(real_loader_conf_path, f"{real_loader_conf_path}.bak")
except PermissionError:
ZELogger.log({
"level": "EXCEPTION",
"message": (f"Require Privileges to write to "
f"'{real_loader_conf_path}.bak'\n")
}, exit_on_error=True)
with open(real_loader_conf_path, "w") as loader_conf:
loader_conf.writelines(conf_list)
if not self.noconfirm:
if click.confirm(
"Would you like to edit the generated 'loader.conf'?", default=True):
click.edit(filename=real_loader_conf_path)
def recurse_move(self, source, dest):
for tf in os.listdir(source):
tf_path_src = os.path.join(source, tf)
tf_path_dst = os.path.join(dest, tf)
if os.path.isfile(tf_path_src):
if os.path.isfile(tf_path_dst):
ZELogger.verbose_log({
"level": "INFO",
"message": f"File {tf_path_dst} already exists, will not modify.\n"
}, self.verbose)
else:
try:
shutil.copy(tf_path_src, tf_path_dst)
except PermissionError:
ZELogger.log({
"level": "EXCEPTION",
"message": (f"Require Privileges to write to "
f"'{tf_path_dst}.bak'\n")
}, exit_on_error=True)
ZELogger.verbose_log({
"level": "INFO",
"message": f"Copied file {tf_path_src} -> {tf_path_dst}\n"
}, self.verbose)
elif os.path.isdir(tf_path_src):
if os.path.isdir(tf_path_dst):
ZELogger.verbose_log({
"level": "INFO",
"message": f"Directory {tf_path_dst} already exists, will not modify.\n"
}, self.verbose)
# Call again, may be empty
self.recurse_move(tf_path_src, tf_path_dst)
else:
try:
shutil.copytree(tf_path_src, tf_path_dst)
except PermissionError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Require Privileges to write to {tf_path_dst}\n{e}"
}, exit_on_error=True)
except IOError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"IOError writing to {tf_path_dst}\n{e}"
}, exit_on_error=True)
ZELogger.verbose_log({
"level": "INFO",
"message": f"Copied dir {tf_path_src} -> {tf_path_dst}\n"
}, self.verbose)
def post_activate(self):
ZELogger.verbose_log({
"level": "INFO",
"message": (f"Creating Temporary working directory. "
"No changes will be made until the end of "
"the systemd-boot configuration.\n")
}, self.verbose)
with tempfile.TemporaryDirectory(prefix="zedenv", suffix=self.bootloader) as t_esp:
ZELogger.verbose_log({
"level": "INFO",
"message": f"Created {t_esp}.\n"
}, self.verbose)
self.modify_bootloader(t_esp)
self.edit_bootloader_entry(t_esp)
self.recurse_move(t_esp, self.esp)
self.edit_bootloader_default(t_esp, overwrite=True)
# self.cleanup_entries()
def pre_activate(self):
pass
def mid_activate(self, be_mountpoint: str):
ZELogger.verbose_log({
"level": "INFO",
"message": f"Running {self.bootloader} mid activate.\n"
}, self.verbose)
self.modify_fstab(be_mountpoint)
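For orientation, the post_activate flow above stages every change in a temporary working directory and only copies the result onto the ESP at the very end. A minimal standalone sketch of that staging pattern, with purely hypothetical paths and entry names:
import os
import shutil
import tempfile
esp = "/tmp/example-esp"  # hypothetical ESP mountpoint
with tempfile.TemporaryDirectory(prefix="zedenv") as staging:
    # Stage the bootloader configuration under the temporary directory first.
    loader_dir = os.path.join(staging, "loader")
    os.makedirs(loader_dir)
    with open(os.path.join(loader_dir, "loader.conf"), "w") as conf:
        conf.write("default org.zedenv.new-be\n")  # hypothetical entry name
    # Only after everything is staged, copy the whole tree onto the (fake) ESP.
    shutil.copytree(staging, esp, dirs_exist_ok=True)  # dirs_exist_ok needs Python 3.8+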
| 40.560606
| 99
| 0.509365
| 18,533
| 0.989007
| 0
| 0
| 0
| 0
| 0
| 0
| 5,319
| 0.283847
|
a7b14f4594c0e55bb15609c8994038940d71c37f
| 1,158
|
py
|
Python
|
dockerfile/web/mailman-web/main.py
|
TommyLike/kubernetes-mailman
|
abd50e2798f2c4417e71302ff64b8a52d0d5878a
|
[
"MIT"
] | null | null | null |
dockerfile/web/mailman-web/main.py
|
TommyLike/kubernetes-mailman
|
abd50e2798f2c4417e71302ff64b8a52d0d5878a
|
[
"MIT"
] | null | null | null |
dockerfile/web/mailman-web/main.py
|
TommyLike/kubernetes-mailman
|
abd50e2798f2c4417e71302ff64b8a52d0d5878a
|
[
"MIT"
] | null | null | null |
import os
import socket
import ipaddress
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# NOTE: this is the MTA host, we need to update it.
EMAIL_HOST = 'mailman-exim4-0.mail-suit-service.default.svc.cluster.local'
EMAIL_PORT = 25
mailman_ip_address = socket.gethostbyname(
os.environ.get('MAILMAN_HOST_IP')).split('.')
mailman_ip_cidr = "{0}.{1}.0.0/16".format(mailman_ip_address[0],
mailman_ip_address[1])
MAILMAN_ARCHIVER_FROM = [str(ip) for ip in
ipaddress.IPv4Network(mailman_ip_cidr)]
ALLOWED_HOSTS = [
"localhost", # Archiving API from Mailman, keep it.
# Add here all production URLs you may have.
"mailman-database-0.mail-suit-service.default.svc.cluster.local",
"mailman-core-0.mail-suit-service.default.svc.cluster.local",
"mailman-web-0.mail-suit-service.default.svc.cluster.local",
"mail-web-service.default.svc.cluster.local",
# NOTE: This is the public ip address of the served host
"159.138.26.163",
"tommylike.me",
os.environ.get('SERVE_FROM_DOMAIN'),
os.environ.get('DJANGO_ALLOWED_HOSTS'),
]
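As a sanity check on the MAILMAN_ARCHIVER_FROM expansion above: iterating an IPv4Network yields every address in the block, so a /16 produces 65,536 entries. A small sketch with a hypothetical resolved pod address:
import ipaddress
octets = "10.244.3.17".split('.')  # hypothetical value of the resolved MAILMAN_HOST_IP
cidr = "{0}.{1}.0.0/16".format(octets[0], octets[1])
archiver_from = [str(ip) for ip in ipaddress.IPv4Network(cidr)]
print(cidr, len(archiver_from))  # 10.244.0.0/16 65536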
| 37.354839
| 74
| 0.692573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 640
| 0.552677
|
a7b26907d2d76169184d6413917d51a6f4b07b22
| 16,381
|
py
|
Python
|
mask_functions.py
|
jhh37/wearmask3d
|
67c9beba323a34d0d207d7b6897f97cd59145e63
|
[
"MIT"
] | 6
|
2021-09-25T04:59:16.000Z
|
2022-03-02T15:39:13.000Z
|
mask_functions.py
|
jhh37/wearmask3d
|
67c9beba323a34d0d207d7b6897f97cd59145e63
|
[
"MIT"
] | null | null | null |
mask_functions.py
|
jhh37/wearmask3d
|
67c9beba323a34d0d207d7b6897f97cd59145e63
|
[
"MIT"
] | 1
|
2021-09-26T02:35:44.000Z
|
2021-09-26T02:35:44.000Z
|
# WearMask3D
# Copyright 2021 Hanjo Kim and Minsoo Kim. All rights reserved.
# http://github.com/jhh37/wearmask3d
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: rlakswh@gmail.com (Hanjo Kim)
# devkim1102@gmail.com (Minsoo Kim)
import cv2
import geomdl
import torch
import torchvision.transforms as transforms
from PIL import Image
from geomdl import exchange, NURBS
from pygame.constants import *
from misc_utils import get_models, mask_transformation
from obj_loader import *
from utils.ddfa import ToTensorGjz, NormalizeGjz
from utils.inference import parse_roi_box_from_landmark, crop_img, predict_68pts, predict_dense
def get_surface_v1(mType, points, delta = 0.025):
chin_left = points[2]
chin_right = points[14]
nose_tip = points[30]
nose_top = points[27]
lip_bottom = points[57]
chin_middle = (chin_left + chin_right) / 2
normalVector = nose_tip - chin_middle
verticalVector = lip_bottom - nose_top
horVector = chin_right - chin_left
c = []
for i in range(0, 28):
c.append(np.array([0, 0, 0, 1.0]))
if (mType == 4): # smooth (center-focused)
c1 = 0.50
c2 = 0.10
c3 = 0.25
c4 = 0.30
c5 = 0.07
c6 = 0.10
c7 = 0.015
c8 = 0.10
degree_u = 2
degree_v = 2
knotvector_u = [0, 0, 0, 0.5, 1, 1, 1]
knotvector_v = [0, 0, 0, 0.30, 0.50, 0.50, 0.70, 1, 1, 1]
elif (mType == 3): # smooth
c1 = 0.50
c2 = 0.10
c3 = 0.25
c4 = 0.30
c5 = 0.07
c6 = 0.00
c7 = 0.04
c8 = 0.15
degree_u = 2
degree_v = 2
knotvector_u = [0, 0, 0, 0.5, 1, 1, 1]
knotvector_v = [0, 0, 0, 0.15, 0.45, 0.55, 0.85, 1, 1, 1]
elif (mType == 2): # angulated
c1 = 0.50
c2 = 0.10
c3 = 0.17
c4 = 0.20
c5 = 0.08
c6 = 0.00
c7 = 0.04
c8 = 0.15
degree_u = 1
degree_v = 2
knotvector_u = [0, 0, 0.2, 0.75, 1, 1]
knotvector_v = [0, 0, 0, 0.15, 0.5, 0.5, 0.85, 1, 1, 1]
c[0] = chin_left - horVector * c7 * 1.5 + normalVector * 0.1
c[6] = chin_right + horVector * c7 * 1.5 + normalVector * 0.1
c[3] = nose_tip
c[1] = c[0] + normalVector * c1 + horVector * c7
c[5] = c[6] + normalVector * c1 - horVector * c7
c[2] = (c[1] + c[3]) / 2
c[4] = (c[5] + c[3]) / 2
for i in range(0, 7):
c[i] -= verticalVector * 0.4
c[i + 21] = c[i] + verticalVector * 1.4
c[1] -= normalVector * c2
c[2] -= normalVector * c2
c[3] -= normalVector * c2
c[4] -= normalVector * c2
c[5] -= normalVector * c2
c[0] += normalVector * c6 * 2
c[1] += normalVector * c6
c[5] += normalVector * c6
c[6] += normalVector * c6 * 2
c[7] += normalVector * c6 * 2
c[8] += normalVector * c6
c[12] += normalVector * c6
c[13] += normalVector * c6 * 2
c[21] += horVector * c8
c[22] -= -horVector * c8 + normalVector * 0.2
c[23] -= normalVector * 0.2
c[24] -= normalVector * 0.2
c[25] -= normalVector * 0.2
c[26] -= horVector * c8 + normalVector * 0.2
c[27] -= horVector * c8
for i in range(7, 14):
c[i] = (7 * c[i - 7] + 3 * c[i + 14]) / 10
c[i + 7] = (1 * c[i - 7] + 4 * c[i + 14]) / 5
c[8] += normalVector * 0.1
c[9] += normalVector * c3
c[10] += normalVector * c4
c[11] += normalVector * c3
c[12] += normalVector * 0.1
c[15] += normalVector * 0.08
c[16] += normalVector * 0.12
c[17] += normalVector * 0.12
c[18] += normalVector * 0.12
c[19] += normalVector * 0.08
c[22] -= normalVector * 0.1
c[23] -= normalVector * c5
c[24] -= normalVector * c5
c[25] -= normalVector * c5
c[26] -= normalVector * 0.1
ctrlPts = []
for i in range(0, 28):
ctrlPts.append(c[i].tolist())
os.chdir(os.path.dirname(os.path.realpath(__file__)))
surf = NURBS.Surface()
surf.degree_u = degree_u
surf.degree_v = degree_v
surf.set_ctrlpts(ctrlPts, 4, 7) # u=4 / v= 7
surf.knotvector_u = knotvector_u
surf.knotvector_v = knotvector_v
surf.delta = delta # default = 0.025
return exchange.export_obj_str(surf)
def get_surface_v0(mType, points, delta = 0.03):
chin_left = points[2]
chin_right = points[14]
nose_tip = points[30]
nose_top = points[27]
lip_bottom = points[57]
chin_middle = (chin_left + chin_right) / 2
normalVector = nose_tip - chin_middle
verticalVector = lip_bottom - nose_top
horVector = chin_right - chin_left
c = []
for i in range(0, 10):
c.append([0, 0, 0, 0])
c[0] = [chin_left[0] - horVector[0] * 0.036, chin_left[1] - horVector[1] * 0.036,
chin_left[2] - horVector[2] * 0.036, 1.0]
c[1] = [c[0][0] + normalVector[0], c[0][1] + normalVector[1], c[0][2] + normalVector[2], 1.0]
c[4] = [chin_right[0] + horVector[0] * 0.036, chin_right[1] + horVector[1] * 0.036,
chin_right[2] + horVector[2] * 0.036, 1.0]
c[2] = [nose_tip[0], nose_tip[1], nose_tip[2], 1.0]
c[3] = [c[4][0] + normalVector[0], c[4][1] + normalVector[1], c[4][2] + normalVector[2], 1.0]
for i in range(0, 5):
for j in range(0, 3):
c[i][j] -= verticalVector[j] * 0.4
for j in range(0, 3):
c[i + 5][j] = c[i][j] + verticalVector[j] * 1.4
c[i + 5][3] = 1.0
ctrlPts = []
for i in range(0, 10):
ctrlPts.append(c[i])
for i in range(0, 5):
c.append([0, 0, 0, 0])
for i in range(10, 15):
c[i][0] = c[i - 5][0]
c[i][1] = c[i - 5][1]
c[i][2] = c[i - 5][2]
c[i][3] = 1.0
ctrlPts.append(c[i])
for j in range(0, 3):
c[1][j] -= normalVector[j] * 0.1
c[2][j] -= normalVector[j] * 0.1
c[3][j] -= normalVector[j] * 0.1
c[10][j] += horVector[j] * 0.2
c[11][j] -= -horVector[j] * 0.2 + normalVector[j] * 0.2
c[12][j] -= normalVector[j] * 0.2
c[13][j] -= horVector[j] * 0.2 + normalVector[j] * 0.2
c[14][j] -= horVector[j] * 0.2
for i in range(6, 9):
c[i][0] = (c[i - 5][0] + c[i + 5][0]) / 2
c[i][1] = (c[i - 5][1] + c[i + 5][1]) / 2
c[i][2] = (c[i - 5][2] + c[i + 5][2]) / 2
c[i][3] = 1.0
for j in range(0, 3):
c[6][j] += normalVector[j] * 0.2
c[7][j] += normalVector[j] * 0.4
c[8][j] += normalVector[j] * 0.2
os.chdir(os.path.dirname(os.path.realpath(__file__)))
surf = NURBS.Surface()
surf.degree_u = 2
surf.degree_v = 2
surf.set_ctrlpts(ctrlPts, 3, 5)
surf.knotvector_u = [0, 0, 0, 1, 1, 1]
surf.knotvector_v = [0, 0, 0, 0.5, 0.5, 1, 1, 1]
surf.delta = delta
return exchange.export_obj_str(surf)
def surf_2_obj_str(objStr, mask_shape, numLineVertices):
firstF = True
sur_obj_string = ""
for idx, line in enumerate(objStr.splitlines()):
if (idx == 0):
sur_obj_string += 'mtllib nurbs_surf.mtl\n'
if (line[0] == 'f'):
if (firstF):
for y in range(0, numLineVertices):
for x in range(0, numLineVertices):
xCoord = 1 - x / (numLineVertices - 1)
y_ = 1 - y / (numLineVertices - 1)
if (y_ >= 0.5):
if (mask_shape == 1):
yCoord = 2 * (y_ - 0.5) ** 2 + 0.5
elif (mask_shape == 3):
yCoord = -y_ * y_ + 2.5 * y_ - 0.5
elif (mask_shape == 4):
if (xCoord >= 0.5):
yCoord = y_ + (2 * xCoord - 1) * 0.1
else:
yCoord = y_ + (1 - 2 * xCoord) * 0.1
else:
yCoord = y_
else:
if (mask_shape == 2):
yCoord = -2 * (y_ - 0.5) * (y_ - 0.5) + 0.5
else:
yCoord = y_
sur_obj_string += 'vt {} {}\n'.format(xCoord, yCoord)
sur_obj_string += 'usemtl material00\n'
firstF = False
fList = line.split()
sur_obj_string += 'f {}/{} {}/{} {}/{}\n'.format(fList[1], fList[1], fList[2], fList[2], fList[3],
fList[3])
else:
sur_obj_string += line + '\n'
return sur_obj_string
def get_face_region(face_regressor, img_cv, rect):
pts = face_regressor(img_cv, rect).parts()
pts = np.array([[pt.x, pt.y] for pt in pts]).T
roi_box = parse_roi_box_from_landmark(pts)
img = crop_img(img_cv, roi_box)
img = cv2.resize(img, dsize=(120, 120), interpolation=cv2.INTER_LINEAR)
return img, roi_box
def calculate_brightness(image):
greyscale_image = image.convert('L')
histogram = greyscale_image.histogram()
pixels = sum(histogram)
brightness = scale = len(histogram)
for index in range(0, scale):
ratio = histogram[index] / pixels
brightness += ratio * (-scale + index)
return 1 if brightness == 255 else brightness / scale
def variance_of_laplacian(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return cv2.Laplacian(gray, cv2.CV_64F).var()
def is_geomdl_newer_than_52():
ver = geomdl.__version__.split('.')
    if (int(ver[0]) > 5 or (int(ver[0]) == 5 and int(ver[1]) >= 3)):  # true for 5.3 and newer, matching the 5.2.10 / 5.3.1 branches below
return True
else:
return False
def batch_fit_masks(configs, file_list, cuda_device):
os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_device)
pid = os.getpid()
pygame.init()
# get face detection models
model, face_detector, face_regressor = get_models()
model.cuda().eval()
# torch tensor transformation
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
bBr = 1
# get configurations
dataset_path = configs["srcPath"]
save_dataset_path = configs["dstPath"]
version = configs["version"]
# current version check
if (version == '1.1'):
from mask_functions import get_surface_v1 as get_surface
if is_geomdl_newer_than_52():
numLineVertices = 40 # 5.3.1
else:
numLineVertices = 41 # 5.2.10
elif (version == '1.0'):
from mask_functions import get_surface_v0 as get_surface
if is_geomdl_newer_than_52():
numLineVertices = 33 # 5.3.1
else:
numLineVertices = 34 # 5.2.10
for file_idx, file_path in enumerate(file_list):
if file_idx % 1000 == 0:
print(f'PID {pid}: {file_idx} / {len(file_list)} images processed')
nameLower = file_path.lower()
if not nameLower.endswith('.jpg') and not nameLower.endswith('.png'):
continue
img_cv = cv2.imread(file_path)
        img_ori = Image.open(file_path)
height, width, _ = img_cv.shape
lp = variance_of_laplacian(img_cv)
br = calculate_brightness(img_ori)
if (bBr != 1):
br = 1.0
# transform mask image fit to face image
mask_surf, mask_shape, mask_surf_type = mask_transformation(br, lp)
rects = face_detector(img_cv, 1)
# no face detected
if len(rects) == 0:
continue
pts_res = []
vertices_lst = []
for rect in rects:
# get face region & resize face img
img, roi_box = get_face_region(face_regressor, img_cv, rect)
input = transform(img).unsqueeze(0)
with torch.no_grad():
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
pts_res.append(pts68)
vertices = predict_dense(param, roi_box)
vertices_lst.append(vertices)
points = np.empty((1, 4), dtype=float)
for i in range(0, pts68[0].size):
points = np.append(points, np.array([[pts68[1][i], pts68[0][i], pts68[2][i], 1.0]]),
axis=0)
points = np.delete(points, [0, 0], axis=0)
mType = mask_surf_type
objStr = get_surface(mType, points)
sur_obj_string = surf_2_obj_str(objStr, mask_shape, numLineVertices)
srf = pygame.display.set_mode((width, height), OPENGL | RESIZABLE | DOUBLEBUF | HIDDEN)
size = srf.get_size()
imgSurf = pygame.image.load(file_path).convert()
image = pygame.image.tostring(imgSurf, 'RGBA', 1)
obj = MaskSurfObj(fileString=sur_obj_string, imgHeight=height, swapxy=True, mask_surf = mask_surf)
# real rendering using openGL lib
buffer = gl_rendering(image, obj, size, img_shape=(width,height))
# image save
screen_surf = pygame.image.fromstring(buffer, size, "RGBA", True)
pygame.image.save(screen_surf, file_path.replace(dataset_path, save_dataset_path))
if len(file_list) > 0:
print(f'PID {pid}: {file_idx+1} / {len(file_list)} images processed')
print(f'PID {pid}: mask augmentation completed')
def gl_rendering(image, obj, size, img_shape=(224,224)):
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glShadeModel(GL_SMOOTH)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
ambient = [0.0, 0.0, 0.0, 0.0]
diffuse = [0.8, 0.8, 0.8, 0.0]
specular = [0.0, 0.0, 0.0, 0.0]
position = [-0.4, -0.5, 1.0, 0.0]
glLightfv(GL_LIGHT0, GL_AMBIENT, ambient)
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse)
glLightfv(GL_LIGHT0, GL_SPECULAR, specular)
glLightfv(GL_LIGHT0, GL_POSITION, position)
texid = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, texid)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img_shape[0], img_shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
idx = glGenLists(1)
glDisable(GL_LIGHTING)
glNewList(idx, GL_COMPILE)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, texid)
glColor3f(1, 1, 1)
glBegin(GL_QUADS)
glTexCoord2f(0, 0)
glVertex3f(0, 0, -200)
glTexCoord2f(1, 0)
glVertex3f(img_shape[0], 0, -200)
glTexCoord2f(1, 1)
glVertex3f(img_shape[0], img_shape[1], -200)
glTexCoord2f(0, 1)
glVertex3f(0, img_shape[1], -200)
glEnd()
glEndList()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, img_shape[0], 0, img_shape[1], -10000, 10000)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glCallLists([idx, obj.gl_list])
buffer = glReadPixels(0, 0, *size, GL_RGBA, GL_UNSIGNED_BYTE)
return buffer
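A minimal, hypothetical driver for the module above, assuming the configs keys read by batch_fit_masks (srcPath, dstPath, version) and a flat source directory; the paths are placeholders and the models loaded by get_models() must already be available:
import os
from mask_functions import batch_fit_masks
configs = {
    "srcPath": "/data/faces",         # hypothetical input dataset root
    "dstPath": "/data/faces_masked",  # hypothetical output root (mirrors the input layout)
    "version": "1.1",                 # selects get_surface_v1 above
}
file_list = [
    os.path.join(configs["srcPath"], name)
    for name in sorted(os.listdir(configs["srcPath"]))
    if name.lower().endswith((".jpg", ".png"))
]
batch_fit_masks(configs, file_list, cuda_device=0)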
| 31.807767
| 110
| 0.565777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,920
| 0.117209
|
a7b2d4bcebc84c01285d54f2bcd39c69c67e7a6d
| 249
|
py
|
Python
|
adminapp/admin.py
|
gabyxbinnaeah/Bus-Booking
|
51d2a521f890986e4e7e17775708cec3cd71d2b4
|
[
"MIT"
] | null | null | null |
adminapp/admin.py
|
gabyxbinnaeah/Bus-Booking
|
51d2a521f890986e4e7e17775708cec3cd71d2b4
|
[
"MIT"
] | null | null | null |
adminapp/admin.py
|
gabyxbinnaeah/Bus-Booking
|
51d2a521f890986e4e7e17775708cec3cd71d2b4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Admin, Profile
# from userapp.models import Book
# from driverapp.models import Bus
admin.site.register(Admin)
admin.site.register(Profile)
# admin.site.register(Bus)
# admin.site.register(Book)
| 27.666667
| 35
| 0.795181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.481928
|
a7b37c8008c4aac92350512db834814661a375e1
| 41,807
|
py
|
Python
|
zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
zstackwoodpecker/zstackwoodpecker/operations/hybrid_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
'''
All hybrid cloud (ZStack-Aliyun) operations for test.
@author: quarkonics
'''
from apibinding.api import ApiError
import apibinding.inventory as inventory
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import config_operations
import os
import inspect
def add_aliyun_key_secret(name, description, key, secret, session_uuid=None):
action = api_actions.AddAliyunKeySecretAction()
action.name = name
action.description = description
action.key = key
action.secret = secret
test_util.action_logger('Add [aliyun key secret:] %s' % key)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[aliyun key secret:] %s is added.' % key)
return evt.inventory
def del_aliyun_key_secret(uuid, session_uuid=None):
action = api_actions.DeleteAliyunKeySecretAction()
action.uuid = uuid
test_util.action_logger('Delete [aliyun key secret:] %s' % uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[aliyun key secret:] %s is deleted.' % uuid)
return evt
def update_aliyun_key_secret(uuid, name=None, description=None, session_uuid=None):
action = api_actions.UpdateAliyunKeySecretAction()
action.uuid = uuid
action.name = name
action.description = description
test_util.action_logger('Update [aliyun key secret:] %s' % uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[aliyun key secret:] %s is updated.' % uuid)
return evt
def attach_aliyun_key(uuid, session_uuid=None):
action = api_actions.AttachAliyunKeyAction()
action.uuid = uuid
test_util.action_logger('Attach [aliyun key:] %s' % uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[aliyun key:] %s is attached.' % uuid)
return evt
def detach_aliyun_key(uuid, session_uuid=None):
action = api_actions.DetachAliyunKeyAction()
action.uuid = uuid
test_util.action_logger('Detach [aliyun key:] %s' % uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[aliyun key:] %s is detached.' % uuid)
return evt
def get_oss_bucket_name_from_remote(data_center_uuid, session_uuid=None):
action = api_actions.GetOssBucketNameFromRemoteAction()
action.dataCenterUuid = data_center_uuid
test_util.action_logger('get Oss Bucket Name from Remote')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def add_oss_bucket_from_remote(data_center_uuid, oss_bucket_name, session_uuid=None):
action = api_actions.AddOssBucketFromRemoteAction()
action.dataCenterUuid = data_center_uuid
action.bucketName = oss_bucket_name
test_util.action_logger('Add [Oss Bucket From Remote:] %s %s' % (data_center_uuid, oss_bucket_name))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss Bucket:] %s %s is added.' % (data_center_uuid, oss_bucket_name))
return evt.inventory
def del_oss_bucket_name_in_local(uuid, session_uuid=None):
action = api_actions.DeleteOssBucketNameLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Oss File Bucket Name in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss File Bucket Name in local:] %s is deleted.' % (uuid))
return evt
def create_oss_bucket_remote(data_center_uuid, bucket_name, description, session_uuid=None):
action = api_actions.CreateOssBucketRemoteAction()
action.dataCenterUuid = data_center_uuid
action.bucketName = bucket_name
action.description = description
test_util.action_logger('Create [Oss Bucket Name Remote:] %s %s' % (data_center_uuid, bucket_name))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss Bucket Name Remote:] %s %s is created.' % (data_center_uuid, bucket_name))
return evt.inventory
def del_oss_bucket_remote(uuid, session_uuid=None):
action = api_actions.DeleteOssBucketRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Oss Bucket Name Remote:] %s' % uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss Bucket Name Remote:] %s is deleted.' % uuid)
return evt
def del_oss_bucket_file_remote(bucket_uuid, file_name, session_uuid=None):
action = api_actions.DeleteOssBucketFileRemoteAction()
action.uuid = bucket_uuid
action.fileName = file_name
test_util.action_logger('Delete [Oss Bucket File Remote:] %s %s' % (bucket_uuid, file_name))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss Bucket File Remote:] %s %s is deleted.' % (bucket_uuid, file_name))
return evt
def get_oss_bucket_file_from_remote(bucket_uuid, session_uuid=None):
action = api_actions.GetOssBucketFileFromRemoteAction()
action.uuid = bucket_uuid
test_util.action_logger('Get [Oss Bucket File From Remote:] %s' % bucket_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_datacenter_from_remote(datacenter_type, session_uuid=None):
action = api_actions.GetDataCenterFromRemoteAction()
action.type = datacenter_type
test_util.action_logger('Get [Datacenter From Remote:] %s' % datacenter_type)
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def get_ecs_instance_type_from_remote(iz_uuid, session_uuid=None):
action = api_actions.GetEcsInstanceTypeAction()
action.identityZoneUuid = iz_uuid
test_util.action_logger('Get [Ecs Instance Type From Remote:] %s' % iz_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.types
def add_datacenter_from_remote(datacenter_type, region_id, description, session_uuid=None):
action = api_actions.AddDataCenterFromRemoteAction()
action.type = datacenter_type
action.regionId = region_id
action.description = description
test_util.action_logger('Add [datacenter from remote:] %s %s' % (datacenter_type, region_id))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[datacenter from remote:] %s %s is added.' % (datacenter_type, region_id))
return evt.inventory
def del_datacenter_in_local(uuid, session_uuid=None):
action = api_actions.DeleteDataCenterInLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [datacenter in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[datacenter in local:] %s is deleted.' % uuid)
return evt
def attach_oss_bucket_to_ecs_datacenter(oss_bucket_uuid, session_uuid=None):
action = api_actions.AttachOssBucketToEcsDataCenterAction()
action.ossBucketUuid = oss_bucket_uuid
# action.dataCenterUuid = datacenter_uuid
test_util.action_logger('Attach [Oss bucket:] %s to Datacenter' % oss_bucket_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss bucket:] %s is attached to Datacenter.' % oss_bucket_uuid)
return evt
def detach_oss_bucket_to_ecs_datacenter(oss_bucket_uuid, session_uuid=None):
action = api_actions.DetachOssBucketFromEcsDataCenterAction()
action.ossBucketUuid = oss_bucket_uuid
# action.dataCenterUuid = datacenter_uuid
test_util.action_logger('Detach [Oss bucket:] %s from Datacenter' % oss_bucket_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Oss bucket:] %s is detached from Datacenter.' % oss_bucket_uuid)
return evt
def get_identity_zone_from_remote(datacenter_type, region_id, session_uuid=None):
action = api_actions.GetIdentityZoneFromRemoteAction()
action.type = datacenter_type
action.regionId = region_id
test_util.action_logger('Get [Identity zone From Remote:] %s %s' % (datacenter_type, region_id))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def add_identity_zone_from_remote(datacenter_type, datacenter_uuid, zone_id, session_uuid=None):
action = api_actions.AddIdentityZoneFromRemoteAction()
action.type = datacenter_type
action.dataCenterUuid = datacenter_uuid
action.zoneId = zone_id
test_util.action_logger('Add [identity zone from remote:] %s %s' % (datacenter_uuid, zone_id))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[identity zone from remote:] %s %s is added.' % (datacenter_uuid, zone_id))
return evt.inventory
def del_identity_zone_in_local(uuid, session_uuid=None):
action = api_actions.DeleteIdentityZoneInLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [identity zone in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[identity zone in local:] %s is deleted.' % uuid)
return evt
def create_ecs_vpc_remote(datacenter_uuid, name, vrouter_name, cidr_block, session_uuid=None):
action = api_actions.CreateEcsVpcRemoteAction()
action.dataCenterUuid = datacenter_uuid
action.name = name
action.vRouterName = vrouter_name
action.cidrBlock = cidr_block
test_util.action_logger('Create [Ecs VPC Remote:] %s %s %s %s' % (datacenter_uuid, name, vrouter_name, cidr_block))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VPC Remote:] %s %s %s %s is created.' % (datacenter_uuid, name, vrouter_name, cidr_block))
return evt.inventory
def sync_ecs_vpc_from_remote(datacenter_uuid, session_uuid=None):
action = api_actions.SyncEcsVpcFromRemoteAction()
action.dataCenterUuid = datacenter_uuid
test_util.action_logger('Sync [Ecs VPC From Remote:] %s' % (datacenter_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def del_ecs_vpc_local(uuid, session_uuid=None):
action = api_actions.DeleteEcsVpcInLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs VPC Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VPC Local:] %s is deleted.' % (uuid))
return evt
def del_ecs_vpc_remote(uuid, session_uuid=None):
action = api_actions.DeleteEcsVpcRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs VPC Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VPC Remote:] %s is deleted.' % (uuid))
return evt
def create_ecs_vswtich_remote(vpc_uuid, identity_zone_uuid, name, cidr_block, session_uuid=None):
action = api_actions.CreateEcsVSwitchRemoteAction()
action.vpcUuid = vpc_uuid
action.identityZoneUuid = identity_zone_uuid
action.name = name
action.cidrBlock = cidr_block
test_util.action_logger('Create [Ecs VSwitch Remote:] %s %s %s %s' % (vpc_uuid, identity_zone_uuid, name, cidr_block))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VSwitch Remote:] %s %s %s %s is created.' % (vpc_uuid, identity_zone_uuid, name, cidr_block))
return evt.inventory
def create_hybrid_eip(data_center_uuid, name, band_width, charge_type='PayByTraffic', eip_type='aliyun', session_uuid=None):
action = api_actions.CreateHybridEipAction()
action.dataCenterUuid = data_center_uuid
action.name = name
action.bandWidthMb = band_width
action.chargeType = charge_type
action.type = eip_type
test_util.action_logger('Create [Hybrid Eip:] %s %s %s %s' % (data_center_uuid, name, charge_type, eip_type))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Hybrid Eip:] %s %s %s %s is created.' % (data_center_uuid, name, charge_type, eip_type))
return evt.inventory
def del_hybrid_eip_remote(uuid, eip_type='aliyun', session_uuid=None):
action = api_actions.DeleteHybridEipRemoteAction()
action.uuid = uuid
action.type = eip_type
test_util.action_logger('Delete [Hybrid Eip Remote:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Hybrid Eip Remote:] %s is deleted.' % uuid)
return evt
def attach_hybrid_eip_to_ecs(eip_uuid, ecs_uuid, eip_type='aliyun', session_uuid=None):
action = api_actions.AttachHybridEipToEcsAction()
action.eipUuid = eip_uuid
action.ecsUuid = ecs_uuid
action.type = eip_type
test_util.action_logger('Attach [Hybrid Eip :] %s to ECS %s' % (eip_uuid, ecs_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Hybrid Eip :] %s is attached to Ecs %s.' % (eip_uuid, ecs_uuid))
return evt
def detach_hybrid_eip_from_ecs(eip_uuid, eip_type='aliyun', session_uuid=None):
action = api_actions.DetachHybridEipFromEcsAction()
action.eipUuid = eip_uuid
action.type = eip_type
test_util.action_logger('Detach [Hybrid Eip :] %s from ECS' % eip_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Hybrid Eip :] %s is detached from Ecs.' % eip_uuid)
return evt
def sync_hybrid_eip_from_remote(data_center_uuid, eip_type='aliyun', session_uuid=None):
action = api_actions.SyncHybridEipFromRemoteAction()
action.dataCenterUuid = data_center_uuid
action.type = eip_type
test_util.action_logger('Sync [Hybrid Eip From Remote:] %s' % (data_center_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def sync_ecs_vswitch_from_remote(data_center_uuid, session_uuid=None):
action = api_actions.SyncEcsVSwitchFromRemoteAction()
action.dataCenterUuid = data_center_uuid
test_util.action_logger('Sync [Ecs VSwitch From Remote:] %s' % (data_center_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def del_ecs_vswitch_in_local(uuid, session_uuid=None):
action = api_actions.DeleteEcsVSwitchInLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs VSwitch: %s] in Local' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VSwitch: %s] in Local is deleted.' % uuid)
return evt
def del_ecs_vswitch_remote(uuid, session_uuid=None):
action = api_actions.DeleteEcsVSwitchRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs VSwitch Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs VSwitch Remote:] %s is deleted.' % (uuid))
return evt
def del_ecs_instance_local(uuid, session_uuid=None):
action = api_actions.DeleteEcsInstanceLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs Instance in Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs Instance in Local:] %s is deleted.' % (uuid))
return evt
def sync_aliyun_virtual_router_from_remote(vpc_uuid, session_uuid=None):
action = api_actions.SyncAliyunVirtualRouterFromRemoteAction()
action.vpcUuid = vpc_uuid
test_util.action_logger('Sync [Aliyun VirtualRouter From Remote:] %s' % (vpc_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def sync_router_entry_from_remote(vrouter_uuid, vrouter_type, session_uuid=None):
action = api_actions.SyncAliyunRouteEntryFromRemoteAction()
action.vRouterUuid = vrouter_uuid
action.vRouterType = vrouter_type
test_util.action_logger('Sync [Route Entry From Remote:] %s' % (vrouter_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def create_aliyun_vpc_virtualrouter_entry_remote(dst_cidr_block, vrouter_uuid, vrouter_type, next_hop_type, next_hop_uuid, session_uuid=None):
action = api_actions.CreateAliyunVpcVirtualRouterEntryRemoteAction()
action.dstCidrBlock = dst_cidr_block
action.vRouterUuid = vrouter_uuid
action.vRouterType = vrouter_type
action.nextHopType = next_hop_type
action.nextHopUuid = next_hop_uuid
test_util.action_logger('Create [VPC VirtualRouter Entry Remote:] %s %s %s %s %s' % (dst_cidr_block, vrouter_uuid, vrouter_type, next_hop_type, next_hop_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VPC VirtualRouter Entry Remote:] %s %s %s %s %s is created.' % (dst_cidr_block, vrouter_uuid, vrouter_type, next_hop_type, next_hop_uuid))
return evt.inventory
def create_vpn_ipsec_config(name, pfs='group2', enc_alg='3des', auth_alg='sha1', session_uuid=None):
action = api_actions.CreateVpnIpsecConfigAction()
action.name = name
action.pfs = pfs
action.encAlg = enc_alg
action.authAlg = auth_alg
test_util.action_logger('Create [VPN IPsec Config:] %s %s %s %s' % (name, pfs, enc_alg, auth_alg))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VPN IPsec Config:] %s %s %s %s is created.' % (name, pfs, enc_alg, auth_alg))
return evt.inventory
def create_vpn_ike_ipsec_config(name, psk, local_ip, remote_ip, pfs='group2', enc_alg='3des', auth_alg='sha1', version='ikev1', mode='main', session_uuid=None):
action = api_actions.CreateVpnIkeConfigAction()
action.psk = psk
action.pfs = pfs
action.localIp = local_ip
action.remoteIp = remote_ip
action.encAlg = enc_alg
action.authAlg = auth_alg
action.version = version
action.mode = mode
action.name = name
test_util.action_logger('Create [VPN Ike Config:] %s %s %s %s %s %s %s %s %s' % (name, local_ip, remote_ip, psk, pfs, enc_alg, auth_alg, version, mode))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VPN Ike Config:] %s %s %s %s %s %s %s %s %s is created.' % (name, local_ip, remote_ip, psk, pfs, enc_alg, auth_alg, version, mode))
return evt.inventory
def create_vpc_vpn_connection(user_gatway_uuid, vpn_gateway_uuid, name, local_cidr, remote_cidr, ike_config_uuid, ipsec_config_uuid, active='true', session_uuid=None):
action = api_actions.CreateVpcVpnConnectionRemoteAction()
action.userGatewayUuid = user_gatway_uuid
action.vpnGatewayUuid = vpn_gateway_uuid
action.name = name
action.localCidr = local_cidr
action.remoteCidr = remote_cidr
action.ikeConfUuid = ike_config_uuid
action.ipsecConfUuid = ipsec_config_uuid
action.active = active
test_util.action_logger('Create [VPC VPN Connection:] %s %s' % (vpn_gateway_uuid, user_gatway_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VPC VPN Connection:] %s %s is created.' % (vpn_gateway_uuid, user_gatway_uuid))
return evt.inventory
def create_vpc_user_vpn_gateway(data_center_uuid, gw_ip, gw_name, session_uuid=None):
action = api_actions.CreateVpcUserVpnGatewayRemoteAction()
action.dataCenterUuid = data_center_uuid
action.ip = gw_ip
action.name = gw_name
test_util.action_logger('Create [VPC User VPN Gateway:] %s %s' % (data_center_uuid, gw_ip))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VPC User VPN Gateway:] %s %s is created.' % (data_center_uuid, gw_ip))
return evt.inventory
def del_vpc_user_vpn_gateway_remote(uuid, session_uuid=None):
action = api_actions.DeleteVpcUserVpnGatewayRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Vpc User Vpn Gateway Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Vpc User Vpn Gateway Remote:] %s is deleted.' % (uuid))
return evt
def del_vpc_vpn_connection_remote(uuid, session_uuid=None):
action = api_actions.DeleteVpcVpnConnectionRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Vpc Vpn Connection Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Vpc Vpn Connection Remote:] %s is deleted.' % (uuid))
return evt
def del_aliyun_route_entry_remote(uuid, session_uuid=None):
action = api_actions.DeleteAliyunRouteEntryRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Aliyun Route Entry Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Aliyun Route Entry Remote:] %s is deleted.' % (uuid))
return evt
def del_vpc_vpn_gateway_local(uuid, session_uuid=None):
action = api_actions.DeleteVpcVpnGatewayLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Vpc Vpn Gateway in local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Vpc Vpn Gateway in local:] %s is deleted.' % (uuid))
return evt
def del_vpc_vpn_connection_local(uuid, session_uuid=None):
action = api_actions.DeleteVpcVpnConnectionLocalAction()
action.uuid = uuid
    test_util.action_logger('Delete [Vpc Vpn Connection Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Vpc Vpn Connection Local:] %s is deleted.' % (uuid))
return evt
def del_vpc_ike_config_local(uuid, session_uuid=None):
action = api_actions.DeleteVpcIkeConfigLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Vpc Ike Config in Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Vpc Ike Config in Local:] %s is deleted.' % (uuid))
return evt
def del_vpc_ipsec_config_local(uuid, session_uuid=None):
action = api_actions.DeleteVpcIpSecConfigLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [Vpc IPsec Config in Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Vpc IPsec Config in Local:] %s is deleted.' % (uuid))
return evt
def del_vpc_user_vpn_gateway_local(uuid, session_uuid=None):
action = api_actions.DeleteVpcUserVpnGatewayLocalAction()
action.uuid = uuid
    test_util.action_logger('Delete [Vpc User Vpn Gateway Local:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Vpc User Vpn Gateway Local:] %s is deleted.' % (uuid))
return evt
def destroy_vm_instance(uuid, session_uuid=None):
action = api_actions.DestroyVmInstanceAction()
action.uuid = uuid
test_util.action_logger('Destroy [VM Instance:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[VM Instance:] %s is destroyed.' % (uuid))
return evt
def create_ecs_security_group_remote(name, vpc_uuid, session_uuid=None):
action = api_actions.CreateEcsSecurityGroupRemoteAction()
action.name = name
action.vpcUuid = vpc_uuid
test_util.action_logger('Create [Ecs Security Group Remote:] %s %s' % (name, vpc_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Ecs Security Group Remote:] %s %s is created.' % (name, vpc_uuid))
return evt.inventory
def create_ecs_security_group_rule_remote(group_uuid, direction, protocol, port_range, cidr, policy, nic_type, priority, session_uuid=None):
action = api_actions.CreateEcsSecurityGroupRuleRemoteAction()
action.groupUuid = group_uuid
action.direction = direction
action.protocol = protocol
action.portRange = port_range
action.cidr = cidr
action.policy = policy
action.nictype = nic_type
action.priority = priority
test_util.action_logger('Create [Ecs Security Group Rule Remote:] %s %s %s %s %s %s %s %s' % (group_uuid, direction, protocol, port_range, cidr, policy, nic_type, priority))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs Security Group Rule Remote:] %s %s %s %s %s %s %s %s is created.' % (group_uuid, direction, protocol, port_range, cidr, policy, nic_type, priority))
return evt.inventory
def sync_ecs_security_group_from_remote(ecs_vpc_uuid, session_uuid=None):
action = api_actions.SyncEcsSecurityGroupFromRemoteAction()
action.ecsVpcUuid = ecs_vpc_uuid
test_util.action_logger('Sync [Security Group From Remote:] %s' % (ecs_vpc_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def sync_ecs_security_group_rule_from_remote(sg_uuid, session_uuid=None):
action = api_actions.SyncEcsSecurityGroupRuleFromRemoteAction()
action.uuid = sg_uuid
test_util.action_logger('Sync [Security Group From Remote:] %s' % (sg_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def sync_vpc_vpn_gateway_from_remote(data_center_uuid, session_uuid=None):
action = api_actions.SyncVpcVpnGatewayFromRemoteAction()
action.dataCenterUuid = data_center_uuid
test_util.action_logger('Sync [Vpc Vpn Gateway From Remote:] %s' % (data_center_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def sync_vpc_user_vpn_gateway_from_remote(data_center_uuid, session_uuid=None):
action = api_actions.SyncVpcUserVpnGatewayFromRemoteAction()
action.dataCenterUuid = data_center_uuid
test_util.action_logger('Sync [Vpc User Vpn Gateway From Remote:] %s' % (data_center_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def sync_vpc_vpn_connection_from_remote(data_center_uuid, session_uuid=None):
action = api_actions.SyncVpcVpnConnectionFromRemoteAction()
action.dataCenterUuid = data_center_uuid
test_util.action_logger('Sync [Vpc Vpn Connection From Remote:] %s' % (data_center_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def del_ecs_security_group_in_local(uuid, session_uuid=None):
action = api_actions.DeleteEcsSecurityGroupInLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [ecs security group in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs security group in local:] %s is deleted.' % uuid)
return evt
def del_ecs_security_group_rule_remote(uuid, session_uuid=None):
action = api_actions.DeleteEcsSecurityGroupRuleRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs Security Group Rule Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs Security Group Rule Remote:] %s is deleted.' % (uuid))
return evt
def del_ecs_security_group_remote(uuid, session_uuid=None):
action = api_actions.DeleteEcsSecurityGroupRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [Ecs Security Group Remote:] %s ' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Ecs Security Group Remote:] %s is deleted.' % (uuid))
return evt
def create_ecs_image_from_local_image(bs_uuid, datacenter_uuid, image_uuid, name, session_uuid=None):
action = api_actions.CreateEcsImageFromLocalImageAction()
action.backupStorageUuid = bs_uuid
action.dataCenterUuid = datacenter_uuid
action.imageUuid = image_uuid
action.name = name
test_util.action_logger('Create Ecs Image from [Local image:] %s %s %s' % (bs_uuid, datacenter_uuid, image_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('Ecs Image is created from [Local image:] %s %s %s.' % (bs_uuid, datacenter_uuid, image_uuid))
return evt.inventory
def del_ecs_image_remote(uuid, session_uuid=None):
action = api_actions.DeleteEcsImageRemoteAction()
action.uuid = uuid
test_util.action_logger('Delete [ecs image remote:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs image remote:] %s is deleted.' % uuid)
return evt
def del_ecs_image_in_local(uuid, session_uuid=None):
action = api_actions.DeleteEcsImageLocalAction()
action.uuid = uuid
test_util.action_logger('Delete [ecs image in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs image in local:] %s is deleted.' % uuid)
return evt
def del_hybrid_eip_local(uuid, eip_type='aliyun', session_uuid=None):
action = api_actions.DeleteHybridEipFromLocalAction()
action.type = eip_type
action.uuid = uuid
test_util.action_logger('Delete [Hybrid Eip in local:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[Hybrid Eip in local:] %s is deleted.' % uuid)
return evt
def sync_ecs_image_from_remote(datacenter_uuid, image_type='self', session_uuid=None):
action = api_actions.SyncEcsImageFromRemoteAction()
action.dataCenterUuid = datacenter_uuid
action.type = image_type
test_util.action_logger('Sync [Ecs Image From Remote:] %s' % (datacenter_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def create_ecs_instance_from_ecs_image(ecs_root_password, image_uuid, ecs_vswitch_uuid, ecs_bandwidth, ecs_security_group_uuid, instance_offering_uuid=None, instance_type=None, private_ip_address=None, allocate_public_ip='false', name=None, ecs_console_password=None, session_uuid=None):
action = api_actions.CreateEcsInstanceFromEcsImageAction()
action.ecsRootPassword = ecs_root_password
action.ecsImageUuid = image_uuid
action.ecsVSwitchUuid = ecs_vswitch_uuid
action.instanceOfferingUuid = instance_offering_uuid
action.instanceType = instance_type
action.ecsBandWidth = ecs_bandwidth
action.ecsSecurityGroupUuid = ecs_security_group_uuid
action.privateIpAddress = private_ip_address
action.allocatePublicIp = allocate_public_ip
action.name = name
action.ecsConsolePassword = ecs_console_password
test_util.action_logger('Create Ecs Instance from [Ecs Image:] %s' % image_uuid)
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('Ecs Instance is created from [Ecs Image:] %s.' % image_uuid)
return evt.inventory
def del_ecs_instance(uuid, session_uuid=None):
action = api_actions.DeleteEcsInstanceAction()
action.uuid = uuid
test_util.action_logger('Delete [ecs instance:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs instance:] %s is deleted.' % uuid)
return evt
def sync_ecs_instance_from_remote(datacenter_uuid, only_zstack=None, session_uuid=None):
action = api_actions.SyncEcsInstanceFromRemoteAction()
action.dataCenterUuid = datacenter_uuid
action.onlyZstack = only_zstack
test_util.action_logger('Sync [Ecs Instance From Remote:] %s' % (datacenter_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def update_ecs_instance(uuid, name=None, description=None, password=None, session_uuid=None):
action = api_actions.UpdateEcsInstanceAction()
action.uuid = uuid
action.name = name
action.description = description
action.password = password
test_util.action_logger('Update [Ecs Instance: %s]' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def stop_ecs_instance(uuid, session_uuid=None):
action = api_actions.StopEcsInstanceAction()
action.uuid = uuid
test_util.action_logger('Stop [ecs instance:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs instance:] %s is stopped.' % uuid)
return evt
def start_ecs_instance(uuid, session_uuid=None):
action = api_actions.StartEcsInstanceAction()
action.uuid = uuid
test_util.action_logger('Start [ecs instance:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs instance:] %s is started.' % uuid)
return evt
def reboot_ecs_instance(uuid, session_uuid=None):
action = api_actions.RebootEcsInstanceAction()
action.uuid = uuid
test_util.action_logger('Reboot [ecs instance:] %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs instance:] %s is rebooted.' % uuid)
return evt
def update_ecs_instance_vnc_password(uuid, password, session_uuid=None):
action = api_actions.UpdateEcsInstanceVncPasswordAction()
action.uuid = uuid
action.password = password
test_util.action_logger('Update [ecs instance:] vnc password %s' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.test_logger('[ecs instance:] %s vnc password is updated.' % uuid)
return evt
def update_image_guestOsType(uuid, guest_os_type, session_uuid=None):
action = api_actions.UpdateImageAction()
action.uuid = uuid
action.guestOsType = guest_os_type
test_util.action_logger('Update [image %s] guestOsType' % (uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
test_util.action_logger('[image %s] guestOsType is updated to [%s]' % (uuid, guest_os_type))
return evt
def query_ecs_image_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsImageFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Ecs image from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ecs_vpc_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsVpcFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Ecs Vpc from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ecs_vswitch_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsVSwitchFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Ecs vSwitch from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ecs_instance_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsInstanceFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Ecs Instance from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_aliyun_key_secret(condition=[], session_uuid=None):
action = api_actions.QueryAliyunKeySecretAction()
action.conditions = condition
test_util.action_logger('Query Aliyun Key Secret')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_datacenter_local(condition=[], session_uuid=None):
action = api_actions.QueryDataCenterFromLocalAction()
action.conditions = condition
test_util.action_logger('Query DataCenter from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ecs_security_group_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsSecurityGroupFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Ecs Security Group from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ecs_security_group_rule_local(condition=[], session_uuid=None):
action = api_actions.QueryEcsSecurityGroupRuleFromLocalAction()
action.conditions = condition
    test_util.action_logger('Query Ecs Security Group Rule from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_hybrid_eip_local(condition=[], session_uuid=None):
action = api_actions.QueryHybridEipFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Hybrid Eip from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_vpc_vpn_gateway_local(condition=[], session_uuid=None):
action = api_actions.QueryVpcVpnGatewayFromLocalAction()
action.conditions = condition
    test_util.action_logger('Query Vpc Vpn Gateway from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_vpc_vpn_ike_config_local(condition=[], session_uuid=None):
action = api_actions.QueryVpcIkeConfigFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Vpc Vpn Ike Config from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_vpc_vpn_ipsec_config_local(condition=[], session_uuid=None):
action = api_actions.QueryVpcIpSecConfigFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Vpc Vpn IPsec Config from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_vpc_user_vpn_gateway_local(condition=[], session_uuid=None):
action = api_actions.QueryVpcUserVpnGatewayFromLocalAction()
action.conditions = condition
    test_util.action_logger('Query Vpc User Vpn Gateway from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_vpc_vpn_connection_local(condition=[], session_uuid=None):
action = api_actions.QueryVpcVpnConnectionFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Vpc Vpn Connection from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_aliyun_virtual_router_local(condition=[], session_uuid=None):
action = api_actions.QueryAliyunVirtualRouterFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Aliyun Virtual Router from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_aliyun_route_entry_local(condition=[], session_uuid=None):
action = api_actions.QueryAliyunRouteEntryFromLocalAction()
action.conditions = condition
test_util.action_logger('Query Aliyun Route Entry from local')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_oss_bucket_file_name(condition=[], session_uuid=None):
action = api_actions.QueryOssBucketFileNameAction()
action.conditions = condition
test_util.action_logger('Query Oss Bucket File Name')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def query_ipsec_connection(condition=[], session_uuid=None):
action = api_actions.QueryIPSecConnectionAction()
action.conditions = condition
test_util.action_logger('Query IPsec Connection')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_ecs_instance_vnc_url(uuid, session_uuid=None):
action = api_actions.GetEcsInstanceVncUrlAction()
action.uuid = uuid
    test_util.action_logger('Get Ecs Instance Vnc Url')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_create_ecs_image_progress(data_center_uuid, image_uuid, session_uuid=None):
action = api_actions.GetCreateEcsImageProgressAction()
action.dataCenterUuid = data_center_uuid
action.imageUuid = image_uuid
test_util.action_logger('Get Create ECS Image Progress')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
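A hypothetical call sequence using the helpers above; the key material, region id and CIDR are placeholders, and the returned inventories are assumed to expose uuid fields as elsewhere in zstackwoodpecker:
import zstackwoodpecker.operations.hybrid_operations as hyb_ops
ks_inv = hyb_ops.add_aliyun_key_secret('test-ks', 'hybrid test key', 'ACCESS_KEY_ID', 'ACCESS_KEY_SECRET')
hyb_ops.attach_aliyun_key(ks_inv.uuid)  # uuid attribute on the inventory is an assumption
dc_inv = hyb_ops.add_datacenter_from_remote('aliyun', 'cn-hangzhou', 'test datacenter')
vpc_inv = hyb_ops.create_ecs_vpc_remote(dc_inv.uuid, 'test-vpc', 'test-vrouter', '172.16.0.0/12')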
| 50.73665
| 288
| 0.75107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,124
| 0.146483
|
a7b426f2174df82614170a394fb020de8df61abf
| 293
|
py
|
Python
|
datasets_example/populate_elastic.py
|
aleksbobic/csx
|
151ac6644694ac585bf5d070bae7146e94c30aec
|
[
"MIT"
] | null | null | null |
datasets_example/populate_elastic.py
|
aleksbobic/csx
|
151ac6644694ac585bf5d070bae7146e94c30aec
|
[
"MIT"
] | null | null | null |
datasets_example/populate_elastic.py
|
aleksbobic/csx
|
151ac6644694ac585bf5d070bae7146e94c30aec
|
[
"MIT"
] | null | null | null |
import requests
import sys

# Create the target index (name taken from the first CLI argument).
requests.put(f"http://localhost:9200/{sys.argv[1]}?pretty")

# Bulk-load the NDJSON file (second CLI argument) into that index.
headers = {"Content-Type": "application/x-ndjson"}
data = open(sys.argv[2], "rb").read()
requests.post(
    f"http://localhost:9200/{sys.argv[1]}/_bulk?pretty", headers=headers, data=data
)
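A quick way to confirm the bulk load worked is to ask Elasticsearch for the document count of the freshly created index. This is a minimal follow-up sketch, not part of the original script; it assumes the same local cluster on localhost:9200 and the index name passed as the first CLI argument:

import requests
import sys

# Assumption: same cluster and index name as the loader above.
resp = requests.get(f"http://localhost:9200/{sys.argv[1]}/_count?pretty")
print(resp.json())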
| 22.538462
| 84
| 0.665529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.464164
|
a7b4359004682dcf25fa55ff619454c6c53df969
| 47
|
py
|
Python
|
numba/cuda/simulator/cudadrv/error.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,620
|
2015-01-04T08:51:04.000Z
|
2022-03-31T12:52:18.000Z
|
numba/cuda/simulator/cudadrv/error.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,457
|
2015-01-04T03:18:41.000Z
|
2022-03-31T17:38:42.000Z
|
numba/cuda/simulator/cudadrv/error.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 930
|
2015-01-25T02:33:03.000Z
|
2022-03-30T14:10:32.000Z
|
class CudaSupportError(RuntimeError):
pass
| 15.666667
| 37
| 0.787234
| 46
| 0.978723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a7b495865e605d6301abb8d08c5cad2ee915a172
| 7,369
|
py
|
Python
|
tests/unit/utils/test_attributes.py
|
pyqgis/plutil
|
79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d
|
[
"MIT"
] | null | null | null |
tests/unit/utils/test_attributes.py
|
pyqgis/plutil
|
79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d
|
[
"MIT"
] | null | null | null |
tests/unit/utils/test_attributes.py
|
pyqgis/plutil
|
79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Unit tests for Attributes.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import os
import shutil
import tempfile
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from qgis.PyQt.QtCore import QVariant
from qgis.core import (
QgsWkbTypes, QgsProject, QgsVectorLayer, QgsField,
QgsVectorDataProvider
)
from qgis_plutil.utils.attributes import (
variant_ctor_for_object, fields_from_data,
merge_fields_in_provider,
)
logger = logging.getLogger('tests.attributes')
class TestVariantCtorForObject(TestCase):
def test_invalid_params(self):
with self.assertRaises(NotImplementedError):
variant_ctor_for_object(None)
with self.assertRaises(NotImplementedError):
variant_ctor_for_object({})
with self.assertRaises(NotImplementedError):
variant_ctor_for_object(b'dd')
def test_valid_params(self):
self.assertEqual(variant_ctor_for_object(0.5), QVariant.Double)
self.assertEqual(variant_ctor_for_object(1), QVariant.Int)
self.assertEqual(variant_ctor_for_object(""), QVariant.String)
self.assertEqual(variant_ctor_for_object("test"), QVariant.String)
class TestFieldsFromData(TestCase):
def test_invalid(self):
with self.assertRaises(ValueError):
fields_from_data("abc")
with self.assertRaises(TypeError):
fields_from_data(1)
with self.assertRaises(ValueError):
fields_from_data(["abc"])
with self.assertRaises(ValueError):
fields_from_data([1])
def test_mixed_dict(self):
with self.assertRaises(ValueError):
fields, are_dicts = fields_from_data([{1: 1, 2: 1, 3: 1}, [1]])
with self.assertRaises(ValueError):
fields, are_dicts = fields_from_data([[1, 4, 6], {1: 1, 6: 9}])
def test_valid_list_of_one(self):
fields, are_dicts = fields_from_data([[1]])
self.assertFalse(are_dicts)
self.assertEqual(len(fields), 1)
self.assertIsInstance(fields["Field 1"], QgsField)
self.assertEqual(fields["Field 1"].type(), QVariant.Int)
self.assertEqual(fields["Field 1"].name(), "Field 1")
def test_valid_lists(self):
fields, are_dicts = fields_from_data([[1, "2", 3.5]])
self.assertFalse(are_dicts)
self.assertEqual(len(fields), 3)
self.assertIsInstance(fields["Field 1"], QgsField)
self.assertIsInstance(fields["Field 2"], QgsField)
self.assertIsInstance(fields["Field 3"], QgsField)
self.assertEqual(fields["Field 1"].type(), QVariant.Int)
self.assertEqual(fields["Field 1"].name(), "Field 1")
self.assertEqual(fields["Field 2"].type(), QVariant.String)
self.assertEqual(fields["Field 2"].name(), "Field 2")
self.assertEqual(fields["Field 3"].type(), QVariant.Double)
self.assertEqual(fields["Field 3"].name(), "Field 3")
def test_two_valid_lists(self):
fields, are_dicts = fields_from_data([
[1, "2", 3.5], [4, 5, 6, 7]
])
self.assertFalse(are_dicts)
self.assertEqual(len(fields), 4)
self.assertIsInstance(fields["Field 1"], QgsField)
self.assertIsInstance(fields["Field 2"], QgsField)
self.assertIsInstance(fields["Field 3"], QgsField)
self.assertIsInstance(fields["Field 4"], QgsField)
self.assertEqual(fields["Field 1"].type(), QVariant.Int)
self.assertEqual(fields["Field 1"].name(), "Field 1")
self.assertEqual(fields["Field 2"].type(), QVariant.String)
self.assertEqual(fields["Field 2"].name(), "Field 2")
self.assertEqual(fields["Field 3"].type(), QVariant.Double)
self.assertEqual(fields["Field 3"].name(), "Field 3")
self.assertEqual(fields["Field 4"].type(), QVariant.Int)
self.assertEqual(fields["Field 4"].name(), "Field 4")
def test_valid_dict_of_one(self):
fields, are_dicts = fields_from_data([{1: 2}])
self.assertTrue(are_dicts)
self.assertEqual(len(fields), 1)
self.assertIsInstance(fields['1'], QgsField)
self.assertEqual(fields['1'].type(), QVariant.Int)
def test_valid_dicts(self):
fields, are_dicts = fields_from_data([{1: 'a', "2": 4, 3: 3.5}])
self.assertTrue(are_dicts)
self.assertEqual(len(fields), 3)
self.assertIsInstance(fields["1"], QgsField)
self.assertIsInstance(fields["2"], QgsField)
self.assertIsInstance(fields["3"], QgsField)
self.assertEqual(fields["1"].type(), QVariant.String)
self.assertEqual(fields["1"].name(), "1")
self.assertEqual(fields["2"].type(), QVariant.Int)
self.assertEqual(fields["2"].name(), "2")
self.assertEqual(fields["3"].type(), QVariant.Double)
self.assertEqual(fields["3"].name(), "3")
def test_two_valid_dicts(self):
fields, are_dicts = fields_from_data([
{1: 'a', "2": 4, 3: 3.5},
{1: 'a', "2": 4, 3: 3.5, "some other": 5},
])
self.assertTrue(are_dicts)
self.assertEqual(len(fields), 4)
self.assertIsInstance(fields["1"], QgsField)
self.assertIsInstance(fields["2"], QgsField)
self.assertIsInstance(fields["3"], QgsField)
self.assertIsInstance(fields["some other"], QgsField)
self.assertEqual(fields["1"].type(), QVariant.String)
self.assertEqual(fields["1"].name(), "1")
self.assertEqual(fields["2"].type(), QVariant.Int)
self.assertEqual(fields["2"].name(), "2")
self.assertEqual(fields["3"].type(), QVariant.Double)
self.assertEqual(fields["3"].name(), "3")
self.assertEqual(fields["some other"].type(), QVariant.Int)
self.assertEqual(fields["some other"].name(), "some other")
class TestMergeFieldsInProvider(TestCase):
def test_no_layer(self):
provider = MagicMock(spec=QgsVectorDataProvider)
pf1 = MagicMock(spec=QgsField)
pf1.name.return_value = "1"
pf2 = MagicMock(spec=QgsField)
pf2.name.return_value = "2"
pf3 = MagicMock(spec=QgsField)
pf3.name.return_value = "3"
provider.fields.return_value = [pf1, pf2, pf3]
pn1 = MagicMock(spec=QgsField)
pn1.name.return_value = "1"
pn4 = MagicMock(spec=QgsField)
pn4.name.return_value = "4"
pn9 = MagicMock(spec=QgsField)
pn9.name.return_value = "9"
merge_fields_in_provider(
provider,
fields={'1': pn1, '4': pn4, '9': pn9},
layer=None)
provider.addAttributes.assert_called_once_with([pn4, pn9])
def test_layer(self):
pf1 = MagicMock(spec=QgsField)
pf1.name.return_value = "1"
pf2 = MagicMock(spec=QgsField)
pf2.name.return_value = "2"
pf3 = MagicMock(spec=QgsField)
pf3.name.return_value = "3"
provider = MagicMock(spec=QgsVectorDataProvider)
provider.fields.return_value = [pf1, pf2, pf3]
layer = MagicMock(spec=QgsVectorLayer)
merge_fields_in_provider(
provider,
fields={'1': pf1, '2': pf2, '3': pf3},
layer=layer)
provider.addAttributes.assert_called_once_with([])
layer.updateFields.assert_called_once()
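Taken together, these tests document the contract of fields_from_data: lists of row values produce auto-named fields ("Field 1", "Field 2", ...) with QVariant types inferred from the values, while dicts produce fields named after the keys. A minimal sketch of calling it directly, assuming a QGIS Python environment where qgis_plutil is importable:

from qgis.PyQt.QtCore import QVariant
from qgis_plutil.utils.attributes import fields_from_data

# Rows given as lists: names are generated, types inferred per column.
fields, are_dicts = fields_from_data([[1, "2", 3.5]])
assert not are_dicts
assert len(fields) == 3
assert fields["Field 1"].type() == QVariant.Int
assert fields["Field 2"].type() == QVariant.String
assert fields["Field 3"].type() == QVariant.Double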
| 39.832432
| 75
| 0.639707
| 6,775
| 0.919392
| 0
| 0
| 0
| 0
| 0
| 0
| 593
| 0.080472
|
a7b4c6f5823d6451d0e2b584c552a30bc5d41136
| 191
|
py
|
Python
|
portal/template.py
|
SlapBass/nx-portal
|
ee262079db1e5230a24ebbc205e44926f11f8da9
|
[
"Apache-2.0"
] | 5
|
2019-10-04T04:46:44.000Z
|
2019-10-09T10:02:01.000Z
|
portal/template.py
|
SlapBass/nx-portal
|
ee262079db1e5230a24ebbc205e44926f11f8da9
|
[
"Apache-2.0"
] | 9
|
2019-10-06T07:15:09.000Z
|
2020-09-24T02:19:40.000Z
|
portal/template.py
|
SlapBass/nx-portal
|
ee262079db1e5230a24ebbc205e44926f11f8da9
|
[
"Apache-2.0"
] | 1
|
2020-06-19T13:26:08.000Z
|
2020-06-19T13:26:08.000Z
|
from django.conf import settings
def shared_view_contexts(request):
return {
'APP_NAME': settings.APPLICATION_NAME,
'SEASONABLE_EMOJI': settings.SEASONABLE_EMOJI,
}
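shared_view_contexts is a Django template context processor, so it only takes effect once it is registered in the TEMPLATES setting. A minimal sketch, assuming the dotted path portal.template matches this file's location and that APPLICATION_NAME and SEASONABLE_EMOJI are defined in settings; the rest of the TEMPLATES block is standard boilerplate, not taken from this repository:

# settings.py (sketch)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "portal.template.shared_view_contexts",  # exposes APP_NAME / SEASONABLE_EMOJI
            ],
        },
    },
]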
| 21.222222
| 54
| 0.712042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.146597
|
a7b57910267fcbe3aaeef29154600de509e91fa3
| 12,923
|
py
|
Python
|
app.py
|
Tesfa-eth/online_book_store
|
b3a9b24065f3952f2ea26107a72a96fe6c1988e8
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Tesfa-eth/online_book_store
|
b3a9b24065f3952f2ea26107a72a96fe6c1988e8
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Tesfa-eth/online_book_store
|
b3a9b24065f3952f2ea26107a72a96fe6c1988e8
|
[
"Apache-2.0"
] | null | null | null |
from enum import unique
from typing import Reversible
from flask import Flask, app, render_template, url_for, redirect, request
#import flask
from flask.helpers import flash
from flask_login.utils import login_fresh
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, login_manager, login_user, LoginManager, login_required,logout_user, current_user
from flask_wtf.form import FlaskForm
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Length, ValidationError
from flask_bcrypt import Bcrypt
import requests
app = Flask(__name__)
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' # connects to the db
app.config['SECRET_KEY'] = 'thisisasecretekey'
# log in manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view="login"
global_username = ''
store = ''
count = 0
@login_manager.user_loader
def load_user(user_id):
"""load user"""
return User.query.get(int(user_id))
class User(db.Model, UserMixin): # creating the table
"""User table with user name and hashed password colums"""
id = db.Column(db.Integer, primary_key=True)
firstname = db.Column(db.String(20), nullable=True, unique=False)
lastname = db.Column(db.String(20), nullable=True, unique=False)
username = db.Column(db.String(20), nullable=False, unique=True)
password = db.Column(db.String(80), nullable=False)
class BookStore(db.Model, UserMixin): # creating the table
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80), nullable=False, unique=False)
description = db.Column(db.String(200), nullable=False, unique=False)
url = db.Column(db.String(80), nullable=False, unique=False)
#material_id = db.Column(db.String(80), nullable=False)
class ReadList(db.Model, UserMixin): # creating the table
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False, unique=False)
book_id = db.Column(db.Integer, nullable=False, unique=False)
class ReadListSearch(db.Model, UserMixin): # creating the table
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, primary_key=False)
isbn = db.Column(db.String(80), nullable=False, unique=False)
title = db.Column(db.String(80), nullable=False, unique=False)
subject = db.Column(db.String(200), nullable=False, unique=False)
# register and log in forms
class RegisterForm(FlaskForm):
firstname = StringField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "First name"})
lastname = StringField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "Last name"})
username = StringField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "Enter username/email here"})
password = PasswordField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "Password"})
submit = SubmitField("Register")
def validate_username(self, username):
        # Check whether this username already exists
existing_username = User.query.filter_by(username=username.data).first()
if existing_username:
#print('existing user name') #debug
# redirect somewhere after this!
raise ValidationError(
"The username already exists. Please choose a different one."
)
class LoginForm(FlaskForm):
username = StringField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "Username/email"})
password = PasswordField(validators=[InputRequired(), Length(min=4,max=20)],
render_kw={"placeholder": "Password"})
submit = SubmitField("Log in")
@app.route('/')
def index():
"""render index html"""
logout_user() # log out user if any on the session
#print('done')
print(current_user.is_authenticated)
return render_template('index.html', auth=current_user.is_authenticated)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
"""register a newuser"""
form = RegisterForm()
if form.validate_on_submit():
print("validating")
hashed_password = bcrypt.generate_password_hash(form.password.data)
# get the form
print('creating table')
new_user = User(firstname=form.firstname.data, lastname=form.lastname.data,username=form.username.data, password=hashed_password)
db.create_all() # create the table
db.session.add(new_user)
db.session.commit()
#print('table created')
flash('Successfully Registered!')
return redirect(url_for('login'))
return render_template('signup.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
#print(current_user.is_authenticated) # check if user logged in
global global_username
global global_user_id
form = LoginForm()
#print("username -",form.username.data, "password- ", form.password.data) # debug
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
global_username = user.username # make these two global for future queries
global_user_id = user.id
if bcrypt.check_password_hash(user.password,form.password.data):
login_user(user)
#flash(f'Welcome, {user.username}')
return redirect(url_for('dashboard'))
else:
flash(f'Wrong password!')
else:
flash(f'Wrong username')
return render_template('login.html', form=form)
@app.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
"""log out the user"""
logout_user()
return redirect(url_for('login'))
@app.route('/dashboard',methods=['GET', 'POST'])
@login_required
def dashboard():
# object username
#print(load_user(user_id_print).username) -- gives me the id and username of logged person
books = BookStore.query.all()
#print(books)
user = User.query.filter_by(id=global_user_id).first()
return render_template('dashboard.html', username=global_username, books=books, firstname=user.firstname, lastname=user.lastname )
@app.route('/myreadinglist',methods=['GET', 'POST'])
@login_required
def myreadinglist():
readlist_object = ReadList.query.filter_by(user_id=global_user_id).all()
readlistSearch_object = ReadListSearch.query.filter_by(user_id=global_user_id).all()
#print(cart_list)
books_list = []
#print(readlist_object)
for books in readlist_object:
# #print(material.material_id)
books_list.append(BookStore.query.filter_by(id=books.book_id).first())
books_list_search = []
#print(readlistSearch_object, 'readlistSearch object')
for books in readlistSearch_object:
#print(books.isbn)
books_list_search.append(ReadListSearch.query.filter_by(isbn=books.isbn).first())
print(books_list_search)
return render_template('myreadinglist.html', username=global_username, books_list=books_list, books_list_search=books_list_search)
@app.route('/<int:id>/addtoreadinglist', methods=('POST',))
@login_required
def add_to_readingList(id):
#print(id, 'book id')
book_object = ReadList.query.filter_by(user_id=global_user_id).all()
print(book_object, "book")
book_ids = list(map(lambda x: (x.book_id), book_object))
    if id not in book_ids:  # check if it already exists
print("adding material")
add_material_to_cart = ReadList(user_id=global_user_id, book_id=id)
db.create_all() # create the table
db.session.add(add_material_to_cart)
db.session.commit()
#print("added sucessfully!")
flash('Successfully added to cart!')
else:
flash('The item is already in the cart!')
return redirect(url_for('dashboard'))
@app.route('/<int:id>/remove', methods=('POST',))
@login_required
def remove_from_readinglist(id):
print(id, "id to remove")
ReadList.query.filter(ReadList.user_id == global_user_id, ReadList.book_id == id).delete()
db.session.commit()
"""for i in temp:
print(i.user_id)"""
return redirect(url_for('myreadinglist'))
@app.route('/searchbook',methods=['GET', 'POST'])
@login_required
def searchbook():
if store and count:
store1 = store
count1 = count
else:
store1 = ''
count1 = 0
return render_template('searchbook.html', username=global_username, store=store1, count=count1)
def clean_data(data, i):
#global book_title, author_names, publish_year_first, isbn, subject
try:
book_title = data['docs'][i]['title']
except:
book_title = 'no book title'
try:
author_names = data['docs'][i]['author_name']
except:
author_names = 'no author name'
try:
publish_year_first = data['docs'][i]['publish_year'][0]
except:
publish_year_first = 'no published date'
try: # some may not have isbn
isbn = data['docs'][i]['isbn'][1] # set this to empty string later
except:
isbn = ''
try: # some may not have subject
subject = data['docs'][0]['subject'][0] # can take as many as needed
except:
subject = 'No subject available'
return book_title, author_names, publish_year_first, isbn, subject
def get_url(isbn):
if isbn:
try:
img_url = 'https://covers.openlibrary.org/b/isbn/' + str(isbn) + '-L.jpg'
except:
img_url = 'https://leadershiftinsights.com/wp-content/uploads/2019/07/no-book-cover-available.jpg'
else:
img_url = 'https://leadershiftinsights.com/wp-content/uploads/2019/07/no-book-cover-available.jpg'
return img_url
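# Hedged illustration (not part of the original app): how clean_data and
# get_url are meant to be combined on a single Open Library search response.
# The query title is a placeholder.
#
#   resp = requests.get("http://openlibrary.org/search.json?title=dune").json()
#   title, authors, year, isbn, subject = clean_data(resp, 0)
#   cover_url = get_url(isbn)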
@app.route('/searchbookBtn', methods=('POST',))
@login_required
def searchbookBtn():
global list_json, result_count
#print(id, 'book id')
#global store
if request.method == 'GET':
return f"The URL /data is accessed directly. Try going to '/form' to submit form"
if request.method == 'POST':
form_data = request.form
searched_title = form_data['searchedtitle']
try:
response = requests.get("http://openlibrary.org/search.json?title=" + searched_title)
response_data = response.json()
except:
response_data = []
#return render_template('data.html',form_data = form_data)
list_json=[]
dict_store= {}
result_count = len(response_data['docs'])
#print(response_data)
for i in range(result_count):
book_title, author_names, publish_year_first, isbn, subject = clean_data(response_data, i)
img_url = get_url(isbn)
dict_store['book_title'] = book_title
dict_store['author_names'] = author_names
dict_store['publish_year_first'] = publish_year_first
dict_store['isbn'] = isbn
dict_store['subject'] = subject
dict_store['img_url'] = img_url
list_json.append(dict_store)
dict_store = {}
#print(isbn)
return render_template('searchbook.html', username=global_username, store=list_json, count=result_count)
@app.route('/<isbn>/<book_title>/<book_subject>/addtoRlistFromSearch', methods=('POST',))
@login_required
def addtoRlistFromSearch(isbn, book_title, book_subject):
print(isbn, 'book isbn')
print(book_title, 'book title')
print(book_subject, 'book subject')
#print('something gioing on here')
#print(user_id_print, "user id")
book_object = ReadListSearch.query.filter_by(user_id=global_user_id).all()
print(book_object, "book")
book_ids = list(map(lambda x: (x.isbn), book_object))
    if isbn not in book_ids:  # check if it already exists
print("adding material")
add_material_to_cart = ReadListSearch(user_id=global_user_id, isbn=isbn, title=book_title, subject=book_subject)
db.create_all() # create the table
db.session.add(add_material_to_cart)
db.session.commit()
print("added sucessfully!")
flash('Successfully added to cart!')
else:
flash('The item is already in the cart!')
return render_template('searchbook.html', username=global_username, store=list_json, count=result_count)
@app.route('/<isbn>/removereadlinglistSearch', methods=('POST',))
@login_required
def remove_from_readinglistS(isbn):
print(isbn, "id to remove")
ReadListSearch.query.filter(ReadListSearch.user_id == global_user_id, ReadListSearch.isbn == isbn).delete()
db.session.commit()
"""for i in temp:
print(i.user_id)"""
flash("Removed from your reading list successfully!")
return redirect(url_for('myreadinglist'))
if __name__ == '__main__':
db.create_all()
app.run(debug=True)
| 37.676385
| 137
| 0.686141
| 2,739
| 0.211948
| 0
| 0
| 7,846
| 0.607135
| 0
| 0
| 3,504
| 0.271144
|
a7b6c1b55e93ba5383e18ba855c03c57d497af3c
| 5,560
|
py
|
Python
|
aei_net.py
|
ilyakava/faceshifter
|
aa48f9eb79991bae4ee9d8c69bb3b51891a9f721
|
[
"BSD-3-Clause"
] | null | null | null |
aei_net.py
|
ilyakava/faceshifter
|
aa48f9eb79991bae4ee9d8c69bb3b51891a9f721
|
[
"BSD-3-Clause"
] | null | null | null |
aei_net.py
|
ilyakava/faceshifter
|
aa48f9eb79991bae4ee9d8c69bb3b51891a9f721
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
from torchvision.models import resnet101
import pytorch_lightning as pl
from model.AEINet import ADDGenerator, MultilevelAttributesEncoder
from model.MultiScaleDiscriminator import MultiscaleDiscriminator
from model.loss import GANLoss, AEI_Loss
from dataset import *
class AEINet(pl.LightningModule):
def __init__(self, hp):
super(AEINet, self).__init__()
self.hp = hp
self.G = ADDGenerator(hp.arcface.vector_size)
self.E = MultilevelAttributesEncoder()
self.D = MultiscaleDiscriminator(3)
self.Z = resnet101(num_classes=256)
self.Z.load_state_dict(torch.load(hp.arcface.chkpt_path, map_location='cpu'))
self.Loss_GAN = GANLoss()
self.Loss_E_G = AEI_Loss()
def forward(self, target_img, source_img):
z_id = self.Z(F.interpolate(source_img, size=112, mode='bilinear'))
z_id = F.normalize(z_id)
z_id = z_id.detach()
feature_map = self.E(target_img)
output = self.G(z_id, feature_map)
output_z_id = self.Z(F.interpolate(output, size=112, mode='bilinear'))
output_z_id = F.normalize(output_z_id)
output_feature_map = self.E(output)
return output, z_id, output_z_id, feature_map, output_feature_map
def training_step(self, batch, batch_idx, optimizer_idx):
target_img, source_img, same = batch
if optimizer_idx == 0:
output, z_id, output_z_id, feature_map, output_feature_map = self(target_img, source_img)
self.generated_img = output
output_multi_scale_val = self.D(output)
loss_GAN = self.Loss_GAN(output_multi_scale_val, True, for_discriminator=False)
loss_E_G, loss_att, loss_id, loss_rec = self.Loss_E_G(target_img, output, feature_map, output_feature_map, z_id,
output_z_id, same)
loss_G = loss_E_G + loss_GAN
self.logger.experiment.add_scalar("Loss G", loss_G.item(), self.global_step)
self.logger.experiment.add_scalar("Attribute Loss", loss_att.item(), self.global_step)
self.logger.experiment.add_scalar("ID Loss", loss_id.item(), self.global_step)
self.logger.experiment.add_scalar("Reconstruction Loss", loss_rec.item(), self.global_step)
self.logger.experiment.add_scalar("GAN Loss", loss_GAN.item(), self.global_step)
return loss_G
else:
multi_scale_val = self.D(target_img)
output_multi_scale_val = self.D(self.generated_img.detach())
            loss_D_real = self.Loss_GAN(multi_scale_val, True)
            loss_D_fake = self.Loss_GAN(output_multi_scale_val, False)
loss_D = loss_D_fake + loss_D_real
self.logger.experiment.add_scalar("Loss D", loss_D.item(), self.global_step)
return loss_D
def validation_step(self, batch, batch_idx):
target_img, source_img, same = batch
output, z_id, output_z_id, feature_map, output_feature_map = self(target_img, source_img)
self.generated_img = output
output_multi_scale_val = self.D(output)
loss_GAN = self.Loss_GAN(output_multi_scale_val, True, for_discriminator=False)
loss_E_G, loss_att, loss_id, loss_rec = self.Loss_E_G(target_img, output, feature_map, output_feature_map,
z_id, output_z_id, same)
loss_G = loss_E_G + loss_GAN
return {"loss": loss_G, 'target': target_img[0].cpu(), 'source': source_img[0].cpu(), "output": output[0].cpu(), }
def validation_end(self, outputs):
loss = torch.stack([x["loss"] for x in outputs]).mean()
validation_image = []
for x in outputs:
validation_image = validation_image + [x['target'], x['source'], x["output"]]
validation_image = torchvision.utils.make_grid(validation_image, nrow=3)
self.logger.experiment.add_scalar("Validation Loss", loss.item(), self.global_step)
self.logger.experiment.add_image("Validation Image", validation_image, self.global_step)
return {"loss": loss, "image": validation_image, }
def configure_optimizers(self):
lr_g = self.hp.model.learning_rate_E_G
lr_d = self.hp.model.learning_rate_D
b1 = self.hp.model.beta1
b2 = self.hp.model.beta2
opt_g = torch.optim.Adam(list(self.G.parameters()) + list(self.E.parameters()), lr=lr_g, betas=(b1, b2))
opt_d = torch.optim.Adam(self.D.parameters(), lr=lr_d, betas=(b1, b2))
return [opt_g, opt_d], []
def train_dataloader(self):
# transforms.Resize((256, 256)),
# transforms.CenterCrop((256, 256)),
transform = transforms.Compose([
transforms.ToTensor(),
])
dataset = AEI_Dataset(self.hp.data.dataset_dir, transform=transform)
return DataLoader(dataset, batch_size=self.hp.model.batch_size, num_workers=self.hp.model.num_workers, shuffle=True, drop_last=True)
def val_dataloader(self):
transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop((256, 256)),
transforms.ToTensor(),
])
dataset = AEI_Val_Dataset(self.hp.data.valset_dir, transform=transform)
return DataLoader(dataset, batch_size=1, shuffle=False)
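Because AEINet returns two optimizers from configure_optimizers and branches on optimizer_idx in training_step, it is meant to be driven by a PyTorch Lightning Trainer, which alternates the generator/encoder and discriminator updates automatically. A minimal sketch, assuming hp is the hyper-parameter namespace the constructor reads (hp.arcface.*, hp.model.*, hp.data.*); the Trainer arguments are illustrative, not taken from the repository:

import pytorch_lightning as pl

# Hedged sketch: `hp` must provide the fields AEINet reads; max_steps is a
# placeholder value, and device flags vary by Lightning version.
model = AEINet(hp)
trainer = pl.Trainer(max_steps=500_000)
trainer.fit(model)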
| 40
| 140
| 0.660252
| 5,117
| 0.920324
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.049101
|
a7b811fd3215225ba11bbaec1274776a54d75f89
| 3,744
|
py
|
Python
|
luna/gateware/usb/usb3/link/timers.py
|
macdaliot/luna
|
97e725d0af001a6d4c4811eefb43b6c31a9b45e1
|
[
"BSD-3-Clause"
] | 2
|
2020-11-04T10:54:15.000Z
|
2022-03-17T20:38:21.000Z
|
luna/gateware/usb/usb3/link/timers.py
|
macdaliot/luna
|
97e725d0af001a6d4c4811eefb43b6c31a9b45e1
|
[
"BSD-3-Clause"
] | 4
|
2020-11-11T17:32:33.000Z
|
2020-11-30T13:08:05.000Z
|
luna/gateware/usb/usb3/link/timers.py
|
macdaliot/luna
|
97e725d0af001a6d4c4811eefb43b6c31a9b45e1
|
[
"BSD-3-Clause"
] | 2
|
2021-06-26T06:06:52.000Z
|
2022-01-19T22:36:19.000Z
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" U0 link-maintenance timers. """
from nmigen import *
class LinkMaintenanceTimers(Elaboratable):
""" Timers which ensure link integrity is maintained in U0.
These timers ensure that we provide enough traffic to maintain link state,
and move to link recovery if we ever fail to see constant traffic.
Our two main rules [USB3.2r1: 7.5.6.1]:
- If we don't send a link command for 10uS, we'll need to issue a
keepalive packet in order to keep our link stably in U0.
- If we don't receive a link command for 1mS, we know that the link
is no longer in good condition, since the other side should have
been sending keepalives to prevent this long of an idle. We'll have
to perform link recovery.
Attributes
----------
link_command_received: Signal(), input
Strobe that should be asserted when a link command is received.
link_command_transmitted: Signal(), input
Strobe that should be asserted when a link command is transmitted.
schedule_keepalive: Signal(), output
Strobe that indicates that we'll need to send a keepalive packet.
transition_to_recovery: Signal(), output
Strobe that indicates that our link is no longer stable; and we'll
need to perform link recovery.
Parameters
----------
ss_clock_frequency: float
The frequency of our ``ss`` domain clock, in Hz.
"""
KEEPALIVE_TIMEOUT = 10e-6
RECOVERY_TIMEOUT = 1e-3
def __init__(self, *, ss_clock_frequency=125e6):
self._clock_frequency = ss_clock_frequency
#
# I/O port.
#
self.enable = Signal()
self.link_command_received = Signal()
self.packet_received = Signal()
self.link_command_transmitted = Signal()
self.schedule_keepalive = Signal()
self.transition_to_recovery = Signal()
def elaborate(self, platform):
m = Module()
# Note that we don't care about rollover on any of our timers; as it's harmless in
# both cases. For our keepalive, we'll immediately send a link command, which should
# clear our timer. For our recovery timer, we'll enter recovery and reset this whole
# thing anyway. :)
#
# Keepalive Timer
#
keepalive_timeout_cycles = int(self.KEEPALIVE_TIMEOUT * self._clock_frequency)
# Time how long it's been since we've sent our last link command.
keepalive_timer = Signal(range(keepalive_timeout_cycles))
m.d.comb += self.schedule_keepalive.eq(keepalive_timer + 1 == keepalive_timeout_cycles)
with m.If(self.link_command_transmitted):
m.d.ss += keepalive_timer.eq(0)
with m.Elif(self.enable):
m.d.ss += keepalive_timer.eq(keepalive_timer + 1)
with m.Else():
m.d.ss += keepalive_timer.eq(0)
#
# Recovery Timer
#
recovery_timeout_cycles = int(self.RECOVERY_TIMEOUT * self._clock_frequency)
# Time how long it's been since we've received our last link command.
recovery_timer = Signal(range(recovery_timeout_cycles))
m.d.comb += self.transition_to_recovery.eq(recovery_timer + 1 == recovery_timeout_cycles)
with m.If(self.link_command_received | self.packet_received):
m.d.ss += recovery_timer.eq(0)
with m.Elif(self.enable):
m.d.ss += recovery_timer.eq(recovery_timer + 1)
with m.Else():
m.d.ss += recovery_timer.eq(0)
return m
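The docstring above reduces to two counters: a 10 µs keepalive timer cleared by transmitted link commands and a 1 ms recovery timer cleared by received link commands or packets. A minimal sketch of wiring the module into a parent nMigen design, assuming the default 125 MHz ss clock; the parent module and the constant enable are illustrative, not from the LUNA sources:

from nmigen import Module

m = Module()
timers = LinkMaintenanceTimers(ss_clock_frequency=125e6)
m.submodules.timers = timers

# Keep the timers counting while the link sits in U0 (illustrative).
m.d.comb += timers.enable.eq(1)

# timers.schedule_keepalive would trigger the link-command generator;
# timers.transition_to_recovery would push the link state machine into Recovery.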
| 34.036364
| 97
| 0.6461
| 3,540
| 0.945513
| 0
| 0
| 0
| 0
| 0
| 0
| 1,955
| 0.522169
|
a7bac68130322804ca0f8658958c0c6e65364b14
| 769
|
py
|
Python
|
S4/S4 Library/simulation/ensemble/ensemble_interactions.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/ensemble/ensemble_interactions.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/ensemble/ensemble_interactions.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from objects.base_interactions import ProxyInteraction
from sims4.utils import classproperty, flexmethod
class EnsembleConstraintProxyInteraction(ProxyInteraction):
INSTANCE_SUBCLASSES_ONLY = True
@classproperty
def proxy_name(cls):
return '[Ensemble]'
@classmethod
def generate(cls, proxied_affordance, ensemble):
result = super().generate(proxied_affordance)
result.ensemble = ensemble
return result
@flexmethod
def _constraint_gen(cls, inst, *args, **kwargs):
inst_or_cls = inst if inst is not None else cls
for constraint in super(__class__, inst_or_cls)._constraint_gen(*args, **kwargs):
yield constraint
yield inst_or_cls.ensemble.get_center_of_mass_constraint()
| 33.434783
| 89
| 0.723017
| 662
| 0.860858
| 290
| 0.377113
| 549
| 0.713914
| 0
| 0
| 12
| 0.015605
|
a7bb2b9d40a5d41ccee76669339046f57bb5ef22
| 742
|
py
|
Python
|
setup.py
|
ryderdamen/phonetic-alphabet
|
23239a532b05c5708629011dd02a228fc11f71e7
|
[
"MIT"
] | 3
|
2020-05-01T22:23:38.000Z
|
2021-03-30T17:47:40.000Z
|
setup.py
|
ryderdamen/phonetic-alphabet
|
23239a532b05c5708629011dd02a228fc11f71e7
|
[
"MIT"
] | null | null | null |
setup.py
|
ryderdamen/phonetic-alphabet
|
23239a532b05c5708629011dd02a228fc11f71e7
|
[
"MIT"
] | null | null | null |
import setuptools
def get_readme():
with open('README.md') as f:
return f.read()
INSTALL_REQUIRES = []
TESTS_REQUIRE = ['pytest']
setuptools.setup(
name='phonetic_alphabet',
version='0.1.0',
description='Convert characters and digits to phonetic alphabet equivalents.',
long_description=get_readme(),
long_description_content_type="text/markdown",
keywords='phonetic alphabet aviation flight alpha bravo charlie niner',
url='http://github.com/ryderdamen/phonetic-alphabet',
author='Ryder Damen',
author_email='dev@ryderdamen.com',
license='MIT',
packages=setuptools.find_packages(),
install_requires=INSTALL_REQUIRES,
test_suite='pytest',
tests_require=TESTS_REQUIRE,
)
| 27.481481
| 82
| 0.719677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.377358
|