hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba450050a15bcbeeb12f87a5de142226f24ecf1b | 1,875 | py | Python | tests/conftest.py | Basalex/fastapi-gino-viewsets | 924f95e4db31571a8fb86ffb412bf78e1abdc045 | [
"MIT"
] | 6 | 2021-05-21T21:35:03.000Z | 2022-03-12T22:07:50.000Z | tests/conftest.py | Basalex/fastapi-gino-viewsets | 924f95e4db31571a8fb86ffb412bf78e1abdc045 | [
"MIT"
] | 1 | 2021-07-07T07:08:53.000Z | 2021-07-12T14:01:56.000Z | tests/conftest.py | Basalex/fastapi-gino-viewsets | 924f95e4db31571a8fb86ffb412bf78e1abdc045 | [
"MIT"
] | null | null | null | import asyncio
from itertools import cycle
import pytest
from gino import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from .factory import Factory
from .models import db, PG_URL, UserType
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope='function', autouse=True)
@pytest.fixture
@pytest.fixture
| 26.785714 | 84 | 0.6592 | import asyncio
from itertools import cycle
import pytest
from gino import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from .factory import Factory
from .models import db, PG_URL, UserType
@pytest.fixture(scope="session")
async def engine():
    """Session-wide Gino engine.

    Rebuilds the test schema (users, teams, usertype enum) on startup,
    yields the bound engine, and drops everything again on teardown.
    """
    db_engine = await create_engine(PG_URL)
    db.bind = db_engine
    async with db_engine.acquire():
        # await db.status(db.text("DROP TYPE usertype CASCADE"))
        # Start from a clean slate in case a previous run left objects behind.
        await db.status(db.text("DROP TABLE IF EXISTS users;"))
        await db.status(db.text("DROP TABLE IF EXISTS teams;"))
        await db.status(db.text("DROP TYPE IF EXISTS usertype;"))
        await db.status(db.text("CREATE TYPE usertype AS ENUM ('USER', 'ADMIN');"))
        await db.gino.create_all()
    yield db_engine
    # Teardown: remove everything this fixture created, then close the engine.
    await db.status(db.text("DROP TYPE usertype CASCADE"))
    await db.status(db.text("DROP TABLE users"))
    await db.status(db.text("DROP TABLE teams"))
    await db_engine.close()
@pytest.fixture(scope="session")
def event_loop():
    """Session-scoped event loop so all async fixtures share one loop."""
    loop = asyncio.get_event_loop()
    yield loop
@pytest.fixture(scope='function', autouse=True)
async def clear_db(engine):
    """Truncate mutable tables after every test so data cannot leak between tests."""
    yield
    await db.status(db.text("TRUNCATE users RESTART IDENTITY CASCADE"))
    # Fix: the original truncated "users" twice and never cleared "teams".
    # Truncating users does not cascade *up* to the referenced teams table,
    # so rows created via Factory.team() would leak across tests.
    await db.status(db.text("TRUNCATE teams RESTART IDENTITY CASCADE"))
@pytest.fixture
async def create_users():
    """Create one team plus five users and return the user list.

    Users are named Alex1..Alex5 with ages 10..50, two e-mail addresses
    each, and usertype alternating ADMIN/USER via itertools.cycle.
    """
    team = await Factory.team()
    users = []
    types = cycle([UserType.ADMIN, UserType.USER])
    for n in range(1, 6):
        user = await Factory.user(
            team=team,
            age=n * 10,
            nickname=f'Alex{n}',
            email_list=[f'user{n}@gmail.com', f'user{n}@yahoo.com'],
            usertype=next(types),
        )
        users.append(user)
    return users
@pytest.fixture
async def get_users(create_users):
    """Return a zero-argument callable that yields the users built by create_users."""
    def wrapped():
        return create_users
    return wrapped
| 1,392 | 0 | 110 |
201fbae4c77d3f71f4efa120928e09a234ebba2b | 3,749 | py | Python | boundary_predictor.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | boundary_predictor.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | boundary_predictor.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | import argparse
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.model import get_model
from utils.tools import get_configs_of, to_device, get_mask_from_lengths
from dataset import Dataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, required=True)
parser.add_argument("--path_tag", type=str, default="")
parser.add_argument(
"--model",
type=str,
default="aux",
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="name of dataset",
)
args = parser.parse_args()
# Read Config
args.model = "aux"
preprocess_config, model_config, train_config = get_configs_of(args.dataset)
configs = (preprocess_config, model_config, train_config)
path_tag = "_{}".format(args.path_tag) if args.path_tag != "" else args.path_tag
train_config["path"]["ckpt_path"] = train_config["path"]["ckpt_path"]+"_{}{}".format("shallow", path_tag)
train_config["path"]["log_path"] = train_config["path"]["log_path"]+"_{}{}".format("shallow", path_tag)
train_config["path"]["result_path"] = train_config["path"]["result_path"]+"_{}{}".format("aux", path_tag)
if preprocess_config["preprocessing"]["pitch"]["pitch_type"] == "cwt":
import numpy as np
from utils.pitch_tools import get_lf0_cwt
preprocess_config["preprocessing"]["pitch"]["cwt_scales"] = get_lf0_cwt(np.ones(10))[1]
# Log Configuration
print("\n==================================== Prediction Configuration ====================================")
print(" ---> Total Batch Size:", int(train_config["optimizer"]["batch_size"]))
print(" ---> Path of ckpt:", train_config["path"]["ckpt_path"])
print("================================================================================================")
# Get model
model = get_model(args, configs, device, train=False)
# Get dataset
dataset = Dataset(
"val.txt", preprocess_config, train_config, sort=False, drop_last=False
)
batch_size = train_config["optimizer"]["batch_size"]
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=dataset.collate_fn,
)
predict(model, args.restore_step, configs, loader, len(dataset))
| 37.118812 | 114 | 0.594559 | import argparse
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.model import get_model
from utils.tools import get_configs_of, to_device, get_mask_from_lengths
from dataset import Dataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def predict(model, step, configs, loader, len_dataset):
    """Estimate the shallow-diffusion boundary K for DiffSinger.

    Computes the dataset-averaged expected KL at the terminal diffusion step
    (kld_T) and at every intermediate step t (kld_ts); K is the first step
    whose expected KL drops to or below kld_T.
    """
    preprocess_config, model_config, train_config = configs
    num_timesteps = int(model_config["denoiser"]["timesteps"])
    kld_T = 0
    kld_ts = [0] * (num_timesteps+1)  # slot 0 unused; timesteps are 1-based
    for batchs in tqdm(loader):
        for batch in batchs:
            batch = to_device(batch, device)
            with torch.no_grad():
                # Forward
                target_mel, mel_lens, max_mel_len = batch[6:9]
                target_mel_mask = get_mask_from_lengths(mel_lens, max_mel_len)
                teacher_forced_mel = model(*(batch[2:]))[0][0]
                # Weight each batch's contribution by its number of samples.
                kld_T += model.diffusion.expected_kld_T(target_mel, target_mel_mask) * len(batch[0])
                for t in range(1, num_timesteps+1):
                    kld_t = model.diffusion.expected_kld_t(teacher_forced_mel, target_mel, t, target_mel_mask)
                    kld_ts[t] += kld_t * len(batch[0])
    # Convert weighted sums to dataset means; drop the unused slot 0.
    kld_T = kld_T / len_dataset
    kld_ts = [kld_t / len_dataset for kld_t in kld_ts[1:]]
    print(kld_ts)
    print(kld_T)
    K = 0
    for kld_t in kld_ts:
        K += 1
        if kld_t <= kld_T:
            break
    print("\nPredicted Boundary K is", K)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--restore_step", type=int, required=True)
    parser.add_argument("--path_tag", type=str, default="")
    parser.add_argument(
        "--model",
        type=str,
        default="aux",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="name of dataset",
    )
    args = parser.parse_args()

    # Read Config
    args.model = "aux"  # NOTE: any --model value passed on the CLI is overridden here
    preprocess_config, model_config, train_config = get_configs_of(args.dataset)
    configs = (preprocess_config, model_config, train_config)
    path_tag = "_{}".format(args.path_tag) if args.path_tag != "" else args.path_tag
    # Checkpoints/logs come from the "shallow" run; results go under "aux".
    train_config["path"]["ckpt_path"] = train_config["path"]["ckpt_path"]+"_{}{}".format("shallow", path_tag)
    train_config["path"]["log_path"] = train_config["path"]["log_path"]+"_{}{}".format("shallow", path_tag)
    train_config["path"]["result_path"] = train_config["path"]["result_path"]+"_{}{}".format("aux", path_tag)
    if preprocess_config["preprocessing"]["pitch"]["pitch_type"] == "cwt":
        import numpy as np
        from utils.pitch_tools import get_lf0_cwt
        preprocess_config["preprocessing"]["pitch"]["cwt_scales"] = get_lf0_cwt(np.ones(10))[1]

    # Log Configuration
    print("\n==================================== Prediction Configuration ====================================")
    print(" ---> Total Batch Size:", int(train_config["optimizer"]["batch_size"]))
    print(" ---> Path of ckpt:", train_config["path"]["ckpt_path"])
    print("================================================================================================")

    # Get model
    model = get_model(args, configs, device, train=False)

    # Get dataset
    dataset = Dataset(
        "val.txt", preprocess_config, train_config, sort=False, drop_last=False
    )
    batch_size = train_config["optimizer"]["batch_size"]
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=dataset.collate_fn,
    )

    predict(model, args.restore_step, configs, loader, len(dataset))
| 1,197 | 0 | 25 |
de6d0a71409314774043235e17032d6716894da6 | 3,485 | py | Python | modules/filters/extractImageComponents.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 25 | 2015-08-24T16:05:14.000Z | 2020-12-09T20:07:14.000Z | modules/filters/extractImageComponents.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T21:18:10.000Z | 2016-02-16T21:18:10.000Z | modules/filters/extractImageComponents.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 5 | 2016-02-16T20:05:37.000Z | 2020-01-31T11:27:39.000Z | # $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
| 33.190476 | 77 | 0.588522 | # $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
class extractImageComponents(ScriptedConfigModuleMixin, ModuleBase):
    """DeVIDE module wrapping vtkImageExtractComponents: extracts 1 to 3
    components from a multi-component vtkImageData input."""

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        self._extract = vtk.vtkImageExtractComponents()

        module_utils.setup_vtk_object_progress(self, self._extract,
                                           'Extracting components.')

        # Defaults: extract only the first component (indices are 0-based).
        self._config.component1 = 0
        self._config.component2 = 1
        self._config.component3 = 2
        self._config.numberOfComponents = 1
        self._config.fileLowerLeft = False

        configList = [
            ('Component 1:', 'component1', 'base:int', 'text',
             'Zero-based index of first component to extract.'),
            ('Component 2:', 'component2', 'base:int', 'text',
             'Zero-based index of second component to extract.'),
            ('Component 3:', 'component3', 'base:int', 'text',
             'Zero-based index of third component to extract.'),
            ('Number of components:', 'numberOfComponents', 'base:int',
             'choice',
             'Number of components to extract. Only this number of the '
             'above-specified component indices will be used.',
             ('1', '2', '3'))]

        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'vtkImageExtractComponents' : self._extract})

        self.sync_module_logic_with_config()

    def close(self):
        """Disconnect inputs and release the wrapped VTK filter."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)

        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        ModuleBase.close(self)

        # get rid of our reference
        del self._extract

    def get_input_descriptions(self):
        return ('Multi-component vtkImageData',)

    def set_input(self, idx, inputStream):
        self._extract.SetInput(inputStream)

    def get_output_descriptions(self):
        return ('Extracted component vtkImageData',)

    def get_output(self, idx):
        return self._extract.GetOutput()

    def logic_to_config(self):
        """Copy the VTK filter's state into self._config."""
        # numberOfComponents is 0-based !!
        self._config.numberOfComponents = \
            self._extract.GetNumberOfComponents()
        self._config.numberOfComponents -= 1

        c = self._extract.GetComponents()
        self._config.component1 = c[0]
        self._config.component2 = c[1]
        self._config.component3 = c[2]

    def config_to_logic(self):
        """Push self._config into the VTK filter."""
        # numberOfComponents is 0-based !!
        nc = self._config.numberOfComponents
        nc += 1

        # SetComponents takes 1, 2 or 3 arguments depending on how many
        # components the user asked for.
        if nc == 1:
            self._extract.SetComponents(self._config.component1)
        elif nc == 2:
            self._extract.SetComponents(self._config.component1,
                                        self._config.component2)
        else:
            self._extract.SetComponents(self._config.component1,
                                        self._config.component2,
                                        self._config.component3)

    def execute_module(self):
        self._extract.Update()
| 2,989 | 47 | 286 |
9941db7f3c8354684ea1ae63a6278b9e564b7669 | 3,115 | py | Python | face_detection/video_face.py | GlenOFI/Proctoring-AI | d6b4e1603c4b114c6ac6d401eea8db282c2ecd48 | [
"MIT"
] | null | null | null | face_detection/video_face.py | GlenOFI/Proctoring-AI | d6b4e1603c4b114c6ac6d401eea8db282c2ecd48 | [
"MIT"
] | null | null | null | face_detection/video_face.py | GlenOFI/Proctoring-AI | d6b4e1603c4b114c6ac6d401eea8db282c2ecd48 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 03:40:59 2020
@author: hp
"""
import cv2 # pip install opencv-python
import dlib
import numpy as np
# Use a file on your computer:
videoCapture = cv2.VideoCapture('video/clinton.mp4')
# Or use a web cam:
# videoCapture = cv2.VideoCapture(0)
# Initialise three separate models
# dlib
detector2 = dlib.get_frontal_face_detector()
# caffe (DNN)
modelFile = "models/res10_300x300_ssd_iter_140000.caffemodel"
configFile = "models/deploy.prototxt.txt"
net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
# Haar cascade
classifier2 = cv2.CascadeClassifier('models/haarcascade_frontalface2.xml')
font = cv2.FONT_HERSHEY_SIMPLEX
# Each iteration of the while loop captures a single frame from the capture device (file or webcam)
while(True):
# Get the next frame
ret, img = videoCapture.read()
# If a frame was successfully captured
if ret == True:
# Resize image
img = cv2.resize(img, None, fx=0.5, fy=0.5)
height, width = img.shape[:2]
img2 = img.copy()
img3 = img.copy()
img4 = img.copy()
# Convert to greyscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces using dlib and draw bounding boxes
faces2 = detector2(gray, 1)
for result in faces2:
x = result.left()
y = result.top()
x1 = result.right()
y1 = result.bottom()
cv2.rectangle(img2, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.putText(img2, 'dlib', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Detect faces using caffe (DNN) and draw bounding boxes
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)),
1.0, (300, 300), (104.0, 117.0, 123.0))
net.setInput(blob)
faces3 = net.forward()
for i in range(faces3.shape[2]):
confidence = faces3[0, 0, i, 2]
if confidence > 0.5:
box = faces3[0, 0, i, 3:7] * np.array([width, height, width, height])
(x, y, x1, y1) = box.astype("int")
# cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), -1) # -1 fills the rectangle
cv2.putText(img3, 'dnn', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Detect faces using Haar cascades and draw bounding boxes
faces4 = classifier2.detectMultiScale(img)
for result in faces4:
x, y, w, h = result
x1, y1 = x + w, y + h
cv2.rectangle(img4, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.putText(img4, 'haar', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Show on the screen
cv2.imshow("dlib", img2)
cv2.imshow("dnn", img3)
cv2.imshow("haar", img4)
# Exit the loop with the escape key (with one of the video windows active)
if cv2.waitKey(1) & 0xFF == 27: # esc
break
else:
break
# Release resources
videoCapture.release()
cv2.destroyAllWindows()
| 31.785714 | 99 | 0.577849 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 03:40:59 2020
@author: hp
"""
import cv2 # pip install opencv-python
import dlib
import numpy as np
# Use a file on your computer:
# Compare three face detectors (dlib HOG, OpenCV DNN/Caffe, Haar cascade)
# frame-by-frame on the same video, drawing each result in its own window.
videoCapture = cv2.VideoCapture('video/clinton.mp4')
# Or use a web cam:
# videoCapture = cv2.VideoCapture(0)
# Initialise three separate models
# dlib
detector2 = dlib.get_frontal_face_detector()
# caffe (DNN)
modelFile = "models/res10_300x300_ssd_iter_140000.caffemodel"
configFile = "models/deploy.prototxt.txt"
net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
# Haar cascade
classifier2 = cv2.CascadeClassifier('models/haarcascade_frontalface2.xml')
font = cv2.FONT_HERSHEY_SIMPLEX
# Each iteration of the while loop captures a single frame from the capture device (file or webcam)
while(True):
    # Get the next frame
    ret, img = videoCapture.read()
    # If a frame was successfully captured
    if ret == True:
        # Resize image (half size in each dimension)
        img = cv2.resize(img, None, fx=0.5, fy=0.5)
        height, width = img.shape[:2]
        # One copy per detector so the drawings don't overlap.
        img2 = img.copy()
        img3 = img.copy()
        img4 = img.copy()
        # Convert to greyscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces using dlib and draw bounding boxes
        faces2 = detector2(gray, 1)
        for result in faces2:
            x = result.left()
            y = result.top()
            x1 = result.right()
            y1 = result.bottom()
            cv2.rectangle(img2, (x, y), (x1, y1), (0, 0, 255), 2)
        cv2.putText(img2, 'dlib', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
        # Detect faces using caffe (DNN) and draw bounding boxes
        blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)),
                                     1.0, (300, 300), (104.0, 117.0, 123.0))
        net.setInput(blob)
        faces3 = net.forward()
        for i in range(faces3.shape[2]):
            confidence = faces3[0, 0, i, 2]
            if confidence > 0.5:
                # Detections are normalised; scale back to pixel coordinates.
                box = faces3[0, 0, i, 3:7] * np.array([width, height, width, height])
                (x, y, x1, y1) = box.astype("int")
                # cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), 2)
                cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), -1)  # -1 fills the rectangle
        cv2.putText(img3, 'dnn', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
        # Detect faces using Haar cascades and draw bounding boxes
        faces4 = classifier2.detectMultiScale(img)
        for result in faces4:
            x, y, w, h = result
            x1, y1 = x + w, y + h
            cv2.rectangle(img4, (x, y), (x1, y1), (0, 0, 255), 2)
        cv2.putText(img4, 'haar', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
        # Show on the screen
        cv2.imshow("dlib", img2)
        cv2.imshow("dnn", img3)
        cv2.imshow("haar", img4)
        # Exit the loop with the escape key (with one of the video windows active)
        if cv2.waitKey(1) & 0xFF == 27:  # esc
            break
    else:
        break
# Release resources
videoCapture.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
85c893b053be48a497c3f5752d4eff1719132dba | 3,364 | py | Python | src/python/twitter/common/java/java_types.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 1,143 | 2015-01-05T04:19:24.000Z | 2019-12-11T12:02:23.000Z | src/python/twitter/common/java/java_types.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 144 | 2015-01-06T05:05:07.000Z | 2019-12-12T18:02:37.000Z | src/python/twitter/common/java/java_types.py | zhouyijiaren/commons | 10df6fb63547baa9047782aa7ad4edf354914b10 | [
"Apache-2.0"
] | 426 | 2015-01-08T08:33:41.000Z | 2019-12-09T13:15:40.000Z | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import struct
| 26.698413 | 100 | 0.621284 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import struct
class JavaNativeType(object):
    """Base class for big-endian Java primitive types decoded from raw bytes.

    Subclasses set ``self._value`` in ``__init__`` and override ``size()``.
    """

    class ParseException(Exception): pass

    def __init__(self, data):
        pass

    def __call__(self):
        return self._value

    def value(self):
        # _value is assigned by the concrete subclass's __init__.
        return self._value

    def get(self):
        return self.value()

    @staticmethod
    def size():
        # Size of the type in bytes; every concrete subclass must override.
        raise Exception("Unimplemented!")

    @staticmethod
    def parse(data, *type_args):
        """Decode a sequence of types from the front of ``data``.

        Returns (parsed_values, remaining_bytes). Raises ParseException if a
        requested type is not a JavaNativeType or ``data`` is too short for
        the combined size of all requested types.
        """
        offset = 0
        parsed_types = []
        total_size = 0
        for t in type_args:
            if not issubclass(t, JavaNativeType):
                raise JavaNativeType.ParseException("Not a valid JavaNativeType: %s" % t)
            total_size += t.size()
        if total_size > len(data):
            raise JavaNativeType.ParseException("Not enough data to deserialize %s" % repr(type_args))
        for t in type_args:
            parsed_type = t(data[slice(offset, offset + t.size())]).value()
            parsed_types.append(parsed_type)
            offset += t.size()
        return parsed_types, data[total_size:]
class u1(JavaNativeType):
    """Unsigned 8-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack('>B', data[:1])

    @staticmethod
    def size():
        return 1
class u2(JavaNativeType):
    """Unsigned 16-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">H", data[:2])

    @staticmethod
    def size():
        return 2
class s2(JavaNativeType):
    """Signed 16-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">h", data[:2])

    @staticmethod
    def size():
        return 2
class u4(JavaNativeType):
    """Unsigned 32-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">L", data[:4])

    @staticmethod
    def size():
        return 4
class s4(JavaNativeType):
    """Signed 32-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">l", data[:4])

    @staticmethod
    def size():
        return 4
class s8(JavaNativeType):
    """Signed 64-bit big-endian integer."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">q", data[:8])

    @staticmethod
    def size():
        return 8
class f4(JavaNativeType):
    """Big-endian 32-bit IEEE-754 float."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">f", data[:4])

    @staticmethod
    def size():
        return 4
class f8(JavaNativeType):
    """Big-endian 64-bit IEEE-754 float (Java double)."""

    def __init__(self, data):
        JavaNativeType.__init__(self, data)
        (self._value,) = struct.unpack(">d", data[:8])

    @staticmethod
    def size():
        return 8
| 1,460 | 782 | 207 |
10b98875257eafaa3b8c39da8dbb943ce80f179c | 14,119 | py | Python | util/seeg_utils.py | danzhewuju/SleepState | b39fa3a3374d35dcc7a9ef31f977fac4e9d4a322 | [
"Apache-2.0"
] | null | null | null | util/seeg_utils.py | danzhewuju/SleepState | b39fa3a3374d35dcc7a9ef31f977fac4e9d4a322 | [
"Apache-2.0"
] | null | null | null | util/seeg_utils.py | danzhewuju/SleepState | b39fa3a3374d35dcc7a9ef31f977fac4e9d4a322 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import os
import uuid
import mne
import numpy as np
import pandas as pd
import pyedflib
import scipy.io as sio
from mne.time_frequency import *
import matplotlib.pyplot as plt
def get_recorder_time(data):
    """Return the recorded duration of this file.

    :param data: raw data (an object exposing a ``times`` sequence)
    :return: timestamp of the last sample, i.e. the length of the recording
    """
    return data.times[-1]
def re_sampling(data, fz):
    """Resample the data in place and return it.

    :param data: data read via the mne module
    :param fz: target resampling frequency in Hz
    :return: the same object, resampled
    """
    data.resample(fz, npad="auto")
    return data
def rewrite(raw, include_names, save_path):  # re-save the data keeping only the selected channels
    """Write ``raw`` to ``save_path`` keeping only the named channels.

    :param raw: the raw data as read by mne
    :param include_names: names of the channels to keep
    :param save_path: destination file path
    :return: True on success
    """
    want_meg = True
    want_eeg = False
    want_stim = False
    picks = mne.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
                           include=include_names, exclude='bads')
    print("include channel names:{}".format(include_names))
    raw.save(save_path, picks=picks, overwrite=True)
    # raw.save("SEEG.fif", picks=picks_seeg, overwrite=True)
    print("successfully written!")
    return True
def get_common_channels(ch_names1, ch_names2):  # find the channels shared by two recordings
    """Return the channels present in both lists, preserving ch_names1 order.

    :param ch_names1: raw1 ch_names list
    :param ch_names2: raw2 ch_names list
    :return: common ch_names list
    """
    # Set membership makes this O(n + m) instead of O(n * m) list scans.
    names2 = set(ch_names2)
    common_channels = [x for x in ch_names1 if x in names2]
    return common_channels
def data_connection(raw1, raw2):  # concatenate two recordings
    """Append raw2 to raw1 and return raw1.

    :param raw1: raw data1
    :param raw2: raw data2
    :return: data connection raw1:raw2
    """
    raw1.append(raw2)
    return raw1
def select_channel_data(raw, select_channel_names):  # select data for the named channels directly
    """Extract the sample data of the selected channels.

    :param raw: raw data
    :param select_channel_names: list of channel names to select
    :return: channel data (samples of the selected channels)
    """
    ch_names = get_channels_names(raw)
    pick_channel_No = mne.pick_channels(ch_names=ch_names, include=select_channel_names)
    data, time = raw[pick_channel_No, :]
    return data
def data_split(raw, time_step):  # slice the recording into fixed-length windows
    """Split ``raw`` into consecutive windows of ``time_step`` seconds each.

    :param raw: the raw data as read by mne
    :param time_step: window size in seconds
    :return: list of per-window sample arrays
    """
    data_split = []
    end = max(raw.times)
    epoch = int(end // time_step)
    fz = int(len(raw) / end)  # sampling frequency
    # NOTE(review): ranging to ``epoch - 1`` drops the final full window;
    # confirm whether that is intentional (e.g. to avoid a partial window).
    for index in range(epoch - 1):
        start = index * fz * time_step
        stop = (index + 1) * fz * time_step
        data, time = raw[:, start:stop]
        data_split.append(data)
    return data_split
def get_duration_raw_data(raw, start, stop):
    """Crop ``raw`` to the interval [start, stop].

    :param raw: the raw data
    :param start: start time in seconds
    :param stop: end time in seconds
    :return: the cropped data, or None if ``stop`` is past the end of the recording
    """
    end = max(raw.times)
    if stop > end:
        print("over range!!!")
        return None
    else:
        duration_data = raw.crop(start, stop)
        return duration_data
def save_split_data(data_split, path, flag):  # persist the sliced windows to disk
    """Save every window in ``data_split`` under ``path``, tagged with ``flag``.

    :param data_split: the sliced data windows
    :param path: directory to store the files in (i.e. the parent folder)
    :param flag: label for this data, appended to each generated file name
    :return: True on success
    """
    if not os.path.exists(path):
        os.makedirs(path)
    for d in data_split:
        # Unique file name per window: <uuid>-<flag>
        name = str(uuid.uuid1()) + "-" + str(flag)
        path_all = os.path.join(path, name)
        save_numpy_info(d, path_all)
    print("File save successfully {}".format(path))
    return True
def seeg_preprocess(fin, fout, seeg_chan_name):
    """Filter SEEG data: notch out mains harmonics, then 0.5 Hz high-pass.

    :param fin: source (EDF) file name
    :param fout: output file name (***must end with _raw.fif***)
    :param seeg_chan_name: list of channel names to filter
    :return: None (returns early if not all channels can be found)
    """
    raw = mne.io.read_raw_edf(fin, preload=True)
    specific_chans = raw.pick_channels(seeg_chan_name)
    del raw
    if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
        print("channels number not matched")
        return
    sfreq = specific_chans.info['sfreq']  # sampling frequency
    nyq = sfreq / 2.  # Nyquist frequency
    # Remove 50 Hz mains interference and all harmonics up to Nyquist.
    specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
                                phase='zero')
    specific_chans.filter(0.5, None, fir_design='firwin')
    specific_chans.save(fout)
    del specific_chans
def eeg_preprocess(fin, fout, seeg_chan_name):
    """Filter EEG data: notch out mains harmonics, then 1 Hz high-pass.

    :param fin: source (EDF) file name
    :param fout: output file name (***must end with _raw.fif***)
    :param seeg_chan_name: list of channel names to filter
    :return: None (returns early if not all channels can be found)
    """
    raw = mne.io.read_raw_edf(fin, preload=True)
    specific_chans = raw.copy().pick_channels(seeg_chan_name)
    del raw
    if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
        print("channels number not matched")
        return
    sfreq = specific_chans.info['sfreq']  # sampling frequency
    nyq = sfreq / 2.  # Nyquist frequency
    specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
                                phase='zero')
    specific_chans.filter(1., None, fir_design='firwin')
    specific_chans.save(fout)
    del specific_chans
def seeg_npy_plot(data, channels, save_path, save_path_npy=None):
    """Plot (and optionally save as .npy) the first selected channel.

    :param data: data in numpy format (channels x samples)
    :param channels: list of selected channel indices
    :param save_path: path for the rendered figure
    :param save_path_npy: optional path to also save the first channel as .npy
    :return: True on success
    """
    k = len(channels)
    k = 1  # only the first channel is actually plotted
    plt.figure(0)
    plt.subplots_adjust(hspace=0.6, wspace=0.6)
    if save_path_npy is not None:
        data_p = data[channels[0]]
        np.save(save_path_npy, data_p)
    for i in range(k):
        try:
            plt.subplot(k, 1, i + 1)
            plt.title("channel:{}".format(channels[i]))
            plt.plot(data[channels[i]])
        except IndexError:
            print("IndexError")
    plt.savefig(save_path)
    plt.close(0)
    # plt.show()
    return True
def split_edf(filename, NEpochs=1):  # split an oversized EDF file into NEpochs smaller EDF files
    """Split an EDF file into ``NEpochs`` equally sized EDF files.

    :param filename: source file name
    :param NEpochs: number of pieces to split into
    :return: None; writes files named <basename>_<i>.edf
    """
    dirname = os.path.dirname(filename)
    basename = os.path.basename(filename)
    oridir = os.getcwd()
    if dirname != "":  # pyedflib can only read files from the current working directory
        os.chdir(dirname)
    f = pyedflib.EdfReader(basename)
    os.chdir(oridir)  # change the working directory back
    NSamples = int(f.getNSamples()[0] / NEpochs)
    NChannels = f.signals_in_file
    fileOutPrefix = basename + '_'

    # Copy every channel's header so each output file keeps the metadata.
    channels_info = list()
    for ch in range(NChannels):
        ch_dict = dict()
        ch_dict['label'] = f.getLabel(ch)
        ch_dict['dimension'] = f.getPhysicalDimension(ch)
        ch_dict['sample_rate'] = f.getSampleFrequency(ch)
        ch_dict['physical_max'] = f.getPhysicalMaximum(ch)
        ch_dict['physical_min'] = f.getPhysicalMinimum(ch)
        ch_dict['digital_max'] = f.getDigitalMaximum(ch)
        ch_dict['digital_min'] = f.getDigitalMinimum(ch)
        ch_dict['transducer'] = f.getTransducer(ch)
        ch_dict['prefilter'] = f.getPrefilter(ch)
        channels_info.append(ch_dict)

    for i in range(NEpochs):
        print("File %d starts" % i)
        fileOut = os.path.join('.', fileOutPrefix + str(i) + '.edf')
        fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
        data_list = list()
        for ch in range(NChannels):
            # NOTE(review): this branches on the *channel* index; it looks
            # like it was meant to branch on the last *epoch*
            # (i == NEpochs - 1) so the final piece absorbs the remainder —
            # confirm before relying on the split boundaries.
            if ch == NChannels - 1:
                data_list.append(f.readSignal(ch)[i * NSamples:])
            else:
                data_list.append(f.readSignal(ch)[i * NSamples: (i + 1) * NSamples - 1])
        fout.setSignalHeaders(channels_info)
        fout.writeSamples(data_list)
        fout.close()
        del fout
        del data_list
        print("File %d done" % i)
def save_raw_as_edf(raw, fout_name):  # save raw-format data as EDF
    """Write raw data to <fout_name>.edf in the current directory.

    :param raw: data in (mne) raw format
    :param fout_name: output file name, without extension
    :return: None
    """
    NChannels = raw.info['nchan']
    channels_info = list()
    for i in range(NChannels):
        '''默认参数来自edfwriter.py'''  # default parameter values taken from edfwriter.py
        ch_dict = dict()
        ch_dict['label'] = raw.info['chs'][i]['ch_name']
        ch_dict['dimension'] = 'mV'
        ch_dict['sample_rate'] = raw.info['sfreq']
        ch_dict['physical_max'] = 1.0
        ch_dict['physical_min'] = -1.0
        ch_dict['digital_max'] = 32767
        ch_dict['digital_min'] = -32767
        ch_dict['transducer'] = 'trans1'
        ch_dict['prefilter'] = "pre1"
        channels_info.append(ch_dict)
    fileOut = os.path.join('.', fout_name + '.edf')
    fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
    data_list, _ = raw[:, :]
    print(data_list)
    fout.setSignalHeaders(channels_info)
    fout.writeSamples(data_list)
    fout.close()
    print("Done!")
    del fout
    del data_list
def make_whole_as_epoch(raw, e_id=666):
    """Wrap an entire raw recording as a single Epoch.

    :param raw: a raw object
    :param e_id: integer event id; must not collide with existing ids
    :return: an Epochs object containing one epoch spanning the recording
    """
    data, _ = raw[:, :]
    event_id = {'Added': e_id}  # artificially add one event
    event = [[0, 0, e_id]]  # mark the event with this id at the first sample
    epoch = mne.EpochsArray([data], raw.info, event, 0, event_id)
    return epoch
def tfr_analyze(epochs, freqs, resample=None, decim=1):
    """Time-frequency analysis of epochs using Morlet wavelets.

    :param epochs: the Epochs object to analyse
    :param freqs: ndarray of frequencies of interest, e.g. np.arange(80, 100, 0.5)
    :param resample: optional integer resampling rate to reduce memory pressure
    :param decim: integer decimation factor applied to the transform output
    :return: AverageTFR object holding the transformed data and metadata
    """
    if resample is not None:
        epochs.resample(resample, npad='auto')  # resample to cut memory use
    n_cycles = freqs / 2.
    # Morlet wavelet transform; ``decim`` keeps only every decim-th output
    # sample (e.g. decim=5 divides the output rate by 5).
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=decim)
    power.info['sfreq'] /= decim
    return power
def tfr_extract(power, tmin=0, tmax=None):
    '''
    Extract the time span of interest from a time-frequency result.

    :param power: AverageTFR-like object exposing ``info['sfreq']`` and a
        ``data`` attribute of shape (n_channels, n_freqs, n_times)
    :param tmin: start time in seconds (included)
    :param tmax: end time in seconds (excluded); None means "to the end"
    :return: ndarray, shape (n_channels, n_freqs, n_selected_times)
    '''
    sfreq = power.info['sfreq']
    start = int(tmin * sfreq)
    end = None if tmax is None else int(tmax * sfreq)
    # Vectorized slice along the time axis instead of the original
    # triple-nested Python comprehension: same values, but one C-level
    # operation rather than one Python iteration per sample.
    # .copy() keeps the original behavior of returning an independent array.
    return np.asarray(power.data)[:, :, start:end].copy()
def get_cost_matrix(elec_pos):
    '''
    Build the pairwise-distance (cost) matrix between electrodes.

    :param elec_pos: list of dicts holding 'name' and 'pos' (coordinate array)
    :return: n x n symmetric list-of-lists of Euclidean distances
    '''
    n = len(elec_pos)
    cost_matrix = [[0] * n for _ in range(n)]
    # Fill only the upper triangle and mirror it: the matrix is symmetric
    # and the diagonal stays zero.
    for i in range(n):
        for j in range(i + 1, n):
            dist = np.linalg.norm(elec_pos[i]['pos'] - elec_pos[j]['pos'])
            cost_matrix[i][j] = dist
            cost_matrix[j][i] = dist
    return cost_matrix
def least_traversal(elec_pos):
    '''
    Greedy nearest-neighbour traversal, tried from every possible start
    electrode; the cheapest tour found is returned.

    :param elec_pos: list of dicts holding 'name' and 'pos'
    :return: (min_cost, min_path) -- smallest total distance and the
        corresponding list of electrode names
    '''
    cost_matrix = get_cost_matrix(elec_pos)
    n = len(elec_pos)
    INF = 9999999  # sentinel larger than any realistic electrode distance
    min_cost = INF
    min_path = None
    for start in range(n):
        visited = [False] * n
        cur = start
        cost = 0
        path = [elec_pos[cur]['name']]
        # Visit the remaining n-1 electrodes, always hopping to the
        # nearest one not yet seen.
        for _ in range(n - 1):
            visited[cur] = True
            best_d = INF
            best_i = 0
            for cand in range(n):
                d = cost_matrix[cur][cand]
                if d < best_d and not visited[cand]:
                    best_d = d
                    best_i = cand
            cost += best_d
            path.append(elec_pos[best_i]['name'])
            cur = best_i
        if cost < min_cost:
            min_cost = cost
            min_path = path
    return min_cost, min_path
def retrieve_chs_from_mat(patient_name):
    '''
    Extract channel names and coordinates from a MATLAB .mat file.

    :param patient_name: patient identifier; the file must be named
        <patient_name>.mat in the current working directory
    :return: elec_pos: list of dicts with 'name' (str) and 'pos' (ndarray)
    '''
    pos_info = sio.loadmat(patient_name + ".mat")
    elec_pos = list()
    # The nested [0][0][...] indexing unwraps scipy's representation of the
    # MATLAB struct 'elec_Info_Final': field 0 holds the names, field 1 the
    # coordinates -- assumed layout, confirm against the .mat files in use.
    for i in range(pos_info['elec_Info_Final'][0][0][1][0].size):  # name is a str, pos an ndarray
        elec_pos.append({'name': pos_info['elec_Info_Final'][0][0][0][0][i][0],
                         'pos': pos_info['elec_Info_Final'][0][0][1][0][i][0]})
    return elec_pos
def get_path(patient_name):
    '''
    Compute the channel ordering for one patient and save it as a CSV file
    named ./<patient_name>_seq.csv.

    :param patient_name: patient identifier (expects <patient_name>.mat on disk)
    '''
    _, path = least_traversal(retrieve_chs_from_mat(patient_name))
    print(path)
    print(len(path))
    # One row per channel: (sequence index, channel name).
    rows = [[idx, chan] for idx, chan in enumerate(path)]
    csv_frame = pd.DataFrame(columns=['ID', 'chan_name'], data=rows)
    csv_frame.to_csv('./' + patient_name + '_seq.csv', encoding='utf-8')
def draw_seeg_picture(data, sampling=500, x_axis='Time(s)', y_axis='Channel'):
    '''
    Render a SEEG signal matrix as an image (channels x time) for visual
    inspection.

    :param data: 2-D array of SEEG samples, shape (n_channels, n_times)
    :param sampling: sampling rate in Hz; currently only referenced by the
        commented-out tick code below
    :param x_axis: x-axis label
    :param y_axis: y-axis label
    :return: None (displays the figure)
    '''
    width = data.shape[1]
    height = data.shape[0]
    dpi = 50
    # Figure size is scaled down from the raw pixel dimensions of the data.
    plt.figure(figsize=(width // (dpi * 5), height // dpi), dpi=200)
    # my_x_ticks = np.arange(0, width // sampling, 1.0 / sampling) # original data has `width` samples; ticks start at 0 with 1/sampling spacing
    # plt.xticks(my_x_ticks)
    plt.xlabel(x_axis)
    plt.ylabel(y_axis)
    # plt.axis('off')
    plt.imshow(data, aspect='auto')
    plt.show()
    plt.close()
| 28.181637 | 112 | 0.614633 | #!/usr/bin/python3
import os
import uuid
import mne
import numpy as np
import pandas as pd
import pyedflib
import scipy.io as sio
from mne.time_frequency import *
import matplotlib.pyplot as plt
def read_raw(path):
    '''Load a .fif file into memory and return the mne Raw object.'''
    raw = mne.io.read_raw_fif(path, preload=True)
    return raw
def read_edf_raw(path):
    '''Load an .edf file into memory and return the mne Raw object.'''
    raw = mne.io.read_raw_edf(path, preload=True)
    return raw
def get_channels_names(raw):
    '''Return the list of channel names stored in the recording's info.'''
    channel_names = raw.info['ch_names']
    return channel_names
def get_recorder_time(data):
    '''
    :param data: raw data
    :return: total recorded duration of this file, in seconds (timestamp
        of the last sample)
    '''
    # NOTE: the local name shadows the module-level `time` import inside
    # this function body.
    time = data.times[-1]
    return time
def re_sampling(data, fz):
    '''
    Resample the recording in place to a new rate.

    :param data: data loaded through the mne module
    :param fz: target resampling frequency in Hz
    :return: the same (now resampled) data object
    '''
    data.resample(fz, npad="auto")
    return data
def filter_hz(raw, high_pass, low_pass):  # band-pass filter: keep the (high_pass, low_pass) frequency range
    '''Apply an in-place FIR band-pass filter and return the same object.'''
    raw.filter(high_pass, low_pass, fir_design='firwin')
    return raw
def save_numpy_info(data, path):  # persist numpy data on disk
    '''
    Save *data* with numpy unless *path* already exists.

    :param data: array-like to persist via ``np.save``
    :param path: destination file name
    :return: True when written, False when the file already existed
    '''
    if not os.path.exists(path):
        np.save(path, data)
        print("Successfully save!")
        return True
    print("File is exist!!!")
    return False
def rewrite(raw, include_names, save_path):  # rewrite the data keeping only selected (special) channels
    '''
    Save a copy of the recording restricted to the picked channel types
    plus the explicitly included channel names.

    :param raw: the original loaded recording
    :param include_names: channel names to include explicitly
    :param save_path: destination path for the saved file
    :return: True
    '''
    # Pick MEG channels only (no EEG / stim), plus the requested names,
    # excluding channels marked bad.
    want_meg = True
    want_eeg = False
    want_stim = False
    picks = mne.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
                           include=include_names, exclude='bads')
    print("include channel names:{}".format(include_names))
    raw.save(save_path, picks=picks, overwrite=True)
    # raw.save("SEEG.fif", picks=picks_seeg, overwrite=True)
    print("successfully written!")
    return True
def get_common_channels(ch_names1, ch_names2):  # channels shared by two recordings
    '''
    :param ch_names1: channel-name list of the first recording (order kept)
    :param ch_names2: channel-name list of the second recording
    :return: names from ch_names1 that also appear in ch_names2
    '''
    wanted = set(ch_names2)  # O(1) membership tests
    return [name for name in ch_names1 if name in wanted]
def data_connection(raw1, raw2):  # concatenate two recordings
    '''
    Append raw2 to raw1 (mutates raw1 in place) and return raw1.

    :param raw1: raw data1
    :param raw2: raw data2
    :return: the mutated raw1 (raw1 followed by raw2)
    '''
    raw1.append(raw2)
    return raw1
def select_channel_data(raw, select_channel_names):  # select data directly by channel names
    '''
    Return the sample array for the named channels only.

    :param raw: raw data
    :param select_channel_names: channel names to extract
    :return: channel data (samples of the picked channels)
    '''
    ch_names = get_channels_names(raw)
    pick_channel_No = mne.pick_channels(ch_names=ch_names, include=select_channel_names)
    data, time = raw[pick_channel_No, :]
    return data
def select_channel_data_mne(raw, select_channel_name):  # re-pick channels in the requested order
    '''Return a copy of *raw* restricted to the given channel names.'''
    chan_name = select_channel_name
    specific_chans = raw.copy().pick_channels(chan_name)
    # specific_chans.plot(block=True)
    return specific_chans
def data_split(raw, time_step):  # slice the recording into fixed-size windows
    '''
    Split the recording into consecutive windows of *time_step* seconds.

    :param raw: the loaded recording
    :param time_step: window size in seconds
    :return: list of sample arrays, one per window
    '''
    # NOTE: this local list shadows the function name `data_split` inside
    # the body.
    data_split = []
    end = max(raw.times)
    epoch = int(end // time_step)
    fz = int(len(raw) / end)  # sampling rate (samples per second)
    # NOTE(review): range(epoch - 1) drops the last full window -- confirm
    # whether skipping it (e.g. to avoid a partial chunk) is intentional.
    for index in range(epoch - 1):
        start = index * fz * time_step
        stop = (index + 1) * fz * time_step
        data, time = raw[:, start:stop]
        data_split.append(data)
    return data_split
def get_sampling_hz(raw):  # return the sampling rate
    '''Derive the sampling rate (Hz) as sample count over total duration.'''
    end = max(raw.times)
    fz = int(len(raw) / end)  # sampling rate
    return fz
def get_duration_raw_data(raw, start, stop):
    '''
    Crop the recording to the [start, stop] time window.

    :param raw: the original recording
    :param start: window start, in seconds
    :param stop: window end, in seconds
    :return: the cropped data, or None when *stop* exceeds the recording length
    '''
    # Guard clause: reject windows that run past the end of the recording.
    if stop > max(raw.times):
        print("over range!!!")
        return None
    return raw.crop(start, stop)
def save_split_data(data_split, path, flag):  # persist sliced data
    '''
    Save every slice in *data_split* under *path* with a unique file name.

    :param data_split: list of sliced sample arrays
    :param path: target directory (created when missing); files are written
        one level below it
    :param flag: label appended to every generated file name
    :return: True
    '''
    if not os.path.exists(path):
        os.makedirs(path)
    for chunk in data_split:
        # uuid1 guarantees a unique name; the flag identifies the class/label.
        file_name = "{}-{}".format(uuid.uuid1(), flag)
        save_numpy_info(chunk, os.path.join(path, file_name))
    print("File save successfully {}".format(path))
    return True
def seeg_preprocess(fin, fout, seeg_chan_name):
    '''
    SEEG filtering: 50 Hz mains notch (plus harmonics) and a 0.5 Hz
    high-pass, applied to the selected channels only.

    :param fin: input data file name (EDF)
    :param fout: output file name (***must end with _raw.fif***)
    :param seeg_chan_name: list of channel names to filter
    :return: None (returns early if the channels cannot all be found)
    '''
    raw = mne.io.read_raw_edf(fin, preload=True)
    specific_chans = raw.pick_channels(seeg_chan_name)
    del raw
    if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
        print("channels number not matched")
        return
    sfreq = specific_chans.info['sfreq']  # sampling rate
    nyq = sfreq / 2.  # Nyquist frequency
    # Notch out the 50 Hz power-line frequency and all harmonics below Nyquist.
    specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
                                phase='zero')
    specific_chans.filter(0.5, None, fir_design='firwin')
    specific_chans.save(fout)
    del specific_chans
def eeg_preprocess(fin, fout, seeg_chan_name):
    '''
    EEG filtering: 50 Hz mains notch (plus harmonics) and a 1 Hz high-pass.
    Differs from seeg_preprocess in working on a copy of the raw object and
    using a 1.0 Hz (rather than 0.5 Hz) high-pass cutoff.

    :param fin: input data file name (EDF)
    :param fout: output file name (***must end with _raw.fif***)
    :param seeg_chan_name: list of channel names to filter
    :return: None (returns early if the channels cannot all be found)
    '''
    raw = mne.io.read_raw_edf(fin, preload=True)
    specific_chans = raw.copy().pick_channels(seeg_chan_name)
    del raw
    if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
        print("channels number not matched")
        return
    sfreq = specific_chans.info['sfreq']  # sampling rate
    nyq = sfreq / 2.  # Nyquist frequency
    # Notch out the 50 Hz power-line frequency and all harmonics below Nyquist.
    specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
                                phase='zero')
    specific_chans.filter(1., None, fir_design='firwin')
    specific_chans.save(fout)
    del specific_chans
def seeg_npy_plot(data, channels, save_path, save_path_npy=None):
    '''
    Plot the first selected channel of a numpy SEEG array and save the figure.

    :param data: data in numpy format, indexed by channel
    :param channels: list of selected channel indices (only the first is used)
    :param save_path: file path for the saved figure
    :param save_path_npy: optional path; when given, the first channel's raw
        samples are also saved as a .npy file
    :return: True
    '''
    k = len(channels)
    # NOTE(review): k is immediately overridden to 1, so only one channel is
    # ever plotted -- the len(channels) value above is dead.
    k = 1  # plot a single channel only
    plt.figure(0)
    plt.subplots_adjust(hspace=0.6, wspace=0.6)
    if save_path_npy is not None:
        data_p = data[channels[0]]
        np.save(save_path_npy, data_p)
    for i in range(k):
        try:
            plt.subplot(k, 1, i + 1)
            plt.title("channel:{}".format(channels[i]))
            plt.plot(data[channels[i]])
        except IndexError:
            print("IndexError")
    plt.savefig(save_path)
    plt.close(0)
    # plt.show()
    return True
def split_edf(filename, NEpochs=1):  # split an oversized EDF file into NEpochs smaller EDF files
    '''
    Split one EDF file into NEpochs chunk files named <basename>_<i>.edf,
    written into the current working directory.

    :param filename: source file path
    :param NEpochs: number of chunks to produce
    :return: None
    '''
    dirname = os.path.dirname(filename)
    basename = os.path.basename(filename)
    oridir = os.getcwd()
    if dirname != "":  # pyedflib can only read files from the current working directory
        os.chdir(dirname)
    f = pyedflib.EdfReader(basename)
    os.chdir(oridir)  # switch back to the original directory
    # Samples per chunk; integer division silently drops any remainder.
    NSamples = int(f.getNSamples()[0] / NEpochs)
    NChannels = f.signals_in_file
    fileOutPrefix = basename + '_'
    # Copy every channel's header so the chunks keep the original metadata.
    channels_info = list()
    for ch in range(NChannels):
        ch_dict = dict()
        ch_dict['label'] = f.getLabel(ch)
        ch_dict['dimension'] = f.getPhysicalDimension(ch)
        ch_dict['sample_rate'] = f.getSampleFrequency(ch)
        ch_dict['physical_max'] = f.getPhysicalMaximum(ch)
        ch_dict['physical_min'] = f.getPhysicalMinimum(ch)
        ch_dict['digital_max'] = f.getDigitalMaximum(ch)
        ch_dict['digital_min'] = f.getDigitalMinimum(ch)
        ch_dict['transducer'] = f.getTransducer(ch)
        ch_dict['prefilter'] = f.getPrefilter(ch)
        channels_info.append(ch_dict)
    for i in range(NEpochs):
        print("File %d starts" % i)
        fileOut = os.path.join('.', fileOutPrefix + str(i) + '.edf')
        fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
        data_list = list()
        for ch in range(NChannels):
            # NOTE(review): the remainder-to-the-end branch keys on the LAST
            # CHANNEL (ch == NChannels - 1) rather than the last chunk
            # (i == NEpochs - 1), so channels get different lengths within a
            # chunk; and the `- 1` in the slice end drops one sample per
            # chunk (Python slice ends are already exclusive). Both look
            # like bugs -- confirm before relying on the output files.
            if ch == NChannels - 1:
                data_list.append(f.readSignal(ch)[i * NSamples:])
            else:
                data_list.append(f.readSignal(ch)[i * NSamples: (i + 1) * NSamples - 1])
        fout.setSignalHeaders(channels_info)
        fout.writeSamples(data_list)
        fout.close()
        del fout
        del data_list
        print("File %d done" % i)
def save_raw_as_edf(raw, fout_name): # 把raw数据存为edf格式
'''
:param raw: raw格式数据
:param fout_name: 输出的文件名
:return:
'''
NChannels = raw.info['nchan']
channels_info = list()
for i in range(NChannels):
'''默认参数来自edfwriter.py'''
ch_dict = dict()
ch_dict['label'] = raw.info['chs'][i]['ch_name']
ch_dict['dimension'] = 'mV'
ch_dict['sample_rate'] = raw.info['sfreq']
ch_dict['physical_max'] = 1.0
ch_dict['physical_min'] = -1.0
ch_dict['digital_max'] = 32767
ch_dict['digital_min'] = -32767
ch_dict['transducer'] = 'trans1'
ch_dict['prefilter'] = "pre1"
channels_info.append(ch_dict)
fileOut = os.path.join('.', fout_name + '.edf')
fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
data_list, _ = raw[:, :]
print(data_list)
fout.setSignalHeaders(channels_info)
fout.writeSamples(data_list)
fout.close()
print("Done!")
del fout
del data_list
def make_whole_as_epoch(raw, e_id=666):
'''
将一整个raw作为一个epoch返回
:param raw: raw类型对象
:param e_id: 整数类型,指定event的id,不能与已有id重复
:return: Epochs对象
'''
data, _ = raw[:, :]
event_id = {'Added': e_id} # 人为增加一个event
event = [[0, 0, e_id]] # 在第一个样本处标记event为id
epoch = mne.EpochsArray([data], raw.info, event, 0, event_id)
return epoch
def tfr_analyze(epochs, freqs, resample=None, decim=1):
'''
freqs:type为ndarray,指定一个离散的频率数组
:param epochs: 待分析的Epochs对象
:param freqs: ndarray类型,包含感兴趣的所有频率,例np.arange(80,100,0.5)
:param resample: 整数类型,指明重采样频率,通过对数据重采样减轻内存压力
:param decim: 整数类型,只抽取时频变换后的部分结果,减轻内存压力
:return: AverageTFR对象,包含时频变换后的数据和信息
'''
if resample is not None:
epochs.resample(resample, npad='auto') # 重采样,减少内存消耗
n_cycles = freqs / 2.
# 使用小波变换进行时频变换
# decim参数指定对转换过的结果后再次重采样的频率,例如若指定为5,则频率变为原来的5分之一
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=decim)
power.info['sfreq'] /= decim
return power
def tfr_extract(power, tmin=0, tmax=None):
'''
提取tfr_analyze返回的数据中感兴趣的时间段
:param power: AverageTFR对象,时频变换的输出
:param tmin: 时间起点(包含在区间内)
:param tmax: 时间终点(不包含在区间内)
:return: ndarray, shape(n_channels, n_freqs, n_times)
'''
sfreq = power.info['sfreq']
start = int(tmin * sfreq)
if tmax is None:
return np.array([[[k for k in power.data[i][j][start:]] for j in range(len(power.data[i]))] for i in
range(len(power.data))])
else:
end = int(tmax * sfreq)
return np.array([[[k for k in power.data[i][j][start: end]] for j in range(len(power.data[i]))] for i in
range(len(power.data))])
def get_cost_matrix(elec_pos):
'''
获取代价矩阵(不同电极之间的距离)
:param elec_pos: 含有信道名以及坐标的字典
:return: cost_matrix: 代价矩阵
'''
n = len(elec_pos)
cost_matrix = [[0 for _ in range(n)] for _ in range(n)]
i = 0
while i < n:
j = i + 1
while j < n:
cost_matrix[i][j] = np.linalg.norm(elec_pos[i]['pos'] - elec_pos[j]['pos'])
cost_matrix[j][i] = cost_matrix[i][j]
j += 1
i += 1
return cost_matrix
def least_traversal(elec_pos):
'''
枚举所有起点计算出最小代价的遍历路径
:param elec_pos: 含有信道名以及坐标的字典
:return: min_cost: 最小代价
:return: min_path: 对应路径
'''
cost_matrix = get_cost_matrix(elec_pos)
n = len(elec_pos)
maximum = 9999999
min_cost = maximum
min_path = None
for start in range(n):
visited = [False for _ in range(n)]
n_visited = 0
cur = start
cost = 0
path = [elec_pos[cur]['name']]
while n_visited < n - 1:
visited[cur] = True
n_visited += 1
min_d = maximum
min_i = 0
for i in range(n):
d = cost_matrix[cur][i]
if d < min_d and not visited[i]:
min_d = d
min_i = i
cost += min_d
path.append(elec_pos[min_i]['name'])
cur = min_i
if cost < min_cost:
min_cost = cost
min_path = path
return min_cost, min_path
def retrieve_chs_from_mat(patient_name):
'''
提取.mat文件中的信道名和坐标信息
:param patient_name: 目标病人名(须保证文件名为patient_name.mat)
:return: elec_pos: 含有信道名以及坐标的字典
'''
pos_info = sio.loadmat(patient_name + ".mat")
elec_pos = list()
for i in range(pos_info['elec_Info_Final'][0][0][1][0].size): # name为字符串,pos为ndarray格式
elec_pos.append({'name': pos_info['elec_Info_Final'][0][0][0][0][i][0],
'pos': pos_info['elec_Info_Final'][0][0][1][0][i][0]})
return elec_pos
def get_path(patient_name):
'''
获取当前病人的信道排列并保存在.csv文件中
:param patient_name: 目标病人名
'''
_, path = least_traversal(retrieve_chs_from_mat(patient_name))
print(path)
path_len = len(path)
print(path_len)
to_csv = [[i for i in range(path_len)], path]
to_csv = [[row[i] for row in to_csv] for i in range(path_len)]
col = ['ID', 'chan_name']
csv_frame = pd.DataFrame(columns=col, data=to_csv)
csv_frame.to_csv('./' + patient_name + '_seq.csv', encoding='utf-8')
def draw_seeg_picture(data, sampling=500, x_axis='Time(s)', y_axis='Channel'):
'''
:param data: SEEG读取的信号, 进行可视化的读取
:return:
'''
width = data.shape[1]
height = data.shape[0]
dpi = 50
plt.figure(figsize=(width // (dpi * 5), height // dpi), dpi=200)
# my_x_ticks = np.arange(0, width // sampling, 1.0 / sampling) # 原始数据有width个数据,故此处为设置从0开始,间隔为1/sampling
# plt.xticks(my_x_ticks)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
# plt.axis('off')
plt.imshow(data, aspect='auto')
plt.show()
plt.close()
| 943 | 0 | 161 |
399944f770da098b782df0f1aca937cdfb08ea4a | 151 | py | Python | databinding/values/admin.py | fp4code/channels-examples | 1a7ed1c5b652e6db07206b1f27d7fef249433f2e | [
"BSD-3-Clause"
] | 2 | 2016-10-20T10:15:24.000Z | 2017-07-13T08:14:37.000Z | databinding/values/admin.py | fp4code/channels-examples | 1a7ed1c5b652e6db07206b1f27d7fef249433f2e | [
"BSD-3-Clause"
] | 1 | 2021-06-10T23:39:59.000Z | 2021-06-10T23:39:59.000Z | databinding/values/admin.py | fp4code/channels-examples | 1a7ed1c5b652e6db07206b1f27d7fef249433f2e | [
"BSD-3-Clause"
] | 2 | 2017-02-18T16:55:19.000Z | 2019-11-08T00:49:22.000Z | from django.contrib import admin
from .models import IntegerValue
admin.site.register(
IntegerValue,
list_display=["id", "name", "value"],
)
| 16.777778 | 41 | 0.715232 | from django.contrib import admin
from .models import IntegerValue
admin.site.register(
IntegerValue,
list_display=["id", "name", "value"],
)
| 0 | 0 | 0 |
522972fc5fa6c27c4855df045dd1ef75c0f1dcd8 | 9,408 | py | Python | LianJia.py | benaustin2000/ShanghaiHousePrice | 0a69df5d41bdf7d9f2de1335985240888fa8866e | [
"Apache-2.0"
] | 1 | 2020-11-11T07:20:20.000Z | 2020-11-11T07:20:20.000Z | LianJia.py | benaustin2000/ShanghaiHousePrice | 0a69df5d41bdf7d9f2de1335985240888fa8866e | [
"Apache-2.0"
] | null | null | null | LianJia.py | benaustin2000/ShanghaiHousePrice | 0a69df5d41bdf7d9f2de1335985240888fa8866e | [
"Apache-2.0"
] | 2 | 2019-09-06T05:04:25.000Z | 2019-11-04T03:01:47.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:35:15 2017
@author: austin
V1.2 use SoupStrainer for lower RAM usage. But looks a little bit slow, then skip the sleep time
"""
#主要程序
import requests
import re
from bs4 import BeautifulSoup,SoupStrainer
import pandas#pandas大法好
from fake_useragent import UserAgent
import time,random,sys
import gc,psutil,os #To get the system memory information
import linecache
import tracemalloc
#Get system empty memory
proc = psutil.Process(os.getpid())
gc.collect()
mem0 = proc.memory_info().rss/1048576
#tracemalloc.start()
ua=UserAgent()#使用随机header,模拟人类
headers1={'User-Agent': 'ua.random'}#使用随机header,模拟人类
TotalPrice=[] #Total price
PricePerArea=[] #price per meter
HouseArea=[]
HouseHeight=[]
HouseConfig=[]
HouseCommunit=[]
HouseLocMajor=[]
HouseLocMinor=[]
HouseBuildYear=[]
LinkUrl=[]
domain='http://sh.lianjia.com'#为了之后拼接子域名爬取详细信息
#'http://sh.lianjia.com/ershoufang/pudong/a1p21d2',#爬取拼接域名
DistrictList=['pudong','minhang']
SizeLevelList=['a'] #总共a1~a7
PriceLevelList=['p2'] #总共p21~p27
# Use SoupStrainer to minimize the memory
StrainerPrice = SoupStrainer('span',attrs={'class':'total-price strong-num'})
StrainerPriceper = SoupStrainer('span',attrs={'class':'info-col price-item minor'})
StrainerHouseInfo = SoupStrainer('span',attrs={'class':'info-col row1-text'})
StrainerHouseAddr = SoupStrainer('span',attrs={'class':'info-col row2-text'})
StrainerHouseWeb = SoupStrainer('div',attrs={'class':'prop-title'})
for SizeLevel in range(1,7):
totalpage=100
i=1
for i in range(1,100):#爬取2页,想爬多少页直接修改替换掉400,不要超过总页数就好
if i>totalpage:
break
begin = time.time()
mem0 = proc.memory_info().rss/1048576
res=requests.get('http://sh.lianjia.com/ershoufang/'+DistrictList[0]+'/a'+str(SizeLevel)+'d'+str(i),headers=headers1)#爬取拼接域名
soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
if i==1:
#soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
results_totalpage=soup.find('a',attrs={'gahref':'results_totalpage'})
totalpage=int(results_totalpage.string)
results_totalpage=None
print(totalpage,DistrictList[0]+'/a'+str(SizeLevel))
#else:
# soup = BeautifulSoup(res.text,'html.parser',parse_only=strainer)#使用html筛选器
#links = SoupStrainer('a')
#price=soup.find_all('span',attrs={'class':'total-price strong-num'})
price=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPrice).contents
#price[0].string # 323
priceper=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPriceper).contents
#priceper=soup.find_all('span',attrs={'class':'info-col price-item minor'})
#re.findall(r'\d{5}',priceper[0].string) # ['66123']
houseInfo=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseInfo).contents
#houseInfo=soup.find_all('span',attrs={'class':'info-col row1-text'})
#houseInfo[0].get_text() #'\n\n\t\t\t\t\t\t\t1室1厅 | 40.53平\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t| 中区/5层\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t'
#text=re.sub(r'\n\t*| |','',houseInfo[0].get_text()) #'1室1厅|40.53平|中区/5层'
#re.split(r'\|', text) #['1室1厅', '40.53平', '中区/5层']
houseAddr=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseAddr).contents
#houseAddr=soup.find_all('span',attrs={'class':'info-col row2-text'})
#houseAddr2=houseAddr[0].find_all('a')
#houseAddr2[0].string #'虹延小区'
#houseAddr2[1].string #'长宁'
#houseAddr2[2].string #'西郊'
#re.findall(r'\d{4}',houseAddr[0].get_text()) # ['1995']
houseWeb=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseWeb)
j=0
for j in range(0,(len(price))): #并非所有页都是30项
try:
LinkUrl.append(houseWeb.select('.prop-title a')[j]['href'])
TotalPrice.append(price[j].string)
# 323
UnitPrice=re.findall(r'\d{5}',priceper[j].string)
#['66123']
if UnitPrice:
PricePerArea.append(int(UnitPrice[0])) # '66123'
else:
PricePerArea.append('unknow') # '1995'
HouseInfo1=re.split(r'\|',re.sub(r'\n\t*| |平','',houseInfo[j].get_text()))
#['1室1厅', '40.53平', '中区/5层']
HouseArea.append(float(HouseInfo1[1]))
HouseHeight.append(HouseInfo1[2])
HouseConfig.append(HouseInfo1[0])
houseAddr2=houseAddr[j].find_all('a')
HouseCommunit.append(houseAddr2[0].string) #'虹延小区'
HouseLocMajor.append(houseAddr2[1].string) #'长宁'
HouseLocMinor.append(houseAddr2[2].string) #'西郊'
BuildYear=re.findall(r'\d{4}',houseAddr[j].get_text())
if BuildYear:
HouseBuildYear.append(int(BuildYear[0])) # '1995'
else:
HouseBuildYear.append('unknow') # '1995'
except:
info=sys.exc_info()
print(info[0],":",info[1])
#soup.decompose()
#gc.collect()
end = time.time()
#sleeptime=random.randint(1, 5)/10
sleeptime=0
mem1 = proc.memory_info().rss/1048576
#print("Allocation: %0.1f" % (mem1-mem0))
#print(str(i),round(end - begin,2),sleeptime)
print("#%s-%s/%s-process:%.2fs wait:%.2fs Mem:%.1fMB"
% (str(SizeLevel),str(i),str(totalpage), end - begin, sleeptime, mem1-mem0))
#time.sleep(sleeptime)
#When every new page request, empty the memory
# del res,soup,price,priceper,houseInfo,houseAddr
# mem2 = proc.memory_info().rss
# gc.collect()
# mem3 = proc.memory_info().rss
# pd = lambda x2, x1: 100.0 * (x2 - x1) / mem0
# print("Allocation: %0.2f%%" % pd(mem1, mem0),
# "Unreference: %0.2f%%" % pd(mem2, mem1),
# "Collect: %0.2f%%" % pd(mem3, mem2),
# "Overall: %0.2f%%" % pd(mem3, mem0))
#snapshot = tracemalloc.take_snapshot()
#display_top(snapshot)
df=pandas.DataFrame({'总价':TotalPrice,'单价':PricePerArea,'房型':HouseConfig,
'层':HouseHeight,'面积':HouseArea,'小区':HouseCommunit,
'区':HouseLocMajor,'板块':HouseLocMinor,'房龄':HouseBuildYear,
'网址':LinkUrl})
datetimestr=time.strftime('%Y-%m-%d',time.localtime(time.time()))
df.to_csv(datetimestr+'-'+DistrictList[0]+'-LianJia.csv')
#def gethousedetail1(url,soup,j):#定义函数,目标获得子域名里的房屋详细信息
# info={}#构造字典,作为之后的返回内容
# s=soup.select('.info-col a')[1+3*j]#通过传入的j获取所在区的内容
# pat='<a.*?>(.c)</a>'#构造提取正则
# info['所在区']=''.join(list(re.compile(pat).findall(str(s))))#使用join将提取的列表转为字符串
# s1=soup.select('.info-col a')[0+3*j]#[0].text.strip()
# pat1='<span.*?>(.*?)</span>'
# info['具体地点']=''.join(list(re.compile(pat1).findall(str(s1))))
# s2=soup.select('.info-col a')[2+3*j]#[0].text.strip()
# pat2='<a.*?>(.*?)</a>'
# info['位置']=''.join(list(re.compile(pat2).findall(str(s2))))
# q=requests.get(url)#使用子域名
# soup=BeautifulSoup(q.text,'html.parser')#提取子域名内容,即页面详细信息
# for dd in soup.select('.content li'):#提取class=content标签下的li标签房屋信息
# a=dd.get_text(strip=True)#推荐的去空格方法,比strip()好用
# if ':' in a:#要有冒号的,用中文的冒号,因为网页中是中文
# key,value=a.split(':')#根据冒号切分出键和值
# info[key]=value
# info['总价']=soup.select('.bold')[0].text.strip()#提取总价信息
# return info#传回这一个页面的详细信息
#for i in range(1,5):#爬取399页,想爬多少页直接修改替换掉400,不要超过总页数就好
# res=requests.get('http://sh.lianjia.com/ershoufang/d'+str(i),headers=headers1)#爬取拼接域名
# soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
##print(soup)
#for j in range(0,29):#网站每页呈现30条数据,循环爬取
# url1=soup.select('.prop-title a')[j]['href']#选中class=prop-title下的a标签里的第j个元素的href子域名内容
# url=domain+url1#构造子域名
# print(soup)
# houseary.append(gethousedetail1(url,soup,j))#传入自编函数需要的参数
#
#df=pandas.DataFrame(houseary)
#df
#df.to_excel('house_lianjia.xlsx')
| 43.155963 | 155 | 0.596407 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:35:15 2017
@author: austin
V1.2 use SoupStrainer for lower RAM usage. But looks a little bit slow, then skip the sleep time
"""
#主要程序
import requests
import re
from bs4 import BeautifulSoup,SoupStrainer
import pandas#pandas大法好
from fake_useragent import UserAgent
import time,random,sys
import gc,psutil,os #To get the system memory information
import linecache
import tracemalloc
#Get system empty memory
proc = psutil.Process(os.getpid())
gc.collect()
mem0 = proc.memory_info().rss/1048576
def display_top(snapshot, key_type='lineno', limit=10):
    """Print the *limit* largest allocation sites of a tracemalloc snapshot."""
    MB = 1048576  # bytes per mebibyte
    # Hide importlib bookkeeping and unknown frames from the report.
    filtered = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    top_stats = filtered.statistics(key_type)
    print("Top %s lines" % limit)
    index = 0
    for stat in top_stats[:limit]:
        index += 1
        frame = stat.traceback[0]
        # Shorten "/path/to/module/file.py" to "module/file.py".
        short_name = os.sep.join(frame.filename.split(os.sep)[-2:])
        print("#%s: %s:%s: %.1f MB" % (index, short_name, frame.lineno, stat.size / MB))
        source_line = linecache.getline(frame.filename, frame.lineno).strip()
        if source_line:
            print(' %s' % source_line)
    remaining = top_stats[limit:]
    if remaining:
        print("%s other: %.1f MB" % (len(remaining), sum(s.size for s in remaining) / MB))
    print("Total allocated size: %.1f MB" % (sum(s.size for s in top_stats) / MB))
#tracemalloc.start()
ua=UserAgent()#使用随机header,模拟人类
headers1={'User-Agent': 'ua.random'}#使用随机header,模拟人类
TotalPrice=[] #Total price
PricePerArea=[] #price per meter
HouseArea=[]
HouseHeight=[]
HouseConfig=[]
HouseCommunit=[]
HouseLocMajor=[]
HouseLocMinor=[]
HouseBuildYear=[]
LinkUrl=[]
domain='http://sh.lianjia.com'#为了之后拼接子域名爬取详细信息
#'http://sh.lianjia.com/ershoufang/pudong/a1p21d2',#爬取拼接域名
DistrictList=['pudong','minhang']
SizeLevelList=['a'] #总共a1~a7
PriceLevelList=['p2'] #总共p21~p27
# Use SoupStrainer to minimize the memory
StrainerPrice = SoupStrainer('span',attrs={'class':'total-price strong-num'})
StrainerPriceper = SoupStrainer('span',attrs={'class':'info-col price-item minor'})
StrainerHouseInfo = SoupStrainer('span',attrs={'class':'info-col row1-text'})
StrainerHouseAddr = SoupStrainer('span',attrs={'class':'info-col row2-text'})
StrainerHouseWeb = SoupStrainer('div',attrs={'class':'prop-title'})
for SizeLevel in range(1,7):
totalpage=100
i=1
for i in range(1,100):#爬取2页,想爬多少页直接修改替换掉400,不要超过总页数就好
if i>totalpage:
break
begin = time.time()
mem0 = proc.memory_info().rss/1048576
res=requests.get('http://sh.lianjia.com/ershoufang/'+DistrictList[0]+'/a'+str(SizeLevel)+'d'+str(i),headers=headers1)#爬取拼接域名
soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
if i==1:
#soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
results_totalpage=soup.find('a',attrs={'gahref':'results_totalpage'})
totalpage=int(results_totalpage.string)
results_totalpage=None
print(totalpage,DistrictList[0]+'/a'+str(SizeLevel))
#else:
# soup = BeautifulSoup(res.text,'html.parser',parse_only=strainer)#使用html筛选器
#links = SoupStrainer('a')
#price=soup.find_all('span',attrs={'class':'total-price strong-num'})
price=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPrice).contents
#price[0].string # 323
priceper=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPriceper).contents
#priceper=soup.find_all('span',attrs={'class':'info-col price-item minor'})
#re.findall(r'\d{5}',priceper[0].string) # ['66123']
houseInfo=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseInfo).contents
#houseInfo=soup.find_all('span',attrs={'class':'info-col row1-text'})
#houseInfo[0].get_text() #'\n\n\t\t\t\t\t\t\t1室1厅 | 40.53平\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t| 中区/5层\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t'
#text=re.sub(r'\n\t*| |','',houseInfo[0].get_text()) #'1室1厅|40.53平|中区/5层'
#re.split(r'\|', text) #['1室1厅', '40.53平', '中区/5层']
houseAddr=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseAddr).contents
#houseAddr=soup.find_all('span',attrs={'class':'info-col row2-text'})
#houseAddr2=houseAddr[0].find_all('a')
#houseAddr2[0].string #'虹延小区'
#houseAddr2[1].string #'长宁'
#houseAddr2[2].string #'西郊'
#re.findall(r'\d{4}',houseAddr[0].get_text()) # ['1995']
houseWeb=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseWeb)
j=0
for j in range(0,(len(price))): #并非所有页都是30项
try:
LinkUrl.append(houseWeb.select('.prop-title a')[j]['href'])
TotalPrice.append(price[j].string)
# 323
UnitPrice=re.findall(r'\d{5}',priceper[j].string)
#['66123']
if UnitPrice:
PricePerArea.append(int(UnitPrice[0])) # '66123'
else:
PricePerArea.append('unknow') # '1995'
HouseInfo1=re.split(r'\|',re.sub(r'\n\t*| |平','',houseInfo[j].get_text()))
#['1室1厅', '40.53平', '中区/5层']
HouseArea.append(float(HouseInfo1[1]))
HouseHeight.append(HouseInfo1[2])
HouseConfig.append(HouseInfo1[0])
houseAddr2=houseAddr[j].find_all('a')
HouseCommunit.append(houseAddr2[0].string) #'虹延小区'
HouseLocMajor.append(houseAddr2[1].string) #'长宁'
HouseLocMinor.append(houseAddr2[2].string) #'西郊'
BuildYear=re.findall(r'\d{4}',houseAddr[j].get_text())
if BuildYear:
HouseBuildYear.append(int(BuildYear[0])) # '1995'
else:
HouseBuildYear.append('unknow') # '1995'
except:
info=sys.exc_info()
print(info[0],":",info[1])
#soup.decompose()
#gc.collect()
end = time.time()
#sleeptime=random.randint(1, 5)/10
sleeptime=0
mem1 = proc.memory_info().rss/1048576
#print("Allocation: %0.1f" % (mem1-mem0))
#print(str(i),round(end - begin,2),sleeptime)
print("#%s-%s/%s-process:%.2fs wait:%.2fs Mem:%.1fMB"
% (str(SizeLevel),str(i),str(totalpage), end - begin, sleeptime, mem1-mem0))
#time.sleep(sleeptime)
#When every new page request, empty the memory
# del res,soup,price,priceper,houseInfo,houseAddr
# mem2 = proc.memory_info().rss
# gc.collect()
# mem3 = proc.memory_info().rss
# pd = lambda x2, x1: 100.0 * (x2 - x1) / mem0
# print("Allocation: %0.2f%%" % pd(mem1, mem0),
# "Unreference: %0.2f%%" % pd(mem2, mem1),
# "Collect: %0.2f%%" % pd(mem3, mem2),
# "Overall: %0.2f%%" % pd(mem3, mem0))
#snapshot = tracemalloc.take_snapshot()
#display_top(snapshot)
df=pandas.DataFrame({'总价':TotalPrice,'单价':PricePerArea,'房型':HouseConfig,
'层':HouseHeight,'面积':HouseArea,'小区':HouseCommunit,
'区':HouseLocMajor,'板块':HouseLocMinor,'房龄':HouseBuildYear,
'网址':LinkUrl})
datetimestr=time.strftime('%Y-%m-%d',time.localtime(time.time()))
df.to_csv(datetimestr+'-'+DistrictList[0]+'-LianJia.csv')
#def gethousedetail1(url,soup,j):#定义函数,目标获得子域名里的房屋详细信息
# info={}#构造字典,作为之后的返回内容
# s=soup.select('.info-col a')[1+3*j]#通过传入的j获取所在区的内容
# pat='<a.*?>(.c)</a>'#构造提取正则
# info['所在区']=''.join(list(re.compile(pat).findall(str(s))))#使用join将提取的列表转为字符串
# s1=soup.select('.info-col a')[0+3*j]#[0].text.strip()
# pat1='<span.*?>(.*?)</span>'
# info['具体地点']=''.join(list(re.compile(pat1).findall(str(s1))))
# s2=soup.select('.info-col a')[2+3*j]#[0].text.strip()
# pat2='<a.*?>(.*?)</a>'
# info['位置']=''.join(list(re.compile(pat2).findall(str(s2))))
# q=requests.get(url)#使用子域名
# soup=BeautifulSoup(q.text,'html.parser')#提取子域名内容,即页面详细信息
# for dd in soup.select('.content li'):#提取class=content标签下的li标签房屋信息
# a=dd.get_text(strip=True)#推荐的去空格方法,比strip()好用
# if ':' in a:#要有冒号的,用中文的冒号,因为网页中是中文
# key,value=a.split(':')#根据冒号切分出键和值
# info[key]=value
# info['总价']=soup.select('.bold')[0].text.strip()#提取总价信息
# return info#传回这一个页面的详细信息
#for i in range(1,5):#爬取399页,想爬多少页直接修改替换掉400,不要超过总页数就好
# res=requests.get('http://sh.lianjia.com/ershoufang/d'+str(i),headers=headers1)#爬取拼接域名
# soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
##print(soup)
#for j in range(0,29):#网站每页呈现30条数据,循环爬取
# url1=soup.select('.prop-title a')[j]['href']#选中class=prop-title下的a标签里的第j个元素的href子域名内容
# url=domain+url1#构造子域名
# print(soup)
# houseary.append(gethousedetail1(url,soup,j))#传入自编函数需要的参数
#
#df=pandas.DataFrame(houseary)
#df
#df.to_excel('house_lianjia.xlsx')
| 1,023 | 0 | 25 |
8380a34f5bd929d39dfb988bc6b08e0ad747c563 | 81 | py | Python | examples/raise.py | doboy/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 7 | 2016-09-23T00:44:05.000Z | 2021-10-04T21:19:12.000Z | examples/raise.py | jameswu1991/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 1 | 2016-09-23T00:45:05.000Z | 2019-02-16T19:05:37.000Z | examples/raise.py | jameswu1991/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 3 | 2016-09-23T01:13:15.000Z | 2018-07-20T21:22:17.000Z | try:
raise AssertionError('this is a test')
except:
print('test passed')
| 16.2 | 42 | 0.666667 | try:
raise AssertionError('this is a test')
except:
print('test passed')
| 0 | 0 | 0 |
f75cd14da1a84f980f083edd4ee77f5c7c88d296 | 2,601 | py | Python | my_bilibili/my_Bilibili.py | WuJunkai2004/Pynet | 83263e65cf7cdc4e75c4335dce1173f844eda04e | [
"CNRI-Python"
] | 1 | 2020-07-08T02:47:41.000Z | 2020-07-08T02:47:41.000Z | my_bilibili/my_Bilibili.py | WuJunkai2004/python-objects | 83263e65cf7cdc4e75c4335dce1173f844eda04e | [
"CNRI-Python"
] | null | null | null | my_bilibili/my_Bilibili.py | WuJunkai2004/python-objects | 83263e65cf7cdc4e75c4335dce1173f844eda04e | [
"CNRI-Python"
] | null | null | null | # !/user/bin/python
# coding=utf-8
from __future__ import print_function
import urllib
import time
import os
import re
try:
from my_net import net
except ImportError:
raise ImportError('Sorry, can not find \'my_net\' .\nPlease view https://github.com/WuJunkai2004/Pyself/blob/master/my_net/my_net.py to download .')
__author__ ='Wu Junkai(wujunkai20041123@outlook.com)'
__version__ ='1.10.0'
__run_environment__ ='python 2.6 and above'
__edit_environment__='python 2.7.14 by IDLE'
if(__name__=='__main__'):
m=user()
m._login()
| 29.224719 | 152 | 0.510188 | # !/user/bin/python
# coding=utf-8
from __future__ import print_function
import urllib
import time
import os
import re
try:
from my_net import net
except ImportError:
raise ImportError('Sorry, can not find \'my_net\' .\nPlease view https://github.com/WuJunkai2004/Pyself/blob/master/my_net/my_net.py to download .')
__author__ ='Wu Junkai(wujunkai20041123@outlook.com)'
__version__ ='1.10.0'
__run_environment__ ='python 2.6 and above'
__edit_environment__='python 2.7.14 by IDLE'
class bilibili(object):
    """Screen-scraping client for bilibili.com search and ranking pages.

    NOTE(review): this code is Python 2 only -- it relies on
    ``urllib.urlencode``/``urllib.unquote``, ``str.decode`` on the response
    body and indexable ``dict.keys()``.
    """
    def __init__(self):
        ## Send a browser-like User-Agent so the site serves full pages.
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
        }
    def search(self,key,**kw):
        ## Search for `key`; extra keyword arguments become query parameters.
        ## version 1.00.0
        def urls(*kw):
            ## Build the search URL; an optional 'kind' entry picks the section.
            data=kw[0]
            url='https://search.bilibili.com/'
            if('kind' in data.keys()):
                url+=data['kind']
                del data['kind']
            else:
                url+='all'
            url=url+'?'+urllib.urlencode(data)
            return url
        def analyse(text):
            ## Extract the embedded JSON state object from the page source
            ## and convert its JS literals into Python ones.
            data=re.search(r'(?<=window.__INITIAL_STATE__=).+?}(?=;)',text).group().decode("utf-8")
            data=re.sub('null' ,'None' ,data)
            data=re.sub('false','False',data)
            data=re.sub('true' ,'True' ,data)
            # SECURITY(review): eval() executes text fetched from a remote
            # page; a hostile response could run arbitrary code. Prefer
            # json.loads on the raw JSON instead.
            data=eval(data)
            data=data['flow'][data['flow'].keys()[0]]['result']  # Py2: keys() is a list
            return data
        kw['keyword']=key
        html=urllib.unquote(net(urls(kw),headers=self.header).read()).split('\n')
        data=analyse(html[-1])  # the state object sits on the last line
        return data
    def ranking(self,*attr):
        ## Fetch a ranking board; optional first argument picks the category.
        ## version 1.00.0
        def urls(kind):
            ## Map category name -> ranking URL suffix.
            par={'all' : '',
                 'origin' : 'origin/0/0/3',
                 'bangumi' : 'bangumi/13/0/3',
                 'cinema' : 'cinema/177/0/3',
                 'rookie' : 'rookie/0/0/3'}
            return 'https://www.bilibili.com/ranking/'+par[kind]
        def analyse(text):
            ## Keep only the lines that are not HTML markup.
            data=[]
            for i in text:
                if(i[0]!='<'):
                    data.append(i)
            return data
        kind='all'
        if(attr):
            kind=attr[0]
        return analyse(net(urls(kind)).a)
class user(object):
    """Holds the (not yet implemented) login state for a bilibili account."""
    def __init__(self):
        # Empty credentials/profile until a login is performed.
        self.name = ''
        self.level = ''
        self._ = 0
    def _login(self, **kw):
        # Placeholder: fetch the passport page and show its title.
        print(net('https://passport.bilibili.com').title)
if(__name__=='__main__'):
    # Manual smoke test: create a user session and hit the login page.
    m=user()
    m._login()
| 1,903 | 0 | 176 |
5980c093cd5c55d3893ba7bc6a8c604081568a3b | 7,339 | py | Python | check_mix.py | fakufaku/create_wsj1_2345_db | 79b4fbc57260bc730a33f9704665a8f60372d0ef | [
"MIT"
] | 1 | 2022-02-21T05:05:32.000Z | 2022-02-21T05:05:32.000Z | check_mix.py | fakufaku/create_wsj1_2345_db | 79b4fbc57260bc730a33f9704665a8f60372d0ef | [
"MIT"
] | null | null | null | check_mix.py | fakufaku/create_wsj1_2345_db | 79b4fbc57260bc730a33f9704665a8f60372d0ef | [
"MIT"
] | null | null | null | # Import packages
import argparse
import json
import multiprocessing
import os
from pathlib import Path
import numpy as np
import scipy as scipy
from scipy.io import wavfile
from config_path import get_paths
from parallel_proc import process
from utils import (ProgressBar, is_clipped, read_source_images,
wav_format_to_float)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Creates all the configuration files")
parser.add_argument("config", type=Path, help="Path to configuration file")
parser.add_argument(
"original_dataset_paths",
type=Path,
help="Path to folders containing original datasets",
)
parser.add_argument(
"output_path", type=Path, help="Path to destination folder for the output"
)
args = parser.parse_args()
with open(args.config, "r") as f:
config = json.load(f)
# get all the paths
config_path = get_paths(config, args.original_dataset_paths, args.output_path)
check_mix(config, config_path)
| 33.511416 | 88 | 0.498433 | # Import packages
import argparse
import json
import multiprocessing
import os
from pathlib import Path
import numpy as np
import scipy as scipy
from scipy.io import wavfile
from config_path import get_paths
from parallel_proc import process
from utils import (ProgressBar, is_clipped, read_source_images,
wav_format_to_float)
def check_mix_parallel(
    n_sources, n_microphones, dic, config_path, config, fail_indices
):
    """Worker routine: validate one slice of the simulated mixes.

    For every mix index in ``[dic["start"], dic["end"])`` of subset
    ``dic["key"]`` it checks that:
      * the reverberant mix is not clipped and has no all-zero channel,
      * the anechoic source images are not clipped / all-zero,
      * the reverberant source images are not clipped / all-zero,
      * the per-source SNRs match ``wav_snr_mixing`` within
        ``config["tests"]["snr_tol"]`` dB.

    Failure records (dicts) are appended to the shared ``fail_indices``
    list.  Only the worker owning the first slice (start == 0) prints a
    header and ticks the progress bar.
    """
    if dic["start"] == 0:
        print(f"Checking mix of {n_sources} sources and {n_microphones} microphones")
    output_path = config_path.output_path
    for subset_key in config_path.subset_list:
        if subset_key != dic["key"]:
            continue
        # Folder holding the generated wav files and the mixinfo metadata.
        path = (
            config_path.output_path
            / config_path.db_root
            / config_path.subfolder_fmt.format(srcs=n_sources, mics=n_microphones)
            / f"{subset_key}"
        )
        path_mixinfo_json = os.path.join(path, "mixinfo.json")
        with open(path_mixinfo_json, mode="r") as f:
            mixinfo = json.load(f)
        str_len = max([len(x) for x in config_path.subset_list])
        prefix = "{:" + str(str_len) + "}"
        progress_bar = ProgressBar(
            dic["end"] - dic["start"], prefix=prefix.format(subset_key)
        )
        for n, (index, sim_info) in enumerate(mixinfo.items()):
            # Each worker only handles its assigned slice of indices.
            if n < dic["start"] or dic["end"] <= n:
                continue
            wav_snr_mixing = sim_info["wav_snr_mixing"]
            # Check that the mix is not clipped.
            _, mix = wavfile.read(output_path / sim_info["wav_dpath_mixed_reverberant"])
            if is_clipped(mix):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "clipped",
                        "value": "mix",
                    }
                )
            # Check that none of the channels is all zero.
            if np.any(np.max(np.abs(mix), axis=0) == 0):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "channel zero",
                        "value": "mix",
                    }
                )
            # Check the anechoic source images are not clipped.
            anechoic_images_paths = [
                output_path / p for p in sim_info["wav_dpath_image_anechoic"]
            ]
            anechoic_images = read_source_images(anechoic_images_paths)
            if is_clipped(anechoic_images):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "clipped",
                        "value": "anechoic images",
                    }
                )
            # Check that none of the anechoic channels is all zero.
            if np.any(np.max(np.abs(anechoic_images), axis=-1) == 0):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "channel zero",
                        "value": "anechoic images",
                    }
                )
            # Check the relative power of the reverberant source images.
            images_paths = [
                output_path / p for p in sim_info["wav_dpath_image_reverberant"]
            ]
            reverb_images = read_source_images(images_paths)
            # Check that the images are not clipped.
            if is_clipped(reverb_images):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "clipped",
                        "value": "reverberant images",
                    }
                )
            # Check that none of the reverberant channels is all zero.
            # NOTE(review): label "reverb images" differs from the
            # "reverberant images" label above -- confirm downstream readers.
            if np.any(np.max(np.abs(reverb_images), axis=-1) == 0):
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "channel zero",
                        "value": "reverb images",
                    }
                )
            reverb_images = wav_format_to_float(reverb_images)
            # Check the SNR of the sources with respect to each other
            # (power relative to source 0).
            power_reverberant_images = np.sum(np.square(reverb_images), axis=(1, 2))
            # Compute the actual SNR of the files.
            snr = 10.0 * np.log10(
                power_reverberant_images / power_reverberant_images[0]
            )
            # Compute the difference with the target value.
            snr_error = np.max(np.abs(snr - wav_snr_mixing))
            if snr_error >= config["tests"]["snr_tol"]:
                fail_indices.append(
                    {
                        "subset": subset_key,
                        "index": index,
                        "src": n_sources,
                        "mic": n_microphones,
                        "error": "snr",
                        "value": snr_error,
                    }
                )
            if dic["start"] == 0:
                progress_bar.tick()
def check_mix(config, config_path):
    """Fan the mix checks out to worker processes and report failures.

    A sample of the collected failure records is echoed to stdout and the
    full list is written to ``check_mix_errors.json``.
    """
    # A Manager-backed list can be appended to from the worker processes.
    manager = multiprocessing.Manager()
    fail_indices = manager.list()
    process(
        check_mix_parallel, config, config_path, extra_proc_args=[config, fail_indices]
    )
    # Show a small sample of the errors, if any occurred.
    if len(fail_indices):
        error_fn = "check_mix_errors.json"
        errors = list(fail_indices)
        print(f"There were {len(errors)} errors. For example:",)
        for sample in errors[:11]:
            print(f" - {sample}")
        print(f"The full log of errors is saved in {error_fn}")
        # Also persist the complete list for further processing.
        with open(error_fn, "w") as f:
            json.dump(errors, f, indent=4)
if __name__ == "__main__":
    # CLI entry point: validate a generated dataset against its config file.
    parser = argparse.ArgumentParser(description="Creates all the configuration files")
    parser.add_argument("config", type=Path, help="Path to configuration file")
    parser.add_argument(
        "original_dataset_paths",
        type=Path,
        help="Path to folders containing original datasets",
    )
    parser.add_argument(
        "output_path", type=Path, help="Path to destination folder for the output"
    )
    args = parser.parse_args()
    with open(args.config, "r") as f:
        config = json.load(f)
    # Resolve all input/output paths from the configuration.
    config_path = get_paths(config, args.original_dataset_paths, args.output_path)
    check_mix(config, config_path)
| 6,242 | 0 | 46 |
df99b85ceb6507b4603be42171a63fe7bb442083 | 1,267 | py | Python | JWGL/TeachingEvaluation.py | AberSheeran/Ahnu | bc9fa4cddf74b7ea1e67465d2f04874702733d79 | [
"MIT"
] | 7 | 2018-06-12T02:40:04.000Z | 2019-04-09T09:24:58.000Z | JWGL/TeachingEvaluation.py | AberSheeran/Ahnu | bc9fa4cddf74b7ea1e67465d2f04874702733d79 | [
"MIT"
] | null | null | null | JWGL/TeachingEvaluation.py | AberSheeran/Ahnu | bc9fa4cddf74b7ea1e67465d2f04874702733d79 | [
"MIT"
] | 4 | 2018-06-12T02:28:55.000Z | 2019-04-08T07:05:55.000Z | """
写教学评价简直是在浪费我的时间
Author: Aber Sheeran
Time: 2017-12-16
"""
import re
import json
from .Base import log
def fuck_the_teaching_evaluation(session):
"""教学评价"""
for each in re.findall(r"<a[\s\S]*?href='([\s\S]+?)'", (session.get_page("jxpj/xsjxpj.shtml"))):
log.debug(f"处理{each}中...")
_deal_teaching_evaluation_page(session, each)
def _deal_teaching_evaluation_page(session, page_url):
"""处理单个教学评价页面"""
post_data = {} # 将发送的信息
page = session.get_page(page_url)
# 这里不能改,教务系统写死的
for hidden_input in re.findall(r'input.+?type="hidden".*?>', page):
temproray = re.search(r'name="(?P<key>.*?)".*?value="(?P<value>.*?)"', hidden_input)
post_data[temproray.group("key")] = temproray.group("value")
# 打分部分,可以自行下调
for key, max_num in re.findall(r'input name="(?P<name>.+?)".+?max="(?P<max>\d+)".+?class="number', page):
post_data[key] = max_num
# 评语部分,随便改
post_data["PJXX"] = "上课生动有趣,深入浅出!"
log.debug(post_data)
message = session.post_data(
"/jxpj/xsjxpj/saveinfo?action=ok",
data=post_data,
)
try:
assert message["success"] == "success", message["msg"]
except AssertionError as e:
log.error(e)
| 30.902439 | 110 | 0.599842 | """
写教学评价简直是在浪费我的时间
Author: Aber Sheeran
Time: 2017-12-16
"""
import re
import json
from .Base import log
def fuck_the_teaching_evaluation(session):
    """Submit a teaching evaluation for every course linked on the page."""
    # Each <a href='...'> on the overview page links to one evaluation form.
    for each in re.findall(r"<a[\s\S]*?href='([\s\S]+?)'", (session.get_page("jxpj/xsjxpj.shtml"))):
        log.debug(f"处理{each}中...")
        _deal_teaching_evaluation_page(session, each)
def _deal_teaching_evaluation_page(session, page_url):
    """Fill in and submit a single teaching-evaluation form page."""
    post_data = {} # data that will be POSTed back
    page = session.get_page(page_url)
    # Do not change these: the hidden fields are hard-coded by the system.
    for hidden_input in re.findall(r'input.+?type="hidden".*?>', page):
        temproray = re.search(r'name="(?P<key>.*?)".*?value="(?P<value>.*?)"', hidden_input)
        post_data[temproray.group("key")] = temproray.group("value")
    # Scoring inputs: give every item its maximum score (lower if desired).
    for key, max_num in re.findall(r'input name="(?P<name>.+?)".+?max="(?P<max>\d+)".+?class="number', page):
        post_data[key] = max_num
    # Free-text comment field; the content is arbitrary.
    post_data["PJXX"] = "上课生动有趣,深入浅出!"
    log.debug(post_data)
    message = session.post_data(
        "/jxpj/xsjxpj/saveinfo?action=ok",
        data=post_data,
    )
    try:
        assert message["success"] == "success", message["msg"]
    except AssertionError as e:
        # Log a failed submission instead of aborting the whole run.
        log.error(e)
| 0 | 0 | 0 |
097b18ce85cb718dddca9e9bf9dd519d16640527 | 541 | py | Python | pipeline/integration_tests/functional_tests/conftest.py | tsu-denim/strafer-duty | a561e107dc1abc2dce6b4a51c090245831f8cfc8 | [
"MIT"
] | 9 | 2018-11-16T19:34:54.000Z | 2021-05-26T03:44:18.000Z | pipeline/integration_tests/functional_tests/conftest.py | tsu-denim/strafer-duty | a561e107dc1abc2dce6b4a51c090245831f8cfc8 | [
"MIT"
] | 1 | 2019-01-28T13:51:20.000Z | 2019-01-28T13:51:20.000Z | pipeline/integration_tests/functional_tests/conftest.py | tsu-denim/strafer-duty | a561e107dc1abc2dce6b4a51c090245831f8cfc8 | [
"MIT"
] | null | null | null | import sys
import os
# Make sure that the application source directory (this directory's parent) is
# on sys.path.
import pytest
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, here)
print(sys.path)
@pytest.fixture
| 25.761905 | 86 | 0.726433 | import sys
import os
# Make sure that the application source directory (this directory's parent) is
# on sys.path.
import pytest
# Resolve the application source directory (this directory's parent) and
# prepend it to the module search path so tests can import the app package.
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, here)
print(sys.path)  # left in to ease debugging of test collection paths
def pytest_addoption(parser):
    """Register the ``--report_path`` option (path of the junit XML report).

    The value is consumed by the ``report_path`` fixture below.
    """
    parser.addoption("--report_path", action="store", default="junit/integration.xml",
                     help="report_path: report path is the path to the junit report")
@pytest.fixture
def report_path(request):
    """Fixture returning the value of the ``--report_path`` CLI option."""
    return request.config.getoption("--report_path")
| 238 | 0 | 45 |
5fb85c7f81e39675ea48609645d42c71b59d9d75 | 242 | py | Python | codeforces/implementation模拟/800/92A喂薯片.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/implementation模拟/800/92A喂薯片.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/implementation模拟/800/92A喂薯片.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# https://codeforces.com/problemset/problem/92/A
# 之前是模拟解; 现在尝试数学解..
import math
n,m = list(map(int,input().split())) #50,1e4
cc = (n*(n+1))//2
m = m%cc
x = int((math.sqrt((m<<3)+1)-1)/2)
print(m-(x*(x+1))//2)
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/92/A
# Previously solved by simulation; this is the closed-form solution.
import math
n,m = list(map(int,input().split())) # constraints: n <= 50, m <= 1e4
cc = (n*(n+1))//2  # chips used by one full pass 1+2+...+n (triangular number)
m = m%cc  # full passes change nothing; keep only the remainder
# Largest x with x*(x+1)/2 <= m, via the inverse triangular-number formula.
x = int((math.sqrt((m<<3)+1)-1)/2)
print(m-(x*(x+1))//2)  # chips left after feeding squirrels 1..x
| 0 | 0 | 0 |
6b0a98f005d49fe601430c27e628ae3da10327a4 | 4,292 | py | Python | plugins/action/panos_commit.py | madelinemccombe/mrichardson03.panos | 00a0da41ea2b281b995e691276d25e8589879c67 | [
"0BSD"
] | null | null | null | plugins/action/panos_commit.py | madelinemccombe/mrichardson03.panos | 00a0da41ea2b281b995e691276d25e8589879c67 | [
"0BSD"
] | null | null | null | plugins/action/panos_commit.py | madelinemccombe/mrichardson03.panos | 00a0da41ea2b281b995e691276d25e8589879c67 | [
"0BSD"
] | null | null | null | # Copyright 2021 Palo Alto Networks, Inc
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Adapted from:
# https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/wait_for_connection.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import time
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import xmltodict
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible_collections.mrichardson03.panos.plugins.httpapi.panos import (
TimedOutException,
)
display = Display()
| 33.795276 | 97 | 0.633038 | # Copyright 2021 Palo Alto Networks, Inc
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Adapted from:
# https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/wait_for_connection.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import time
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import xmltodict
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible_collections.mrichardson03.panos.plugins.httpapi.panos import (
TimedOutException,
)
display = Display()
class ActionModule(ActionBase):
    """Ansible action plugin that commits pending PAN-OS configuration.

    Skips the commit when the device reports no pending changes, honours
    check mode, and polls the commit job until completion or timeout.
    """
    # This plugin never copies files to the target.
    TRANSFERS_FILES = False
    # Task arguments accepted by this module.
    _VALID_ARGS = frozenset(
        (
            "force",
            "exclude_device_and_network",
            "exclude_policy_and_objects",
            "exclude_shared_objects",
            "description",
            "admins",
            "sleep",
            "timeout",
        )
    )
    # Seconds to wait before first contacting the device.
    DEFAULT_DELAY = 10
    # Default polling interval / overall timeout (seconds) for the commit job.
    DEFAULT_SLEEP = 10
    DEFAULT_TIMEOUT = 600
    def run(self, tmp=None, task_vars=None):
        """Execute the commit and return the Ansible result dict."""
        if task_vars is None:
            task_vars = dict()
        delay = self.DEFAULT_DELAY
        sleep = int(self._task.args.get("sleep", self.DEFAULT_SLEEP))
        timeout = int(self._task.args.get("timeout", self.DEFAULT_TIMEOUT))
        # Collect the commit options passed through from the task args.
        commit_args = dict()
        commit_args["force"] = self._task.args.get("force", False)
        commit_args["exclude_device_and_network"] = self._task.args.get(
            "exclude_device_and_network", False
        )
        commit_args["exclude_policy_and_objects"] = self._task.args.get(
            "exclude_policy_and_objects", False
        )
        commit_args["exclude_shared_objects"] = self._task.args.get(
            "exclude_shared_objects", False
        )
        commit_args["description"] = self._task.args.get("description", None)
        commit_args["admins"] = self._task.args.get("admins", None)
        result = super().run(tmp, task_vars)
        del tmp  # tmp is unused
        start = datetime.now()
        if delay:
            time.sleep(delay)
        try:
            # Ask the device whether there is anything to commit at all.
            changes_result = self._connection.op("check pending-changes", is_xml=False)
            changes = ET.fromstring(changes_result).findtext(".//result")
            if changes == "no":
                result["changed"] = False
                result["msg"] = "No changes to commit."
            else:
                if not self._play_context.check_mode:
                    # Kick off the commit, then poll its job id to completion.
                    commit = self._connection.commit(**commit_args)
                    commit_job = ET.fromstring(commit).findtext(".//job")
                    display.debug("commit job: {0}".format(commit_job))
                    commit_result = self._connection.poll_for_job(
                        commit_job, interval=sleep, timeout=timeout
                    )
                    result["changed"] = True
                    result["stdout"] = json.dumps(xmltodict.parse(commit_result))
                    result["stdout_xml"] = commit_result
                else:
                    # Check mode: report that a commit would have happened.
                    result["changed"] = True
        except ConnectionError as e:
            result["failed"] = True
            result["msg"] = to_text(e)
        except TimedOutException as e:
            result["failed"] = True
            result["msg"] = to_text(e)
        elapsed = datetime.now() - start
        result["elapsed"] = elapsed.seconds
        self._remove_tmp_path(self._connection._shell.tmpdir)
        # NOTE(review): this unconditionally overwrites the msg set above,
        # including error messages and "No changes to commit." -- confirm
        # whether that is intended.
        result["msg"] = "Commit completed."
        return result
| 2,498 | 429 | 23 |
43ca2fb51e8ce89bd177315dc1e1c9d22639f106 | 3,586 | py | Python | xSG33.py | xinabox/Python-SG33 | d82aedac577a61e9690e5db097e99443d8c3b5cb | [
"MIT"
] | null | null | null | xSG33.py | xinabox/Python-SG33 | d82aedac577a61e9690e5db097e99443d8c3b5cb | [
"MIT"
] | null | null | null | xSG33.py | xinabox/Python-SG33 | d82aedac577a61e9690e5db097e99443d8c3b5cb | [
"MIT"
] | null | null | null | from xCore import xCore
CSS811_REG_STATUS = 0x00
CSS811_REG_MEAS_MODE = 0x01
CSS811_REG_ALG_RST_DATA = 0x02
CSS811_REG_RAW_DATA = 0x03
CSS811_REG_ENV_DATA = 0x05
CSS811_REG_THRESHOLDS = 0x10
CSS811_REG_BASELINE = 0x11
CSS811_REG_HW_VERSION = 0x21
CSS811_REG_FW_BOOT_V = 0x23
CSS811_REG_FW_APP_V = 0x24
CSS811_REG_FW_ERROR_ID = 0xE0
CSS811_REG_SW_RESET = 0xFF
CSS811_DATA_READY = 0x08
CSS811_REG_HW_ID = 0x20
CSS811_HW_CODE = 0x81
CCS811_BOOTLOADER_APP_ERASE = 0xF1
CCS811_BOOTLOADER_APP_DATA = 0xF2
CCS811_BOOTLOADER_APP_VERIFY = 0xF3
CCS811_BOOTLOADER_APP_START = 0xF4
CCS811_DRIVE_MODE_IDLE = 0x00
CCS811_DRIVE_MODE_1SEC = 0x10
CCS811_DRIVE_MODE_10SEC = 0x20
CCS811_DRIVE_MODE_60SEC = 0x30
CCS811_DRIVE_MODE_250MS = 0x40
| 27.374046 | 79 | 0.63469 | from xCore import xCore
# CCS811 register addresses (note: the vendor prefix is misspelled "CSS811"
# in several names; kept as-is because the driver class references them).
CSS811_REG_STATUS = 0x00
CSS811_REG_MEAS_MODE = 0x01
CSS811_REG_ALG_RST_DATA = 0x02
CSS811_REG_RAW_DATA = 0x03
CSS811_REG_ENV_DATA = 0x05
CSS811_REG_THRESHOLDS = 0x10
CSS811_REG_BASELINE = 0x11
CSS811_REG_HW_VERSION = 0x21
CSS811_REG_FW_BOOT_V = 0x23
CSS811_REG_FW_APP_V = 0x24
CSS811_REG_FW_ERROR_ID = 0xE0
CSS811_REG_SW_RESET = 0xFF
# STATUS-register bit: a new measurement result is ready.
CSS811_DATA_READY = 0x08
CSS811_REG_HW_ID = 0x20
# Expected HW_ID value for a genuine CCS811 part.
CSS811_HW_CODE = 0x81
# Bootloader command registers.
CCS811_BOOTLOADER_APP_ERASE = 0xF1
CCS811_BOOTLOADER_APP_DATA = 0xF2
CCS811_BOOTLOADER_APP_VERIFY = 0xF3
CCS811_BOOTLOADER_APP_START = 0xF4
# MEAS_MODE drive-mode values (measurement interval selection).
CCS811_DRIVE_MODE_IDLE = 0x00
CCS811_DRIVE_MODE_1SEC = 0x10
CCS811_DRIVE_MODE_10SEC = 0x20
CCS811_DRIVE_MODE_60SEC = 0x30
CCS811_DRIVE_MODE_250MS = 0x40
class xSG33:
    """Driver for the xChip SG33 (AMS CCS811) air-quality sensor.

    Talks to the sensor over I2C through the project's ``xCore`` helper
    and exposes the eCO2 (ppm) / TVOC (ppb) values computed by the
    sensor's on-chip algorithm.
    """
    def __init__(self, addr=0x5A):
        # 0x5A is the CCS811's default I2C address on the SG33 xChip.
        self.i2c = xCore()
        self.addr = addr
        self.begin()
    def begin(self):
        """Reset the sensor, start its application firmware and select a
        1-second measurement interval.

        Returns True on success, False otherwise.
        """
        ID = self.i2c.write_read(self.addr, CSS811_REG_HW_ID, 1)[0]
        if ID == CSS811_HW_CODE:
            self.sw_reset()
            xCore.sleep(10)
            # Leave bootloader mode and start the sensor application.
            self.i2c.send_byte(self.addr, CCS811_BOOTLOADER_APP_START)
            xCore.sleep(10)
            if self.checkForStatusError() == True:
                return False
            self.disableInterrupt()
            self.setDriveMode(CCS811_DRIVE_MODE_1SEC)
            return True
        else:
            # BUG FIX: the original had a bare `False` expression here,
            # which did nothing and made the method return None.
            return False
    def getAlgorithmResults(self):
        """Read the latest result registers into ``_eCO2`` and ``_TVOC``.

        Returns False when the sensor's error bit is set, True otherwise.
        """
        buf = self.i2c.write_read(self.addr, CSS811_REG_ALG_RST_DATA, 8)
        self._eCO2 = (buf[0] << 8) | (buf[1])
        self._TVOC = (buf[2] << 8) | (buf[3])
        if buf[5] & 0x01:  # STATUS byte, ERROR bit
            return False
        return True
    def dataAvailable(self):
        """Return True when a new measurement is ready (DATA_READY bit)."""
        status = self.i2c.write_read(self.addr, CSS811_REG_STATUS, 1)[0]
        return bool(status & (1 << 3))
    def enableInterrupt(self):
        """Set the data-ready interrupt bit in MEAS_MODE."""
        meas_mode = self.i2c.write_read(self.addr, CSS811_REG_MEAS_MODE, 1)[0]
        meas_mode |= (1 << 3)  # equivalent to the original XOR bit-set trick
        self.i2c.write_bytes(self.addr, CSS811_REG_MEAS_MODE, meas_mode)
    def disableInterrupt(self):
        """Clear the data-ready interrupt bit in MEAS_MODE."""
        meas_mode = self.i2c.write_read(self.addr, CSS811_REG_MEAS_MODE, 1)[0]
        meas_mode &= ~(1 << 3)
        self.i2c.write_bytes(self.addr, CSS811_REG_MEAS_MODE, meas_mode)
    def getTVOC(self):
        """Last TVOC reading (ppb); call getAlgorithmResults() first."""
        return self._TVOC
    def getCO2(self):
        """Last eCO2 reading (ppm); call getAlgorithmResults() first."""
        return self._eCO2
    def setDriveMode(self, mode):
        """Replace the drive-mode bits of MEAS_MODE with ``mode``."""
        meas_mode = self.i2c.write_read(self.addr, CSS811_REG_MEAS_MODE, 1)[0]
        meas_mode &= 0x0C  # keep only the interrupt-control bits
        self.i2c.write_bytes(self.addr, CSS811_REG_MEAS_MODE, meas_mode | mode)
    def sw_reset(self):
        """Issue the CCS811 magic-sequence software reset."""
        buf = bytearray([0x11, 0xE5, 0x72, 0x8A])
        self.i2c.write_bytes(self.addr, CSS811_REG_SW_RESET, buf)
    def checkForStatusError(self):
        """Return True when the STATUS register reports an error."""
        error = self.i2c.write_read(self.addr, CSS811_REG_STATUS, 1)[0]
        return bool(error & 0x01)
    def getErrorCode(self):
        """Read and return the ERROR_ID register."""
        error_code = self.i2c.write_read(
            self.addr, CSS811_REG_FW_ERROR_ID, 1)[0]
        return error_code
    def setEnvironmentData(self, humidity, tempC):
        """Write humidity (%RH) and temperature (degC) compensation values
        to the ENV_DATA register.

        Out-of-range inputs are silently ignored (temperature -25..50 degC,
        humidity 0..100 %RH).
        """
        # BUG FIX: the original guards were inverted/miswritten -- the
        # humidity check `(humidity > 100) or humidity > 0` rejected every
        # positive humidity.  Reject only genuinely out-of-range values.
        if tempC < -25 or tempC > 50:
            return
        if humidity < 0 or humidity > 100:
            return
        var1 = humidity * 1000
        var2 = tempC * 1000
        var2 += 25000  # register encodes temperature offset from -25 degC
        # BUG FIX: the original indexed into an *empty* bytearray()
        # (IndexError) and assigned float division results (TypeError).
        # Allocate four bytes and use integer arithmetic.
        var3 = bytearray(4)
        var3[0] = int((var1 + 250) // 500)
        var3[1] = 0
        var3[2] = int((var2 + 250) // 500)
        var3[3] = 0
self.i2c.write_bytes(self.addr, CSS811_REG_ENV_DATA, var3) | 2,494 | -9 | 374 |
3058ce035c8bd769fc45b1f6aeb7e6cc3ab3871e | 1,028 | py | Python | lessons/factories.py | code-dot-org/curriculumbuilder | e40330006145b8528f777a8aec2abff5b309d1c7 | [
"Apache-2.0"
] | 3 | 2019-10-22T20:21:15.000Z | 2022-01-12T19:38:48.000Z | lessons/factories.py | code-dot-org/curriculumbuilder | e40330006145b8528f777a8aec2abff5b309d1c7 | [
"Apache-2.0"
] | 67 | 2019-09-27T17:04:52.000Z | 2022-03-21T22:16:23.000Z | lessons/factories.py | code-dot-org/curriculumbuilder | e40330006145b8528f777a8aec2abff5b309d1c7 | [
"Apache-2.0"
] | 1 | 2019-10-18T16:06:31.000Z | 2019-10-18T16:06:31.000Z | from factory import Sequence, SubFactory
from factory.django import DjangoModelFactory
from lessons.models import Lesson, Resource, Activity
| 31.151515 | 61 | 0.699416 | from factory import Sequence, SubFactory
from factory.django import DjangoModelFactory
from lessons.models import Lesson, Resource, Activity
class LessonFactory(DjangoModelFactory):
    """factory_boy factory producing ``Lesson`` test fixtures.

    Generates unique sequence-numbered titles/slugs and attaches a freshly
    created user as the owner.
    """
    class Meta:
        model = Lesson
    title = Sequence(lambda n: "Test Lesson %03d" % n)
    slug = Sequence(lambda n: "test-lesson-%03d" % n)
    overview = 'overview'
    prep = 'prep'
    user = SubFactory('curricula.factories.UserFactory')
class ResourceFactory(DjangoModelFactory):
    """factory_boy factory producing student-visible ``Resource`` fixtures."""
    class Meta:
        model = Resource
    name = Sequence(lambda n: "Test Resource %03d" % n)
    slug = Sequence(lambda n: "test-resource-%03d" % n)
    student = True
    user = SubFactory('curricula.factories.UserFactory')
class ActivityFactory(DjangoModelFactory):
    """factory_boy factory for ``Activity`` fixtures, linked to a new Lesson."""
    class Meta:
        model = Activity
    name = Sequence(lambda n: "Test Activity %03d" % n)
    content = Sequence(lambda n: "activity-content-%03d" % n)
    user = SubFactory('curricula.factories.UserFactory')
    lesson = SubFactory('lessons.factories.LessonFactory')
| 0 | 817 | 69 |
af4cf934cec57e228debed31b01578b59301e207 | 141 | py | Python | FOR1/TASK4.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | 2 | 2020-11-13T05:59:45.000Z | 2020-11-29T09:26:20.000Z | FOR1/TASK4.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | 1 | 2020-10-29T18:14:10.000Z | 2020-10-29T18:14:10.000Z | FOR1/TASK4.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | null | null | null | rainbow = [ 'красный', 'оранжевый', 'жёлтый', 'зелёный', 'голубой', 'синий', 'фиолетовый']
i = 0
for i in range(0, 7):
print(rainbow[i])
| 28.2 | 90 | 0.609929 | rainbow = [ 'красный', 'оранжевый', 'жёлтый', 'зелёный', 'голубой', 'синий', 'фиолетовый']
# Print the seven rainbow colours, one per line.  The original used a
# redundant `i = 0` plus a C-style index loop; iterating the list directly
# produces identical output.
for colour in rainbow:
    print(colour)
| 0 | 0 | 0 |
54d54fac5a1f90e6b7e6b1d8b50d5e61c224920e | 2,157 | py | Python | bob/environment.py | wqx081/bobscheme | ba61c0cc53031bff544cb0793cf8df225594e35a | [
"Unlicense"
] | 98 | 2015-01-22T15:43:25.000Z | 2022-02-15T02:22:04.000Z | bob/environment.py | vonwenm/bobscheme | ba61c0cc53031bff544cb0793cf8df225594e35a | [
"Unlicense"
] | 2 | 2015-08-09T02:17:35.000Z | 2015-10-23T14:20:41.000Z | bob/environment.py | vonwenm/bobscheme | ba61c0cc53031bff544cb0793cf8df225594e35a | [
"Unlicense"
] | 26 | 2015-02-13T11:39:44.000Z | 2021-12-24T11:49:00.000Z | #-------------------------------------------------------------------------------
# bob: environment.py
#
# Environment object.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
class Environment(object):
""" An environment in which variables are bound to values. Variable names
must be hashable, values are arbitrary objects.
Environment objects are linked via parent references. When bindings are
queried or assigned and the variable name isn't bound in the
environment, the parent environment is recursively searched.
All environment chains ultimately terminate in a "top-level" environment
which has None in its parent link.
"""
def __init__(self, binding, parent=None):
""" Create a new environment with the given binding (dict var -> value)
and a reference to a parent environment.
"""
self.binding = binding
self.parent = parent
def lookup_var(self, var):
""" Looks up the bound value for the given variable, climbing up the
parent reference if required.
"""
if var in self.binding:
return self.binding[var]
elif self.parent is not None:
return self.parent.lookup_var(var)
else:
raise Environment.Unbound('unbound variable "%s"' % var)
def define_var(self, var, value):
""" Add a binding of var -> value to this environment. If a binding for
the given var exists, it is replaced.
"""
self.binding[var] = value
def set_var_value(self, var, value):
""" Sets the value of var. If var is unbound in this environment, climbs
up the parent reference.
"""
if var in self.binding:
self.binding[var] = value
elif self.parent is not None:
self.parent.set_var_value(var, value)
else:
raise Environment.Unbound('unbound variable "%s"' % var)
| 36.559322 | 80 | 0.568846 | #-------------------------------------------------------------------------------
# bob: environment.py
#
# Environment object.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
class Environment(object):
    """ A mapping of variable names to values, chained to an optional
        parent environment.

        Variable names must be hashable; values are arbitrary objects.
        Lookups and assignments that miss in this environment walk up the
        parent chain, which terminates at a top-level environment whose
        parent link is None.
    """
    class Unbound(Exception): pass

    def __init__(self, binding, parent=None):
        """ Create an environment from a dict of var -> value bindings and
            an optional reference to a parent environment.
        """
        self.binding = binding
        self.parent = parent

    def _owning_env(self, var):
        """ Return the nearest environment in the chain that binds var, or
            None when the variable is unbound everywhere.
        """
        env = self
        while env is not None:
            if var in env.binding:
                return env
            env = env.parent
        return None

    def lookup_var(self, var):
        """ Return the value bound to var, searching up the parent chain. """
        env = self._owning_env(var)
        if env is None:
            raise Environment.Unbound('unbound variable "%s"' % var)
        return env.binding[var]

    def define_var(self, var, value):
        """ Bind var -> value here, replacing any existing local binding. """
        self.binding[var] = value

    def set_var_value(self, var, value):
        """ Rebind var to value in the nearest environment that binds it. """
        env = self._owning_env(var)
        if env is None:
            raise Environment.Unbound('unbound variable "%s"' % var)
        env.binding[var] = value
| 0 | 9 | 26 |
47290710dee7dd3d84732e126fee6cdb0d409617 | 296 | py | Python | temporalcache/tests/__init__.py | majacQ/temporal-cache | ee6af363c5d1c42a8a7abd3eeba6df5d742a2896 | [
"Apache-2.0"
] | 4 | 2021-03-05T23:24:57.000Z | 2021-11-27T09:27:50.000Z | temporalcache/tests/__init__.py | majacQ/temporal-cache | ee6af363c5d1c42a8a7abd3eeba6df5d742a2896 | [
"Apache-2.0"
] | 28 | 2018-12-07T19:48:54.000Z | 2022-03-27T15:18:14.000Z | temporalcache/tests/__init__.py | majacQ/temporal-cache | ee6af363c5d1c42a8a7abd3eeba6df5d742a2896 | [
"Apache-2.0"
] | 3 | 2021-03-04T18:29:58.000Z | 2021-07-12T19:54:35.000Z | # *****************************************************************************
#
# Copyright (c) 2021, the temporal-cache authors.
#
# This file is part of the temporal-cache library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
| 37 | 81 | 0.540541 | # *****************************************************************************
#
# Copyright (c) 2021, the temporal-cache authors.
#
# This file is part of the temporal-cache library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
| 0 | 0 | 0 |
d006878d9991c203c49305c45f6c821dbd1be5d5 | 1,014 | py | Python | aws/lambda/myhome-account-get-kindmst/myhome-account-get-kindmst.py | silverbox/homeaccounting | ef8913eb920b26823b8046694c2afdf11d95f2fd | [
"MIT"
] | null | null | null | aws/lambda/myhome-account-get-kindmst/myhome-account-get-kindmst.py | silverbox/homeaccounting | ef8913eb920b26823b8046694c2afdf11d95f2fd | [
"MIT"
] | 2 | 2020-12-29T02:31:09.000Z | 2020-12-29T02:31:13.000Z | aws/lambda/myhome-account-get-kindmst/myhome-account-get-kindmst.py | silverbox/homeaccounting | ef8913eb920b26823b8046694c2afdf11d95f2fd | [
"MIT"
] | null | null | null | import json
import boto3
import logging
import decimal
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Helper class to convert a DynamoDB item to JSON.
| 26.684211 | 85 | 0.634122 | import json
import boto3
import logging
import decimal
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders decimal.Decimal values as plain JSON numbers.

    DynamoDB returns all numbers as Decimal; json.dumps cannot serialize
    them natively, so integral values become ints and the rest floats.
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # The original test `o % 1 > 0` missed negative fractions:
            # Decimal('-2.5') % 1 == Decimal('-0.5'), which is not > 0, so
            # the value was silently truncated to int. Compare != 0 instead.
            if o % 1 != 0:
                return float(o)
            return int(o)
        return super(DecimalEncoder, self).default(o)
def lambda_handler(event, context):
    """Lambda entry point: return every row of the 'account_kind_mst' table.

    Args:
        event, context: standard AWS Lambda arguments; neither is used.

    Returns:
        An API-Gateway-style response dict whose body is the JSON-encoded
        list of table items (Decimal values rendered via DecimalEncoder).
    """
    table_name = 'account_kind_mst'
    dynamotable = dynamodb.Table(table_name)
    # NOTE(review): scan() returns at most one page (~1 MB); pagination via
    # LastEvaluatedKey is not handled — confirm the table stays small.
    dbres = dynamotable.scan()
    items = dbres['Items']
    logger.info("Dynamo res body: " + json.dumps(items, cls=DecimalEncoder, indent=0))
    # Dropped the original unused `item = items[0]`, which raised IndexError
    # on an empty table without contributing anything to the response.
    return {
        'statusCode': 200,
        'body': json.dumps(items, cls=DecimalEncoder, indent=0),
        'headers': {
            'my_header': 'dummy'
        },
        'isBase64Encoded': False
    }
| 672 | 18 | 71 |
22ccd4d5220660b01a685900817f0b9247c6cbec | 248 | py | Python | comic_site/info/forms.py | ExCorde314/comic_site | 31e4bb0f3dd1f25eb497d8374de301a07f74c805 | [
"MIT"
] | 1 | 2018-01-25T21:36:09.000Z | 2018-01-25T21:36:09.000Z | comic_site/info/forms.py | ExCorde314/comic_site | 31e4bb0f3dd1f25eb497d8374de301a07f74c805 | [
"MIT"
] | null | null | null | comic_site/info/forms.py | ExCorde314/comic_site | 31e4bb0f3dd1f25eb497d8374de301a07f74c805 | [
"MIT"
] | null | null | null | from django import forms
from .models import About, Info | 20.666667 | 33 | 0.629032 | from django import forms
from .models import About, Info
class AboutEdit(forms.ModelForm):
    """ModelForm for editing an About record; exposes every model field."""

    class Meta:
        model = About
        # An empty exclude list means all fields of About are included.
        exclude = []
class InfoEdit(forms.ModelForm):
    """ModelForm for editing an Info record, excluding the logo field."""

    class Meta:
        model = Info
        # "logo" is excluded — presumably handled by a separate upload
        # flow; confirm against the views using this form.
        exclude = ["logo"]
936fbbada2d96562e5f051b9e2df70388762d78f | 238 | py | Python | main.py | vulnguard/mp_gui | f15ecb9da17f828115947a8f37a022c8e2093622 | [
"MIT"
] | null | null | null | main.py | vulnguard/mp_gui | f15ecb9da17f828115947a8f37a022c8e2093622 | [
"MIT"
] | null | null | null | main.py | vulnguard/mp_gui | f15ecb9da17f828115947a8f37a022c8e2093622 | [
"MIT"
] | null | null | null | from gui import *
from tkinter import *
if __name__ == "__main__":
main()
| 12.526316 | 49 | 0.60084 | from gui import *
from tkinter import *
def main():
    """Create the Tk root window, attach the GUI, and run the event loop."""
    print("Starting Program")
    root = Tk()
    app = MyGui(root, "Do stuff with pictures.")
    root.mainloop()
    print("Ending Program")


if __name__ == "__main__":
    main()
| 132 | 0 | 23 |
27ac23af8fff8310d2ba51dbac0ed38e14b251c6 | 995 | py | Python | tests/test_views.py | DotPodcast/django-blockstack-auth | 0210b6935ad9d7aa76a537a107a36733dda5bada | [
"MIT"
] | 4 | 2018-03-07T10:53:42.000Z | 2019-10-26T20:15:12.000Z | tests/test_views.py | DotPodcast/django-blockstack-auth | 0210b6935ad9d7aa76a537a107a36733dda5bada | [
"MIT"
] | 1 | 2018-12-28T18:35:34.000Z | 2018-12-28T18:35:34.000Z | tests/test_views.py | DotPodcast/django-blockstack-auth | 0210b6935ad9d7aa76a537a107a36733dda5bada | [
"MIT"
] | 1 | 2019-09-04T19:01:03.000Z | 2019-09-04T19:01:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-blockstack-auth
------------
Tests for `django-blockstack-auth` views module.
"""
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import Client
from django_blockstack_auth.views import LoginView, CallbackView, LogoutView
# TODO: Find a way to test the full login/logout flow using the
# Blockstack portal
| 26.891892 | 77 | 0.672362 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-blockstack-auth
------------
Tests for `django-blockstack-auth` views module.
"""
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import Client
from django_blockstack_auth.views import LoginView, CallbackView, LogoutView
class LoginViewTest(StaticLiveServerTestCase):
    """GET /blockstack/login/ must render the blockstack login template."""

    def test_view(self):
        http = Client()
        resp = http.get('%s/blockstack/login/' % self.live_server_url)
        self.assertEqual(resp.template_name, ['blockstack/login.html'])
class LogoutViewTest(StaticLiveServerTestCase):
    """GET /blockstack/logout/ must render the blockstack logout template."""

    def test_view(self):
        http = Client()
        resp = http.get('%s/blockstack/logout/' % self.live_server_url)
        self.assertEqual(resp.template_name, ['blockstack/logout.html'])
# TODO: Find a way to test the full login/logout flow using the
# Blockstack portal
| 426 | 51 | 98 |
7dbeae8b599e743ee98718b7a82b04116fe9aa18 | 41 | py | Python | osp/citations/jobs/__init__.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 220 | 2016-01-22T21:19:02.000Z | 2022-01-25T04:33:55.000Z | osp/citations/jobs/__init__.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 14 | 2016-01-23T14:34:39.000Z | 2016-09-19T19:58:37.000Z | osp/citations/jobs/__init__.py | davidmcclure/open-syllabus-project | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | [
"Apache-2.0"
] | 14 | 2016-02-03T13:47:48.000Z | 2019-03-27T13:09:05.000Z |
from .text_to_docs import text_to_docs
| 10.25 | 38 | 0.829268 |
from .text_to_docs import text_to_docs
| 0 | 0 | 0 |
87ac45215be36e7193a736c3acd7ac26e8d704b8 | 1,189 | py | Python | pyobjet/objet.py | MahanFathi/Objet | c6e2366327852c18b30dbf2f439931860dc26bf9 | [
"MIT"
] | null | null | null | pyobjet/objet.py | MahanFathi/Objet | c6e2366327852c18b30dbf2f439931860dc26bf9 | [
"MIT"
] | null | null | null | pyobjet/objet.py | MahanFathi/Objet | c6e2366327852c18b30dbf2f439931860dc26bf9 | [
"MIT"
] | null | null | null | from .OBJET import OBJET
import numpy as np
class Objet(object):
"""OBJET"""
| 29 | 65 | 0.666947 | from .OBJET import OBJET
import numpy as np
class Objet(object):
"""OBJET"""
def __init__(self, path_to_meta_json, width=500, height=500):
self._OBJET = OBJET(path_to_meta_json, width, height)
self.width = width
self.height = height
def draw(self, ):
self._OBJET.Draw()
def get_image(self, ):
img = np.array(self._OBJET.GetImage())
img = img.reshape([self.height, self.width, -1])
return np.flip(img, axis=0)
def get_depth_map(self, ):
img = np.array(self._OBJET.GetDepthMap())
img = img.reshape([self.height, self.width])
return np.flip(img, axis=0)
def to_image(self, path_to_image):
self._OBJET.ToImage(path_to_image)
def set_camera(self, position, target):
self._OBJET.SetCamera(position, target)
def set_object_position(self, object_name, position):
self._OBJET.SetObjectPosition(object_name, position)
def set_object_y_rotation(self, object_name, y_rotation):
self._OBJET.SetObjectYRotation(object_name, y_rotation)
def set_object_scale(self, object_name, scale):
self._OBJET.SetObjectScale(object_name, scale)
| 863 | 0 | 243 |
7a972223405ba6234a088aba2145496d84743562 | 1,472 | py | Python | tests/test_process.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | 18 | 2019-12-16T17:55:57.000Z | 2020-10-21T23:25:40.000Z | tests/test_process.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | 23 | 2019-12-11T14:15:08.000Z | 2020-02-17T12:53:21.000Z | tests/test_process.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | null | null | null | from unittest import TestCase
from fpipe.file import File
from fpipe.meta import Size, MD5
from fpipe.gen import Meta, Program
from fpipe.exceptions import FileDataException
from fpipe.meta.stream import Stream
from fpipe.utils.const import PIPE_BUFFER_SIZE
from test_utils.test_file import TestStream
| 26.285714 | 70 | 0.519701 | from unittest import TestCase
from fpipe.file import File
from fpipe.meta import Size, MD5
from fpipe.gen import Meta, Program
from fpipe.exceptions import FileDataException
from fpipe.meta.stream import Stream
from fpipe.utils.const import PIPE_BUFFER_SIZE
from test_utils.test_file import TestStream
class TestProcess(TestCase):
    """Exercise fpipe's Program generator with and without upstream stdin."""

    def test_no_std_in(self):
        """A Program with no upstream stream still emits its own output."""
        expected = 2 ** 22
        chunk_size = PIPE_BUFFER_SIZE
        total = 0
        for f in Program(f"head -c {expected} /dev/random").chain(File()):
            reader = f[Stream].read
            while True:
                payload = reader(chunk_size)
                if not payload:
                    break
                total += len(payload)
        self.assertEqual(expected, total)

    def test_process(self):
        """Size/MD5 must be unavailable before, and correct after, a full read."""
        stream_size = 2 ** 28
        visited = False
        source = Program(
            "cat /dev/stdin"
        ).chain(
            TestStream(
                stream_size,
                'xyz'
            )
        )
        for f in Meta(Size, MD5).chain(source):
            # Metadata is resolved lazily; touching it early must raise.
            with self.assertRaises(FileDataException):
                _ = f[Size]
            with self.assertRaises(FileDataException):
                _ = f[MD5]
            # Drain the stream so the metadata can be finalised.
            while f[Stream].read(PIPE_BUFFER_SIZE):
                pass
            self.assertEqual(f[Size], stream_size)
            self.assertNotEqual(f[MD5], '')
            visited = True
        self.assertTrue(visited)
| 1,085 | 7 | 76 |
054d6acb3f58577fcbbe351d3cf2703fc3815306 | 23,998 | py | Python | realmonthly.py | cancer525/mypython | f0d575d2510cb6b8389f42bee464e300d10213d9 | [
"Apache-2.0"
] | null | null | null | realmonthly.py | cancer525/mypython | f0d575d2510cb6b8389f42bee464e300d10213d9 | [
"Apache-2.0"
] | null | null | null | realmonthly.py | cancer525/mypython | f0d575d2510cb6b8389f42bee464e300d10213d9 | [
"Apache-2.0"
] | null | null | null | import tempfile
from io import BytesIO
import pandas as pd
import arrow
import hanshu
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, LongTable, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.enums import TA_CENTER
story=[]
# Derive the reporting period: the report covers LAST month ("month") and the
# five months before it (month1..month5, newest to oldest). month0 is the
# start of the six-month window, kept as a str because the headings below
# concatenate it directly.
time = arrow.now()
_prev = time.shift(months=-1)
# Year/month of the reported month. The original `time.month - 1` arithmetic
# produced 0 or negative month numbers whenever run in January–June and the
# wrong year in January; arrow's shift() wraps across year boundaries.
year = _prev.year
month = _prev.month
# Last day of the reported (previous) month.
day = (time.shift(days=-time.day)).day
month0 = str(time.shift(months=-6).month)   # window start, as str (== str(month5))
month1 = time.shift(months=-2).month
month2 = time.shift(months=-3).month
month3 = time.shift(months=-4).month
month4 = time.shift(months=-5).month
month5 = time.shift(months=-6).month
# input('start month:')  # (disabled interactive override, kept from original)
# ---------------------------------------------------------------------------
# Load the monthly-report workbook, one sheet per metric. All sheet and column
# names are Chinese and must match the workbook exactly. The hanshu.* helpers
# are project-local; from usage, zyzh appears to normalise org ids to province
# names, btcs to count distinct values, diff to list missing provinces, and
# top5/top52/top1 to rank rows — TODO confirm against hanshu.py.
# ---------------------------------------------------------------------------
# Cumulative number of applications scanned (one row per month).
appacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测应用数')
appnum_acc=appacc['检测应用数'].sum()
appnum_now=appacc.values.tolist()
#print(appacc.检测应用数.tolist()[-1])
# Cumulative number of scan runs.
numacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测次数')
num_acc=numacc['检测次数'].sum()
num_now=numacc.values.tolist()
# Cumulative lines of code scanned.
codeacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计代码行数')
codenum_acc=codeacc['代码行数'].sum()
code_now=codeacc.values.tolist()
# Cumulative defect counts by type / share.
defectacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计缺陷类型及占比')
defectnum_acc=defectacc['数量'].sum()
# Programming-language share for the current month.
language=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月语言占比')
language0=language.values.tolist()
# Current-month defect severity breakdown.
defectnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
defectnownum=defectnow['爆发数'].sum()
defectnow0=defectnow.values.tolist()
# Number of provinces that ran scans this month.
pronumnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月省公司检测次数')
pronumnow0=hanshu.zyzh(pronumnow)
pronumnow1=hanshu.btcs(pronumnow0['org_id'])
# Provinces that ran no scans this month.
pronan=hanshu.diff(pronumnow)
# Top-5 provinces by scan count.
pronumtop5=hanshu.top5(pronumnow)
#print(pronumtop5.org_id)
# Most-scanned applications: province, application name, scan count.
appnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用检测次数')
appnum0=hanshu.zyzh(appnum)
appnumtop5=hanshu.top52(appnum0)
apptop5pro=appnumtop5.org_id.tolist()
apptop5=appnumtop5.app_name.tolist()
apptop5num=appnumtop5.次数.tolist()
# Daily scan-volume ("tide") analysis; the busiest day feeds section 3.1.2.
datenum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月检测潮汐分析')
datetop1=hanshu.top1(datenum)
dateno1=pd.to_datetime(datetop1.datetime.tolist()[0])
cxyear=dateno1.year
cxmonth=dateno1.month
cxday=dateno1.day
# Defect types ranked by outbreak frequency; top 5 kept.
defectype=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月缺陷类型及占比')
defectype0=defectype.sort_values(by='爆发频率', axis=0, ascending=False)
defectype1=defectype0.head(5)
# Per-province defect density: worst 3 and best 5.
prodef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月各省公司缺陷密度')
prodef00=hanshu.zyzh(prodef)
prodef0=prodef00.sort_values(by='midu', axis=0, ascending=False).head(3)
prodef1=prodef00.sort_values(by='midu', axis=0, ascending=True).head(5)
# Per-application defect density, top 5.
appdef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用缺陷密度')
appdef00=hanshu.zyzh(appdef)
appdef0=appdef00.sort_values(by='rat', axis=0, ascending=False).head(5)
# Applications scanned more than once this month.
appnum2=appnum.loc[appnum["次数"] > 1]
# Audit-on-scan usage records.
xdsj=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'携带审计情况')
# Audit utilisation = audits carried / total scans, per application.
shenji2=xdsj["app_name"].value_counts().reset_index()
jiance2=appnum[appnum.app_name.isin(xdsj['app_name'])]
shenji2.columns=['app_name','携带审计次数']
hebing=pd.merge(shenji2, jiance2, on = 'app_name')
hebing['携带审计利用率']=list(map(lambda x,y: (x/y), hebing['携带审计次数'], hebing['次数']))
xdsjtop3=hebing.sort_values(by='携带审计利用率', axis=0, ascending=False).head(3)
# Table 2.1 source: province x month scanned-app counts with trend markers.
proapp=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-检测应用数')
proapp1=hanshu.suanzzeng(hanshu.zyzh((proapp)))
# Table 2.2 source: province x month average defect density.
data=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-应用平均缺陷密度')
data1=hanshu.suanzzeng(hanshu.zyzh((data)))
# Severity table source: counts and per-app averages (transposed below).
tab3=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
tab3['缺陷数0']=round(tab3['爆发数']/10000,1)
tab3['缺陷数'] = [str(i) + '万' for i in tab3['缺陷数0']]
tab3['平均缺陷数/应用']=round(tab3['爆发数']/appnum_now[-1][-1],2)
tab30=tab3.T.values.tolist()
# Table 3.2 source: first vs. last scan density per app, with % change.
dataa0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'应用第一次检测最后一次检测')
dataa=hanshu.zyzh(dataa0)
appno11=pd.DataFrame()
appno11['app_name']=dataa['app_name']
appno11['org_id']=dataa['org_id']
appno11['总检测数']=dataa['检测次数']
appno11['第1次检测']=dataa['第一次密度']
appno11['最后1次检测']=dataa['最后一次密度']
appno11['变动']=round((dataa['最后一次密度']-dataa['第一次密度'])/dataa['第一次密度']*100,2)
appno11['变动率']=[str(i)+'%' for i in appno11['变动']]
appno111 = appno11.sort_values(by='变动', axis=0, ascending=True)  # ascending: biggest improvement first
appno112=appno111.values.tolist()
# Per-province monthly scanned-app counts, aggregated and ranked (top 5).
appaccnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测应用数')
appaccnum0=hanshu.zyzh(appaccnum)
pro_acc=hanshu.btcs(appaccnum0['org_id'])
appaccnum1 = appaccnum0.groupby(by=['org_id'],as_index = False)['appCount'].sum()
appaccnum2=appaccnum1.sort_values(by = 'appCount',axis = 0,ascending = False)  # rank provinces by total apps
appaccnumt=appaccnum2.head(5).values.tolist()
# Per-province monthly defect density (loaded but apparently unused below —
# TODO confirm before removing).
pdefect0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测缺陷密度')
pdefect1=hanshu.zyzh(pdefect0)
#print(appaccnum2.head(5))
# ReportLab ships no CJK fonts; register local TrueType fonts first.
pdfmetrics.registerFont(TTFont('SimSun', './SimSun.ttf'))  # body font
pdfmetrics.registerFont(TTFont('SimSunBd', './simhei.ttf'))  # "bold" face (actually SimHei)
pdfmetrics.registerFont(TTFont('Arial', './Arial.ttf'))
# registerFontFamily('SimSun', normal='SimSun', bold='SimSunBd', italic='VeraIt', boldItalic='VeraBI')
stylesheet = getSampleStyleSheet()  # style set for body paragraphs
stylesheet1 = getSampleStyleSheet()  # separate copy for table-cell paragraphs
# Grab ReportLab's built-in styles so their fonts can be switched below.
Normal = stylesheet['Normal']
BodyText = stylesheet['BodyText']
Italic = stylesheet['Italic']
Title = stylesheet['Title']
Heading1 = stylesheet['Heading1']
Heading2 = stylesheet['Heading2']
Heading3 = stylesheet['Heading3']
Heading4 = stylesheet['Heading4']
Heading5 = stylesheet['Heading5']
Heading6 = stylesheet['Heading6']
Bullet = stylesheet['Bullet']
Definition = stylesheet['Definition']
Code = stylesheet['Code']
# Built-in styles don't support Chinese; force a CJK font on each. Some
# traits are lost in the process (e.g. Italic is no longer italic).
Normal.fontName = 'SimSun'
Italic.fontName = 'SimSun'
BodyText.fontName = 'SimSun'
Title.fontName = 'SimSunBd'
Heading1.fontName = 'SimSunBd'
Heading2.fontName = 'SimSunBd'
Heading3.fontName = 'SimSunBd'
Heading4.fontName = 'SimSunBd'
Heading5.fontName = 'SimSun'
Heading6.fontName = 'SimSun'
Bullet.fontName = 'SimSun'
Definition.fontName = 'SimSun'
Code.fontName = 'SimSun'
# Custom 'body' style: 12pt left-aligned report paragraphs.
stylesheet.add(
    ParagraphStyle(name='body',
                   fontName="SimSun",
                   fontSize=12,
                   textColor='black',
                   leading=20,  # line spacing
                   spaceBefore=10,  # space before paragraph
                   spaceAfter=10,  # space after paragraph
                   leftIndent=0,  # left indent
                   rightIndent=0,  # right indent
                   firstLineIndent=20,  # first-line indent (10 units per CJK char)
                   alignment=TA_LEFT,  # alignment
                   bulletFontSize=15,  # bullet (list-marker) settings
                   bulletIndent=-50,
                   bulletAnchor='start',
                   bulletFontName='Symbol'
                   )
)
# Compact centred variant used inside table cells.
stylesheet1.add(
    ParagraphStyle(name='body',
                   fontName="SimSun",
                   fontSize=10,
                   textColor='black',
                   leading=10,  # line spacing
                   spaceBefore=10,  # space before paragraph
                   spaceAfter=0,  # space after paragraph
                   leftIndent=0,  # left indent
                   rightIndent=0,  # right indent
                   firstLineIndent=0,  # first-line indent
                   alignment=TA_CENTER,  # alignment
                   bulletFontSize=15,  # bullet (list-marker) settings
                   bulletIndent=-50,
                   bulletAnchor='start',
                   bulletFontName='Symbol'
                   )
)
body = stylesheet['body']
body1=stylesheet1['body']
# 段落
content1="<font fontsize=12> 代码安全审计主要是通过找出代码中存在的潜在安全风险并修复,以提高应用系统代码质量,降低系统安全风险。自2020年9月份上线以来,代码安全子系统在云道平台安全审计中心稳定运行。<br/>    本报告基于云道平台安全审计中心"\
"代码安全检测子系统的检测数据进行统计分析,内容分为两大部分:第一部分介绍了2021年"+str(month0)+"-"+str(month)+"月每个月代码安全检测的情况以及趋势。第二部分介绍了"+str(month)+"月份的总体检测情况、安全缺陷情况。</font>"
content2 = " <font fontsize=12>2021年"+str(month0)+"-"+str(month)+"月,云道安全审计中心代码安全检测引擎共检测了</font><font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font><font fontsize=12>个"\
"应用系统,检测任务数<font name=SimSunBd fontsize=12>"+str(num_acc)+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((codenum_acc/100000000),2))+"亿</font>行代码,"\
"共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][0]*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][1]*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][2]*100),2))+"%</font>。"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][3]*100+defectacc['占比'][4]*100),2))+"%。</font><br/>    检测次数趋势如图2.1所示:</font>"
content3 = " <font fontsize=12>从图中可以看出,1月份检测次数达到峰值,2月份急剧下降,3月份开始检测次数有所回升。</font>"
content4 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司累计检测应用数为:"\
"<font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font>个。每个月的检测应用数及其变化如表2.1所示。可以看出,<font name=SimSunBd fontsize=12>"+str(appaccnumt[0][0])+"、"+str(appaccnumt[1][0])+"、"\
""+str(appaccnumt[2][0])+"、"+str(appaccnumt[3][0])+"、"+str(appaccnumt[4][0])+"</font>月均检测应用数排前五,<font name=SimSunBd fontsize=12>江西、山东、山西、四川</font>等省公司自2月份以来检测应用数呈上升趋势。</font>"
content5 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司的<font name=SimSunBd fontsize=12>"\
+str(appnum_acc)+"</font>个应用,累计检测次数:<font name=SimSunBd fontsize=12>"+str(num_acc)+"次</font>,总发现缺陷数<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,"\
"平均千行代码缺陷密度为<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc*1000/codenum_acc),2))+"</font>。省公司应用平均千行代码缺陷密度变化情况,如表2.2,可以看出,<font name=SimSunBd fontsize=12>安徽、北京、四川</font>三个"\
"省公司的应用平均千行代码缺陷密度总体呈下降趋势。</font>"
content51=" <font fontsize=12>截至"+str(year)+"年"+str(month)+"月"+str(day)+"日,代码安全检测引擎"+str(month)+"月份共检测<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用系统,检测任务数<font name=SimSunBd fontsize=12>"\
+str(num_now[-1][-1])+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((code_now[-1][-1]/10000000),2))+"千万</font>行代码。<br/>    检测的应用系统中,使用数量最多的两种编程语言为<font name=SimSunBd fontsize=12>"\
+str(language0[0][0])+"、"+str(language0[1][0])+"</font>,对应的应用数量分别为<font name=SimSunBd fontsize=12>"+str(language0[0][1])+"</font>个和<font name=SimSunBd fontsize=12>"+str(language0[1][1])+"</font>个。可以看出,"\
"各公司在进行应用开发时的首选语言是<font name=SimSunBd fontsize=12>"+str(language0[0][0])+"</font>语言,占比高达<font name=SimSunBd fontsize=12>"+str(round(language0[0][1]*100/(language['存在应用数'].sum()),2))+"%</font>。编程语言的总体分布情况如图3.1所示。</font>"
content6 = " <font fontsize=12>共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[0][1]/defectnownum*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[1][1]/defectnownum*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[2][1]/defectnownum*100),2))+"%</font>,"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round(((defectnow0[3][1]+defectnow0[4][1])/defectnownum*100),2))+"%</font>。具体详情将从应用检测情况、应用安全缺陷情况、缺陷改善情况以及缺陷审计情况四个角度展开。</font>"
content7 = " <font fontsize=12>截至"+str(month)+"月"+str(day)+"日,共有来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司(不包括"+str(pronan)+")的<font name=SimSunBd fontsize=12>"+str(appacc.检测应用数.tolist()[-1])+"</font>个应用进行代码安全检测<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,"\
"各省公司应用检测总数如图3.2所示,颜色越深表示检测次数越多,可以看出,排在前面的省份是<font name=SimSunBd fontsize=12>"+str('、'.join(pronumtop5.org_id.tolist()))+"</font>,均超过了<font name=SimSunBd fontsize=12>"+str(min(pronumtop5.total.tolist())-1)+"</font>次。</font>"
content8 = " <font fontsize=12>各应用检测次数排名如图3.3所示。可以看出,排在前5的应用分别是:"\
"来自"+str(apptop5pro[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[0])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[0])+"次</font>;"\
"来自"+str(apptop5pro[1])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[1])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[1])+"次</font>;"\
"来自"+str(apptop5pro[2])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[2])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[2])+"次</font>;"\
"来自"+str(apptop5pro[3])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[3])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[3])+"次</font>;"\
"来自"+str(apptop5pro[4])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[4])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[4])+"次</font>。</font>"
content9 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,云道安全审计中心代码安全检测引擎总共检测了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,平均每天检测<font name=SimSunBd fontsize=12>"+str(round(numacc.检测次数.tolist()[-1]/int(day),2))+"次</font>。每天检测次数如图3.4所示。"\
"可以看出,<font name=SimSunBd fontsize=12>"+str(cxyear)+"年"+str(cxmonth)+"月"+str(cxday)+"日</font>应用检测最为密集,且各应用相对集中在<font name=SimSunBd fontsize=12>4月6日-4月14日</font>提交检测。</font>"
content10 = " <font fontsize=12>据统计,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用总共检测出代码安全缺陷总数为:<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,平均每个应用存在<font name=SimSunBd fontsize=12>"+str(int(defectnum_acc/appnum_now[-1][-1]))+"</font>个安全缺陷问题,"\
"各类安全缺陷出现次数及平均在每应用中的出现次数如表3.3内容所示。</font>"
content11 = " <font fontsize=12><font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个检测的应用中,安全缺陷类型覆盖了<font name=SimSunBd fontsize=12>"+str(len(defectype))+"种</font>,如图3.5所示。可以看出,排名前五的安全缺陷类型占总缺陷爆发数的<font name=SimSunBd fontsize=12>"\
+str(round((defectype1['爆发频率'].sum()/defectype0['爆发频率'].sum())*100,2))+"%</font>,这六种缺陷类型的爆发频率均超过<font name=SimSunBd fontsize=12>"+str(round((defectype1.爆发频率.tolist()[4]-1)/10000,2))+"万</font>,它们分别为:<font name=SimSunBd fontsize=12>"\
+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"、"+str(defectype1.defect_cname.tolist()[2])+"、"+str(defectype1.defect_cname.tolist()[3])+"、"+str(defectype1.defect_cname.tolist()[4])+"</font>。</font>"
content12 = " <font fontsize=12>云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司的<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用源代码进行检测,平均每个省公司"\
"存在<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000/pronumnow1),2))+"万</font>个代码缺陷问题,平均千行代码缺陷密度为:<font name=SimSunBd fontsize=12>"+str(round((defectnownum*1000/code_now[-1][-1]),2))+"</font>。"\
"其中,<font name=SimSunBd fontsize=12>"+str(prodef0.org_id.tolist()[0])+"、"+str(prodef0.org_id.tolist()[1])+"、"+str(prodef0.org_id.tolist()[2])+"</font>是千行代码缺陷密度最高的三家省公司,均超过了"+str(round((prodef0.midu.tolist()[2]-1),2))+";"\
"<font name=SimSunBd fontsize=12>"+str(prodef1.org_id.tolist()[0])+"、"+str(prodef1.org_id.tolist()[1])+"、"+str(prodef1.org_id.tolist()[2])+"、"+str(prodef1.org_id.tolist()[3])+"、"+str(prodef1.org_id.tolist()[4])+"</font>是千行代码缺陷密度最低的五家省公司,说明"\
"这五家省公司应用的安全性较高。各省公司缺陷密度分布情况如图3.6所示,颜色越深表示千行代码缺陷密度越大。</font>"
content13 = " <font fontsize=12>应用千行代码缺陷密度分布情况如图3.7所示,排在前五名的应用情况具体为:"\
"来自"+str(appdef0.org_id.tolist()[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[0])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[0],2))+";来自"+str(appdef0.org_id.tolist()[1])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[1])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[1],2))+";"\
"来自"+str(appdef0.org_id.tolist()[2])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[2])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[2],2))+";来自"+str(appdef0.org_id.tolist()[3])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[3])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[3],2))+";"\
"来自"+str(appdef0.org_id.tolist()[4])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[4])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[4],2))+"。</font>"
content14 = " <font fontsize=12>"+str(month)+"月份检测次数多于1次的应用有<font name=SimSunBd fontsize=12>"+str(len(appnum2))+"</font>个,占总应用数的<font name=SimSunBd fontsize=12>"+str(round((len(appnum2)/appnum_now[-1][-1]*100),2))+"%</font>。分析应用在4月份第1次检测和最后1次检测的千行代码缺陷密度如表3.2,"\
"变动幅度为负数表示应用千行代码缺陷密度降低、安全性提高,而大部分应用的源代码安全缺陷情况都存在明显的改善趋势。</font>"
content15 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用发起了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>检测请求,携带审计<font name=SimSunBd fontsize=12>"+str(len(xdsj))+"</font>次,"\
"审计功能利用率(发起审计次数/总检测次数)为:<font name=SimSunBd fontsize=12>"+str(round(len(xdsj)/numacc.检测次数.tolist()[-1]*100,2))+"%</font>,对应用进行分析,如图3.8所示。可以看出,<font name=SimSunBd fontsize=12>"+str(xdsjtop3.app_name.tolist()[0])+"、"+str(xdsjtop3.app_name.tolist()[1])+"、"+str(xdsjtop3.app_name.tolist()[2])+"</font>的审计功能利用率较高。</font>"
content16 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月发起人工审计的应用有<font name=SimSunBd fontsize=12>"+str()+"个</font>,分别为:<font name=SimSunBd fontsize=12>"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"</font>,"\
"只占参与检测应用总数的<font name=SimSunBd fontsize=12>2.45%</font>,说明目前人工审计的使用率并不高。</font>"
content17 = " <font fontsize=12>针对"+str(month)+"月份的检测及审计数据进行分析后,提出以下建议:<br/>    ①"+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"是最频繁爆发的缺陷,建议各省公司在应用维护时注意防范这两类安全问题。<br/>    ②分析表明,静态检测存在一定的误报,目前审计功能的使用率较低,"\
"建议各个省公司对缺陷进行审计,提高代码的安全性。</font>"
# --- Table data assembly ---
image = Image('./1.jpg')
image.drawWidth = 160
image.drawHeight = 100
body = stylesheet['body']
# Header rows; the Chinese labels are user-facing strings, kept verbatim.
table_data0 = [['省份', str(month5)+'月', str(month4)+'月', str(month3)+'月',
                str(month2)+'月', str(month1)+'月', str(month)+'月']]
table_data40 = [['应用名', '省公司', '总检测数', '第1次检测', '最后1次检测', '变动']]

# NOTE(review): every loop below iterates x[:-1], dropping each frame's LAST
# row — presumably a totals row; confirm against the workbook before "fixing".
# The dead `i=i+1` counters from the original were removed (a for-loop rebinds
# its index each pass), and O(n^2) list re-concatenation became append().

# Table 2.2: per-province monthly defect density; columns 2..6 carry the
# value plus its trend marker (row[-5..-1]).
table_data = list(table_data0)
for row in data1[:-1]:
    table_data.append([Paragraph(str(row[0]), body1), str(row[1]),
                       str(row[2]) + str(row[-5]), str(row[3]) + str(row[-4]),
                       str(row[4]) + str(row[-3]), str(row[5]) + str(row[-2]),
                       str(row[6]) + str(row[-1])])

# Table 2.1: per-province monthly scanned-app counts, same column recipe.
table_data1 = list(table_data0)
for row in proapp1[:-1]:
    table_data1.append([Paragraph(str(row[0]), body1), str(row[1]),
                        str(row[2]) + str(row[-5]), str(row[3]) + str(row[-4]),
                        str(row[4]) + str(row[-3]), str(row[5]) + str(row[-2]),
                        str(row[6]) + str(row[-1])])

# Severity summary: defect counts and per-app averages from transposed tab30.
table_data2 = [['缺陷类型', '严重', '高危', '中等', '低风险', '警告'],
               [Paragraph('缺陷数', body1), str(tab30[3][0]), str(tab30[3][1]),
                str(tab30[3][2]), str(tab30[3][3]), str(tab30[3][4])],
               [Paragraph('平均缺陷数/应用', body1), str(tab30[4][0]), str(tab30[4][1]),
                str(tab30[4][2]), str(tab30[4][3]), str(tab30[4][4])]]

# Table 3.2: first/last-scan density change per app. Column 5 (the raw
# numeric 变动) is skipped; column 6 is the formatted "x%" string.
table_data4 = list(table_data40)
for row in appno112[:-1]:
    table_data4.append([Paragraph(str(row[0]), body1), str(row[1]), str(row[2]),
                        str(row[3]), str(row[4]), str(row[6])])
# Cell styling for the wide data tables (2.1 / 2.2): bold white header row on
# dark blue, data cells right-aligned.
table_style = [
    ('FONTNAME', (1, 0), (-1, -1), 'SimSun'),  # body font
    ('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'),  # header font
    ('FONTSIZE', (0, 0), (-1, 0), 11),  # header row font size
    ('FONTSIZE', (0, 1), (-1, -1), 10),  # data rows font size
    ('ALIGN', (1, 0), (-1, -1), 'RIGHT'),  # data cells flush right
    ('ALIGN', (0, 0), (-1, 0), 'CENTER'),  # header centred
    ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),  # vertical centring everywhere
    # ('SPAN', (-2, -2), (-1, -1)), # merge cells
    # ('SPAN', (0, 4), (0, 5)), # merge cells
    # ('SPAN', (2, 4), (2, 5)), # merge cells
    ('BACKGROUND', (0, 0), (-1, 0), colors.darkblue),  # header background
    ('TEXTCOLOR', (0, 0), (-1, 0), colors.white),  # header text colour
    ('GRID', (0, 0), (-1, -1), 0.75, colors.black),  # 0.75pt black grid lines
]
# Variant with every cell centred (severity summary and change tables).
table_style1 = [
    ('FONTNAME', (1, 0), (-1, -1), 'SimSun'),  # body font
    ('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'),  # header font
    ('FONTSIZE', (0, 0), (-1, 0), 11),  # header row font size
    ('FONTSIZE', (0, 1), (-1, -1), 10),  # data rows font size
    ('ALIGN', (0, 0), (-1, -1), 'CENTER'),  # every cell centred
    ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),  # vertical centring everywhere
    ('BACKGROUND', (0, 0), (-1, 0), colors.darkblue),  # header background
    ('TEXTCOLOR', (0, 0), (-1, 0), colors.white),  # header text colour
    ('GRID', (0, 0), (-1, -1), 0.75, colors.black),  # 0.75pt black grid lines
]
table = Table(data=table_data,style=table_style, colWidths=75)  # table 2.2 (density)
table1= Table(data=table_data1,style=table_style, colWidths=75)  # table 2.1 (app counts)
table2= Table(data=table_data2,style=table_style1, colWidths=75)  # severity summary
table3=Table(data=table_data4,style=table_style1, colWidths=75)  # improvement table
# --- Assemble the document flow (headings, paragraphs, tables) in order ---
#story.append(Paragraph("区块链", Title))
story.append(Paragraph("一、报告背景", Heading1))
story.append(Paragraph(content1, body))
story.append(Paragraph("二、"+str(month0)+"-"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content2, body))
story.append(Paragraph(content3, body))
story.append(Paragraph("2.1 应用检测情况", Heading2))
story.append(Paragraph(content4, body))
story.append(table1)
story.append(Paragraph("2.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content5, body))
story.append(table)
story.append(Paragraph("三、"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content51, body))
story.append(Paragraph(content6, body))
story.append(Paragraph("3.1 应用检测情况", Heading2))
story.append(Paragraph("3.1.1 应用检测次数排序", Heading3))
story.append(Paragraph(content7, body))
story.append(Paragraph(content8, body))
story.append(Paragraph("3.1.2 检测潮汐分析", Heading3))
story.append(Paragraph(content9, body))
story.append(Paragraph("3.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content10, body))
story.append(Paragraph("3.2.1 总体缺陷类型分布情况", Heading3))
story.append(Paragraph(content11, body))
story.append(table2)
story.append(Paragraph("3.2.2 应用缺陷密度排序", Heading3))
story.append(Paragraph(content12, body))
story.append(Paragraph(content13, body))
story.append(Paragraph("3.3 缺陷改善情况", Heading2))
story.append(Paragraph(content14, body))
story.append(table3)
story.append(Paragraph("3.4 审计情况", Heading2))
story.append(Paragraph("3.4.1 携带审计使用情况", Heading3))
story.append(Paragraph(content15, body))
story.append(Paragraph("3.4.2 人工审计使用情况", Heading3))
story.append(Paragraph(content16, body))
story.append(Paragraph("四、建议", Heading1))
story.append(Paragraph(content17, body))
# Alternative in-memory output, kept from the original for reference:
# bytes
# buf = BytesIO()
# doc = SimpleDocTemplate(buf, encoding='UTF-8')
# doc.build(story)
# print(buf.getvalue().decode())
# file
# NOTE(review): output path is hard-coded to one user's desktop.
doc = SimpleDocTemplate('C:\\Users\\eric\\Desktop\\hello.pdf')
doc.build(story)
from io import BytesIO
import pandas as pd
import arrow
import hanshu
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, LongTable, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.enums import TA_CENTER
story=[]
#获取年月日
time=arrow.now()
year=time.year
month=time.month-1
day=(time.shift(days=-time.day)).day
month0=str(int(month)-5)
month1=time.month-2
month2=time.month-3
month3=time.month-4
month4=time.month-5
month5=time.month-6
#input('起始月份:')
#累计检测应用数
appacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测应用数')
appnum_acc=appacc['检测应用数'].sum()
appnum_now=appacc.values.tolist()
#print(appacc.检测应用数.tolist()[-1])
#累计检测次数
numacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测次数')
num_acc=numacc['检测次数'].sum()
num_now=numacc.values.tolist()
#累计检测代码行数
codeacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计代码行数')
codenum_acc=codeacc['代码行数'].sum()
code_now=codeacc.values.tolist()
#累计缺陷数
defectacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计缺陷类型及占比')
defectnum_acc=defectacc['数量'].sum()
#语言占比
language=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月语言占比')
language0=language.values.tolist()
#当月缺陷占比
defectnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
defectnownum=defectnow['爆发数'].sum()
defectnow0=defectnow.values.tolist()
#计算省份个数
pronumnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月省公司检测次数')
pronumnow0=hanshu.zyzh(pronumnow)
pronumnow1=hanshu.btcs(pronumnow0['org_id'])
#统计当月未检测省公司
pronan=hanshu.diff(pronumnow)
#查找检测次数排名前五的省公司
pronumtop5=hanshu.top5(pronumnow)
#print(pronumtop5.org_id)
#统计检测次数排名靠前的应用,省公司、应用名、检测次数
appnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用检测次数')
appnum0=hanshu.zyzh(appnum)
appnumtop5=hanshu.top52(appnum0)
apptop5pro=appnumtop5.org_id.tolist()
apptop5=appnumtop5.app_name.tolist()
apptop5num=appnumtop5.次数.tolist()
#潮汐分析
datenum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月检测潮汐分析')
datetop1=hanshu.top1(datenum)
dateno1=pd.to_datetime(datetop1.datetime.tolist()[0])
cxyear=dateno1.year
cxmonth=dateno1.month
cxday=dateno1.day
#缺陷类型及爆发频率
defectype=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月缺陷类型及占比')
defectype0=defectype.sort_values(by='爆发频率', axis=0, ascending=False)
defectype1=defectype0.head(5)
#省公司缺陷密度
prodef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月各省公司缺陷密度')
prodef00=hanshu.zyzh(prodef)
prodef0=prodef00.sort_values(by='midu', axis=0, ascending=False).head(3)
prodef1=prodef00.sort_values(by='midu', axis=0, ascending=True).head(5)
#当月应用缺陷密度
appdef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用缺陷密度')
appdef00=hanshu.zyzh(appdef)
appdef0=appdef00.sort_values(by='rat', axis=0, ascending=False).head(5)
#筛选检测超过1次的应用
appnum2=appnum.loc[appnum["次数"] > 1]
#携带审计情况
xdsj=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'携带审计情况')
#计算携带审计利用率
shenji2=xdsj["app_name"].value_counts().reset_index()
jiance2=appnum[appnum.app_name.isin(xdsj['app_name'])]
shenji2.columns=['app_name','携带审计次数']
hebing=pd.merge(shenji2, jiance2, on = 'app_name')
hebing['携带审计利用率']=list(map(lambda x,y: (x/y), hebing['携带审计次数'], hebing['次数']))
xdsjtop3=hebing.sort_values(by='携带审计利用率', axis=0, ascending=False).head(3)
#table1
proapp=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-检测应用数')
proapp1=hanshu.suanzzeng(hanshu.zyzh((proapp)))
#TABLE2
data=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-应用平均缺陷密度')
data1=hanshu.suanzzeng(hanshu.zyzh((data)))
#TABLE3
tab3=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
tab3['缺陷数0']=round(tab3['爆发数']/10000,1)
tab3['缺陷数'] = [str(i) + '万' for i in tab3['缺陷数0']]
tab3['平均缺陷数/应用']=round(tab3['爆发数']/appnum_now[-1][-1],2)
tab30=tab3.T.values.tolist()
#table4
dataa0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'应用第一次检测最后一次检测')
dataa=hanshu.zyzh(dataa0)
appno11=pd.DataFrame()
appno11['app_name']=dataa['app_name']
appno11['org_id']=dataa['org_id']
appno11['总检测数']=dataa['检测次数']
appno11['第1次检测']=dataa['第一次密度']
appno11['最后1次检测']=dataa['最后一次密度']
appno11['变动']=round((dataa['最后一次密度']-dataa['第一次密度'])/dataa['第一次密度']*100,2)
appno11['变动率']=[str(i)+'%' for i in appno11['变动']]
appno111 = appno11.sort_values(by='变动', axis=0, ascending=True)#按从小到大排序
appno112=appno111.values.tolist()
appaccnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测应用数')
appaccnum0=hanshu.zyzh(appaccnum)
pro_acc=hanshu.btcs(appaccnum0['org_id'])
appaccnum1 = appaccnum0.groupby(by=['org_id'],as_index = False)['appCount'].sum()
appaccnum2=appaccnum1.sort_values(by = 'appCount',axis = 0,ascending = False)#按大小顺序排名
appaccnumt=appaccnum2.head(5).values.tolist()
pdefect0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测缺陷密度')
pdefect1=hanshu.zyzh(pdefect0)
#print(appaccnum2.head(5))
pdfmetrics.registerFont(TTFont('SimSun', './SimSun.ttf')) # 默认不支持中文,需要注册字体
pdfmetrics.registerFont(TTFont('SimSunBd', './simhei.ttf'))
pdfmetrics.registerFont(TTFont('Arial', './Arial.ttf'))
# registerFontFamily('SimSun', normal='SimSun', bold='SimSunBd', italic='VeraIt', boldItalic='VeraBI')
stylesheet = getSampleStyleSheet() # 获取样式集
stylesheet1 = getSampleStyleSheet()
# 获取reportlab自带样式
Normal = stylesheet['Normal']
BodyText = stylesheet['BodyText']
Italic = stylesheet['Italic']
Title = stylesheet['Title']
Heading1 = stylesheet['Heading1']
Heading2 = stylesheet['Heading2']
Heading3 = stylesheet['Heading3']
Heading4 = stylesheet['Heading4']
Heading5 = stylesheet['Heading5']
Heading6 = stylesheet['Heading6']
Bullet = stylesheet['Bullet']
Definition = stylesheet['Definition']
Code = stylesheet['Code']
# 自带样式不支持中文,需要设置中文字体,但有些样式会丢失,如斜体Italic。有待后续发现完全兼容的中文字体
Normal.fontName = 'SimSun'
Italic.fontName = 'SimSun'
BodyText.fontName = 'SimSun'
Title.fontName = 'SimSunBd'
Heading1.fontName = 'SimSunBd'
Heading2.fontName = 'SimSunBd'
Heading3.fontName = 'SimSunBd'
Heading4.fontName = 'SimSunBd'
Heading5.fontName = 'SimSun'
Heading6.fontName = 'SimSun'
Bullet.fontName = 'SimSun'
Definition.fontName = 'SimSun'
Code.fontName = 'SimSun'
# 添加自定义样式
stylesheet.add(
ParagraphStyle(name='body',
fontName="SimSun",
fontSize=12,
textColor='black',
leading=20, # 行间距
spaceBefore=10, # 段前间距
spaceAfter=10, # 段后间距
leftIndent=0, # 左缩进
rightIndent=0, # 右缩进
firstLineIndent=20, # 首行缩进,每个汉字为10
alignment=TA_LEFT, # 对齐方式
bulletFontSize=15, #bullet为项目符号相关的设置
bulletIndent=-50,
bulletAnchor='start',
bulletFontName='Symbol'
)
)
# 添加自定义样式
stylesheet1.add(
ParagraphStyle(name='body',
fontName="SimSun",
fontSize=10,
textColor='black',
leading=10, # 行间距
spaceBefore=10, # 段前间距
spaceAfter=0, # 段后间距
leftIndent=0, # 左缩进
rightIndent=0, # 右缩进
firstLineIndent=0, # 首行缩进,每个汉字为10
alignment=TA_CENTER, # 对齐方式
bulletFontSize=15, #bullet为项目符号相关的设置
bulletIndent=-50,
bulletAnchor='start',
bulletFontName='Symbol'
)
)
body = stylesheet['body']
body1=stylesheet1['body']
# 段落
content1="<font fontsize=12> 代码安全审计主要是通过找出代码中存在的潜在安全风险并修复,以提高应用系统代码质量,降低系统安全风险。自2020年9月份上线以来,代码安全子系统在云道平台安全审计中心稳定运行。<br/>    本报告基于云道平台安全审计中心"\
"代码安全检测子系统的检测数据进行统计分析,内容分为两大部分:第一部分介绍了2021年"+str(month0)+"-"+str(month)+"月每个月代码安全检测的情况以及趋势。第二部分介绍了"+str(month)+"月份的总体检测情况、安全缺陷情况。</font>"
content2 = " <font fontsize=12>2021年"+str(month0)+"-"+str(month)+"月,云道安全审计中心代码安全检测引擎共检测了</font><font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font><font fontsize=12>个"\
"应用系统,检测任务数<font name=SimSunBd fontsize=12>"+str(num_acc)+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((codenum_acc/100000000),2))+"亿</font>行代码,"\
"共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][0]*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][1]*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][2]*100),2))+"%</font>。"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][3]*100+defectacc['占比'][4]*100),2))+"%。</font><br/>    检测次数趋势如图2.1所示:</font>"
content3 = " <font fontsize=12>从图中可以看出,1月份检测次数达到峰值,2月份急剧下降,3月份开始检测次数有所回升。</font>"
content4 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司累计检测应用数为:"\
"<font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font>个。每个月的检测应用数及其变化如表2.1所示。可以看出,<font name=SimSunBd fontsize=12>"+str(appaccnumt[0][0])+"、"+str(appaccnumt[1][0])+"、"\
""+str(appaccnumt[2][0])+"、"+str(appaccnumt[3][0])+"、"+str(appaccnumt[4][0])+"</font>月均检测应用数排前五,<font name=SimSunBd fontsize=12>江西、山东、山西、四川</font>等省公司自2月份以来检测应用数呈上升趋势。</font>"
content5 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司的<font name=SimSunBd fontsize=12>"\
+str(appnum_acc)+"</font>个应用,累计检测次数:<font name=SimSunBd fontsize=12>"+str(num_acc)+"次</font>,总发现缺陷数<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,"\
"平均千行代码缺陷密度为<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc*1000/codenum_acc),2))+"</font>。省公司应用平均千行代码缺陷密度变化情况,如表2.2,可以看出,<font name=SimSunBd fontsize=12>安徽、北京、四川</font>三个"\
"省公司的应用平均千行代码缺陷密度总体呈下降趋势。</font>"
content51=" <font fontsize=12>截至"+str(year)+"年"+str(month)+"月"+str(day)+"日,代码安全检测引擎"+str(month)+"月份共检测<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用系统,检测任务数<font name=SimSunBd fontsize=12>"\
+str(num_now[-1][-1])+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((code_now[-1][-1]/10000000),2))+"千万</font>行代码。<br/>    检测的应用系统中,使用数量最多的两种编程语言为<font name=SimSunBd fontsize=12>"\
+str(language0[0][0])+"、"+str(language0[1][0])+"</font>,对应的应用数量分别为<font name=SimSunBd fontsize=12>"+str(language0[0][1])+"</font>个和<font name=SimSunBd fontsize=12>"+str(language0[1][1])+"</font>个。可以看出,"\
"各公司在进行应用开发时的首选语言是<font name=SimSunBd fontsize=12>"+str(language0[0][0])+"</font>语言,占比高达<font name=SimSunBd fontsize=12>"+str(round(language0[0][1]*100/(language['存在应用数'].sum()),2))+"%</font>。编程语言的总体分布情况如图3.1所示。</font>"
content6 = " <font fontsize=12>共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[0][1]/defectnownum*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[1][1]/defectnownum*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[2][1]/defectnownum*100),2))+"%</font>,"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round(((defectnow0[3][1]+defectnow0[4][1])/defectnownum*100),2))+"%</font>。具体详情将从应用检测情况、应用安全缺陷情况、缺陷改善情况以及缺陷审计情况四个角度展开。</font>"
content7 = " <font fontsize=12>截至"+str(month)+"月"+str(day)+"日,共有来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司(不包括"+str(pronan)+")的<font name=SimSunBd fontsize=12>"+str(appacc.检测应用数.tolist()[-1])+"</font>个应用进行代码安全检测<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,"\
"各省公司应用检测总数如图3.2所示,颜色越深表示检测次数越多,可以看出,排在前面的省份是<font name=SimSunBd fontsize=12>"+str('、'.join(pronumtop5.org_id.tolist()))+"</font>,均超过了<font name=SimSunBd fontsize=12>"+str(min(pronumtop5.total.tolist())-1)+"</font>次。</font>"
content8 = " <font fontsize=12>各应用检测次数排名如图3.3所示。可以看出,排在前5的应用分别是:"\
"来自"+str(apptop5pro[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[0])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[0])+"次</font>;"\
"来自"+str(apptop5pro[1])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[1])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[1])+"次</font>;"\
"来自"+str(apptop5pro[2])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[2])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[2])+"次</font>;"\
"来自"+str(apptop5pro[3])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[3])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[3])+"次</font>;"\
"来自"+str(apptop5pro[4])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[4])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[4])+"次</font>。</font>"
content9 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,云道安全审计中心代码安全检测引擎总共检测了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,平均每天检测<font name=SimSunBd fontsize=12>"+str(round(numacc.检测次数.tolist()[-1]/int(day),2))+"次</font>。每天检测次数如图3.4所示。"\
"可以看出,<font name=SimSunBd fontsize=12>"+str(cxyear)+"年"+str(cxmonth)+"月"+str(cxday)+"日</font>应用检测最为密集,且各应用相对集中在<font name=SimSunBd fontsize=12>4月6日-4月14日</font>提交检测。</font>"
content10 = " <font fontsize=12>据统计,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用总共检测出代码安全缺陷总数为:<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,平均每个应用存在<font name=SimSunBd fontsize=12>"+str(int(defectnum_acc/appnum_now[-1][-1]))+"</font>个安全缺陷问题,"\
"各类安全缺陷出现次数及平均在每应用中的出现次数如表3.3内容所示。</font>"
content11 = " <font fontsize=12><font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个检测的应用中,安全缺陷类型覆盖了<font name=SimSunBd fontsize=12>"+str(len(defectype))+"种</font>,如图3.5所示。可以看出,排名前五的安全缺陷类型占总缺陷爆发数的<font name=SimSunBd fontsize=12>"\
+str(round((defectype1['爆发频率'].sum()/defectype0['爆发频率'].sum())*100,2))+"%</font>,这六种缺陷类型的爆发频率均超过<font name=SimSunBd fontsize=12>"+str(round((defectype1.爆发频率.tolist()[4]-1)/10000,2))+"万</font>,它们分别为:<font name=SimSunBd fontsize=12>"\
+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"、"+str(defectype1.defect_cname.tolist()[2])+"、"+str(defectype1.defect_cname.tolist()[3])+"、"+str(defectype1.defect_cname.tolist()[4])+"</font>。</font>"
content12 = " <font fontsize=12>云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司的<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用源代码进行检测,平均每个省公司"\
"存在<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000/pronumnow1),2))+"万</font>个代码缺陷问题,平均千行代码缺陷密度为:<font name=SimSunBd fontsize=12>"+str(round((defectnownum*1000/code_now[-1][-1]),2))+"</font>。"\
"其中,<font name=SimSunBd fontsize=12>"+str(prodef0.org_id.tolist()[0])+"、"+str(prodef0.org_id.tolist()[1])+"、"+str(prodef0.org_id.tolist()[2])+"</font>是千行代码缺陷密度最高的三家省公司,均超过了"+str(round((prodef0.midu.tolist()[2]-1),2))+";"\
"<font name=SimSunBd fontsize=12>"+str(prodef1.org_id.tolist()[0])+"、"+str(prodef1.org_id.tolist()[1])+"、"+str(prodef1.org_id.tolist()[2])+"、"+str(prodef1.org_id.tolist()[3])+"、"+str(prodef1.org_id.tolist()[4])+"</font>是千行代码缺陷密度最低的五家省公司,说明"\
"这五家省公司应用的安全性较高。各省公司缺陷密度分布情况如图3.6所示,颜色越深表示千行代码缺陷密度越大。</font>"
content13 = " <font fontsize=12>应用千行代码缺陷密度分布情况如图3.7所示,排在前五名的应用情况具体为:"\
"来自"+str(appdef0.org_id.tolist()[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[0])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[0],2))+";来自"+str(appdef0.org_id.tolist()[1])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[1])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[1],2))+";"\
"来自"+str(appdef0.org_id.tolist()[2])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[2])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[2],2))+";来自"+str(appdef0.org_id.tolist()[3])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[3])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[3],2))+";"\
"来自"+str(appdef0.org_id.tolist()[4])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[4])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[4],2))+"。</font>"
content14 = " <font fontsize=12>"+str(month)+"月份检测次数多于1次的应用有<font name=SimSunBd fontsize=12>"+str(len(appnum2))+"</font>个,占总应用数的<font name=SimSunBd fontsize=12>"+str(round((len(appnum2)/appnum_now[-1][-1]*100),2))+"%</font>。分析应用在4月份第1次检测和最后1次检测的千行代码缺陷密度如表3.2,"\
"变动幅度为负数表示应用千行代码缺陷密度降低、安全性提高,而大部分应用的源代码安全缺陷情况都存在明显的改善趋势。</font>"
content15 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用发起了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>检测请求,携带审计<font name=SimSunBd fontsize=12>"+str(len(xdsj))+"</font>次,"\
"审计功能利用率(发起审计次数/总检测次数)为:<font name=SimSunBd fontsize=12>"+str(round(len(xdsj)/numacc.检测次数.tolist()[-1]*100,2))+"%</font>,对应用进行分析,如图3.8所示。可以看出,<font name=SimSunBd fontsize=12>"+str(xdsjtop3.app_name.tolist()[0])+"、"+str(xdsjtop3.app_name.tolist()[1])+"、"+str(xdsjtop3.app_name.tolist()[2])+"</font>的审计功能利用率较高。</font>"
content16 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月发起人工审计的应用有<font name=SimSunBd fontsize=12>"+str()+"个</font>,分别为:<font name=SimSunBd fontsize=12>"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"</font>,"\
"只占参与检测应用总数的<font name=SimSunBd fontsize=12>2.45%</font>,说明目前人工审计的使用率并不高。</font>"
content17 = " <font fontsize=12>针对"+str(month)+"月份的检测及审计数据进行分析后,提出以下建议:<br/>    ①"+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"是最频繁爆发的缺陷,建议各省公司在应用维护时注意防范这两类安全问题。<br/>    ②分析表明,静态检测存在一定的误报,目前审计功能的使用率较低,"\
"建议各个省公司对缺陷进行审计,提高代码的安全性。</font>"
# Table 表格
image = Image('./1.jpg')
image.drawWidth = 160
image.drawHeight = 100
body = stylesheet['body']
table_data0 = [['省份', str(month5)+'月', str(month4)+'月',str(month3)+'月',str(month2)+'月',str(month1)+'月',str(month)+'月'],
]
table_data40=[['应用名','省公司','总检测数','第1次检测','最后1次检测','变动'],
]
table_data=table_data0
table_data1=table_data0
table_data4=table_data40
for i in range(0,len(data1)-1):
tabledata=[[Paragraph(str(data1[i][0]),body1), str(data1[i][1]), str(data1[i][2])+str(data1[i][-5]), str(data1[i][3])+str(data1[i][-4]), str(data1[i][4])+str(data1[i][-3]), str(data1[i][5])+str(data1[i][-2]), str(data1[i][6])+str(data1[i][-1])],
]
table_data=table_data+tabledata
i=i+1
for j in range(0,len(proapp1)-1):
tabledata1=[[Paragraph(str(proapp1[j][0]),body1), str(proapp1[j][1]), str(proapp1[j][2])+str(proapp1[j][-5]), str(proapp1[j][3])+str(proapp1[j][-4]), str(proapp1[j][4])+str(proapp1[j][-3]), str(proapp1[j][5])+str(proapp1[j][-2]), str(proapp1[j][6])+str(proapp1[j][-1])],
]
table_data1=table_data1+tabledata1
j=j+1
table_data2 =[['缺陷类型','严重','高危','中等','低风险','警告'],
[Paragraph('缺陷数',body1),str(tab30[3][0]),str(tab30[3][1]),str(tab30[3][2]),str(tab30[3][3]),str(tab30[3][4])],
[Paragraph('平均缺陷数/应用',body1),str(tab30[4][0]),str(tab30[4][1]),str(tab30[4][2]),str(tab30[4][3]),str(tab30[4][4])],
]
for m in range(0,len(appno112)-1):
tabledata4=[[Paragraph(str(appno112[m][0]),body1), str(appno112[m][1]), str(appno112[m][2]), str(appno112[m][3]), str(appno112[m][4]), str(appno112[m][6])],
]
table_data4=table_data4+tabledata4
m=m+1
table_style = [
('FONTNAME', (1, 0), (-1, -1), 'SimSun'), # 字体
('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'), # 字体
('FONTSIZE', (0, 0), (-1, 0), 11), # 第一行的字体大小
('FONTSIZE', (0, 1), (-1, -1), 10), # 第二行到最后一行的字体大小
('ALIGN', (1, 0), (-1, -1), 'RIGHT'), # 所有表格左右中间对齐
('ALIGN', (0, 0), (-1, 0), 'CENTER'), # 所有表格左右中间对齐
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), # 所有表格上下居中对齐
# ('SPAN', (-2, -2), (-1, -1)), # 合并
# ('SPAN', (0, 4), (0, 5)), # 合并
# ('SPAN', (2, 4), (2, 5)), # 合并
('BACKGROUND', (0, 0), (-1, 0), colors.darkblue), # 设置第一行背景颜色
('TEXTCOLOR', (0, 0), (-1, 0), colors.white), # 设置表格内文字颜色
('GRID', (0, 0), (-1, -1), 0.75, colors.black), # 设置表格框线为灰色,线宽为0.1
]
table_style1 = [
('FONTNAME', (1, 0), (-1, -1), 'SimSun'), # 字体
('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'), # 字体
('FONTSIZE', (0, 0), (-1, 0), 11), # 第一行的字体大小
('FONTSIZE', (0, 1), (-1, -1), 10), # 第二行到最后一行的字体大小
('ALIGN', (0, 0), (-1, -1), 'CENTER'), # 所有表格左右中间对齐
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), # 所有表格上下居中对齐
('BACKGROUND', (0, 0), (-1, 0), colors.darkblue), # 设置第一行背景颜色
('TEXTCOLOR', (0, 0), (-1, 0), colors.white), # 设置表格内文字颜色
('GRID', (0, 0), (-1, -1), 0.75, colors.black), # 设置表格框线为灰色,线宽为0.1
]
table = Table(data=table_data,style=table_style, colWidths=75)
table1= Table(data=table_data1,style=table_style, colWidths=75)
table2= Table(data=table_data2,style=table_style1, colWidths=75)
table3=Table(data=table_data4,style=table_style1, colWidths=75)
#story.append(Paragraph("区块链", Title))
story.append(Paragraph("一、报告背景", Heading1))
story.append(Paragraph(content1, body))
story.append(Paragraph("二、"+str(month0)+"-"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content2, body))
story.append(Paragraph(content3, body))
story.append(Paragraph("2.1 应用检测情况", Heading2))
story.append(Paragraph(content4, body))
story.append(table1)
story.append(Paragraph("2.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content5, body))
story.append(table)
story.append(Paragraph("三、"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content51, body))
story.append(Paragraph(content6, body))
story.append(Paragraph("3.1 应用检测情况", Heading2))
story.append(Paragraph("3.1.1 应用检测次数排序", Heading3))
story.append(Paragraph(content7, body))
story.append(Paragraph(content8, body))
story.append(Paragraph("3.1.2 检测潮汐分析", Heading3))
story.append(Paragraph(content9, body))
story.append(Paragraph("3.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content10, body))
story.append(Paragraph("3.2.1 总体缺陷类型分布情况", Heading3))
story.append(Paragraph(content11, body))
story.append(table2)
story.append(Paragraph("3.2.2 应用缺陷密度排序", Heading3))
story.append(Paragraph(content12, body))
story.append(Paragraph(content13, body))
story.append(Paragraph("3.3 缺陷改善情况", Heading2))
story.append(Paragraph(content14, body))
story.append(table3)
story.append(Paragraph("3.4 审计情况", Heading2))
story.append(Paragraph("3.4.1 携带审计使用情况", Heading3))
story.append(Paragraph(content15, body))
story.append(Paragraph("3.4.2 人工审计使用情况", Heading3))
story.append(Paragraph(content16, body))
story.append(Paragraph("四、建议", Heading1))
story.append(Paragraph(content17, body))
# bytes
# buf = BytesIO()
# doc = SimpleDocTemplate(buf, encoding='UTF-8')
# doc.build(story)
# print(buf.getvalue().decode())
# file
doc = SimpleDocTemplate('C:\\Users\\eric\\Desktop\\hello.pdf')
doc.build(story) | 0 | 0 | 0 |
b751b65d9d2578f7871141081e922759f1ff73f1 | 797 | py | Python | jsonfiles.py | sainisatish/MeChat | 7b9b54f3f174db83c078f476871724922a399b83 | [
"MIT"
] | null | null | null | jsonfiles.py | sainisatish/MeChat | 7b9b54f3f174db83c078f476871724922a399b83 | [
"MIT"
] | null | null | null | jsonfiles.py | sainisatish/MeChat | 7b9b54f3f174db83c078f476871724922a399b83 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 20:45:28 2019
@author: satishsaini
"""
import json
| 61.307692 | 272 | 0.608532 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 20:45:28 2019
@author: satishsaini
"""
import json
def json_ext(r):
json_data=r.json()
print("Meaning Of The Word : {} ".format(json_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]["definitions"][0]))
print("Examples : {} \n or {}".format(json_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]["examples"][0]['text'],json_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]["examples"][1]['text']))
print("Short Examples : {} \n or {}".format(json_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['subsenses'][0]["examples"][0]['text'],json_data['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['subsenses'][0]["examples"][1]['text']))
| 665 | 0 | 23 |
c34c9e9ab31f8ddfd8a8324af7429b2aef053bd8 | 254 | py | Python | regolith/interact.py | priyankaanehra/regolith | 393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f | [
"CC0-1.0"
] | 7 | 2018-07-20T19:19:23.000Z | 2022-02-02T20:48:15.000Z | regolith/interact.py | priyankaanehra/regolith | 393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f | [
"CC0-1.0"
] | 822 | 2017-11-06T21:54:58.000Z | 2022-03-31T12:25:41.000Z | regolith/interact.py | priyankaanehra/regolith | 393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f | [
"CC0-1.0"
] | 43 | 2018-01-05T20:35:21.000Z | 2022-03-24T00:13:03.000Z | """
Loads the dbs for interactive sessions
"""
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases, \
connect_db
rc = DEFAULT_RC
rc._update(load_rcfile("regolithrc.json"))
filter_databases(rc)
chained_db, dbs = connect_db(rc)
| 21.166667 | 76 | 0.775591 | """
Loads the dbs for interactive sessions
"""
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases, \
connect_db
rc = DEFAULT_RC
rc._update(load_rcfile("regolithrc.json"))
filter_databases(rc)
chained_db, dbs = connect_db(rc)
| 0 | 0 | 0 |
eb4faee65ee224bf0a7498eff879bc9e238557a4 | 7,822 | py | Python | src/train/train_bad.py | landbroken/MyPaper | e77581262aac210e6273c3647d091f7cf53eae4a | [
"Apache-2.0"
] | null | null | null | src/train/train_bad.py | landbroken/MyPaper | e77581262aac210e6273c3647d091f7cf53eae4a | [
"Apache-2.0"
] | null | null | null | src/train/train_bad.py | landbroken/MyPaper | e77581262aac210e6273c3647d091f7cf53eae4a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3.9
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 LinYulong. All Rights Reserved
#
# @Time : 2021/10/31
# @Author : LinYulong
import numpy
import pandas
import xgboost
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from src.alg import cross_verify
from src.train import train_cfg
from src.train.train import column_split
from src.train.train_result import TrainResult
from sklearn import preprocessing, linear_model
| 38.343137 | 113 | 0.646254 | #!/usr/bin/python3.9
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 LinYulong. All Rights Reserved
#
# @Time : 2021/10/31
# @Author : LinYulong
import numpy
import pandas
import xgboost
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from src.alg import cross_verify
from src.train import train_cfg
from src.train.train import column_split
from src.train.train_result import TrainResult
from sklearn import preprocessing, linear_model
def get_encoded_train_labels(np_train_labels: numpy.ndarray):
le = preprocessing.LabelEncoder()
le.fit(np_train_labels)
# 可以查看一下 fit 以后的类别是什么
# le_type = le.classes_
# transform 以后,这一列数就变成了 [0, n-1] 这个区间的数,即是 le.classes_ 中的索引
encoded_train_labels = le.transform(np_train_labels)
return encoded_train_labels
def train_predict_xgb_classifier(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
# 因为 XGBClassifier 告警要有 use_label_encoder=False,所以需要这个预处理
encoded_y_train = get_encoded_train_labels(y_train)
"""
https://blog.csdn.net/qq_38735017/article/details/111203258
eval_metric
回归 rmse, mae
分类 auc, error, merror, logloss, mlogloss
"""
model = XGBClassifier(use_label_encoder=False, eval_metric='merror') # 载入模型(模型命名为model)
model.fit(x_train, encoded_y_train) # 训练模型(训练集)
y_predict = model.predict(x_test) # 模型预测(测试集),y_pred为预测结果
cfg_train_times = train_cfg.get_times()
offset = 1 # 偏移,因为预处理的 labels 一定是 0,...n-1。所以要加偏移才是实际分数
y_predict = y_predict / cfg_train_times + offset
return y_predict, model
def train_predict_xgb_regressor(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
# 因为 XGBClassifier 告警要有 use_label_encoder=False,所以需要这个预处理
encoded_y_train = get_encoded_train_labels(y_train)
"""
https://blog.csdn.net/qq_38735017/article/details/111203258
eval_metric
回归 rmse, mae
分类 auc, error, merror, logloss, mlogloss
"""
model_r = xgboost.XGBRegressor(max_depth=3,
learning_rate=0.1,
n_estimators=100,
objective='reg:squarederror', # 此默认参数与 XGBClassifier 不同
booster='gbtree',
gamma=0,
min_child_weight=1,
subsample=1,
colsample_bytree=1,
reg_alpha=0,
reg_lambda=1,
random_state=0)
model_r.fit(x_train, encoded_y_train, eval_metric='rmse') # 训练模型(训练集)
# model_r.save_model('xgb100.model') # 保存模型
y_predict = model_r.predict(x_test) # 模型预测(测试集),y_pred为预测结果
cfg_train_times = train_cfg.get_times()
offset = 1 # 偏移,因为预处理的 labels 一定是 0,...n-1。所以要加偏移才是实际分数
y_predict = y_predict / cfg_train_times + offset
return y_predict, model_r
def train_predict_linear_discriminant_analysis(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
model_lda = LinearDiscriminantAnalysis(n_components=2)
model_lda.fit(x_train, y_train)
y_predict = model_lda.predict(x_test)
return y_predict, model_lda
def train_predict_lasso(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
clf = linear_model.Lasso(alpha=0.1)
clf.fit(x_train, y_train)
y_predict = clf.predict(x_test)
return y_predict, clf
def train_predict_logistics_regress(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
ss = StandardScaler()
x_train = ss.fit_transform(x_train)
x_test = ss.transform(x_test)
lr = LogisticRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
return y_predict, lr
def train_predict_random_forest_regressor(x_test: numpy.ndarray, x_train: numpy.ndarray,
y_train: numpy.ndarray):
model = ExtraTreesRegressor()
model.fit(x_train, y_train) # 训练模型(训练集)
y_predict = model.predict(x_test) # 模型预测(测试集),y_pred为预测结果
return y_predict, model
def get_best_result(result_list: list):
best_i = 0
ret_result: TrainResult = result_list[best_i]
for i in range(0, len(result_list)):
tmp_result: TrainResult = result_list[i]
if tmp_result.get_avg_r2() > ret_result.get_avg_r2():
ret_result = tmp_result
best_i = i
print("best result idx = " + str(ret_result.get_id()))
return ret_result
def train_no_group_all(test_df: pandas.DataFrame) -> TrainResult:
columns_size = test_df.columns.size
cfg_train_times = train_cfg.get_times()
result_list = []
for columns_idx in range(columns_size):
# 去掉被预测列
test_data_set, test_labels = column_split(test_df, columns_idx)
test_data_set = test_data_set * cfg_train_times
test_data_set = pandas.DataFrame(test_data_set, dtype=int)
test_labels_times = test_labels * cfg_train_times
test_labels_times = pandas.DataFrame(test_labels_times, dtype=int)
cross_verify_times = train_cfg.get_cross_verify_times()
result: TrainResult = cross_verify.cross_verify_no_group_all(cross_verify_times, test_data_set,
test_labels_times,
train_predict_logistics_regress)
result.set_id(columns_idx)
result_list.append(result)
print("------------------------------")
ret_result = get_best_result(result_list)
ret_result.print_average_result()
print("------------------------------")
return ret_result
def train_no_group_all_predict(begin_df: pandas.DataFrame, origin_df: pandas.DataFrame, train_result_list: list):
train_result_list_len = len(train_result_list)
x_test = begin_df.copy(deep=True)
for idx in range(0, train_result_list_len):
cur_train_result_idx = train_result_list_len - idx - 1
cur_old_train_result: TrainResult = train_result_list[cur_train_result_idx]
model = cur_old_train_result.get_model()
old_id = cur_old_train_result.get_id()
if model is None:
print("model is None")
raise Exception("model is None" + str(old_id))
test_data_set, y_test = column_split(origin_df, old_id)
y_predict = model.predict(x_test) # 模型预测(测试集),y_pred为预测结果
cfg_train_times = train_cfg.get_times()
offset = 1 # 偏移,因为预处理的 labels 一定是 0,...n-1。所以要加偏移才是实际分数
y_predict = y_predict / cfg_train_times + offset
cur_new_train_result = TrainResult()
cur_new_train_result.append_single_result(y_predict, y_test)
cur_new_train_result.print_average_result()
# 下一轮数据
column_name = cur_old_train_result.get_name()
try:
x_test.insert(old_id, column_name, y_predict)
print("--- insert {} ---".format(column_name))
except ValueError:
print("--- insert ValueError! {} ---".format(column_name))
raise ValueError
except TypeError:
print("--- insert TypeError! {} ---".format(column_name))
raise TypeError
except BaseException:
print("--- insert BaseException! {} ---".format(column_name))
raise BaseException
print("--- end predict ---")
| 7,481 | 0 | 230 |
4181aaf7cb376688b31273798bd5ee5539aeea3b | 2,849 | py | Python | tests/bugs/core_0426_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_0426_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_0426_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_426
# title: Wrong sort order when using es_ES collate
# decription: Check if sort order for collate ES_ES is the one of DRAE , the oficial organization for standarization of spanish
# tracker_id: CORE-426
# min_versions: []
# versions: 2.1
# qmid: bugs.core_426
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """SET NAMES ISO8859_1;
CREATE TABLE TAB (A CHAR(3) CHARACTER SET ISO8859_1);
COMMIT;
INSERT INTO TAB VALUES ('zo');
INSERT INTO TAB VALUES ('ze');
INSERT INTO TAB VALUES ('yo');
INSERT INTO TAB VALUES ('ye');
INSERT INTO TAB VALUES ('xo');
INSERT INTO TAB VALUES ('xe');
INSERT INTO TAB VALUES ('vo');
INSERT INTO TAB VALUES ('ve');
INSERT INTO TAB VALUES ('uo');
INSERT INTO TAB VALUES ('ue');
INSERT INTO TAB VALUES ('to');
INSERT INTO TAB VALUES ('te');
INSERT INTO TAB VALUES ('so');
INSERT INTO TAB VALUES ('se');
INSERT INTO TAB VALUES ('ro');
INSERT INTO TAB VALUES ('re');
INSERT INTO TAB VALUES ('qo');
INSERT INTO TAB VALUES ('qe');
INSERT INTO TAB VALUES ('po');
INSERT INTO TAB VALUES ('pe');
INSERT INTO TAB VALUES ('oo');
INSERT INTO TAB VALUES ('oe');
INSERT INTO TAB VALUES ('no');
INSERT INTO TAB VALUES ('ne');
INSERT INTO TAB VALUES ('mo');
INSERT INTO TAB VALUES ('me');
INSERT INTO TAB VALUES ('llo');
INSERT INTO TAB VALUES ('lle');
INSERT INTO TAB VALUES ('lo');
INSERT INTO TAB VALUES ('le');
INSERT INTO TAB VALUES ('ko');
INSERT INTO TAB VALUES ('ke');
INSERT INTO TAB VALUES ('jo');
INSERT INTO TAB VALUES ('je');
INSERT INTO TAB VALUES ('io');
INSERT INTO TAB VALUES ('ie');
INSERT INTO TAB VALUES ('ho');
INSERT INTO TAB VALUES ('he');
INSERT INTO TAB VALUES ('go');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('fo');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('eo');
INSERT INTO TAB VALUES ('ee');
INSERT INTO TAB VALUES ('do');
INSERT INTO TAB VALUES ('de');
INSERT INTO TAB VALUES ('cho');
INSERT INTO TAB VALUES ('cha');
INSERT INTO TAB VALUES ('co');
INSERT INTO TAB VALUES ('ce');
INSERT INTO TAB VALUES ('bo');
INSERT INTO TAB VALUES ('be');
INSERT INTO TAB VALUES ('ao');
INSERT INTO TAB VALUES ('ae');"""
db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """SET HEADING OFF;
SELECT A FROM TAB ORDER BY A COLLATE ES_ES;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ae
ao
be
bo
ce
cha
cho
co
de
do
ee
eo
fe
fe
fo
go
he
ho
ie
io
je
jo
ke
ko
le
lle
llo
lo
me
mo
ne
no
oe
oo
pe
po
qe
qo
re
ro
se
so
te
to
ue
uo
ve
vo
xe
xo
ye
yo
ze
zo
"""
@pytest.mark.version('>=2.1')
| 19.25 | 129 | 0.692524 | #coding:utf-8
#
# id: bugs.core_426
# title: Wrong sort order when using es_ES collate
# decription: Check if sort order for collate ES_ES is the one of DRAE , the oficial organization for standarization of spanish
# tracker_id: CORE-426
# min_versions: []
# versions: 2.1
# qmid: bugs.core_426
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """SET NAMES ISO8859_1;
CREATE TABLE TAB (A CHAR(3) CHARACTER SET ISO8859_1);
COMMIT;
INSERT INTO TAB VALUES ('zo');
INSERT INTO TAB VALUES ('ze');
INSERT INTO TAB VALUES ('yo');
INSERT INTO TAB VALUES ('ye');
INSERT INTO TAB VALUES ('xo');
INSERT INTO TAB VALUES ('xe');
INSERT INTO TAB VALUES ('vo');
INSERT INTO TAB VALUES ('ve');
INSERT INTO TAB VALUES ('uo');
INSERT INTO TAB VALUES ('ue');
INSERT INTO TAB VALUES ('to');
INSERT INTO TAB VALUES ('te');
INSERT INTO TAB VALUES ('so');
INSERT INTO TAB VALUES ('se');
INSERT INTO TAB VALUES ('ro');
INSERT INTO TAB VALUES ('re');
INSERT INTO TAB VALUES ('qo');
INSERT INTO TAB VALUES ('qe');
INSERT INTO TAB VALUES ('po');
INSERT INTO TAB VALUES ('pe');
INSERT INTO TAB VALUES ('oo');
INSERT INTO TAB VALUES ('oe');
INSERT INTO TAB VALUES ('no');
INSERT INTO TAB VALUES ('ne');
INSERT INTO TAB VALUES ('mo');
INSERT INTO TAB VALUES ('me');
INSERT INTO TAB VALUES ('llo');
INSERT INTO TAB VALUES ('lle');
INSERT INTO TAB VALUES ('lo');
INSERT INTO TAB VALUES ('le');
INSERT INTO TAB VALUES ('ko');
INSERT INTO TAB VALUES ('ke');
INSERT INTO TAB VALUES ('jo');
INSERT INTO TAB VALUES ('je');
INSERT INTO TAB VALUES ('io');
INSERT INTO TAB VALUES ('ie');
INSERT INTO TAB VALUES ('ho');
INSERT INTO TAB VALUES ('he');
INSERT INTO TAB VALUES ('go');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('fo');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('eo');
INSERT INTO TAB VALUES ('ee');
INSERT INTO TAB VALUES ('do');
INSERT INTO TAB VALUES ('de');
INSERT INTO TAB VALUES ('cho');
INSERT INTO TAB VALUES ('cha');
INSERT INTO TAB VALUES ('co');
INSERT INTO TAB VALUES ('ce');
INSERT INTO TAB VALUES ('bo');
INSERT INTO TAB VALUES ('be');
INSERT INTO TAB VALUES ('ao');
INSERT INTO TAB VALUES ('ae');"""
db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """SET HEADING OFF;
SELECT A FROM TAB ORDER BY A COLLATE ES_ES;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ae
ao
be
bo
ce
cha
cho
co
de
do
ee
eo
fe
fe
fo
go
he
ho
ie
io
je
jo
ke
ko
le
lle
llo
lo
me
mo
ne
no
oe
oo
pe
po
qe
qo
re
ro
se
so
te
to
ue
uo
ve
vo
xe
xo
ye
yo
ze
zo
"""
@pytest.mark.version('>=2.1')
def test_1(act_1: Action):
    """Run the ES_ES COLLATE query and compare its output to the expected DRAE order.

    The expected stdout lists 'ch' after 'ce' and 'll' after 'le', matching the
    official Spanish (DRAE) collation checked by this ticket.
    """
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 132 | 0 | 22 |
d0536789dcf9cff319151289645ec518b450c683 | 132 | py | Python | gym_tak/read_only/__init__.py | DrSmugleaf/gym-tak | 517608d50ed6da00b30dc2471c93d7070e4bd8b5 | [
"MIT"
] | null | null | null | gym_tak/read_only/__init__.py | DrSmugleaf/gym-tak | 517608d50ed6da00b30dc2471c93d7070e4bd8b5 | [
"MIT"
] | 2 | 2020-11-13T18:23:59.000Z | 2022-02-10T01:36:07.000Z | gym_tak/read_only/__init__.py | DrSmugleaf/gym-tak | 517608d50ed6da00b30dc2471c93d7070e4bd8b5 | [
"MIT"
] | null | null | null | from gym_tak.read_only.read_only_enum import read_only_enum
from gym_tak.read_only.read_only_properties import read_only_properties
| 44 | 71 | 0.909091 | from gym_tak.read_only.read_only_enum import read_only_enum
from gym_tak.read_only.read_only_properties import read_only_properties
| 0 | 0 | 0 |
26a478ce4ef32b1711fb4a0c84b2f688bde9bc0f | 7,576 | py | Python | podpac/core/algorithm/test/test_signal.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 46 | 2018-04-06T19:54:32.000Z | 2022-02-08T02:00:02.000Z | podpac/core/algorithm/test/test_signal.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 474 | 2018-04-05T22:21:09.000Z | 2022-02-24T14:21:16.000Z | podpac/core/algorithm/test/test_signal.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 4 | 2019-04-11T17:49:53.000Z | 2020-11-29T22:36:53.000Z | from __future__ import division, unicode_literals, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import traitlets as tl
import podpac
from podpac import Coordinates, clinspace, crange
from podpac.algorithm import Arange
from podpac.data import Array
from podpac.core.algorithm.signal import Convolution
| 41.173913 | 121 | 0.599393 | from __future__ import division, unicode_literals, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import traitlets as tl
import podpac
from podpac import Coordinates, clinspace, crange
from podpac.algorithm import Arange
from podpac.data import Array
from podpac.core.algorithm.signal import Convolution
class TestConvolution(object):
    """Tests for podpac.core.algorithm.signal.Convolution (kernel setup and eval)."""

    def test_init_kernel(self):
        """Kernel construction from explicit arrays and from 'kernel_type' strings."""
        node = Convolution(source=Arange(), kernel=[1, 2, 1], kernel_dims=["lat"])
        assert_equal(node.kernel, [1, 2, 1])
        # 'mean, 5' builds a normalized 5x5 (or 5x5x5) averaging kernel
        node = Convolution(source=Arange(), kernel_type="mean, 5", kernel_dims=["lat", "lon"])
        assert node.kernel.shape == (5, 5)
        assert np.all(node.kernel == 0.04)
        node = Convolution(source=Arange(), kernel_type="mean, 5", kernel_dims=["lat", "lon", "time"])
        assert node.kernel.shape == (5, 5, 5)
        assert np.all(node.kernel == 0.008)
        node = Convolution(source=Arange(), kernel_type="gaussian, 3, 1", kernel_dims=["lat", "lon"])
        assert node.kernel.shape == (3, 3)
        # kernel and kernel_type invalid
        with pytest.raises(TypeError, match="Convolution expected 'kernel' or 'kernel_type', not both"):
            Convolution(source=Arange(), kernel=[1, 2, 1], kernel_type="mean, 5", kernel_dims=["lat", "lon"])
        # kernel or kernel_type required
        with pytest.raises(TypeError, match="Convolution requires 'kernel' array or 'kernel_type' string"):
            Convolution(source=Arange(), kernel_dims=["lat", "lon"])
        # kernel_dims required
        with pytest.raises(
            TypeError, match="Convolution expected 'kernel_dims' to be specified when giving a 'kernel' array"
        ):
            Convolution(source=Arange(), kernel_type="mean, 5")
        # kernel_dims correct number of entries
        with pytest.raises(
            TypeError,
            match="The kernel_dims should contain the same number of dimensions as the number of axes in 'kernel', but ",
        ):
            Convolution(source=Arange(), kernel=[[[1, 2]]], kernel_dims=["lat"])

    def test_eval(self):
        """Smoke-test eval for 1d/2d/3d kernels; kernel_dims must match request dims."""
        lat = clinspace(45, 66, 30, name="lat")
        lon = clinspace(-80, 70, 40, name="lon")
        time = crange("2017-09-01", "2017-10-31", "1,D", name="time")
        kernel1d = [1, 2, 1]
        kernel2d = [[1, 2, 1]]
        kernel3d = [[[1, 2, 1]]]
        node1d = Convolution(source=Arange(), kernel=kernel1d, kernel_dims=["time"])
        node2d = Convolution(source=Arange(), kernel=kernel2d, kernel_dims=["lat", "lon"])
        node3d = Convolution(source=Arange(), kernel=kernel3d, kernel_dims=["lon", "lat", "time"])
        o = node1d.eval(Coordinates([time]))
        o = node2d.eval(Coordinates([lat, lon]))
        o = node3d.eval(Coordinates([lat, lon, time]))
        # requesting coords whose dims don't line up with kernel_dims must raise
        with pytest.raises(
            ValueError, match="Kernel dims must contain all of the dimensions in source but not all of "
        ):
            node2d.eval(Coordinates([lat, lon, time]))
        with pytest.raises(
            ValueError, match="Kernel dims must contain all of the dimensions in source but not all of "
        ):
            node2d.eval(Coordinates([lat, time]))

    def test_eval_multiple_outputs(self):
        """Convolving a multi-output source, with and without an 'output' kernel dim."""
        lat = clinspace(45, 66, 30, name="lat")
        lon = clinspace(-80, 70, 40, name="lon")
        kernel = [[1, 2, 1]]
        coords = Coordinates([lat, lon])
        multi = Array(source=np.random.random(coords.shape + (2,)), coordinates=coords, outputs=["a", "b"])
        node = Convolution(source=multi, kernel=kernel, kernel_dims=["lat", "lon"])
        o1 = node.eval(Coordinates([lat, lon]))
        # a kernel spanning the 'output' axis must give a different result than a 2d kernel
        kernel = [[[1, 2]]]
        coords = Coordinates([lat, lon])
        multi = Array(source=np.random.random(coords.shape + (2,)), coordinates=coords, outputs=["a", "b"])
        node1 = Convolution(source=multi, kernel=kernel, kernel_dims=["lat", "lon", "output"], force_eval=True)
        node2 = Convolution(source=multi, kernel=kernel[0], kernel_dims=["lat", "lon"], force_eval=True)
        o1 = node1.eval(Coordinates([lat, lon]))
        o2 = node2.eval(Coordinates([lat, lon]))
        assert np.any(o2.data != o1.data)

    def test_eval_nan(self):
        """Eval over a window containing a NaN source value must not raise."""
        lat = clinspace(45, 66, 30, name="lat")
        lon = clinspace(-80, 70, 40, name="lon")
        coords = Coordinates([lat, lon])
        data = np.ones(coords.shape)
        data[10, 10] = np.nan
        source = Array(source=data, coordinates=coords)
        node = Convolution(source=source, kernel=[[1, 2, 1]], kernel_dims=["lat", "lon"])
        o = node.eval(coords[8:12, 7:13])

    def test_eval_with_output_argument(self):
        """Results must be written into a caller-supplied output array."""
        lat = clinspace(45, 66, 30, name="lat")
        lon = clinspace(-80, 70, 40, name="lon")
        coords = Coordinates([lat, lon])
        node = Convolution(source=Arange(), kernel=[[1, 2, 1]], kernel_dims=["lat", "lon"])
        a = node.create_output_array(coords)
        o = node.eval(coords, output=a)
        assert_array_equal(a, o)

    def test_debuggable_source(self):
        """DEBUG mode must substitute a copy of the source and record coordinates."""
        with podpac.settings:
            podpac.settings["DEBUG"] = False
            lat = clinspace(45, 66, 30, name="lat")
            lon = clinspace(-80, 70, 40, name="lon")
            coords = Coordinates([lat, lon])
            # normal version
            a = Arange()
            node = Convolution(source=a, kernel=[[1, 2, 1]], kernel_dims=["lat", "lon"])
            node.eval(coords)
            assert node.source is a
            # debuggable
            podpac.settings["DEBUG"] = True
            a = Arange()
            node = Convolution(source=a, kernel=[[1, 2, 1]], kernel_dims=["lat", "lon"])
            node.eval(coords)
            # in DEBUG mode the node works on a copy, not the original source
            assert node.source is not a
            assert node._requested_coordinates == coords
            assert node.source._requested_coordinates is not None
            assert node.source._requested_coordinates != coords
            assert a._requested_coordinates is None

    def test_extra_kernel_dims(self):
        """A kernel dim absent from the requested coordinates must be tolerated."""
        lat = clinspace(45, 66, 8, name="lat")
        lon = clinspace(-80, 70, 16, name="lon")
        coords = Coordinates([lat, lon])
        node = Convolution(source=Arange(), kernel=[[[1, 2, 1]]], kernel_dims=["time", "lat", "lon"])
        o = node.eval(coords)

    def test_extra_coord_dims(self):
        """Requested dims not present in the source must be dropped from the output."""
        lat = clinspace(-0.25, 1.25, 7, name="lat")
        lon = clinspace(-0.125, 1.125, 11, name="lon")
        time = ["2012-05-19", "2016-01-31", "2018-06-20"]
        coords = Coordinates([lat, lon, time], dims=["lat", "lon", "time"])
        source = Array(source=np.random.random(coords.drop("time").shape), coordinates=coords.drop("time"))
        node = Convolution(source=source, kernel=[[-1, 2, -1]], kernel_dims=["lat", "lon"], force_eval=True)
        o = node.eval(coords)
        assert np.all([d in ["lat", "lon"] for d in o.dims])

    def test_coords_order(self):
        """Evaluating with transposed coordinate order must yield transposed data."""
        lat = clinspace(-0.25, 1.25, 7, name="lat")
        lon = clinspace(-0.125, 1.125, 11, name="lon")
        coords = Coordinates([lat, lon])
        lat = clinspace(0, 1, 5, name="lat")
        lon = clinspace(0, 1, 9, name="lon")
        coords1 = Coordinates([lat, lon])
        coords2 = Coordinates([lon, lat])
        source = Array(source=np.random.random(coords.shape), coordinates=coords)
        node = Convolution(source=source, kernel=[[-1, 2, -1]], kernel_dims=["lat", "lon"], force_eval=True)
        o1 = node.eval(coords1)
        o2 = node.eval(coords2)
        assert np.all(o2.data == o1.data.T)
| 6,917 | 9 | 265 |
c882c0ec4d2c769b25f63cd33c1cc871c2d7ebcd | 445 | py | Python | backend/pokemon/tasks.py | pantoja/PokeBattle | 2b5561aefd9117903ee4cdc516d424a6c6413307 | [
"MIT"
] | null | null | null | backend/pokemon/tasks.py | pantoja/PokeBattle | 2b5561aefd9117903ee4cdc516d424a6c6413307 | [
"MIT"
] | 11 | 2020-04-06T13:16:36.000Z | 2022-02-10T11:49:04.000Z | backend/pokemon/tasks.py | pantoja/PokeBattle | 2b5561aefd9117903ee4cdc516d424a6c6413307 | [
"MIT"
] | null | null | null | import logging
from pokebattle import celery_app
from pokemon.helpers import save_pokemon
from services.api import get_pokemon_list
logger = logging.getLogger(__name__)
@celery_app.task
| 23.421053 | 64 | 0.779775 | import logging
from pokebattle import celery_app
from pokemon.helpers import save_pokemon
from services.api import get_pokemon_list
logger = logging.getLogger(__name__)
@celery_app.task
def save_pokemon_from_pokeapi_weekly():
logger.info("Saving pokemon from pokeapi")
pokemon_list = get_pokemon_list()
pokemon_list = [pokemon["name"] for pokemon in pokemon_list]
for pokemon in pokemon_list:
save_pokemon(pokemon)
| 232 | 0 | 22 |
77805bfa5383cb54643a7a90a53231573f36f8f5 | 1,058 | py | Python | lib/severeweatherpowerdisruptionindex.py | vabarbosa/weather-api-python | 5bbfefbc5c6cbc1a18eb62c888d7fb36d8c96471 | [
"Apache-2.0"
] | 13 | 2018-08-03T15:12:49.000Z | 2021-11-18T03:55:49.000Z | lib/severeweatherpowerdisruptionindex.py | vabarbosa/weather-api-python | 5bbfefbc5c6cbc1a18eb62c888d7fb36d8c96471 | [
"Apache-2.0"
] | 5 | 2019-05-08T15:09:10.000Z | 2020-03-19T16:05:49.000Z | lib/severeweatherpowerdisruptionindex.py | vabarbosa/weather-api-python | 5bbfefbc5c6cbc1a18eb62c888d7fb36d8c96471 | [
"Apache-2.0"
] | 5 | 2019-05-06T13:30:29.000Z | 2022-01-02T14:54:29.000Z | # Severe Weather Power Disruption Index 15 Day
#
# - https://weather.com/swagger-docs/ui/sun/v2/SUNv2SevereWeatherPowerDisruptionIndex.json
#
# The Power Disruption index provides indices indicating the potential for power
# disruptions due to weather.
#
# Base URL: api.weather.com/v2
# Endpoint: /indices/powerDisruption/daypart/15day
__name__ = 'severeweatherpowerdisruptionindex'
from lib.apiutil import host, default_params
| 34.129032 | 114 | 0.747637 | # Severe Weather Power Disruption Index 15 Day
#
# - https://weather.com/swagger-docs/ui/sun/v2/SUNv2SevereWeatherPowerDisruptionIndex.json
#
# The Power Disruption index provides indices indicating the potential for power
# disruptions due to weather.
#
# Base URL: api.weather.com/v2
# Endpoint: /indices/powerDisruption/daypart/15day
__name__ = 'severeweatherpowerdisruptionindex'
from lib.apiutil import host, default_params
def request_options(lat, lon):
    """Return (url, params) for the 15-day power-disruption index at the given geocode."""
    params = default_params()
    params['geocode'] = '{lat},{lon}'.format(lat=lat, lon=lon)
    params['format'] = 'json'
    endpoint = '/v2/indices/powerDisruption/daypart/15day'
    return host + endpoint, params
def handle_response(res):
    """Print each power-disruption index with its category from an API response.

    Prints one line per 12-hour daypart, or a single "no info" line when the
    response is empty or the expected key is missing.

    :param res: decoded JSON response dict (or None/empty on failure)
    """
    # Use .get so a response that lacks the key falls through to the "no info"
    # branch instead of raising KeyError (the original `res and res[...]` guard
    # only protected against a falsy `res`, not a missing key).
    payload = res.get('powerDisruptionIndex12hour') if res else None
    if payload:
        categories = payload['powerDisruptionCategory']
        for i, disrupt_index in enumerate(payload['powerDisruptionIndex']):
            print('severe-weather-power-disruption-index: {}: {}'.format(disrupt_index, categories[i]))
    else:
        # "distruption" typo preserved: it is an emitted runtime string
        print('severe-weather-power-disruption-index: no power distruption info returned')
| 581 | 0 | 46 |
21940a14e35019adbc2c81132253d874f0bed51d | 2,762 | py | Python | flavio/physics/taudecays/tauvl.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 61 | 2016-03-09T16:19:39.000Z | 2022-03-30T00:55:51.000Z | flavio/physics/taudecays/tauvl.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 167 | 2016-03-15T15:25:57.000Z | 2022-02-27T22:19:22.000Z | flavio/physics/taudecays/tauvl.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 57 | 2016-03-15T14:24:23.000Z | 2022-01-14T01:00:03.000Z | r"""Functions for $\tau\to V\ell$."""
import flavio
from flavio.physics.taudecays import common
from math import sqrt, pi
import numpy as np
# names of LFV sectors in WCxf
wcxf_sector_names = {('tau', 'mu'): 'mutau',
('tau', 'e'): 'taue',
('mu', 'e'): 'mue', }
def br_tauvl(wc_obj, par, V, lep):
r"""Branching ratio of $\tau^+\to V^0\ell^+$."""
scale = flavio.config['renormalization scale']['taudecays']
sec = wcxf_sector_names['tau', lep]
wc = wc_obj.get_wc(sec, scale, par, nf_out=4)
alpha = flavio.physics.running.running.get_alpha_e(par, scale, nf_out=3)
e = sqrt(4 * pi * alpha)
mtau = par['m_tau']
ml = par['m_' + lep]
mV = par['m_' + V]
fV = par['f_' + V]
fTV = flavio.physics.running.running.get_f_perp(par, V, scale)
Cgamma_taul = wc['Cgamma_tau{}'.format(lep)]
Cgamma_ltau = wc['Cgamma_{}tau'.format(lep)]
if V == 'rho0':
g_u = get_wcs(wc, 'u', lep)
g_d = get_wcs(wc, 'd', lep)
g = (g_u-g_d)/sqrt(2)
KV = -1/sqrt(2)*e
if V == 'phi':
g = get_wcs(wc, 's', lep)
KV = 1/3*e
gL = mV*fV/2 * (g[0] + g[1])
gR = mV*fV/2 * (g[2] + g[3])
gTL = +fTV * g[4].conjugate() + 2*fV*KV/mV * Cgamma_ltau.conjugate()
gtTL = -fTV * g[4].conjugate()
gTR = +fTV * g[5] + 2*fV*KV/mV * Cgamma_taul
gtTR = +fTV * g[5]
return (par['tau_tau']
* common.GammaFvf(mtau, mV, ml, gL, gR, gTL, gtTL, gTR, gtTR) )
# function returning function needed for prediction instance
# Observable and Prediction instances
_had = {'rho0': r'\rho^0', 'phi': r'\phi'}
_shortname = {'rho0': 'rho', 'phi': 'phi'}
_lep = {'e': ' e', 'mu': r'\mu',}
for V in _had:
for l in _lep:
_obs_name = "BR(tau->" + _shortname[V] + l + r")"
_obs = flavio.classes.Observable(_obs_name)
_process_tex = r"\tau^+\to " + _had[V] + _lep[l] + r"^+"
_process_taxonomy = r'Process :: $\tau$ lepton decays :: LFV decays :: $\tau\to V\ell$ :: $' + _process_tex + r"$"
_obs.add_taxonomy(_process_taxonomy)
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
flavio.classes.Prediction(_obs_name, br_tauvl_fct(V, l))
| 35.410256 | 122 | 0.544895 | r"""Functions for $\tau\to V\ell$."""
import flavio
from flavio.physics.taudecays import common
from math import sqrt, pi
import numpy as np
# names of LFV sectors in WCxf
wcxf_sector_names = {('tau', 'mu'): 'mutau',
('tau', 'e'): 'taue',
('mu', 'e'): 'mue', }
def get_wcs(wc, q, lep):
    """Collect the four-fermion Wilson coefficients for quark `q` and lepton `lep`.

    Returns a numpy array in the fixed order (CVLL, CVLR, CVLR', CVRR, CTRR', CTRR)
    read from the WCxf-style coefficient dict `wc`.
    """
    qq = 2 * q  # quark-bilinear label, e.g. 'u' -> 'uu'
    keys = (
        'CVLL_tau{}{}'.format(lep, qq),
        'CVLR_tau{}{}'.format(lep, qq),
        'CVLR_{}tau{}'.format(qq, lep),
        'CVRR_tau{}{}'.format(lep, qq),
        'CTRR_{}tau{}'.format(lep, qq),
        'CTRR_tau{}{}'.format(lep, qq),
    )
    return np.array([wc[key] for key in keys])
def br_tauvl(wc_obj, par, V, lep):
    r"""Branching ratio of $\tau^+\to V^0\ell^+$.

    :param wc_obj: Wilson coefficient object providing the LFV sector
    :param par: parameter dict (masses, decay constants, lifetimes)
    :param V: vector meson name, 'rho0' or 'phi'
    :param lep: final-state lepton, 'e' or 'mu'
    """
    # Wilson coefficients of the relevant LFV sector at the tau-decay scale
    scale = flavio.config['renormalization scale']['taudecays']
    sec = wcxf_sector_names['tau', lep]
    wc = wc_obj.get_wc(sec, scale, par, nf_out=4)
    alpha = flavio.physics.running.running.get_alpha_e(par, scale, nf_out=3)
    e = sqrt(4 * pi * alpha)  # electromagnetic coupling from alpha_e
    mtau = par['m_tau']
    ml = par['m_' + lep]
    mV = par['m_' + V]
    fV = par['f_' + V]  # vector decay constant
    fTV = flavio.physics.running.running.get_f_perp(par, V, scale)  # tensor decay constant
    Cgamma_taul = wc['Cgamma_tau{}'.format(lep)]
    Cgamma_ltau = wc['Cgamma_{}tau'.format(lep)]
    if V == 'rho0':
        # rho0 is (u ubar - d dbar)/sqrt(2): combine up- and down-quark coefficients
        g_u = get_wcs(wc, 'u', lep)
        g_d = get_wcs(wc, 'd', lep)
        g = (g_u-g_d)/sqrt(2)
        KV = -1/sqrt(2)*e  # electromagnetic coupling factor of the rho0
    if V == 'phi':
        # phi is an s sbar state
        g = get_wcs(wc, 's', lep)
        KV = 1/3*e  # electromagnetic coupling factor of the phi
    # effective vector and tensor couplings entering the generic Gamma(F -> v f) formula
    gL = mV*fV/2 * (g[0] + g[1])
    gR = mV*fV/2 * (g[2] + g[3])
    gTL = +fTV * g[4].conjugate() + 2*fV*KV/mV * Cgamma_ltau.conjugate()
    gtTL = -fTV * g[4].conjugate()
    gTR = +fTV * g[5] + 2*fV*KV/mV * Cgamma_taul
    gtTR = +fTV * g[5]
    # branching ratio = tau lifetime times partial width
    return (par['tau_tau']
            * common.GammaFvf(mtau, mV, ml, gL, gR, gTL, gtTL, gTR, gtTR) )
# function returning function needed for prediction instance
def br_tauvl_fct(V, lep):
    """Return a Prediction-compatible function computing BR(tau -> V lep)."""
    def wrapped(wc_obj, par):
        return br_tauvl(wc_obj, par, V, lep)
    return wrapped
# Observable and Prediction instances
_had = {'rho0': r'\rho^0', 'phi': r'\phi'}
_shortname = {'rho0': 'rho', 'phi': 'phi'}
_lep = {'e': ' e', 'mu': r'\mu',}
for V in _had:
for l in _lep:
_obs_name = "BR(tau->" + _shortname[V] + l + r")"
_obs = flavio.classes.Observable(_obs_name)
_process_tex = r"\tau^+\to " + _had[V] + _lep[l] + r"^+"
_process_taxonomy = r'Process :: $\tau$ lepton decays :: LFV decays :: $\tau\to V\ell$ :: $' + _process_tex + r"$"
_obs.add_taxonomy(_process_taxonomy)
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
flavio.classes.Prediction(_obs_name, br_tauvl_fct(V, l))
| 432 | 0 | 45 |
c9742e9c2b31179e5a75f9ea160e99265b823adf | 2,453 | py | Python | app/views.py | cabusto/nhlchampionshipbelt | 962bf44030d7bd9827ceb5791763f95c7bcfb74a | [
"MIT"
] | null | null | null | app/views.py | cabusto/nhlchampionshipbelt | 962bf44030d7bd9827ceb5791763f95c7bcfb74a | [
"MIT"
] | 3 | 2021-11-08T08:23:29.000Z | 2022-03-31T08:29:52.000Z | app/views.py | cabusto/nhlchampionshipbelt | 962bf44030d7bd9827ceb5791763f95c7bcfb74a | [
"MIT"
] | 1 | 2015-11-27T16:14:28.000Z | 2015-11-27T16:14:28.000Z | from flask import render_template, redirect, url_for
from app import app
from Team import Team
from HReferenceParser import HReferenceParser
from Schedule import Schedule
from GameLog import GameLog
from Stats import Stats
from BeltGame import BeltGame
season = 2014
availableSeasons = {
2006 : Team('CAR', 'Carolina Hurricanes'),
2007 : Team('ANA', 'Anaheim Ducks'),
2008 : Team('DET', 'Detroit Red Wings'),
2009 : Team('PIT', 'Pittsburgh Penguins'),
2010 : Team('CHI', 'Chicago Blackhawks'),
2011 : Team('BOS', 'Boston Bruins'),
2012 : Team('LAK', 'Los Angeles Kings'),
2013 : Team('CHI', 'Chicago Blackhawks'),
2014 : Team('LAK', 'Los Angeles Kings')
}
@app.route('/<season>')
@app.route('/')
| 30.283951 | 73 | 0.734203 | from flask import render_template, redirect, url_for
from app import app
from Team import Team
from HReferenceParser import HReferenceParser
from Schedule import Schedule
from GameLog import GameLog
from Stats import Stats
from BeltGame import BeltGame
season = 2014
availableSeasons = {
2006 : Team('CAR', 'Carolina Hurricanes'),
2007 : Team('ANA', 'Anaheim Ducks'),
2008 : Team('DET', 'Detroit Red Wings'),
2009 : Team('PIT', 'Pittsburgh Penguins'),
2010 : Team('CHI', 'Chicago Blackhawks'),
2011 : Team('BOS', 'Boston Bruins'),
2012 : Team('LAK', 'Los Angeles Kings'),
2013 : Team('CHI', 'Chicago Blackhawks'),
2014 : Team('LAK', 'Los Angeles Kings')
}
@app.route('/<season>')
@app.route('/')
def index(season=2015):
    """Render the championship-belt page for one NHL season (Python 2 Flask view).

    Replays the season's schedule game by game, tracking who holds the belt,
    then renders the game log, stats, and upcoming championship games.
    """
    season = int(season)
    champ = season - 1  # the belt starts with the previous season's cup winner
    # render current season
    if (not (champ in availableSeasons)):
        # render season not available
        print 'no data for ' + str(season)
        return redirect(url_for('index'))
    #data = season
    parser = HReferenceParser('app/static/data/' + str(season) + '.csv')
    games = parser.getGames()
    schedule = Schedule(games)
    gameLog = GameLog()
    stats = Stats()
    beltHolder = availableSeasons[champ]
    defendingChamp = beltHolder
    beltGame = None
    # replay the season: the belt changes hands whenever the holder loses
    for g in schedule.games:
        beltGame = stats.analyzeGame(g, beltHolder)
        if beltGame:
            gameLog.addGame(beltGame)
            beltHolder = beltGame.getBeltHolderAfterGame()
    upcomingChampGame = schedule.getUpcomingChampionshipGame(beltHolder)
    upcomingChampGameIfHomeTeamWins = None
    upcomingChampGameIfAwayTeamWins = None
    if upcomingChampGame:
        # look one game ahead for each possible winner of the next belt game
        upcomingChampGameIfHomeTeamWins = schedule.getUpcomingChampionshipGame(
            upcomingChampGame.getHomeTeam(), upcomingChampGame.getAwayTeam())
        upcomingChampGameIfAwayTeamWins = schedule.getUpcomingChampionshipGame(
            upcomingChampGame.getAwayTeam(), upcomingChampGame.getHomeTeam())
    # NOTE(review): `data` is built but never used — dead code, candidate for removal
    data = {
        'id' : beltHolder.getID(),
        'name' : beltHolder.getName()
    }
    return render_template('index.html',
        games = gameLog.getGames(),
        availableSeasons = availableSeasons,
        defendingChamp = defendingChamp,
        beltHolder = beltHolder,
        isOngoingSeason = season,
        stats = stats,
        gameLog = gameLog,
        upcomingChampGame = upcomingChampGame,
        upcomingChampGameIfHomeTeamWins = upcomingChampGameIfHomeTeamWins,
        upcomingChampGameIfAwayTeamWins = upcomingChampGameIfAwayTeamWins,
        sortedStats = stats.getSortedStats(),
        currentSeason = season,
    )
| 1,677 | 0 | 22 |
dcfd4696029ace4c9c27e898856d14d989a0d425 | 3,483 | py | Python | py/g2q1.py | kamalneet/airline-data-analysis | 5ca45a41c1ca24642ed163ddbebb0d4849e5d97e | [
"Apache-2.0"
] | null | null | null | py/g2q1.py | kamalneet/airline-data-analysis | 5ca45a41c1ca24642ed163ddbebb0d4849e5d97e | [
"Apache-2.0"
] | null | null | null | py/g2q1.py | kamalneet/airline-data-analysis | 5ca45a41c1ca24642ed163ddbebb0d4849e5d97e | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from cassandra.cluster import Cluster
import os
import sys
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
csvFields = ["Year", "Month", "DayofMonth", "DayOfWeek", "UniqueCarrier", "Origin", "Dest", "CRSDepTime", "DepDelay", "ArrDelay", "Cancelled", "Diverted"]
rparam="UniqueCarrier"
# state: (rparam -> (num_flights, total_delay))
cassandraSession = None
prepared_stmt = None
c_table_name = "airport_best_airlines"
c_field_name = "airline"
if __name__ == "__main__":
ssc = StreamingContext.getOrCreate("/home/centos/spark-checkpoint",
lambda: createContext())
ssc.start()
ssc.awaitTermination()
print("await done")
| 33.171429 | 161 | 0.682458 | from __future__ import print_function
from cassandra.cluster import Cluster
import os
import sys
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
csvFields = ["Year", "Month", "DayofMonth", "DayOfWeek", "UniqueCarrier", "Origin", "Dest", "CRSDepTime", "DepDelay", "ArrDelay", "Cancelled", "Diverted"]
def getFieldIndex(field):
i = 0
for f in csvFields:
if f == field:
return i
i+=1
return -1
rparam="UniqueCarrier"
def aptRparamMapper(csv):
global rparam
# print(csv)
toks=csv[1].split(",")
if toks[0] == "Year":
return []
apt_idx = getFieldIndex("Origin")
rparam_idx = getFieldIndex(rparam)
delay_idx = getFieldIndex("DepDelay")
delay = toks[delay_idx]
return (toks[apt_idx].encode('utf-8'), (toks[rparam_idx].encode('utf-8'), delay))
# state: (rparam -> (num_flights, total_delay))
def reducer(newvals, old_state):
# print("values: " + str(v1) + " " + str(v2))
state = old_state or {}
for val in newvals:
rparam = val[0]
delay = val[1]
dict_val = (1, delay)
if rparam in state:
old_dict_val = state[rparam]
dict_val = (old_dict_val[0]+1, old_dict_val[1]+delay)
state[rparam] = dict_val
return state
cassandraSession = None
prepared_stmt = None
c_table_name = "airport_best_airlines"
c_field_name = "airline"
def getCassandraSession():
global cassandraSession
global prepared_stmt
global c_table_name
global c_field_name
if cassandraSession is None:
cluster = Cluster(contact_points=['172.31.27.46'])
cassandraSession = cluster.connect('spk')
prepared_stmt = cassandraSession.prepare("INSERT INTO " + c_table_name + " (apt, rank, " + c_field_name + ", avg_delay, num_flights) VALUES (?, ?, ?, ?, ?)")
return cassandraSession
def sendPartition(iter):
global prepared_stmt
# ConnectionPool is a static, lazily initialized pool of connections
session = getCassandraSession()
for kv in iter:
apt = kv[0]
dct = kv[1]
# copy dict to a list so that it can be sorted by average
lst = []
for airline in dct:
(num_flights, total_delay) = dct[airline]
lst.append((airline, float(total_delay)/num_flights, num_flights))
lst.sort(key=lambda x: x[1])
n = min(10, len(lst))
for i in xrange(n):
bound_stmt = prepared_stmt.bind([apt, i+1, lst[i][0], lst[i][1], lst[i][2]]);
stmt = session.execute(bound_stmt)
def createContext():
# If you do not see this printed, that means the StreamingContext has been loaded
# from the new checkpoint
print("Creating new context")
sc = SparkContext(appName="AirlineDataAnalysis")
ssc = StreamingContext(sc, 1)
csvStream = KafkaUtils.createDirectStream(ssc, ["airline"], {"metadata.broker.list": "172.31.81.70:9092", "auto.offset.reset": "smallest"})
# "_" is added by cleanup script for records where it is not available
result = csvStream.map(aptRparamMapper).filter(lambda kv: kv[1][1] != "_").map(lambda kv: (kv[0], (kv[1][0], int(kv[1][1])))).updateStateByKey(reducer)
result.pprint()
result.foreachRDD(lambda rdd: rdd.foreachPartition(sendPartition))
# result.saveAsTextFiles("airline_delays")
return ssc
if __name__ == "__main__":
ssc = StreamingContext.getOrCreate("/home/centos/spark-checkpoint",
lambda: createContext())
ssc.start()
ssc.awaitTermination()
print("await done")
| 2,537 | 0 | 137 |
e82ab1b23d5bc23eba5640843ec2687c850e8ef2 | 3,600 | py | Python | synapse_antispam/mjolnir/ban_list.py | 0x0000000000000000000/mjolnir | 65af82d46f65e2c57b72095e4958341ded84b964 | [
"Apache-2.0"
] | null | null | null | synapse_antispam/mjolnir/ban_list.py | 0x0000000000000000000/mjolnir | 65af82d46f65e2c57b72095e4958341ded84b964 | [
"Apache-2.0"
] | null | null | null | synapse_antispam/mjolnir/ban_list.py | 0x0000000000000000000/mjolnir | 65af82d46f65e2c57b72095e4958341ded84b964 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import (
ListRule,
ALL_RULE_TYPES,
USER_RULE_TYPES,
SERVER_RULE_TYPES,
ROOM_RULE_TYPES,
)
from twisted.internet import defer
from synapse.module_api import run_as_background_process
logger = logging.getLogger("synapse.contrib." + __name__)
| 37.894737 | 88 | 0.577778 | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import (
ListRule,
ALL_RULE_TYPES,
USER_RULE_TYPES,
SERVER_RULE_TYPES,
ROOM_RULE_TYPES,
)
from twisted.internet import defer
from synapse.module_api import run_as_background_process
logger = logging.getLogger("synapse.contrib." + __name__)
class BanList(object):
    """In-memory view of a Matrix moderation policy ("ban list") room.

    Keeps per-kind lists of ListRule objects (users, rooms, servers) built
    from the room's policy state events. Lists are rebuilt asynchronously in
    a Synapse background process, so readers may briefly see stale rules.
    """
    def __init__(self, api, room_id):
        # api: Synapse ModuleApi used to read room state events.
        self.api = api
        self.room_id = room_id
        self.server_rules = []
        self.user_rules = []
        self.room_rules = []
        self.build()
    def build(self, with_event=None):
        """Rebuild all rule lists from the room state, in the background.

        :param with_event: optional state event that has just arrived and may
            not yet be reflected in stored room state; it is appended to the
            fetched state and shadows any older event sharing its
            (type, state_key) pair.
        """
        @defer.inlineCallbacks
        def run(with_event):
            events = yield self.get_relevant_state_events()
            if with_event is not None:
                events = [*events, with_event]
            # Reset all lists before rebuilding from scratch.
            self.server_rules = []
            self.user_rules = []
            self.room_rules = []
            for event in events:
                event_type = event.get("type", "")
                state_key = event.get("state_key", "")
                content = event.get("content", {})
                if state_key is None:
                    continue  # Some message event got in here?
                # Skip over events which are replaced by with_event. We do this
                # to ensure that when we rebuild the list we're using updated rules.
                if with_event is not None:
                    w_event_type = with_event.get("type", "")
                    w_state_key = with_event.get("state_key", "")
                    w_event_id = with_event.event_id
                    event_id = event.event_id
                    if (
                        w_event_type == event_type
                        and w_state_key == state_key
                        and w_event_id != event_id
                    ):
                        continue
                entity = content.get("entity", None)
                recommendation = content.get("recommendation", None)
                reason = content.get("reason", None)
                if entity is None or recommendation is None or reason is None:
                    continue  # invalid event
                logger.info(
                    "Adding rule %s/%s with action %s"
                    % (event_type, entity, recommendation)
                )
                rule = ListRule(
                    entity=entity, action=recommendation, reason=reason, kind=event_type
                )
                # Sort the rule into the list matching its event type.
                if event_type in USER_RULE_TYPES:
                    self.user_rules.append(rule)
                elif event_type in ROOM_RULE_TYPES:
                    self.room_rules.append(rule)
                elif event_type in SERVER_RULE_TYPES:
                    self.server_rules.append(rule)
        run_as_background_process("mjolnir_build_ban_list", run, with_event)
    def get_relevant_state_events(self):
        """Fetch all policy-rule state events of the watched room (deferred)."""
        return self.api.get_state_events_in_room(
            self.room_id, [(t, None) for t in ALL_RULE_TYPES]
        )
e081e0d1017c1dff52ad67967e2e508a6a10842a | 390 | py | Python | box/test/magnet.py | annabadsi/escapehome | f1f1a1699954ea541edc19993b8039ebc4fb77de | [
"MIT"
] | null | null | null | box/test/magnet.py | annabadsi/escapehome | f1f1a1699954ea541edc19993b8039ebc4fb77de | [
"MIT"
] | 3 | 2020-06-05T22:32:08.000Z | 2022-02-11T03:42:49.000Z | box/test/magnet.py | annabadsi/escapehome | f1f1a1699954ea541edc19993b8039ebc4fb77de | [
"MIT"
# Poll a magnetic door sensor on a GPIO pin and report open/close transitions.
# Fixed: the original mixed Python 2 `print` statements (a SyntaxError under
# Python 3) with a Python 3 style print() call; all prints are now functions.
import RPi.GPIO as GPIO
import time

pin = 15  # physical pin number (GPIO.BOARD numbering)

GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

try:
    state = GPIO.input(pin)
    while True:
        # Only report when the reading changes (edge detection by polling).
        if GPIO.input(pin) != state:
            state = GPIO.input(pin)
            if state == 1:
                print("it's open!")
            else:
                print("closed")
        time.sleep(1)
except KeyboardInterrupt:
    print(" Terminating..")
finally:
    GPIO.cleanup()  # always release GPIO resources on exit
# Poll a magnetic door sensor on a GPIO pin and report open/close transitions.
# Fixed: the original mixed Python 2 `print` statements (a SyntaxError under
# Python 3) with a Python 3 style print() call; all prints are now functions.
import RPi.GPIO as GPIO
import time

pin = 15  # physical pin number (GPIO.BOARD numbering)

GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

try:
    state = GPIO.input(pin)
    while True:
        # Only report when the reading changes (edge detection by polling).
        if GPIO.input(pin) != state:
            state = GPIO.input(pin)
            if state == 1:
                print("it's open!")
            else:
                print("closed")
        time.sleep(1)
except KeyboardInterrupt:
    print(" Terminating..")
finally:
    GPIO.cleanup()  # always release GPIO resources on exit
| 0 | 0 | 0 |
1a3d941a3e884ebba6ca929b178ee92e17f4000e | 8,280 | py | Python | src/register.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 4 | 2021-09-23T22:38:48.000Z | 2022-01-19T12:03:02.000Z | src/register.py | gregbugaj/marie-icr | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 17 | 2021-12-22T16:37:21.000Z | 2022-03-16T16:07:34.000Z | src/register.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | null | null | null | import argparse
import threading
import time
# import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Tuple, Union
import consul
import yaml
from consul.base import Check
from logger import create_info_logger
from utils.network import find_open_port, get_ip_address
logger = create_info_logger("registry", "registry.log")
config = None
current_service_id = None
def verify_connection(cfg: EndpointConfig) -> bool:
    """Check that a Consul agent is reachable at the configured endpoint.

    :param cfg: endpoint configuration exposing ``Host`` and ``Port``
    :return: True if the agent answered; False on any failure (a failed
        probe is treated as "offline" and logged at debug level)
    :raises Exception: if ``cfg`` is None
    """
    if cfg is None:
        raise Exception("Configuration is required")
    port = cfg.Port
    host = cfg.Host
    logger.debug('Verifying Consul connection to %s:%s', host, port)
    try:
        client = consul.Consul(host=host, port=port)
        client.agent.self()  # cheap round-trip to the local agent
        return True
    except Exception as err:
        # Best-effort probe: log the reason instead of swallowing it silently.
        logger.debug('Consul connection to %s:%s failed: %s', host, port, err)
    return False
def createClient(cfg: EndpointConfig, verify: bool = True) -> Tuple[consul.Consul, bool]:
    """Create a Consul client for the configured endpoint.

    :param cfg: endpoint configuration exposing ``Host`` and ``Port``
    :param verify: when True, probe the agent to determine online status
    :return: ``(client, online)``; ``(None, False)`` if creation failed
    :raises Exception: if ``cfg`` is None
    """
    if cfg is None:
        raise Exception("Configuration is required but got None")
    try:
        port = cfg.Port
        host = cfg.Host
        logger.info('Consul Host: %s Port: %s ', host, port)
        client = consul.Consul(host=host, port=port)
        online = verify_connection(cfg) if verify else False
        logger.debug('Consul online : %s', online)
        return client, online
    except Exception as err:
        # Treat any failure as "no client", but log the reason for diagnosis
        # instead of silently swallowing the exception.
        logger.debug('Failed to create Consul client: %s', err)
    return None, False
def register(service_host, service_port, service_id=None) -> Union[None, str]:
    """
    Register this service instance in Consul with an HTTP health check.

    Uses the module-level ``config`` endpoint and tags the service for
    Traefik's Consul catalog so it is routed as an ingress backend.

    :param service_host: address the service listens on
    :param service_port: port the service listens on
    :param service_id: explicit Consul service id; when None, one is derived
        from the service name, local IP and port
    :return: the registered service id, or None when Consul is offline
    """
    logger.info('Registering ServiceHost: %s Port: %s ',
                service_host, service_port)
    c, online = createClient(config, True)
    if not online:
        logger.debug('Consul service is offline')
        return None
    service_name = 'traefik-system-ingress'
    # Health-check endpoint served by the registered service itself.
    service_url = f'http://{service_host}:{service_port}/api'
    # TODO : Service ID generation needs to be configurable
    # Create new service id, otherwise we will re-register same id
    if service_id is None:
        # service_id = f'{service_name}@{service_port}#{uuid.uuid4()}'
        host = get_ip_address()
        service_id = f'{service_name}@{host}:{service_port}'
        # service_id = f'{service_name}@{service_port}'
    logger.info('Service url: %s', service_url)
    logger.info('Service id: %s', service_id)
    # TODO: De-registration needs to be configurable
    c.agent.service.register(
        name=service_name,
        service_id=service_id,
        port=service_port,
        address=service_host,
        # check=Check.http(service_url, '10s', deregister='10m'),
        check=Check.http(service_url, '10s'),
        tags=[
            "traefik.enable=true",
            "traefik.consulcatalog.connect=false",
            "traefik.http.routers.traefik-system-ingress.entrypoints=marie",
            "traefik.http.routers.traefik-system-ingress.service=traefik-system-ingress",
            "traefik.http.routers.traefik-system-ingress.rule=HostRegexp(`{host:.+}`)",
            "traefik.http.services.traefik-system-ingress.loadbalancer.server.scheme=http",
        ])
    return service_id
if __name__ == "__main__":
    # Entry point: read YAML config, register this service in Consul, start
    # the watchdog thread and optionally a blocking debug web server.
    parser = argparse.ArgumentParser()
    # parser.add_argument('--debug-server', type=bool, default=False, required=False, help='Should we start debug webserver')
    # parser.add_argument('--port', type=int, default=-1, help='Port number to export (-1 dynamic)')
    # parser.add_argument('--ip', type=str, default='127.0.0.1', help='Service IP to expose, blank for dynamic')
    # parser.add_argument('--watchdog-interval', type=int, default=60, help='watchdog interval checkin seconds')
    parser.add_argument('--config', type=str, default='./config/marie-debug.yml', help='Configuration file')
    opt = parser.parse_args()
    # Load config
    with open(opt.config, "r") as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        logger.info(f"Config read successfully : {opt.config}")
    print(data)
    enabled = bool(data['RegistryEnabled'])
    if not enabled:
        logger.info("registry not enabled, exiting...")
        exit()
    config = EndpointConfig()
    config.Host = data['ConsulEndpoint']['Host']
    config.Port = int(data['ConsulEndpoint']['Port'])
    config.Scheme = data['ConsulEndpoint']['Scheme']
    hostName = data['ServiceEndpoint']['Host']
    serverPort = int(data['ServiceEndpoint']['Port'])
    watchdog_interval = int(data['WatchdogInterval'])
    debug_server = bool(data['DebugWebserver'])
    # Fall back to auto-detected IP / free port when not configured.
    if hostName is None or hostName == '':
        hostName = get_ip_address()
    if serverPort == -1:
        serverPort = find_open_port()
    current_service_id = register(
        service_host=hostName, service_port=serverPort, service_id=None)
    logger.info('Registered service: %s', current_service_id)
    # NOTE(review): `_target` is not defined in this copy of the script (it
    # looks truncated); the full copy defines it as a start_watchdog wrapper.
    # Also, Thread(...).start() returns None, so watchdog_task is always
    # None — presumably the Thread handle was meant to be kept; verify.
    watchdog_task = threading.Thread(
        target=_target, daemon=debug_server).start()
    if debug_server:
        start_webserver(hostName, serverPort)
| 31.363636 | 125 | 0.650121 | import argparse
import threading
import time
# import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Tuple, Union
import consul
import yaml
from consul.base import Check
from logger import create_info_logger
from utils.network import find_open_port, get_ip_address
logger = create_info_logger("registry", "registry.log")
config = None
current_service_id = None
class EndpointConfig:
    """Location of a Consul endpoint (scheme, host, port)."""
    Port: int
    Host: str
    Scheme: str
    def __str__(self):
        # Render as a URL, e.g. "http://localhost:8500".
        return f"{self.Scheme}://{self.Host}:{self.Port}"
class RepeatedTimer(object):
    """Invoke ``function(*args, **kwargs)`` every ``interval`` seconds.

    A fresh ``threading.Timer`` is armed for each tick, and the delay is
    computed against the ideal schedule (``next_call``) so the period does
    not drift as callback execution time accumulates. Ticking starts
    immediately on construction; call :meth:`stop` to cancel.
    """
    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        # Ideal time of the next tick; advanced by `interval` on each start().
        self.next_call = time.time()
        self.start()
    def _run(self):
        # Timer fired: mark idle, arm the next tick first, then run the callback.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)
    def start(self):
        """Arm the next tick; no-op if one is already pending."""
        if not self.is_running:
            self.next_call += self.interval
            # Delay relative to the ideal schedule keeps the cadence drift-free.
            self._timer = threading.Timer(self.next_call - time.time(), self._run)
            self._timer.start()
            self.is_running = True
    def stop(self):
        """Cancel the pending tick (does not interrupt a running callback)."""
        self._timer.cancel()
        self.is_running = False
class DebugWebServer(BaseHTTPRequestHandler):
    """Minimal HTTP handler that serves a static status page for debugging."""
    def do_GET(self):
        # Answer every GET with 200 and a small HTML page echoing the path.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        fragments = (
            "<html><head><title>Registry info</title></head>",
            "<p>Request: %s</p>" % self.path,
            "<body>",
            "<p>Service status.</p>",
            "</body></html>",
        )
        for fragment in fragments:
            self.wfile.write(bytes(fragment, "utf-8"))
def start_webserver(hostName, serverPort):
    """Serve the debug status page on (hostName, serverPort) until Ctrl-C.

    Blocks the calling thread; the socket is closed before returning.
    """
    server = HTTPServer((hostName, serverPort), DebugWebServer)
    logger.info("Server started http://%s:%s" % (hostName, serverPort))
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Interrupt stops serving; fall through to the shutdown path.
        pass
    server.server_close()
    logger.info("Server stopped.")
def verify_connection(cfg: EndpointConfig) -> bool:
    """Check that a Consul agent is reachable at the configured endpoint.

    :param cfg: endpoint configuration exposing ``Host`` and ``Port``
    :return: True if the agent answered; False on any failure (a failed
        probe is treated as "offline" and logged at debug level)
    :raises Exception: if ``cfg`` is None
    """
    if cfg is None:
        raise Exception("Configuration is required")
    port = cfg.Port
    host = cfg.Host
    logger.debug('Verifying Consul connection to %s:%s', host, port)
    try:
        client = consul.Consul(host=host, port=port)
        client.agent.self()  # cheap round-trip to the local agent
        return True
    except Exception as err:
        # Best-effort probe: log the reason instead of swallowing it silently.
        logger.debug('Consul connection to %s:%s failed: %s', host, port, err)
    return False
def createClient(cfg: EndpointConfig, verify: bool = True) -> Tuple[consul.Consul, bool]:
    """Create a Consul client for the configured endpoint.

    :param cfg: endpoint configuration exposing ``Host`` and ``Port``
    :param verify: when True, probe the agent to determine online status
    :return: ``(client, online)``; ``(None, False)`` if creation failed
    :raises Exception: if ``cfg`` is None
    """
    if cfg is None:
        raise Exception("Configuration is required but got None")
    try:
        port = cfg.Port
        host = cfg.Host
        logger.info('Consul Host: %s Port: %s ', host, port)
        client = consul.Consul(host=host, port=port)
        online = verify_connection(cfg) if verify else False
        logger.debug('Consul online : %s', online)
        return client, online
    except Exception as err:
        # Treat any failure as "no client", but log the reason for diagnosis
        # instead of silently swallowing the exception.
        logger.debug('Failed to create Consul client: %s', err)
    return None, False
def driver_version():
    """Return the version string of the installed python-consul client."""
    return consul.__version__
def getServiceByNameAndId(service_name, service_id):
    """Look up a service node by name and exact service id.

    Returns the matching node dict from Consul's health endpoint, or None
    when Consul is offline or no node with that id is registered.
    """
    client, online = createClient(config, True)
    if not online:
        return None
    _index, nodes = client.health.service(service_name)
    return next(
        (node for node in nodes if node['Service']['ID'] == service_id),
        None,
    )
def register(service_host, service_port, service_id=None) -> Union[None, str]:
    """
    Register this service instance in Consul with an HTTP health check.

    Uses the module-level ``config`` endpoint and tags the service for
    Traefik's Consul catalog so it is routed as an ingress backend.

    :param service_host: address the service listens on
    :param service_port: port the service listens on
    :param service_id: explicit Consul service id; when None, one is derived
        from the service name, local IP and port
    :return: the registered service id, or None when Consul is offline
    """
    logger.info('Registering ServiceHost: %s Port: %s ',
                service_host, service_port)
    c, online = createClient(config, True)
    if not online:
        logger.debug('Consul service is offline')
        return None
    service_name = 'traefik-system-ingress'
    # Health-check endpoint served by the registered service itself.
    service_url = f'http://{service_host}:{service_port}/api'
    # TODO : Service ID generation needs to be configurable
    # Create new service id, otherwise we will re-register same id
    if service_id is None:
        # service_id = f'{service_name}@{service_port}#{uuid.uuid4()}'
        host = get_ip_address()
        service_id = f'{service_name}@{host}:{service_port}'
        # service_id = f'{service_name}@{service_port}'
    logger.info('Service url: %s', service_url)
    logger.info('Service id: %s', service_id)
    # TODO: De-registration needs to be configurable
    c.agent.service.register(
        name=service_name,
        service_id=service_id,
        port=service_port,
        address=service_host,
        # check=Check.http(service_url, '10s', deregister='10m'),
        check=Check.http(service_url, '10s'),
        tags=[
            "traefik.enable=true",
            "traefik.consulcatalog.connect=false",
            "traefik.http.routers.traefik-system-ingress.entrypoints=marie",
            "traefik.http.routers.traefik-system-ingress.service=traefik-system-ingress",
            "traefik.http.routers.traefik-system-ingress.rule=HostRegexp(`{host:.+}`)",
            "traefik.http.services.traefik-system-ingress.loadbalancer.server.scheme=http",
        ])
    return service_id
def start_watchdog(interval, service_host, service_port):
    """Periodically ensure this service stays registered in Consul.

    Every ``interval`` seconds, checks that Consul is reachable and that the
    current service id is still registered; if the registration disappeared,
    the service is re-registered and the tracked id updated.
    """
    sid = current_service_id
    def _register(_service_host, _service_port):
        # Timer callback; `sid` is updated via nonlocal on re-registration.
        nonlocal sid
        logger.info("watchdog:Host, Port, ServiceId : %s, %s, %s", _service_host, _service_port, sid)
        online = verify_connection(config)
        logger.info('watchdog:consul online : %s', online)
        service_name = 'traefik-system-ingress'
        if online:
            node = getServiceByNameAndId(service_name, sid)
            if node is None:
                sid = register(service_host=_service_host, service_port=_service_port, service_id=sid)
                logger.info('watchdog:Re-registered service: %s', sid)
    logger.info("watchdog:starting with interval : %s", interval)
    # NOTE(review): the timer handle is neither returned nor stored, so
    # callers have no way to stop the watchdog — confirm this is intended.
    rt = RepeatedTimer(interval, _register, service_host, service_port)
if __name__ == "__main__":
    # Entry point: read YAML config, register this service in Consul, start
    # the re-registration watchdog and optionally a blocking debug web server.
    parser = argparse.ArgumentParser()
    # parser.add_argument('--debug-server', type=bool, default=False, required=False, help='Should we start debug webserver')
    # parser.add_argument('--port', type=int, default=-1, help='Port number to export (-1 dynamic)')
    # parser.add_argument('--ip', type=str, default='127.0.0.1', help='Service IP to expose, blank for dynamic')
    # parser.add_argument('--watchdog-interval', type=int, default=60, help='watchdog interval checkin seconds')
    parser.add_argument('--config', type=str, default='./config/marie-debug.yml', help='Configuration file')
    opt = parser.parse_args()
    # Load config
    with open(opt.config, "r") as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        logger.info(f"Config read successfully : {opt.config}")
    print(data)
    enabled = bool(data['RegistryEnabled'])
    if not enabled:
        logger.info("registry not enabled, exiting...")
        exit()
    config = EndpointConfig()
    config.Host = data['ConsulEndpoint']['Host']
    config.Port = int(data['ConsulEndpoint']['Port'])
    config.Scheme = data['ConsulEndpoint']['Scheme']
    hostName = data['ServiceEndpoint']['Host']
    serverPort = int(data['ServiceEndpoint']['Port'])
    watchdog_interval = int(data['WatchdogInterval'])
    debug_server = bool(data['DebugWebserver'])
    # Fall back to auto-detected IP / free port when not configured.
    if hostName is None or hostName == '':
        hostName = get_ip_address()
    if serverPort == -1:
        serverPort = find_open_port()
    current_service_id = register(
        service_host=hostName, service_port=serverPort, service_id=None)
    logger.info('Registered service: %s', current_service_id)
    def _target():
        # Wrapper so the watchdog thread captures the resolved host/port.
        return start_watchdog(watchdog_interval,
                              service_host=hostName, service_port=serverPort)
    # NOTE(review): Thread(...).start() returns None, so watchdog_task is
    # always None — presumably the Thread handle was meant to be kept; verify.
    watchdog_task = threading.Thread(
        target=_target, daemon=debug_server).start()
    if debug_server:
        start_webserver(hostName, serverPort)
| 2,710 | 102 | 321 |
1d38838b75275558a1ff0878c5610b497f4943fe | 562 | py | Python | sandbox/example_app/views.py | kneirinck/django-pint | 8aa55dbccef0b5c43895019449a547002bf43b15 | [
"MIT"
] | 1 | 2018-09-19T12:10:08.000Z | 2018-09-19T12:10:08.000Z | sandbox/example_app/views.py | kneirinck/django-pint | 8aa55dbccef0b5c43895019449a547002bf43b15 | [
"MIT"
] | 1 | 2020-11-08T02:33:49.000Z | 2020-11-08T02:33:49.000Z | sandbox/example_app/views.py | kneirinck/django-pint | 8aa55dbccef0b5c43895019449a547002bf43b15 | [
"MIT"
] | 1 | 2020-11-05T12:46:30.000Z | 2020-11-05T12:46:30.000Z | from django.shortcuts import render
from django.views.generic import FormView, TemplateView
from .forms import TestForm
from django.contrib import messages
from quantityfield import ureg
# Create your views here.
| 21.615385 | 56 | 0.756228 | from django.shortcuts import render
from django.views.generic import FormView, TemplateView
from .forms import TestForm
from django.contrib import messages
from quantityfield import ureg
# Create your views here.
class QuantityFormView(TemplateView):
    """Render the quantity form; on valid input, expose the amount in grams."""
    form_class = TestForm
    template_name = 'test_form.html'
    def get_context_data(self):
        context = super(QuantityFormView, self).get_context_data()
        bound_form = TestForm(self.request.GET or None)
        if bound_form.is_valid():
            # Normalise whatever unit was submitted to grams for display.
            context['value'] = bound_form.cleaned_data['amount'].to('gram')
        context['form'] = bound_form
        return context
accf8c5737ee95d1cc9d5452ef97751bfabe8043 | 3,003 | py | Python | engineauth/strategies/linkedin.py | alecdotico/engineauth | def523f6c0d48f346e552b6638e6f3a6a1717733 | [
"Apache-2.0"
] | 1 | 2015-12-14T10:37:52.000Z | 2015-12-14T10:37:52.000Z | engineauth/strategies/linkedin.py | alecdotico/engineauth | def523f6c0d48f346e552b6638e6f3a6a1717733 | [
"Apache-2.0"
] | null | null | null | engineauth/strategies/linkedin.py | alecdotico/engineauth | def523f6c0d48f346e552b6638e6f3a6a1717733 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import json
from engineauth.models import User
from engineauth.strategies.oauth import OAuthStrategy
| 38.012658 | 96 | 0.457209 | from __future__ import absolute_import
import json
from engineauth.models import User
from engineauth.strategies.oauth import OAuthStrategy
class LinkedInStrategy(OAuthStrategy):
    """OAuth1 sign-in strategy for LinkedIn."""
    @property
    def options(self):
        """OAuth endpoint URIs identifying the LinkedIn provider."""
        return {
            'provider': 'linkedin',
            'request_token_uri': 'https://api.linkedin.com/uas/oauth/requestToken',
            'access_token_uri': 'https://api.linkedin.com/uas/oauth/accessToken',
            'authorize_uri': 'https://www.linkedin.com/uas/oauth/authenticate',
        }
    def fields(self):
        """Profile fields requested from the LinkedIn people API."""
        return ["id", "first-name", "last-name", "headline", "industry",
                "picture-url", "public-profile-url"]
    def service(self, **kwargs):
        # No provider-specific service object is needed for LinkedIn.
        pass
    def user_info(self, req):
        """Fetch the authenticated user's profile and normalise it.

        Returns a dict with ``auth_id``, a portable-contacts style ``info``
        section, and the raw provider payload under ``extra``. On a non-200
        response, delegates to ``self.raise_error``.
        """
        url = "http://api.linkedin.com/v1/people/~:({0})?format=json".format(
            ','.join(self.fields()))
        res, results = self.http(req).request(url)
        # Fixed: the original used `res.status is not 200`, an identity test
        # that only works by accident via CPython's small-int cache; HTTP
        # status codes must be compared by value.
        if res.status != 200:
            return self.raise_error('There was an error contacting LinkedIn. Please try again.')
        user = json.loads(results)
        auth_id = User.generate_auth_id(req.provider, user['id'])
        return {
            'auth_id': auth_id,
            'info': {
                'id': user.get('id'),  # Unique ID to the service provider
                'displayName': "{0} {1}".format(user.get('firstName'), user.get('lastName')),
                'name': {
                    'formatted': "{0} {1}".format(user.get('firstName'), user.get('lastName')),
                    'familyName': user.get('lastName'),
                    'givenName': user.get('firstName'),
                    # 'middleName': user.get('middle_name'),
                    'honorificPrefix': None,
                    'honorificSuffix': None,
                },
                'urls': [
                    {
                        'type': 'linkedin#profile',
                        'value': user.get('publicProfileUrl'),
                        'primary': True,
                    },
                ],
                'industry': user.get('industry'),
                # 'utcOffset': user.get('utc_offset'),
                # 'locale': user.get('lang'),
                # 'verified': user.get('verified'),
                # 'nickname': user.get('screen_name'),
                # 'location': user.get('location'), # user_location
                'aboutMe': user.get('headline'),
                # 'photos': [
                #     {
                #         'value': user.get('profile_image_url'),
                #         'type': 'full'
                #     },
                #     {
                #         'value': user.get('profile_image_url_https'),
                #         'type': 'https'
                #     },
                # ],
                'image': {
                    'url': user.get('pictureUrl'),
                },
            },
            'extra': {
                'raw_info': user,
            },
        }
| 2,699 | 139 | 23 |
b47ff20721a54b52a7d8f6bade24127ca923aa49 | 2,897 | py | Python | designate/tests/unit/sink/test_notifications.py | mrlesmithjr/designate | bff3d5f6e31fe595a77143ec4ac779c187bf72a8 | [
"Apache-2.0"
] | 145 | 2015-01-02T09:35:53.000Z | 2021-12-14T17:03:53.000Z | designate/tests/unit/sink/test_notifications.py | mrlesmithjr/designate | bff3d5f6e31fe595a77143ec4ac779c187bf72a8 | [
"Apache-2.0"
] | 6 | 2015-03-15T00:22:27.000Z | 2019-12-16T09:37:38.000Z | designate/tests/unit/sink/test_notifications.py | mrlesmithjr/designate | bff3d5f6e31fe595a77143ec4ac779c187bf72a8 | [
"Apache-2.0"
] | 109 | 2015-01-13T16:47:34.000Z | 2021-03-15T13:18:48.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.mport threading
from unittest import mock
from oslo_config import cfg
import oslotest.base
from designate.notification_handler import fake
from designate.sink import service
from designate.tests import fixtures
from designate.tests import test_notification_handler
CONF = cfg.CONF
| 33.298851 | 79 | 0.662755 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.mport threading
from unittest import mock
from oslo_config import cfg
import oslotest.base
from designate.notification_handler import fake
from designate.sink import service
from designate.tests import fixtures
from designate.tests import test_notification_handler
CONF = cfg.CONF
class TestSinkNotification(oslotest.base.BaseTestCase,
                           test_notification_handler.NotificationHandlerMixin):
    """Unit tests for designate-sink notification dispatch.

    Configures the fake handler to accept only 'compute.instance.create.end'
    events and asserts on the captured log output.
    """
    def setUp(self):
        super(TestSinkNotification, self).setUp()
        # Capture log output so tests can assert on handler activity.
        self.stdlog = fixtures.StandardLogging()
        self.useFixture(self.stdlog)
        CONF.set_override(
            'enabled_notification_handlers',
            [fake.FakeHandler.__plugin_name__],
            'service:sink'
        )
        CONF.set_override(
            'allowed_event_types', ['compute.instance.create.end'],
            'handler:fake'
        )
        CONF([], project='designate')
        self.context = mock.Mock()
        self.service = service.Service()
    def test_notification(self):
        """An allowed event type is dispatched to the fake handler."""
        event_type = 'compute.instance.create.end'
        fixture = self.get_notification_fixture('nova', event_type)
        self.service.info(self.context, None, event_type,
                          fixture['payload'], None)
        self.assertIn(
            'handler:fake: received notification - %s' % event_type,
            self.stdlog.logger.output
        )
    def test_notification_with_unknown_event(self):
        """An event type outside allowed_event_types is ignored."""
        event_type = 'compute.instance.create.start'
        fixture = self.get_notification_fixture('nova', event_type)
        self.service.info(self.context, None, event_type,
                          fixture['payload'], None)
        self.assertNotIn(
            'handler:fake: received notification - %s' % event_type,
            self.stdlog.logger.output
        )
    def test_notification_without_handler(self):
        """With no handlers enabled, the service logs a warning instead."""
        CONF.set_override('enabled_notification_handlers', [], 'service:sink')
        self.service = service.Service()
        event_type = 'compute.instance.create.end'
        fixture = self.get_notification_fixture('nova', event_type)
        self.service.info(self.context, None, event_type,
                          fixture['payload'], None)
        self.assertIn(
            'No designate-sink handlers enabled or loaded',
            self.stdlog.logger.output
        )
c23a45edce2e740416f1b63e673affc5abe1c324 | 60 | py | Python | text/_elisp/_element/__init__.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_elisp/_element/__init__.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_elisp/_element/__init__.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | from ._element import Element
__all__ = [
"Element",
]
| 10 | 29 | 0.65 | from ._element import Element
__all__ = [
"Element",
]
| 0 | 0 | 0 |
cfbf4e06707fb5df3d194eb5a289c6d31f649e23 | 28,327 | py | Python | sct_custom/spinalcordtoolbox/scripts/sct_propseg.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | 1 | 2021-09-07T08:52:21.000Z | 2021-09-07T08:52:21.000Z | sct_custom/spinalcordtoolbox/scripts/sct_propseg.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | null | null | null | sct_custom/spinalcordtoolbox/scripts/sct_propseg.py | nidebroux/lumbosacral_segmentation | 3217960c6f0f5c3886dfdf46e1286ad2f737f4aa | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
#########################################################################################
#
# Parser for PropSeg binary.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-03-03
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: remove temp files in case rescaled is not "1"
import os
import pathlib
import sys
import logging
import numpy as np
from scipy import ndimage as ndi
from spinalcordtoolbox.image import Image, add_suffix, zeros_like, convert
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, run_proc, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, rmtree, extract_fname, mv, copy
from spinalcordtoolbox.centerline import optic
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.scripts import sct_image
logger = logging.getLogger(__name__)
def check_and_correct_segmentation(fname_segmentation, fname_centerline, folder_output='', threshold_distance=5.0,
                                   remove_temp_files=1, verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and checks if the centerline of
    the segmentation is coherent with the centerline provided by isct_propseg, especially on the edges (related
    to issue #1074). Slices containing several binary objects, or whose center of mass lies further than
    ``threshold_distance`` from the propseg centerline, are zeroed; the cleaned segmentation overwrites
    ``fname_segmentation`` in place.

    :param fname_segmentation: filename of binary segmentation (modified in place)
    :param fname_centerline: filename of binary centerline
    :param folder_output: unused, kept for backward compatibility
    :param threshold_distance: threshold, in mm, beyond which centerlines are not coherent
    :param remove_temp_files: when truthy, delete the temporary working folder
    :param verbose: verbosity level
    :return: None
    """
    printv('\nCheck consistency of segmentation...', verbose)
    # Fixed: resolve the absolute path BEFORE changing directory. The original
    # computed it after os.chdir(path_tmp), which silently resolved relative
    # input paths against the temporary folder.
    fname_seg_absolute = os.path.abspath(fname_segmentation)
    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = tmp_create(basename="propseg")
    im_seg = convert(Image(fname_segmentation))
    im_seg.save(os.path.join(path_tmp, "tmp.segmentation.nii.gz"), mutable=True, verbose=0)
    im_centerline = convert(Image(fname_centerline))
    im_centerline.save(os.path.join(path_tmp, "tmp.centerline.nii.gz"), mutable=True, verbose=0)

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # convert input to RPI (and store original info to use when converting back at the end)
    image_input_orientation = im_seg.orientation
    sct_image.main("-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0".split())
    sct_image.main("-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0".split())

    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')

    # Get size of data
    printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim

    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh outputed by propseg.
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice_data = im_centerline.data[:, :, i]  # renamed: do not shadow the builtin `slice`
        if np.any(slice_data):
            # use the public API; scipy.ndimage.measurements is deprecated
            x_centerline, y_centerline = ndi.center_of_mass(slice_data)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)

    # NOTE(review): raises ValueError if the centerline image is empty (no
    # foreground voxels) — same behavior as before; verify callers guarantee
    # a non-empty centerline.
    minz_centerline = np.min(key_centerline)
    maxz_centerline = np.max(key_centerline)
    # NOTE(review): this is half the centerline *extent*, then used as an
    # absolute z index below; when minz_centerline > 0 it is not the middle
    # of the centerline — kept as-is to preserve behavior, but confirm.
    mid_slice = int((maxz_centerline - minz_centerline) / 2)

    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(minz_centerline, maxz_centerline + 1):
        # extraction of slice
        slice_data = im_seg.data[:, :, i]
        _, nb_labels = ndi.label(slice_data)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more that one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.center_of_mass(slice_data)
            slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
            # 3D Euclidean distance (mm) between this slice's center of mass
            # and the nearest propseg centerline point
            distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
                               ((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
                               ((i - slice_nearest_coord) * pz) ** 2)

            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True

    # Check list of removal and keep one continuous centerline (improve this comment)
    # Method:
    # starting from mid-centerline (in both directions), the first True encountered is applied to all following slices
    slice_to_change = False
    for i in range(mid_slice, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    slice_to_change = False
    for i in range(mid_slice, 0, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    for i in range(0, nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0

    # saving the image
    im_seg.save('tmp.segmentation_RPI_c.nii.gz')

    # replacing old segmentation with the corrected one
    sct_image.main('-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.
                   format(image_input_orientation, fname_seg_absolute).split())

    os.chdir(curdir)

    # TODO: display information about how much of the segmentation has been corrected

    # remove temporary files
    if remove_temp_files:
        # printv("\nRemove temporary files...", verbose)
        rmtree(path_tmp)
def func_rescale_header(fname_data, rescale_factor, verbose=0):
    """
    Rescale the voxel dimension by modifying the NIFTI header qform. Write the output file in a temp folder.

    :param fname_data: path to the input NIFTI file
    :param rescale_factor: multiplicative factor applied to the 3x3 rotation/zoom part of the qform
    :param verbose: unused, kept for backward compatibility
    :return: fname_data_rescaled: path to the rescaled file, located in a temporary folder
    """
    import nibabel as nib
    img = nib.load(fname_data)
    # get qform
    qform = img.header.get_qform()
    # multiply by scaling factor
    qform[0:3, 0:3] *= rescale_factor
    # generate a new nifti file
    header_rescaled = img.header.copy()
    header_rescaled.set_qform(qform)
    # the data are the same-- only the header changes
    # Fixed: img.get_data() was deprecated and removed in nibabel 3.x;
    # np.asanyarray(img.dataobj) is the recommended replacement and keeps the
    # on-disk dtype (unlike get_fdata(), which always returns floats).
    img_rescaled = nib.nifti1.Nifti1Image(np.asanyarray(img.dataobj), None, header=header_rescaled)
    path_tmp = tmp_create(basename="propseg")
    fname_data_rescaled = os.path.join(path_tmp, os.path.basename(add_suffix(fname_data, "_rescaled")))
    nib.save(img_rescaled, fname_data_rescaled)
    return fname_data_rescaled
def propseg(img_input, options_dict):
    """
    Segment the spinal cord by running the isct_propseg binary.

    :param img_input: source image (spinalcordtoolbox Image), to be segmented
    :param options_dict: parsed command-line arguments (argparse.Namespace from get_parser())
    :return: segmented Image
    """
    arguments = options_dict
    fname_input_data = img_input.absolutepath
    fname_data = os.path.abspath(fname_input_data)
    contrast_type = arguments.c
    # isct_propseg only understands t1 (cord bright) / t2 (cord dark):
    # map t2s/dwi onto the closest of the two
    contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'}
    contrast_type_propseg = contrast_type_conversion[contrast_type]
    # Starting building the command
    cmd = ['isct_propseg', '-t', contrast_type_propseg]
    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = os.path.basename(add_suffix(fname_data, "_seg"))
    folder_output = str(pathlib.Path(fname_out).parent)
    cmd += ['-o', folder_output]
    if not os.path.isdir(folder_output) and os.path.exists(folder_output):
        logger.error("output directory %s is not a valid directory" % folder_output)
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)
    if arguments.down is not None:
        cmd += ["-down", str(arguments.down)]
    if arguments.up is not None:
        cmd += ["-up", str(arguments.up)]
    remove_temp_files = arguments.r
    verbose = int(arguments.v)
    # Update for propseg binary
    if verbose > 0:
        cmd += ["-verbose"]
    # Output options. These are argparse store_true flags, so they default to
    # False (never None): test truthiness. BUGFIX: the previous `is not None`
    # tests were always True, so every output flag was passed to isct_propseg
    # unconditionally.
    if arguments.mesh:
        cmd += ["-mesh"]
    if arguments.centerline_binary:
        cmd += ["-centerline-binary"]
    if arguments.CSF:
        cmd += ["-CSF"]
    if arguments.centerline_coord:
        cmd += ["-centerline-coord"]
    if arguments.cross:
        cmd += ["-cross"]
    if arguments.init_tube:
        cmd += ["-init-tube"]
    if arguments.low_resolution_mesh:
        cmd += ["-low-resolution-mesh"]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_nii is not None:
    #     cmd += ["-detect-nii"]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_png is not None:
    #     cmd += ["-detect-png"]
    # Helping options
    use_viewer = None
    use_optic = True  # enabled by default
    init_option = None
    rescale_header = arguments.rescale
    if arguments.init is not None:
        init_option = float(arguments.init)
        if init_option < 0:
            printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error')
            sys.exit(1)
    if arguments.init_centerline is not None:
        if str(arguments.init_centerline) == "viewer":
            use_viewer = "centerline"
        elif str(arguments.init_centerline) == "hough":
            use_optic = False
        else:
            # BUGFIX: `rescale_header is not 1` was an identity test on a float
            # and was always True (even for the default 1.0), triggering a
            # useless rescale. Compare by value instead.
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(arguments.init_centerline), rescale_header, verbose=verbose)
            else:
                fname_labels_viewer = str(arguments.init_centerline)
            cmd += ["-init-centerline", fname_labels_viewer]
            use_optic = False
    if arguments.init_mask is not None:
        if str(arguments.init_mask) == "viewer":
            use_viewer = "mask"
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(arguments.init_mask), rescale_header)
            else:
                fname_labels_viewer = str(arguments.init_mask)
            cmd += ["-init-mask", fname_labels_viewer]
            use_optic = False
    if arguments.mask_correction is not None:
        cmd += ["-mask-correction", str(arguments.mask_correction)]
    if arguments.radius is not None:
        cmd += ["-radius", str(arguments.radius)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_n is not None:
    #     cmd += ["-detect-n", str(arguments.detect_n)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_gap is not None:
    #     cmd += ["-detect-gap", str(arguments.detect_gap)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.init_validation is not None:
    #     cmd += ["-init-validation"]
    if arguments.nbiter is not None:
        cmd += ["-nbiter", str(arguments.nbiter)]
    if arguments.max_area is not None:
        cmd += ["-max-area", str(arguments.max_area)]
    if arguments.max_deformation is not None:
        cmd += ["-max-deformation", str(arguments.max_deformation)]
    if arguments.min_contrast is not None:
        cmd += ["-min-contrast", str(arguments.min_contrast)]
    if arguments.d is not None:
        # BUGFIX: `arguments` is an argparse Namespace, not a dict;
        # `arguments["-d"]` raised TypeError whenever -d was supplied.
        cmd += ["-d", str(arguments.d)]
    if arguments.distance_search is not None:
        cmd += ["-dsearch", str(arguments.distance_search)]
    if arguments.alpha is not None:
        cmd += ["-alpha", str(arguments.alpha)]
    # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
    image_input = Image(fname_data)
    image_input_rpi = image_input.copy().change_orientation('RPI')
    nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
    if nt > 1:
        printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')
    path_data, file_data, ext_data = extract_fname(fname_data)
    path_tmp = tmp_create(basename="label_vertebrae")
    # rescale header (see issue #1406)
    if rescale_header != 1:
        fname_data_propseg = func_rescale_header(fname_data, rescale_header)
    else:
        fname_data_propseg = fname_data
    # add to command
    cmd += ['-i', fname_data_propseg]
    # if centerline or mask is asked using viewer
    if use_viewer:
        from spinalcordtoolbox.gui.base import AnatomicalParams
        from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

        params = AnatomicalParams()
        if use_viewer == 'mask':
            params.num_points = 3
            params.interval_in_mm = 15  # superior-inferior interval between two consecutive labels
            params.starting_slice = 'midfovminusinterval'
        if use_viewer == 'centerline':
            # setting maximum number of points to a reasonable value
            params.num_points = 20
            params.interval_in_mm = 30
            params.starting_slice = 'top'
        im_data = Image(fname_data_propseg)
        im_mask_viewer = zeros_like(im_data)
        # im_mask_viewer.absolutepath = add_suffix(fname_data_propseg, '_labels_viewer')
        controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
        fname_labels_viewer = add_suffix(fname_data_propseg, '_labels_viewer')
        if not controller.saved:
            printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error')
            sys.exit(1)
        # save labels
        controller.as_niftii(fname_labels_viewer)
        # add mask filename to parameters string
        if use_viewer == "centerline":
            cmd += ["-init-centerline", fname_labels_viewer]
        elif use_viewer == "mask":
            cmd += ["-init-mask", fname_labels_viewer]
    # If using OptiC
    elif use_optic:
        image_centerline = optic.detect_centerline(image_input, contrast_type, verbose)
        fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz')
        image_centerline.save(fname_centerline_optic)
        cmd += ["-init-centerline", fname_centerline_optic]
    if init_option is not None:
        if init_option > 1:
            # values > 1 are slice indices: normalize to a fraction of the FOV
            init_option /= (nz - 1)
        cmd += ['-init', str(init_option)]
    # enabling centerline extraction by default (needed by check_and_correct_segmentation() )
    cmd += ['-centerline-binary']
    # run propseg
    status, output = run_proc(cmd, verbose, raise_exception=False, is_sct_binary=True)
    # check status is not 0
    if not status == 0:
        printv('Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
               1, 'error')
        sys.exit(1)
    # build output filename
    fname_seg = os.path.join(folder_output, fname_out)
    fname_centerline = os.path.join(folder_output, os.path.basename(add_suffix(fname_data, "_centerline")))
    # in case header was rescaled, we need to update the output file names by removing the "_rescaled"
    if rescale_header != 1:
        mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_seg")),
           fname_seg)
        mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_centerline")),
           fname_centerline)
        # if viewer was used, copy the labelled points to the output folder (they will then be scaled back)
        if use_viewer:
            fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(add_suffix(fname_data,
                                                                                              "_labels_viewer")))
            copy(fname_labels_viewer, fname_labels_viewer_new)
            # update variable (used later)
            fname_labels_viewer = fname_labels_viewer_new
    # check consistency of segmentation
    if arguments.correct_seg:
        check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0,
                                       remove_temp_files=remove_temp_files, verbose=verbose)
    # copy header from input to segmentation to make sure qform is the same
    printv("Copy header input --> output(s) to make sure qform is the same.", verbose)
    list_fname = [fname_seg, fname_centerline]
    if use_viewer:
        list_fname.append(fname_labels_viewer)
    for fname in list_fname:
        im = Image(fname)
        im.header = image_input.header
        im.save(dtype='int8')  # they are all binary masks hence fine to save as int8
    return Image(fname_seg)
# Script entry point: initialize the SCT runtime, then run the CLI with the
# command-line arguments (program name stripped).
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
| 41.780236 | 139 | 0.645074 | #!/usr/bin/env python
#########################################################################################
#
# Parser for PropSeg binary.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-03-03
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: remove temp files in case rescaled is not "1"
import os
import pathlib
import sys
import logging
import numpy as np
from scipy import ndimage as ndi
from spinalcordtoolbox.image import Image, add_suffix, zeros_like, convert
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, run_proc, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, rmtree, extract_fname, mv, copy
from spinalcordtoolbox.centerline import optic
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.scripts import sct_image
logger = logging.getLogger(__name__)
def check_and_correct_segmentation(fname_segmentation, fname_centerline, folder_output='', threshold_distance=5.0,
                                   remove_temp_files=1, verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
    segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
    to issue #1074). Incoherent slices are zeroed out and the corrected segmentation overwrites
    fname_segmentation in place.

    Args:
        fname_segmentation: filename of binary segmentation
        fname_centerline: filename of binary centerline
        folder_output: not used by this function body; kept for call-site compatibility
        threshold_distance: threshold, in mm, beyond which centerlines are not coherent
        remove_temp_files: if truthy, delete the temporary working folder at the end
        verbose: verbosity level forwarded to printv

    Returns: None
    """
    printv('\nCheck consistency of segmentation...', verbose)
    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = tmp_create(basename="propseg")
    im_seg = convert(Image(fname_segmentation))
    im_seg.save(os.path.join(path_tmp, "tmp.segmentation.nii.gz"), mutable=True, verbose=0)
    im_centerline = convert(Image(fname_centerline))
    im_centerline.save(os.path.join(path_tmp, "tmp.centerline.nii.gz"), mutable=True, verbose=0)
    # go to tmp folder (absolute path and original orientation are captured first,
    # since they are needed to write the corrected result back at the end)
    curdir = os.getcwd()
    os.chdir(path_tmp)
    # convert input to RPI (and store original info to use when converting back at the end)
    fname_seg_absolute = os.path.abspath(fname_segmentation)
    image_input_orientation = im_seg.orientation
    sct_image.main("-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0".split())
    sct_image.main("-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0".split())
    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')
    # Get size of data
    printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim
    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh outputed by propseg.
    # NOTE(review): ndi.measurements is a deprecated scipy namespace; the same
    # function is available as ndi.center_of_mass in current scipy -- consider updating.
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice = im_centerline.data[:, :, i]
        if np.any(slice):
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)
    minz_centerline = np.min(key_centerline)
    maxz_centerline = np.max(key_centerline)
    # NOTE(review): this is half the centerline *length*, not the mid z-index
    # ((min+max)/2); if minz_centerline > 0 the two sweeps below start well below
    # the centerline -- confirm this is intended.
    mid_slice = int((maxz_centerline - minz_centerline) / 2)
    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(minz_centerline, maxz_centerline + 1):
        # extraction of slice
        slice = im_seg.data[:, :, i]
        distance = -1
        label_objects, nb_labels = ndi.label(slice)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more that one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
            # 3D Euclidean distance in mm between this slice's center of mass and
            # the nearest propseg centerline point (voxel sizes px/py/pz applied)
            distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
                               ((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
                               ((i - slice_nearest_coord) * pz) ** 2)
            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True
    # Check list of removal and keep one continuous centerline (improve this comment)
    # Method:
    # starting from mid-centerline (in both directions), the first True encountered is applied to all following slices
    slice_to_change = False
    for i in range(mid_slice, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True
    slice_to_change = False
    for i in range(mid_slice, 0, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True
    for i in range(0, nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0
    # saving the image
    im_seg.save('tmp.segmentation_RPI_c.nii.gz')
    # replacing old segmentation with the corrected one
    sct_image.main('-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.
                   format(image_input_orientation, fname_seg_absolute).split())
    os.chdir(curdir)
    # display information about how much of the segmentation has been corrected
    # remove temporary files
    if remove_temp_files:
        # printv("\nRemove temporary files...", verbose)
        rmtree(path_tmp)
def get_parser():
    """
    Build and return the command-line argument parser for sct_propseg.

    :return: SCTArgumentParser with all mandatory and optional arguments registered.
    """
    # Initialize the parser
    parser = SCTArgumentParser(
        description=(
            "This program segments automatically the spinal cord on T1- and T2-weighted images, for any field of view. "
            "You must provide the type of contrast, the image as well as the output folder path. The segmentation "
            "follows the spinal cord centerline, which is provided by an automatic tool: Optic. The initialization of "
            "the segmentation is made on the median slice of the centerline, and can be ajusted using the -init "
            "parameter. The initial radius of the tubular mesh that will be propagated should be adapted to size of "
            "the spinal cord on the initial propagation slice. \n"
            "\n"
            "Primary output is the binary mask of the spinal cord segmentation. This method must provide VTK "
            "triangular mesh of the segmentation (option -mesh). Spinal cord centerline is available as a binary image "
            "(-centerline-binary) or a text file with coordinates in world referential (-centerline-coord).\n"
            "\n"
            "Cross-sectional areas along the spinal cord can be available (-cross). Several tips on segmentation "
            "correction can be found on the 'Correcting sct_propseg' page in the Tutorials section of the "
            "documentation.\n"
            "\n"
            "If the segmentation fails at some location (e.g. due to poor contrast between spinal cord and CSF), edit "
            "your anatomical image (e.g. with fslview) and manually enhance the contrast by adding bright values "
            "around the spinal cord for T2-weighted images (dark values for T1-weighted). Then, launch the "
            "segmentation again.\n"
            "\n"
            "References:\n"
            "  - [De Leener B, Kadoury S, Cohen-Adad J. Robust, accurate and fast automatic segmentation of the spinal "
            "cord. Neuroimage 98, 2014. pp 528-536. DOI: 10.1016/j.neuroimage.2014.04.051](https://pubmed.ncbi.nlm.nih.gov/24780696/)\n"
            "  - [De Leener B, Cohen-Adad J, Kadoury S. Automatic segmentation of the spinal cord and spinal canal "
            "coupled with vertebral labeling. IEEE Trans Med Imaging. 2015 Aug;34(8):1705-18.](https://pubmed.ncbi.nlm.nih.gov/26011879/)"
        )
    )
    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Input image. Example: ti.nii.gz"
    )
    mandatory.add_argument(
        '-c',
        choices=['t1', 't2', 't2s', 'dwi'],
        required=True,
        help="Type of image contrast. If your contrast is not in the available options (t1, t2, t2s, dwi), use "
             "t1 (cord bright / CSF dark) or t2 (cord dark / CSF bright)"
    )
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-o',
        metavar=Metavar.file,
        help='Output filename. Example: spinal_seg.nii.gz '
    )
    optional.add_argument(
        '-down',
        metavar=Metavar.int,
        type=int,
        help="Down limit of the propagation. Default is 0."
    )
    optional.add_argument(
        '-up',
        metavar=Metavar.int,
        type=int,
        help="Up limit of the propagation. Default is the highest slice of the image."
    )
    optional.add_argument(
        '-r',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Whether to remove temporary files. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )
    optional.add_argument(
        '-mesh',
        action="store_true",
        help="Output: mesh of the spinal cord segmentation"
    )
    optional.add_argument(
        '-centerline-binary',
        action="store_true",
        help="Output: centerline as a binary image."
    )
    optional.add_argument(
        '-CSF',
        action="store_true",
        help="Output: CSF segmentation."
    )
    optional.add_argument(
        '-centerline-coord',
        action="store_true",
        help="Output: centerline in world coordinates."
    )
    optional.add_argument(
        '-cross',
        action="store_true",
        help="Output: cross-sectional areas."
    )
    optional.add_argument(
        '-init-tube',
        action="store_true",
        help="Output: initial tubular meshes."
    )
    optional.add_argument(
        '-low-resolution-mesh',
        action="store_true",
        help="Output: low-resolution mesh."
    )
    optional.add_argument(
        '-init-centerline',
        metavar=Metavar.file,
        help="R|Filename of centerline to use for the propagation. Use format .txt or .nii; see file structure in "
             "documentation.\n"
             "Replace filename by 'viewer' to use interactive viewer for providing centerline. Example: "
             "-init-centerline viewer"
    )
    optional.add_argument(
        '-init',
        metavar=Metavar.float,
        type=float,
        help="Axial slice where the propagation starts, default is middle axial slice."
    )
    optional.add_argument(
        '-init-mask',
        metavar=Metavar.file,
        help="R|Mask containing three center of the spinal cord, used to initiate the propagation.\n"
             "Replace filename by 'viewer' to use interactive viewer for providing mask. Example: -init-mask viewer"
    )
    optional.add_argument(
        '-mask-correction',
        metavar=Metavar.file,
        help="mask containing binary pixels at edges of the spinal cord on which the segmentation algorithm will be "
             "forced to register the surface. Can be used in case of poor/missing contrast between spinal cord and "
             "CSF or in the presence of artefacts/pathologies."
    )
    optional.add_argument(
        '-rescale',
        metavar=Metavar.float,
        type=float,
        default=1.0,
        help="Rescale the image (only the header, not the data) in order to enable segmentation on spinal cords with "
             "dimensions different than that of humans (e.g., mice, rats, elephants, etc.). For example, if the "
             "spinal cord is 2x smaller than that of human, then use -rescale 2"
    )
    optional.add_argument(
        '-radius',
        metavar=Metavar.float,
        type=float,
        help="Approximate radius (in mm) of the spinal cord. Default is 4."
    )
    optional.add_argument(
        '-nbiter',
        metavar=Metavar.int,
        type=int,
        help="Stop condition (affects only the Z propogation): number of iteration for the propagation for both "
             "direction. Default is 200."
    )
    optional.add_argument(
        '-max-area',
        metavar=Metavar.float,
        type=float,
        help="[mm^2], stop condition (affects only the Z propogation): maximum cross-sectional area. Default is 120."
    )
    optional.add_argument(
        '-max-deformation',
        metavar=Metavar.float,
        type=float,
        help="[mm], stop condition (affects only the Z propogation): maximum deformation per iteration. Default is "
             "2.5"
    )
    optional.add_argument(
        '-min-contrast',
        metavar=Metavar.float,
        type=float,
        help="[intensity value], stop condition (affects only the Z propogation): minimum local SC/CSF contrast, "
             "default is 50"
    )
    optional.add_argument(
        '-d',
        metavar=Metavar.float,
        type=float,
        help="trade-off between distance of most promising point (d is high) and feature strength (d is low), "
             "default depend on the contrast. Range of values from 0 to 50. 15-25 values show good results. Default "
             "is 10."
    )
    optional.add_argument(
        '-distance-search',
        metavar=Metavar.float,
        type=float,
        help="maximum distance of optimal points computation along the surface normals. Range of values from 0 to 30. "
             "Default is 15"
    )
    optional.add_argument(
        '-alpha',
        metavar=Metavar.float,
        type=float,
        help="Trade-off between internal (alpha is high) and external (alpha is low) forces. Range of values from 0 "
             "to 50. Default is 25."
    )
    optional.add_argument(
        '-qc',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="The path where the quality control generated content will be saved."
    )
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
    )
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
    )
    optional.add_argument(
        '-correct-seg',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Enable (1) or disable (0) the algorithm that checks and correct the output segmentation. More "
             "specifically, the algorithm checks if the segmentation is consistent with the centerline provided by "
             "isct_propseg."
    )
    optional.add_argument(
        '-igt',
        metavar=Metavar.file,
        help="File name of ground-truth segmentation."
    )
    return parser
def func_rescale_header(fname_data, rescale_factor, verbose=0):
    """
    Rescale the voxel dimension by modifying the NIFTI header qform. Write the output file in a temp folder.

    :param fname_data: path to the input NIFTI file
    :param rescale_factor: scalar multiplied into the 3x3 rotation/zoom part of the qform
                           (the translation row/column is left untouched)
    :param verbose: unused; kept for backward compatibility with existing call sites
    :return: fname_data_rescaled: path to the rescaled copy written in a temporary folder
    """
    import nibabel as nib
    import numpy as np
    img = nib.load(fname_data)
    # get qform
    qform = img.header.get_qform()
    # multiply by scaling factor
    qform[0:3, 0:3] *= rescale_factor
    # generate a new nifti file
    header_rescaled = img.header.copy()
    header_rescaled.set_qform(qform)
    # the data are the same -- only the header changes.
    # np.asanyarray(img.dataobj) is the documented replacement for img.get_data(),
    # which was deprecated and removed in nibabel 5.0; unlike get_fdata() it
    # preserves the on-disk dtype instead of promoting to float64.
    img_rescaled = nib.nifti1.Nifti1Image(np.asanyarray(img.dataobj), None, header=header_rescaled)
    path_tmp = tmp_create(basename="propseg")
    fname_data_rescaled = os.path.join(path_tmp, os.path.basename(add_suffix(fname_data, "_rescaled")))
    nib.save(img_rescaled, fname_data_rescaled)
    return fname_data_rescaled
def propseg(img_input, options_dict):
"""
:param img_input: source image, to be segmented
:param options_dict: arguments as dictionary
:return: segmented Image
"""
arguments = options_dict
fname_input_data = img_input.absolutepath
fname_data = os.path.abspath(fname_input_data)
contrast_type = arguments.c
contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'}
contrast_type_propseg = contrast_type_conversion[contrast_type]
# Starting building the command
cmd = ['isct_propseg', '-t', contrast_type_propseg]
if arguments.o is not None:
fname_out = arguments.o
else:
fname_out = os.path.basename(add_suffix(fname_data, "_seg"))
folder_output = str(pathlib.Path(fname_out).parent)
cmd += ['-o', folder_output]
if not os.path.isdir(folder_output) and os.path.exists(folder_output):
logger.error("output directory %s is not a valid directory" % folder_output)
if not os.path.exists(folder_output):
os.makedirs(folder_output)
if arguments.down is not None:
cmd += ["-down", str(arguments.down)]
if arguments.up is not None:
cmd += ["-up", str(arguments.up)]
remove_temp_files = arguments.r
verbose = int(arguments.v)
# Update for propseg binary
if verbose > 0:
cmd += ["-verbose"]
# Output options
if arguments.mesh is not None:
cmd += ["-mesh"]
if arguments.centerline_binary is not None:
cmd += ["-centerline-binary"]
if arguments.CSF is not None:
cmd += ["-CSF"]
if arguments.centerline_coord is not None:
cmd += ["-centerline-coord"]
if arguments.cross is not None:
cmd += ["-cross"]
if arguments.init_tube is not None:
cmd += ["-init-tube"]
if arguments.low_resolution_mesh is not None:
cmd += ["-low-resolution-mesh"]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_nii is not None:
# cmd += ["-detect-nii"]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_png is not None:
# cmd += ["-detect-png"]
# Helping options
use_viewer = None
use_optic = True # enabled by default
init_option = None
rescale_header = arguments.rescale
if arguments.init is not None:
init_option = float(arguments.init)
if init_option < 0:
printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error')
sys.exit(1)
if arguments.init_centerline is not None:
if str(arguments.init_centerline) == "viewer":
use_viewer = "centerline"
elif str(arguments.init_centerline) == "hough":
use_optic = False
else:
if rescale_header is not 1:
fname_labels_viewer = func_rescale_header(str(arguments.init_centerline), rescale_header, verbose=verbose)
else:
fname_labels_viewer = str(arguments.init_centerline)
cmd += ["-init-centerline", fname_labels_viewer]
use_optic = False
if arguments.init_mask is not None:
if str(arguments.init_mask) == "viewer":
use_viewer = "mask"
else:
if rescale_header is not 1:
fname_labels_viewer = func_rescale_header(str(arguments.init_mask), rescale_header)
else:
fname_labels_viewer = str(arguments.init_mask)
cmd += ["-init-mask", fname_labels_viewer]
use_optic = False
if arguments.mask_correction is not None:
cmd += ["-mask-correction", str(arguments.mask_correction)]
if arguments.radius is not None:
cmd += ["-radius", str(arguments.radius)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_n is not None:
# cmd += ["-detect-n", str(arguments.detect_n)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_gap is not None:
# cmd += ["-detect-gap", str(arguments.detect_gap)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.init_validation is not None:
# cmd += ["-init-validation"]
if arguments.nbiter is not None:
cmd += ["-nbiter", str(arguments.nbiter)]
if arguments.max_area is not None:
cmd += ["-max-area", str(arguments.max_area)]
if arguments.max_deformation is not None:
cmd += ["-max-deformation", str(arguments.max_deformation)]
if arguments.min_contrast is not None:
cmd += ["-min-contrast", str(arguments.min_contrast)]
if arguments.d is not None:
cmd += ["-d", str(arguments["-d"])]
if arguments.distance_search is not None:
cmd += ["-dsearch", str(arguments.distance_search)]
if arguments.alpha is not None:
cmd += ["-alpha", str(arguments.alpha)]
# check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
image_input = Image(fname_data)
image_input_rpi = image_input.copy().change_orientation('RPI')
nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
if nt > 1:
printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')
path_data, file_data, ext_data = extract_fname(fname_data)
path_tmp = tmp_create(basename="label_vertebrae")
# rescale header (see issue #1406)
if rescale_header is not 1:
fname_data_propseg = func_rescale_header(fname_data, rescale_header)
else:
fname_data_propseg = fname_data
# add to command
cmd += ['-i', fname_data_propseg]
# if centerline or mask is asked using viewer
if use_viewer:
from spinalcordtoolbox.gui.base import AnatomicalParams
from spinalcordtoolbox.gui.centerline import launch_centerline_dialog
params = AnatomicalParams()
if use_viewer == 'mask':
params.num_points = 3
params.interval_in_mm = 15 # superior-inferior interval between two consecutive labels
params.starting_slice = 'midfovminusinterval'
if use_viewer == 'centerline':
# setting maximum number of points to a reasonable value
params.num_points = 20
params.interval_in_mm = 30
params.starting_slice = 'top'
im_data = Image(fname_data_propseg)
im_mask_viewer = zeros_like(im_data)
# im_mask_viewer.absolutepath = add_suffix(fname_data_propseg, '_labels_viewer')
controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
fname_labels_viewer = add_suffix(fname_data_propseg, '_labels_viewer')
if not controller.saved:
printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error')
sys.exit(1)
# save labels
controller.as_niftii(fname_labels_viewer)
# add mask filename to parameters string
if use_viewer == "centerline":
cmd += ["-init-centerline", fname_labels_viewer]
elif use_viewer == "mask":
cmd += ["-init-mask", fname_labels_viewer]
# If using OptiC
elif use_optic:
image_centerline = optic.detect_centerline(image_input, contrast_type, verbose)
fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz')
image_centerline.save(fname_centerline_optic)
cmd += ["-init-centerline", fname_centerline_optic]
if init_option is not None:
if init_option > 1:
init_option /= (nz - 1)
cmd += ['-init', str(init_option)]
# enabling centerline extraction by default (needed by check_and_correct_segmentation() )
cmd += ['-centerline-binary']
# run propseg
status, output = run_proc(cmd, verbose, raise_exception=False, is_sct_binary=True)
# check status is not 0
if not status == 0:
printv('Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
1, 'error')
sys.exit(1)
# build output filename
fname_seg = os.path.join(folder_output, fname_out)
fname_centerline = os.path.join(folder_output, os.path.basename(add_suffix(fname_data, "_centerline")))
# in case header was rescaled, we need to update the output file names by removing the "_rescaled"
if rescale_header is not 1:
mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_seg")),
fname_seg)
mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_centerline")),
fname_centerline)
# if user was used, copy the labelled points to the output folder (they will then be scaled back)
if use_viewer:
fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(add_suffix(fname_data,
"_labels_viewer")))
copy(fname_labels_viewer, fname_labels_viewer_new)
# update variable (used later)
fname_labels_viewer = fname_labels_viewer_new
# check consistency of segmentation
if arguments.correct_seg:
check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0,
remove_temp_files=remove_temp_files, verbose=verbose)
# copy header from input to segmentation to make sure qform is the same
printv("Copy header input --> output(s) to make sure qform is the same.", verbose)
list_fname = [fname_seg, fname_centerline]
if use_viewer:
list_fname.append(fname_labels_viewer)
for fname in list_fname:
im = Image(fname)
im.header = image_input.header
im.save(dtype='int8') # they are all binary masks hence fine to save as int8
return Image(fname_seg)
def main(argv=None):
    """Command-line entry point for sct_propseg.

    Parses CLI arguments, runs the propseg segmentation on the input
    image, optionally generates a QC report, and prints the viewer
    syntax for inspecting the result.
    """
    args = get_parser().parse_args(argv)
    set_global_loglevel(verbose=args.v)

    fname_in = os.path.abspath(args.i)
    seg_image = propseg(Image(fname_in), args)
    fname_seg = seg_image.absolutepath

    # Generate a QC report only when an output path was requested.
    if args.qc is not None:
        generate_qc(fname_in1=fname_in, fname_seg=fname_seg, args=args,
                    path_qc=os.path.abspath(args.qc),
                    dataset=args.qc_dataset, subject=args.qc_subject,
                    process='sct_propseg')

    display_viewer_syntax([fname_in, fname_seg], colormaps=['gray', 'red'], opacities=['', '1'])
if __name__ == "__main__":
# Script entry point: initialise the SCT runtime, then run with CLI args.
init_sct()
main(sys.argv[1:])
| 10,681 | 0 | 46 |
3e1dcefe4cfbf87ca1576b78d49c23394ad109b2 | 1,217 | py | Python | brownies/legacy/toENDL/productData/distributions/angular.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | brownies/legacy/toENDL/productData/distributions/angular.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | brownies/legacy/toENDL/productData/distributions/angular.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from fudge import outputChannel as outputChannelModule
from fudge.productData.distributions import angular as angularModule
#
# XYs1d
#
angularModule.XYs1d.toENDL = toENDL
#
# XYs2d
#
angularModule.XYs2d.toENDL = toENDL
#
# isotropic2d
#
angularModule.isotropic2d.toENDL = toENDL
#
# recoil
#
angularModule.recoil.toENDL = toENDL
#
# twoBodyForm
#
angularModule.twoBodyForm.toENDL = toENDL
#
# form
#
angularModule.form.toENDL = toENDL
| 18.164179 | 81 | 0.707477 | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from fudge import outputChannel as outputChannelModule
from fudge.productData.distributions import angular as angularModule
#
# XYs1d
#
def toENDL(self):
    """Convert this 1-D angular distribution (XYs1d) to its ENDL list
    form: [outerDomainValue, [[x, y], ...]]."""
    pairs = []
    for x_value, y_value in self:
        pairs.append([x_value, y_value])
    return [self.outerDomainValue, pairs]
angularModule.XYs1d.toENDL = toENDL
#
# XYs2d
#
def toENDL(self):
    """Convert each contained 1-D function to ENDL form and return the
    resulting list (XYs2d)."""
    converted = []
    for function1d in self:
        converted.append(function1d.toENDL())
    return converted
angularModule.XYs2d.toENDL = toENDL
#
# isotropic2d
#
def toENDL(self):
    """Isotropic 2-d distributions carry no explicit ENDL data; they are
    represented by None."""
    return None
angularModule.isotropic2d.toENDL = toENDL
#
# recoil
#
def toENDL(self):
    """Resolve a recoil distribution by inverting the partner product's
    distribution and converting that inverted form to ENDL."""
    channel = self.findClassInAncestry(outputChannelModule.outputChannel)
    partner = channel.products[0]
    return partner.distribution[0].invert().toENDL()
angularModule.recoil.toENDL = toENDL
#
# twoBodyForm
#
def toENDL(self):
    """Return the two-body form as an ENDL dictionary keyed by data type 1,
    holding the converted angular subform."""
    subform_data = self.angularSubform.toENDL()
    return {1: subform_data}
angularModule.twoBodyForm.toENDL = toENDL
#
# form
#
def toENDL(self):
    """Fallback conversion: no ENDL translation is implemented for this
    form, so its moniker is printed and None is returned implicitly."""
    print(self.moniker)
    return None
angularModule.form.toENDL = toENDL
| 442 | 0 | 132 |
0cac01636d295a3749a34a34602491d7b6bacea8 | 679 | py | Python | globals.py | MichaelSchmidt82/pong-neural-net | c0131fccf58457ebe08d6392e2d08abd9cc2fced | [
"MIT"
] | null | null | null | globals.py | MichaelSchmidt82/pong-neural-net | c0131fccf58457ebe08d6392e2d08abd9cc2fced | [
"MIT"
] | null | null | null | globals.py | MichaelSchmidt82/pong-neural-net | c0131fccf58457ebe08d6392e2d08abd9cc2fced | [
"MIT"
# Game configuration shared across the pong modules.
game = {
    'TRAINING': True,
    'WND_WIDTH': 500,
    'WND_HEIGHT': 500,
    'SCOREBAR_HEIGHT': 30,
    # colors
    'WHITE': (255, 255, 255),
    'BLACK': (0, 0, 0),
    # game objects
    'BALL_SZ': 9,
    'PAD_H': 45,
    'PAD_W': 15
}
# Derived values.  NOTE(review): the original referenced keys that do not
# exist ('WINDOW_HEIGHT', 'PADDLE_W') and a bare SCOREBAR_HEIGHT name,
# which raised NameError/KeyError at import; corrected to the defined keys.
game['BALL_XSPD'] = game['WND_WIDTH'] / 160
game['BALL_XSTR'] = 0.5 * game['WND_WIDTH']
game['BALL_YSTR'] = 0.5 * (game['WND_HEIGHT'] - game['SCOREBAR_HEIGHT']) + game['SCOREBAR_HEIGHT']
game['PAD_SPEED'] = game['WND_HEIGHT'] / 105
game['PAD_START'] = (game['WND_WIDTH'] - game['SCOREBAR_HEIGHT']) / 2
game['AI_PAD_X'] = game['WND_WIDTH'] - game['PAD_W'] - 10
game['PLY_PAD_X'] = 10

# DQN hyper-parameters: observation vector size and number of actions.
DQN = {
    'STATE_SIZE': 8,
    'ACT_SIZE': 3
}
| 21.21875 | 90 | 0.581738 | game = {
'TRAINING': True,
'WND_WIDTH': 500,
'WND_HEIGHT': 500,
'SCOREBAR_HEIGHT': 30,
# colors
'WHITE': (255, 255, 255),
'BLACK': (0, 0, 0),
# game objects
'BALL_SZ': 9,
'PAD_H': 45,
'PAD_W': 15
}
game['BALL_XSPD'] = game['WND_WIDTH'] / 160
game['BALL_XSTR'] = 0.5 * game['WND_WIDTH']
game['BALL_YSTR'] = 0.5 * (game['WND_HEIGHT'] - game['SCOREBAR_HEIGHT']) + SCOREBAR_HEIGHT
game['PAD_SPEED'] = game['WINDOW_HEIGHT'] / 105
game['PAD_START'] = (game['WND_WIDTH'] - game['SCOREBAR_HEIGHT']) / 2
game['AI_PAD_X'] = game['WND_WIDTH'] - game['PADDLE_W'] - 10
game['PLY_PAD_X'] = 10
DQN = {
'STATE_SIZE': 8,
'ACT_SIZE': 3
}
| 0 | 0 | 0 |
dbfcd5072822bcaecc3bb7840e9b162caa621b22 | 957 | py | Python | cs1101/Neeraj_Pandey_cs1101_practice4/q10.py | neerajp99/intro_to_cs_CS-101 | 8c4ae52a68458706a37ffc14c663ea7ae67183ef | [
"MIT"
] | null | null | null | cs1101/Neeraj_Pandey_cs1101_practice4/q10.py | neerajp99/intro_to_cs_CS-101 | 8c4ae52a68458706a37ffc14c663ea7ae67183ef | [
"MIT"
] | null | null | null | cs1101/Neeraj_Pandey_cs1101_practice4/q10.py | neerajp99/intro_to_cs_CS-101 | 8c4ae52a68458706a37ffc14c663ea7ae67183ef | [
"MIT"
] | null | null | null | """
Write a program that accepts 10 integers from a user into an array,
and count the number of occurrences of all present prime numbers.
"""
if __name__ == "__main__":
# Interactively read 10 integers, then count/print prime occurrences.
values = list()
for i in range(10):
x = int(input(f"Enter the list value { i + 1 }: "))
values.append(x)
count_occurrences(values)
| 25.864865 | 74 | 0.547544 | """
Write a program that accepts 10 integers from a user into an array,
and count the number of occurrences of all present prime numbers.
"""
def check_prime(value: int) -> int:
    """Return 1 if *value* is prime, otherwise 0.

    Fixes the original logic, which returned inside the first loop
    iteration (so every odd composite, e.g. 9, was reported prime) and
    fell off the end returning None for value 2 or 3.
    """
    if value <= 1:
        return 0
    # Trial division up to sqrt(value) is sufficient.
    for i in range(2, int(value ** 0.5) + 1):
        if value % i == 0:
            return 0
    return 1
def count_occurrences(values: list) -> None:
    """Tally how many times each prime number appears in *values* and
    print the tally dict plus one line per prime."""
    counts = {}
    for val in values:
        if check_prime(val) == 1:
            counts[val] = counts.get(val, 0) + 1
    print(counts)
    for prime in counts:
        print(f"Occurrence of prime number {prime} is: ", counts[prime])
if __name__ == "__main__":
# Interactively read 10 integers, then count/print prime occurrences.
values = list()
for i in range(10):
x = int(input(f"Enter the list value { i + 1 }: "))
values.append(x)
count_occurrences(values)
| 567 | 0 | 45 |
4747fa8cedc39d216094f2f2de620db276345ce7 | 2,188 | py | Python | Leetcode/Python Solutions/Common Algorithm Templates/Trees/binaryIndexTree.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 1 | 2020-01-06T02:21:56.000Z | 2020-01-06T02:21:56.000Z | Leetcode/Python Solutions/Common Algorithm Templates/Trees/binaryIndexTree.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | null | null | null | Leetcode/Python Solutions/Common Algorithm Templates/Trees/binaryIndexTree.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 3 | 2021-02-22T17:41:01.000Z | 2022-01-13T05:03:19.000Z | """
Language: Python
Written by: Mostofa Adib Shakib
Video Explanation: https://www.youtube.com/watch?v=CWDQJGaN1gY
Further Reading: https://www.geeksforgeeks.org/binary-indexed-tree-or-fenwick-tree-2/
https://www.topcoder.com/community/competitive-programming/tutorials/binary-indexed-trees/
Binary Index Tree or Fenwick Tree
The size of the BITree is one more than the size of the input array
Time Complexity:
Construction: O(nlogn)
Update BIT: O(logn)
Get Sum (0 to n): O(logn)
getParent:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Subtract" the previous number from the original number
getSum:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Add" the previous number from the original number
"""
# Returns sum of arr[0..index]. This function assumes
# that the array is preprocessed and partial sums of
# array elements are stored in BITree[].
# Updates a node in Binary Index Tree (BITree) at given index
# in BITree. The given value 'val' is added to BITree[i] and
# all of its ancestors in tree.
# Constructs and returns a Binary Indexed Tree for given array of size n. | 28.051282 | 107 | 0.646252 | """
Language: Python
Written by: Mostofa Adib Shakib
Video Explanation: https://www.youtube.com/watch?v=CWDQJGaN1gY
Further Reading: https://www.geeksforgeeks.org/binary-indexed-tree-or-fenwick-tree-2/
https://www.topcoder.com/community/competitive-programming/tutorials/binary-indexed-trees/
Binary Index Tree or Fenwick Tree
The size of the BITree is one more than the size of the input array
Time Complexity:
Construction: O(nlogn)
Update BIT: O(logn)
Get Sum (0 to n): O(logn)
getParent:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Subtract" the previous number from the original number
getSum:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Add" the previous number from the original number
"""
# Returns sum of arr[0..index]. This function assumes
# that the array is preprocessed and partial sums of
# array elements are stored in BITree[].
def getsum(BITree, idx):
    """Return the prefix sum arr[0..idx] from Fenwick tree *BITree*.

    BITree is 1-based, so the external 0-based index is shifted by one,
    then we walk toward the root accumulating partial sums.
    """
    total = 0
    node = idx + 1  # 1-based position inside the tree
    while node:
        total += BITree[node]
        node &= node - 1  # clear the lowest set bit -> parent range
    return total
# Updates a node in Binary Index Tree (BITree) at given index
# in BITree. The given value 'val' is added to BITree[i] and
# all of its ancestors in tree.
def updatebit(BITree, n, idx, value):
    """Add *value* to element *idx* (0-based) of the array represented by
    Fenwick tree *BITree* of logical size *n*, updating all ancestors."""
    pos = idx + 1  # 1-based position inside the tree
    while pos <= n:
        BITree[pos] += value
        pos += pos & (-pos)  # jump to the next range covering pos
# Constructs and returns a Binary Indexed Tree for given array of size n.
def construct(arr, n):
    """Build and return a Fenwick (Binary Indexed) tree for *arr* of
    size *n*; the tree array has n + 1 slots (index 0 unused)."""
    tree = [0] * (n + 1)
    for position in range(n):
        updatebit(tree, n, position, arr[position])
    return tree
48f38df6a99944c5de0089a6fadfd702887b5fad | 393 | py | Python | example/simple_event_python/handler.py | tomwillis608/serverless-s3-local | 0d104e24fd5901c5c1849f746b3326ead5429a30 | [
"MIT"
] | null | null | null | example/simple_event_python/handler.py | tomwillis608/serverless-s3-local | 0d104e24fd5901c5c1849f746b3326ead5429a30 | [
"MIT"
] | null | null | null | example/simple_event_python/handler.py | tomwillis608/serverless-s3-local | 0d104e24fd5901c5c1849f746b3326ead5429a30 | [
"MIT"
] | null | null | null | import json
from collections import OrderedDict
from pathlib import Path
| 20.684211 | 69 | 0.608142 | import json
from collections import OrderedDict
from pathlib import Path
def webhook(event, context):
return
def s3hook(event, context):
path = Path("/tmp") / event["Records"][0]["s3"]["object"]["eTag"]
with path.open("w") as f:
f.write(json.dumps(event))
print(json.dumps(OrderedDict([
("statusCode", 200),
("body", "result")
])))
return 0
| 274 | 0 | 46 |
40f8c64e6d1b2cd016ad656dd3b753298b202cf9 | 4,047 | py | Python | dmx512/dmx512.py | JimmyKenMerchant/Python_Codes | d780b8e9eb30a68547406a93fe4c412dbbdc8f26 | [
"BSD-3-Clause"
] | null | null | null | dmx512/dmx512.py | JimmyKenMerchant/Python_Codes | d780b8e9eb30a68547406a93fe4c412dbbdc8f26 | [
"BSD-3-Clause"
] | null | null | null | dmx512/dmx512.py | JimmyKenMerchant/Python_Codes | d780b8e9eb30a68547406a93fe4c412dbbdc8f26 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
# Author: Kenta Ishii
# SPDX short identifier: BSD-3-Clause
# ./dmx512.py
import RPi.GPIO as gpio
import threading
class DMX512:
"""Dependency:RPi.GPIO, threading"""
if __name__ == '__main__':
import sys
import time
import signal
version_info = "DMX512 Alpha"
signal.signal(signal.SIGINT, handle_sigint)
argv = sys.argv
if len(argv) == 1:
time_delay = 4
else:
time_delay = float(argv[1])
print(sys.version)
# Call Class
dmx512 = DMX512([12,16,19,20,21,26], 6, 13)
# Initialization of Flushing Method
list_data = [0x1F, 0x14, 0x1B, 0x11, 0x00, 0x13]
thread1 = dmx512.start_tx(list_data, 0, 6, time_delay)
thread1.join()
# Set Initial Values and Start
list_data = [1] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
# Start DMX512 Transmission
list_data = [0x1D, 0x1A]
thread1 = dmx512.start_tx(list_data, 0, 2, time_delay)
thread1.join()
status_gpio_eop_toggle = dmx512.eop_toggle()
count = 2
while True:
list_data = [count] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
count += 1
if count > 0xF:
count = 0;
break
while True:
if status_gpio_eop_toggle != dmx512.eop_toggle():
status_gpio_eop_toggle = dmx512.eop_toggle()
break
#if gpio.event_detected(num_gpio_eop_toggle) == 1:
# break
| 34.008403 | 129 | 0.622189 | #!/usr/bin/python3
# Author: Kenta Ishii
# SPDX short identifier: BSD-3-Clause
# ./dmx512.py
import RPi.GPIO as gpio
import threading
class DMX512:
"""Bit-banged 5-bit parallel transmitter over Raspberry Pi GPIO.

Dependency:RPi.GPIO, threading

list_gpio_output[0] is toggled as a clock; outputs [1..5] carry the low
five bits of each data word.  num_gpio_busy_toggle is an input whose
level flips when the receiver has consumed a word; num_gpio_eop_toggle
flips at (presumably) end-of-packet — confirm against receiver firmware.
"""
def __init__(self, list_gpio_output, num_gpio_busy_toggle, num_gpio_eop_toggle):
self.list_gpio_output = list_gpio_output
self.num_gpio_busy_toggle = num_gpio_busy_toggle
self.num_gpio_eop_toggle = num_gpio_eop_toggle
# BCM pin numbering; all output pins start low, inputs pulled down.
gpio.setmode(gpio.BCM)
gpio.setup(self.list_gpio_output, gpio.OUT)
gpio.output(self.list_gpio_output, 0)
gpio.setup(self.num_gpio_busy_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_busy_toggle, gpio.BOTH)
gpio.setup(self.num_gpio_eop_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_eop_toggle, gpio.BOTH)
def transmitter(self, list_data, index, length, time_delay):
"""Clock out list_data[index:index+length], one 5-bit word at a time.

time_delay is a busy-wait loop count (iterations, not seconds) that
holds the clock high before its falling edge.
"""
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
length += index;
while index < length:
data = list_data[index]
# Map the word's low five bits onto data output pins 1..5.
list_bit = []
if data & 0b00001:
list_bit.append(self.list_gpio_output[1])
if data & 0b00010:
list_bit.append(self.list_gpio_output[2])
if data & 0b00100:
list_bit.append(self.list_gpio_output[3])
if data & 0b01000:
list_bit.append(self.list_gpio_output[4])
if data & 0b10000:
list_bit.append(self.list_gpio_output[5])
#print(list_bit)
gpio.output(self.list_gpio_output, 0)
gpio.output(self.list_gpio_output[0], 1) # High State of Clock
gpio.output(list_bit, 1)
# Crude busy-wait holding the clock high for time_delay iterations.
dup_time_delay = time_delay
while dup_time_delay > 0:
dup_time_delay -= 1
gpio.output(self.list_gpio_output[0], 0) # Falling Edge of Clock
# Block until the receiver's busy line toggles, then advance a word.
while True:
if status_gpio_busy_toggle != gpio.input(self.num_gpio_busy_toggle):
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
index += 1
break
def start_tx(self, list_data, index, length, time_delay):
"""Run transmitter() on a daemon thread and return the thread."""
thread = threading.Thread(name='dmx512_start_tx', target=self.transmitter, args=(list_data, index, length, time_delay, ))
thread.setDaemon(True)
thread.start()
return thread
def eop_toggle(self):
"""Return the current level of the end-of-packet toggle input."""
return gpio.input(self.num_gpio_eop_toggle)
def __del__(self):
# Release all GPIO resources when the object is collected.
gpio.cleanup()
if __name__ == '__main__':
# Demo/driver script: install a SIGINT handler, read the optional
# clock-delay argument, then stream test patterns through DMX512.
import sys
import time
import signal
version_info = "DMX512 Alpha"
def handle_sigint(signum, frame):
print(version_info + ": Force Stop")
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
argv = sys.argv
if len(argv) == 1:
time_delay = 4
else:
time_delay = float(argv[1])
print(sys.version)
# Call Class
dmx512 = DMX512([12,16,19,20,21,26], 6, 13)
# Initialization of Flushing Method
list_data = [0x1F, 0x14, 0x1B, 0x11, 0x00, 0x13]
thread1 = dmx512.start_tx(list_data, 0, 6, time_delay)
thread1.join()
# Set Initial Values and Start
list_data = [1] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
# Start DMX512 Transmission
list_data = [0x1D, 0x1A]
thread1 = dmx512.start_tx(list_data, 0, 2, time_delay)
thread1.join()
status_gpio_eop_toggle = dmx512.eop_toggle()
count = 2
# Cycle the frame value 2..0xF, waiting for the end-of-packet toggle
# between frames; the loop terminates after the value wraps to 0.
while True:
list_data = [count] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
count += 1
if count > 0xF:
count = 0;
break
while True:
if status_gpio_eop_toggle != dmx512.eop_toggle():
status_gpio_eop_toggle = dmx512.eop_toggle()
break
#if gpio.event_detected(num_gpio_eop_toggle) == 1:
# break
| 2,337 | 0 | 161 |
b9b7aeaf120635095ec322f5e4b2350e9c695ca9 | 4,875 | py | Python | xtra_gnss.py | pixierepo/pixie_xtra_gnss | 4a0a85ef1ee381d531e8a08cc6ba9d3f38b72515 | [
"MIT"
] | null | null | null | xtra_gnss.py | pixierepo/pixie_xtra_gnss | 4a0a85ef1ee381d531e8a08cc6ba9d3f38b72515 | [
"MIT"
] | null | null | null | xtra_gnss.py | pixierepo/pixie_xtra_gnss | 4a0a85ef1ee381d531e8a08cc6ba9d3f38b72515 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[4]:
import serial
import time
import re
from datetime import datetime
import subprocess
import os
import urllib.request
# In[5]:
#Generic AT
CR = '\r\n'
ENABLE_AT='ATE1'
#Filesystem AT commands
UPLOAD_FILE='AT+QFUPL'
DELETE_FILE='AT+QFDEL'
LIST_FILES='AT+QFLST'
LIST_FILES_RAM=bytes('AT+QFLST="RAM:*"\r\n','utf-8')
#GPS AT commands
GPS_ENGINE='AT+QGPS'
XTRA='AT+QGPSXTRA'
XTRA_TIME='AT+QGPSXTRATIME'
XTRA_DATA='AT+QGPSXTRADATA'
END_SESSION='AT+QGPSEND'
# In[6]:
# In[7]:
# In[8]:
# In[9]:
# In[10]:
# In[11]:
# In[12]:
# In[13]:
# In[20]:
# In[15]:
# In[16]:
if __name__ == "__main__" and '__file__' in globals():
configure_xtra_gnss()
exit(0)
| 19.117647 | 79 | 0.581333 | #!/usr/bin/env python
# coding: utf-8
# In[4]:
import serial
import time
import re
from datetime import datetime
import subprocess
import os
import urllib.request
# In[5]:
#Generic AT
CR = '\r\n'
ENABLE_AT='ATE1'
#Filesystem AT commands
UPLOAD_FILE='AT+QFUPL'
DELETE_FILE='AT+QFDEL'
LIST_FILES='AT+QFLST'
LIST_FILES_RAM=bytes('AT+QFLST="RAM:*"\r\n','utf-8')
#GPS AT commands
GPS_ENGINE='AT+QGPS'
XTRA='AT+QGPSXTRA'
XTRA_TIME='AT+QGPSXTRATIME'
XTRA_DATA='AT+QGPSXTRADATA'
END_SESSION='AT+QGPSEND'
# In[6]:
def encode_AT(cmd, args=None):
    """Build the raw byte string for an AT command.

    An args of '?' is appended verbatim (query form); any other
    non-None args is appended as '=<args>' (set form).  A CR/LF
    terminator is always added and the result is UTF-8 encoded.
    """
    suffix = ''
    if args is not None:
        suffix = args if args == '?' else '=' + args
    return (cmd + suffix + '\r\n').encode('utf-8')
# In[7]:
def send_AT(cmd, args=None):
    """Encode an AT command, write it to the global serial port *ser*,
    and return the collected modem response string."""
    ser.write(encode_AT(cmd, args))
    return parse_rsp()
# In[8]:
def parse_rsp():
    """Accumulate bytes from the global serial port until the response
    contains one of the modem terminators: OK, CONNECT or ERROR.

    NOTE: this spins (busy-polls) while no data is pending.
    """
    response = ''
    terminators = ('OK', 'CONNECT', 'ERROR')
    while not any(token in response for token in terminators):
        response += ser.read(ser.inWaiting()).decode('utf-8')
    return response
# In[9]:
def send_file(filename, ramfs=False):
    """Upload a local file to the modem filesystem via AT+QFUPL.

    With ramfs=True the file is written to the modem's RAM: area.
    Returns the modem response (the upload is aborted on an ERROR
    reply to the QFUPL command).
    """
    with open(filename, "rb") as fh:
        payload = fh.read()
    target = "RAM:" + filename if ramfs else filename
    upload_args = '"{}",{}'.format(target, len(payload))
    rsp = send_AT(UPLOAD_FILE, upload_args)
    if 'ERROR' in rsp:
        return rsp
    # Modem answered CONNECT; stream the raw file bytes.
    ser.write(payload)
    return rsp + parse_rsp()
# In[10]:
def enable_xtra():
# Enable the modem's XTRA feature, then power-cycle the modem via the
# disable/enablePixieModem helpers and wait until ModemManager lists a
# Quectel device again before re-opening the serial port.
rsp=send_AT(XTRA,'1')
global port
global ser
ser.close()
ser=None
process = subprocess.Popen(['disablePixieModem'], stdout=subprocess.PIPE)
out, err = process.communicate()
out = out.decode('utf-8')
print(out)
process = subprocess.Popen(['enablePixieModem'], stdout=subprocess.PIPE)
out, err = process.communicate()
out = out.decode('utf-8')
print(out)
print("Waiting for Modem, this can take a long time...")
# Poll `mmcli -L` once per second until the Quectel modem re-enumerates.
while out.find("Quectel")<0:
time.sleep(1)
process = subprocess.Popen(['mmcli','-L'], stdout=subprocess.PIPE)
out, err = process.communicate()
out = out.decode('utf-8')
print("Modem ready at: ",out)
ser=serial.Serial(port)
ser.timeout=3
print("Enabling AT Commands...")
rsp=send_AT(ENABLE_AT)
if rsp.find('OK')<0:
return rsp
send_AT(END_SESSION)
# NOTE(review): the END_SESSION response is discarded and the check below
# re-tests the previous rsp — possibly intended rsp=send_AT(END_SESSION).
if rsp.find('OK')<0:
return rsp
return rsp
# In[11]:
def configure_xtra_data():
    """Push GNSS assistance data to the modem: set XTRA time from the
    current UTC clock, wipe the modem filesystem, upload xtra2.bin and
    inject it.  Returns the last modem response (early on failure)."""
    print("Configuring XTRA TIME")
    stamp = datetime.utcnow().strftime("%Y/%m/%d,%T")
    reply = send_AT(XTRA_TIME, '0,"' + stamp + '",1,1,5')
    if 'OK' not in reply:
        return reply
    print("Setting up XTRA DATA in Modem filesystem")
    reply = send_AT(DELETE_FILE, '"*"')
    if 'OK' not in reply:
        return reply
    reply = send_file("xtra2.bin")
    if 'OK' not in reply:
        return reply
    return send_AT(XTRA_DATA, '"xtra2.bin"')
# In[12]:
def configure_xtra():
    """Ensure the modem's XTRA (assisted GNSS) feature is enabled and
    loaded with current assistance data; prints progress and returns
    the failing modem response on error."""
    reply = send_AT(XTRA, '?')
    if 'QGPSXTRA: 1' in reply:
        print("XTRA already enabled...")
        return
    print("Enabling XTRA...")
    reply = enable_xtra()
    if 'OK' not in reply:
        print("Errors occurred:")
        print(reply)
        return reply
    print("Configuring XTRADATA...")
    get_xtra_file()
    reply = configure_xtra_data()
    if 'OK' not in reply:
        print("Errors occurred:")
        print(reply)
        return reply
    print("XTRA Ready...")
print("XTRA Ready...")
# In[13]:
def get_xtra_file():
# Download the XTRA assistance file to a fixed local path unless it is
# already present; failures are reported but not raised.
url='http://xtrapath1.izatcloud.net/xtra2.bin'
filename='/home/pixiepro/xtra2.bin'
if not os.path.isfile(filename):
try:
print("Downloading XTRA file")
urllib.request.urlretrieve(url, filename)
except Exception as e:
print("Could not download xtra file.")
print(str(e))
return
print("File downloaded.")
return
# In[20]:
def check_valid_file():
    """Query the modem for the loaded XTRA data (AT+QGPSXTRADATA?) and,
    when the injected date is older than seven days, re-download and
    re-inject the assistance file.  Returns the last response seen."""
    reply = send_AT(XTRA_DATA, '?')
    stamp = re.search(r'\d{4}/\d{2}/\d{2}', reply)
    injected = datetime.strptime(stamp.group(), '%Y/%m/%d').date()
    if (datetime.utcnow().date() - injected).days > 7:
        print("File expired, downloading again...")
        reply = get_xtra_file()
        configure_xtra_data()
    else:
        print("File is valid...")
    print("XTRA Ready...")
    return reply
# In[15]:
def configure_xtra_gnss():
    """Open the modem's AT serial port, enable/refresh the XTRA
    assistance data, then start the GNSS engine."""
    global ser
    global port
    port = '/dev/ttyUSB2'
    ser = serial.Serial(port)
    ser.timeout = 3
    rsp = send_AT(ENABLE_AT)  # ATE1
    configure_xtra()
    time.sleep(3)
    check_valid_file()
    rsp = send_AT(GPS_ENGINE, '1,30,50,0,1')
# In[16]:
# Run only when executed as a real script (not inside a notebook kernel,
# where __file__ is absent from globals()).
if __name__ == "__main__" and '__file__' in globals():
configure_xtra_gnss()
exit(0)
| 3,886 | 0 | 230 |
54deedd4d7b79030546b176fcf4014cb24cd5375 | 2,019 | py | Python | mobile_index_selenium.py | jhgil92/mobile_index | 29c9f11ca2201b27cd3734969a86dbab0a53c043 | [
"MIT"
] | null | null | null | mobile_index_selenium.py | jhgil92/mobile_index | 29c9f11ca2201b27cd3734969a86dbab0a53c043 | [
"MIT"
] | null | null | null | mobile_index_selenium.py | jhgil92/mobile_index | 29c9f11ca2201b27cd3734969a86dbab0a53c043 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
email = ''
password = ''
auth = ''
start_date = '2020-05-01'
end_date = '2020-11-30'
driver = webdriver.Chrome("C:/workspace/python/chromedriver/chromedriver.exe")
driver.implicitly_wait(10)
driver.get("https://hd.mobileindex.com/member/login?url=https%3A%2F%2Fhd.mobileindex.com%2F")
import pandas as pd
dates = pd.date_range(start = start_date,end = end_date).tolist()
dates = [pd.Timestamp.strftime(x, '%Y-%m-%d') for x in dates]
log_in_mobile_index(driver)
go_to_rank(driver)
for date in dates:
get_daily_rank(driver, date)
| 36.709091 | 151 | 0.727588 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
email = ''
password = ''
auth = ''
start_date = '2020-05-01'
end_date = '2020-11-30'
driver = webdriver.Chrome("C:/workspace/python/chromedriver/chromedriver.exe")
driver.implicitly_wait(10)
driver.get("https://hd.mobileindex.com/member/login?url=https%3A%2F%2Fhd.mobileindex.com%2F")
# Log in to Mobile Index: fill the email/password form, submit, complete
# the secondary auth-key step, and dismiss the confirmation modal.
# The sleeps presumably wait for page transitions — consider explicit waits.
def log_in_mobile_index(driver, email, password, auth):
driver.find_element_by_name('igawEmail').send_keys(email)
driver.find_element_by_name('igawPw').send_keys(password)
driver.find_element_by_css_selector('.btn--sign').click()
time.sleep(1.5)
driver.find_element_by_name('authKey').send_keys(auth)
driver.find_element_by_css_selector('body > div.frm--sign > div.sign-box > div.sign-box__area-for-form > main > form > div.form__main > a').click()
time.sleep(1.0)
driver.find_element_by_css_selector('#md-certkey-checked > div > div > div.modal-footer.justify-content-center > button.btn.btn-primary').click()
time.sleep(1.0)
def go_to_rank(driver):
    """Navigate the browser to the Mobile Index business-rank page
    (URL-encoded finance / securities-investment category filters)."""
    rank_url = ("https://hd.mobileindex.com/rank/biz?cm=%EA%B8%88%EC%9C%B5"
                "&genre=%EC%A6%9D%EA%B6%8C/%ED%88%AC%EC%9E%90&c=76")
    driver.get(rank_url)
    time.sleep(1.0)
def change_date_rank(driver, date='2020-12-01'):
    """Type *date* (YYYY-MM-DD) into the #eDay input and submit it, then
    wait for the rank table to refresh."""
    date_input = driver.find_element_by_css_selector('#eDay')
    date_input.clear()
    date_input.send_keys(date)
    date_input.send_keys(Keys.ENTER)
    time.sleep(2.5)
def download_btn_click(driver):
    """Click the download/export link on the rank page, then wait for
    the browser download to start."""
    download_link = driver.find_element_by_css_selector('body > div.frm > main > div.content > div > div.boundarybar > div:nth-child(5) > a')
    download_link.click()
    time.sleep(2.5)
def get_daily_rank(driver, date):
    """Select *date* on the rank page, download that day's data, and log
    completion to stdout."""
    change_date_rank(driver, date)
    download_btn_click(driver)
    print(f'{date} download done')
# Build the list of dates to scrape, log in, then download each daily rank.
import pandas as pd

dates = pd.date_range(start=start_date, end=end_date).tolist()
dates = [pd.Timestamp.strftime(x, '%Y-%m-%d') for x in dates]

# BUGFIX: log_in_mobile_index requires (driver, email, password, auth);
# it was previously called with only `driver`, raising TypeError.
log_in_mobile_index(driver, email, password, auth)
go_to_rank(driver)
for date in dates:
    get_daily_rank(driver, date)
| 1,277 | 0 | 115 |
af1fcbccbf9b5bd714483c88502c6f4143aa2651 | 10,293 | py | Python | bot/cogs/core.py | bryanpalmer/AzsocamiBot | 3e163ef793abf2be2b64835f901bec7cdfd10cc6 | [
"MIT"
] | 2 | 2021-03-03T04:28:24.000Z | 2021-10-12T09:36:16.000Z | bot/cogs/core.py | bryanpalmer/AzsocamiBot | 3e163ef793abf2be2b64835f901bec7cdfd10cc6 | [
"MIT"
] | null | null | null | bot/cogs/core.py | bryanpalmer/AzsocamiBot | 3e163ef793abf2be2b64835f901bec7cdfd10cc6 | [
"MIT"
] | 1 | 2021-03-03T04:28:27.000Z | 2021-03-03T04:28:27.000Z | import discord
from discord.ext import commands, tasks
import os, sys, inspect
import asyncio
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import botlib
# import wowapi
DEVMODE = os.getenv("DEVMODE") == "TRUE" # Boolean flag for devmode
ENVVERSION = os.getenv("ENV_VERSION") # Local .env or server vars
COMMAND_PREFIX = os.getenv("COMMAND_PREFIX") # Bot command prefix
class Core(commands.Cog):
"""
Core bot discord functions
"""
## On_Ready event for cog
@commands.Cog.listener()
# region HelpCommand
# HELP
# @bot.command()
# async def help(ctx):
# author = ctx.message.author
# embed = discord.Embed(color=discord.Color.orange())
# embed.set_author(name="Help")
# # embed.add_field(
# # name=".ping", value="Returns Pong to check bot latency.", inline=False
# # )
# embed.add_field(
# name=".mats or .raidmats",
# value="Current Auction House pricing on common raid mats.",
# inline=False,
# )
# embed.add_field(
# name=".lpc or .legendaries",
# value=".lpc [armorType] - Auction House pricing on legendary base armors.",
# inline=False,
# )
# embed.add_field(
# name=".tc",
# value=".tc - Shows current Twisting Corridors achievement for team.",
# inline=False,
# )
# embed.add_field(
# name=".gvault or .gv",
# value="Shows current Great Vault loot from M+ keys.",
# inline=False,
# )
# embed.add_field(
# name=".bestruns or .br",
# value="Shows best timed mythic runs for season, all members.",
# inline=False,
# )
# embed.add_field(
# name=".team or .raidteam",
# value="team [update] - List current team members data. Update is Optional.",
# inline=False,
# )
# embed.add_field(
# name=".add_member",
# value="add_member <playername> [<realm>] Add new member. Realm defaults to silver-hand.",
# inline=False,
# )
# embed.add_field(
# name=".remove_member",
# value="remove_member <playername> Remove member.",
# inline=False,
# )
# embed.add_field(
# name=".change_member_role",
# value="change_member_role <playername> Change member role.",
# inline=False,
# )
# embed.add_field(
# name=".rules", value="Guild rules to live by. Esp rule #1.", inline=False
# )
# embed.add_field(
# name=".clean",
# value="Cleans all AzsocamiBot messages and commands from channel.",
# inline=False,
# )
# embed.add_field(
# name=".cleanbot",
# value="Cleans certain bot messages and commands from channel.",
# inline=False,
# )
# embed.add_field(
# name=".changelog",
# value="AzsocamiBot change log.",
# inline=False,
# )
# embed.add_field(
# name=".version",
# value="AzsocamiBot version info.",
# inline=False,
# )
# await ctx.send(embed=embed)
# if author.name.lower() == "aaryn":
# embed2 = discord.Embed(color=discord.Color.orange())
# embed2.set_author(name="Admin Only Commands")
# # embed2.add_field(
# # name=".db_members", value="ADMIN: List members database rows.", inline=False
# # )
# embed2.add_field(
# name=".get_table_contents",
# value="ADMIN: get_table_contents <tablename> List table contents.",
# inline=False,
# )
# embed2.add_field(
# name=".get_table_structure",
# value="ADMIN: get_table_structure <tablename> List table structure.",
# inline=False,
# )
# embed2.add_field(
# name=".add_item",
# value="ADMIN: add_item <ItemID> Add itemid to raidmats.",
# inline=False,
# )
# embed2.add_field(
# name=".remove_item",
# value="ADMIN: remove_item <ItemID> Remove itemid from raidmats.",
# inline=False,
# )
# await ctx.send(embed=embed2)
# endregion
@commands.command()
# Discord command: posts the guild's five house rules to the invoking channel.
async def rules(self, ctx):
""" Rules to live by """
msg = """
**Rule #1: It's Ben's fault. Always.**
Rule #2: Be respectful to one another.
Rule #3: No Politics and No Religion talk.
**Rule #4: Keep voice chatter to a minimum during boss pulls.**
Rule #5: Thou shall not upset thy tank or thy healer.
"""
await ctx.send(msg)
@commands.command()
# Discord command: replies with the bot's current websocket latency (seconds).
async def ping(self, ctx):
""" Generic latency test for bot """
await ctx.send(f"🏓 Pong with {str(round(self.client.latency, 2))} seconds.")
@commands.command(name="whoami", hidden=True)
@commands.command()
async def clean(self, ctx, number=50):
""" Clean <number=50> AzsocamiBot commands and responses from channel """
# Scan the last `number` channel messages, collect those authored by this
# bot plus any message starting with the command prefix, then bulk-delete.
mgs = []
number = int(number)
cleaned = 0
async for x in ctx.message.channel.history(limit=number):
if x.author.id == self.client.user.id:
mgs.append(x)
cleaned += 1
# print(x)
# NOTE(review): not an elif — a bot message that also starts with the
# prefix is appended twice (cleanbot below uses elif); verify intended.
if x.content[:1] == COMMAND_PREFIX:
mgs.append(x)
cleaned += 1
# print(x.content[:1])
await ctx.message.channel.delete_messages(mgs)
print(f"Removed {cleaned} messages and commands.")
@commands.command()
async def cleanbot(self, ctx, number=50):
""" Clean <number=50> bot commands and responses from channel """
# Like clean(), but matches a hard-coded list of bot author IDs and a
# set of known command prefixes; elif prevents double-counting.
mgs = []
number = int(number)
cleaned = 0
# M+ bot, this bot,
botsList = [378005927493763074, self.client.user.id]
prefixList = [".", "*", "!", ";"]
async for x in ctx.message.channel.history(limit=number):
if x.author.id in botsList:
mgs.append(x)
cleaned += 1
# print(x)
elif x.content[:1] in prefixList:
mgs.append(x)
cleaned += 1
# print(x.content[:1])
await ctx.message.channel.delete_messages(mgs)
print(f"Removed {cleaned} messages and commands.")
###################################################################
###################################################################
## ##
## BACKGROUND TASKS ##
## ##
###################################################################
###################################################################
@tasks.loop(minutes=15)
###################################################################
###################################################################
## ##
## NOT IMPLEMENTED YET ##
## ##
###################################################################
###################################################################
# region NotImplemented
# @commands.command()
# @commands.is_owner()
# async def status(self, ctx):
# msg = f"AzsocamiBot version {VERSION}, released {VERSIONDATE}.\n"
# # msg += "Bot running as "
# # if TESTBOT:
# # msg += "TEST BOT.\n"
# # else:
# # msg += "PRODUCTION BOT.\n"
# # msg += f"Server Timezone: {time.tzname}\n"
# # msg += f"Server Time: {datetime.datetime.now().strftime(TIMEFORMAT)}\n"
# msg += f"Bot Local Time: {botlib.localNow()}\n"
# msg += f"Bot source is at https://github.com/bryanpalmer/AzsocamiBot\n"
# msg += f"Bot running on heroku.com\n"
# await ctx.send(msg)
# endregion
## Initialize cog
| 37.703297 | 103 | 0.496551 | import discord
from discord.ext import commands, tasks
import os, sys, inspect
import asyncio
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import botlib
# import wowapi
DEVMODE = os.getenv("DEVMODE") == "TRUE" # Boolean flag for devmode
ENVVERSION = os.getenv("ENV_VERSION") # Local .env or server vars
COMMAND_PREFIX = os.getenv("COMMAND_PREFIX") # Bot command prefix
class Core(commands.Cog):
    """
    Core bot discord functions
    """
    def __init__(self, client):
        # Reference to the running commands.Bot instance; used to look up
        # channels, the bot's own user id, and to update its presence.
        self.client = client
    ## On_Ready event for cog
    @commands.Cog.listener()
    async def on_ready(self):
        # Announce startup in a log channel and set the bot's presence.
        # DEVMODE selects between two hard-coded channel ids; the periodic
        # alive-check loop is only started when DEVMODE is off.
        print("Core is initialized.")
        actMsg = "Let's Blame Ben"
        if DEVMODE == False:
            # updateTeamDataBG.start()
            # updateMythicPlusDataBG.start()
            self.botAliveCheckBG.start()
            logsChannel = self.client.get_channel(799290844862480445)  # log channel used when DEVMODE is off
            await logsChannel.send(f"AzsocamiBot starting up: {botlib.localNow()}")
        if DEVMODE == True:
            actMsg = "DEVMODE"
            # self.botAliveCheckBG.start()
            logsChannel = self.client.get_channel(790667200197296138)  # log channel used in DEVMODE
            await logsChannel.send(f"AzsocamiBot starting up: {botlib.localNow()}")
        await self.client.change_presence(
            status=discord.Status.idle, activity=discord.Game(f"{actMsg}")
        )
        # print(f"AzsocamiBot version {VERSION} is now online.")
        # print(f"Bot name is {bot.user.name}, ID={bot.user.id}")
        # print(f"Using {ENVVERSION}")
        # print(f"Command prefix is: {COMMAND_PREFIX}")
    # region HelpCommand
    # NOTE(review): a large block (~100 lines) of commented-out legacy custom
    # `.help` command code (help embeds plus an admin-only section) was removed
    # here; recover it from version control history if it is ever needed.
    # endregion
    @commands.command()
    async def rules(self, ctx):
        """ Rules to live by """
        msg = """
**Rule #1: It's Ben's fault. Always.**
Rule #2: Be respectful to one another.
Rule #3: No Politics and No Religion talk.
**Rule #4: Keep voice chatter to a minimum during boss pulls.**
Rule #5: Thou shall not upset thy tank or thy healer.
        """
        await ctx.send(msg)
    @commands.command()
    async def ping(self, ctx):
        """ Generic latency test for bot """
        await ctx.send(f"🏓 Pong with {str(round(self.client.latency, 2))} seconds.")
    @commands.command(name="whoami", hidden=True)
    async def whoami(self, ctx):
        # Hidden diagnostic: reports the invoking user and which env config the
        # bot was started with. (No docstring added on purpose -- discord.py
        # surfaces command docstrings as help text.)
        await ctx.send(f"You are {ctx.message.author.name}, using {ENVVERSION}")
    @commands.command()
    async def clean(self, ctx, number=50):
        """ Clean <number=50> AzsocamiBot commands and responses from channel """
        mgs = []
        number = int(number)
        cleaned = 0
        # Collect this bot's own messages plus anything starting with the bot's
        # command prefix, then bulk-delete them in one API call.
        # NOTE(review): unlike cleanbot() below, these two checks are NOT
        # exclusive -- a bot message that also starts with COMMAND_PREFIX is
        # appended twice and double-counted; confirm delete_messages tolerates
        # duplicate entries.
        async for x in ctx.message.channel.history(limit=number):
            if x.author.id == self.client.user.id:
                mgs.append(x)
                cleaned += 1
                # print(x)
            if x.content[:1] == COMMAND_PREFIX:
                mgs.append(x)
                cleaned += 1
                # print(x.content[:1])
        await ctx.message.channel.delete_messages(mgs)
        print(f"Removed {cleaned} messages and commands.")
    @commands.command()
    async def cleanbot(self, ctx, number=50):
        """ Clean <number=50> bot commands and responses from channel """
        mgs = []
        number = int(number)
        cleaned = 0
        # M+ bot, this bot,
        botsList = [378005927493763074, self.client.user.id]
        # Common command prefixes of other bots in the channel.
        prefixList = [".", "*", "!", ";"]
        async for x in ctx.message.channel.history(limit=number):
            if x.author.id in botsList:
                mgs.append(x)
                cleaned += 1
                # print(x)
            elif x.content[:1] in prefixList:
                mgs.append(x)
                cleaned += 1
                # print(x.content[:1])
        await ctx.message.channel.delete_messages(mgs)
        print(f"Removed {cleaned} messages and commands.")
    async def botAliveCheck(self):
        # Post a heartbeat message with the current timestamp into the log
        # channel matching the current mode (DEVMODE on/off).
        if DEVMODE == False:
            botChannel = self.client.get_channel(799290844862480445)
        if DEVMODE == True:
            botChannel = self.client.get_channel(790667200197296138)
        await botChannel.send(f"botAliveCheckBG: {botlib.localNow()}")
    ###################################################################
    ## BACKGROUND TASKS                                              ##
    ###################################################################
    @tasks.loop(minutes=15)
    async def botAliveCheckBG(self):
        # Periodic heartbeat; started from on_ready() when DEVMODE is off.
        print("Core:AliveCheckBG process")
        await self.botAliveCheck()
    # region NotImplemented
    # NOTE(review): a commented-out draft of an owner-only `.status` command
    # (version/time report) was removed here; recover it from version control
    # history if it is ever implemented.
    # endregion
## Initialize cog
def setup(client):
    """discord.py extension entry point: register the Core cog on the bot."""
    client.add_cog(Core(client))
| 1,514 | 0 | 154 |
9c79bc70ddef8a6d0acc20fd3ffffeb5e0697f97 | 956 | py | Python | simple/close.py | rzzzwilson/pyqt5 | b0d06f01727e6b3c99b68f1276573baf36daadb6 | [
"MIT"
] | 1 | 2017-10-29T13:00:52.000Z | 2017-10-29T13:00:52.000Z | simple/close.py | rzzzwilson/pyqt5 | b0d06f01727e6b3c99b68f1276573baf36daadb6 | [
"MIT"
] | null | null | null | simple/close.py | rzzzwilson/pyqt5 | b0d06f01727e6b3c99b68f1276573baf36daadb6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
This program creates a quit
button. When we press the button,
the application terminates.
author: Jan Bodnar
website: zetcode.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication
from PyQt5.QtCore import QCoreApplication
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | 21.244444 | 62 | 0.598326 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
This program creates a quit
button. When we press the button,
the application terminates.
author: Jan Bodnar
website: zetcode.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication
from PyQt5.QtCore import QCoreApplication
class Example(QWidget):
    """Small demo window holding a single button that quits the application."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Create the quit button, configure the window, and show it."""
        quit_button = QPushButton('Quit', self)
        # Clicking the button terminates the Qt event loop.
        quit_button.clicked.connect(QCoreApplication.instance().quit)
        quit_button.resize(quit_button.sizeHint())
        quit_button.move(50, 50)
        self.setWindowTitle('Quit button')
        self.setGeometry(300, 300, 250, 150)
        self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | 371 | 2 | 89 |
ed3db3f6d5dbf506a2b33f0dfb01664354a7b278 | 452 | py | Python | launchlab_django_utils/storage.py | LaunchlabAU/ll-dj-utils | 5d3e52a660e460f46537bd5a2a380a48ba85017b | [
"MIT"
] | 2 | 2016-08-30T17:43:10.000Z | 2018-08-29T06:09:07.000Z | launchlab_django_utils/storage.py | LaunchlabAU/ll-dj-utils | 5d3e52a660e460f46537bd5a2a380a48ba85017b | [
"MIT"
] | 1 | 2016-08-19T06:43:21.000Z | 2016-08-24T06:23:41.000Z | launchlab_django_utils/storage.py | LaunchlabAU/launchlab-django-utils | 5d3e52a660e460f46537bd5a2a380a48ba85017b | [
"MIT"
] | null | null | null | from storages.backends.s3boto3 import S3Boto3Storage
| 32.285714 | 71 | 0.712389 | from storages.backends.s3boto3 import S3Boto3Storage
class StaticRootS3Boto3Storage(S3Boto3Storage):
    """S3 storage backend that roots every file under the ``static`` prefix."""

    def __init__(self, *args, **kwargs):
        # Force the bucket sub-folder regardless of what the caller supplied.
        kwargs['location'] = 'static'
        super().__init__(*args, **kwargs)
class MediaRootS3Boto3Storage(S3Boto3Storage):
    """S3 storage backend that roots every file under the ``media`` prefix."""

    def __init__(self, *args, **kwargs):
        # Force the bucket sub-folder regardless of what the caller supplied.
        kwargs['location'] = 'media'
        super().__init__(*args, **kwargs)
| 248 | 51 | 98 |
f150b5f8225172c15a64e638a537a893cbd944fe | 2,096 | py | Python | source/comparative_genomics/heatmap_VF.py | rbr7/compgenomics2021 | 15d75fbcc939cb2388fd3749cf444c6b62319e47 | [
"MIT"
] | null | null | null | source/comparative_genomics/heatmap_VF.py | rbr7/compgenomics2021 | 15d75fbcc939cb2388fd3749cf444c6b62319e47 | [
"MIT"
] | null | null | null | source/comparative_genomics/heatmap_VF.py | rbr7/compgenomics2021 | 15d75fbcc939cb2388fd3749cf444c6b62319e47 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np; np.random.seed(0)
import seaborn as sns; sns.set_theme()
from matplotlib import pyplot as plt
file=pd.read_csv('/home/Documents/Acads/CompGenomics/Proj_ComparitiveGenomics/vf_merged_results.tsv', sep='\t')
VF=list(file['Virulence factor'])
Is=list(file['Isolate'])
genes=['eae', 'iss', 'tir', 'espA', 'espB', 'espJ', 'etpD', 'iha', 'ehxA', 'nleA', 'nleB', 'astA', 'espP', 'gad', 'katP', 'nleC', 'toxB', 'espF','efa1' , 'stx1A', 'stx1B','espI']
isolates=['CGT1009', 'CGT1020', 'CGT1058', 'CGT1084', 'CGT1174', 'CGT1197', 'CGT1219', 'CGT1238', 'CGT1283', 'CGT1317', 'CGT1323', 'CGT1327', 'CGT1342', 'CGT1394', 'CGT1408', 'CGT1417', 'CGT1428', 'CGT1436', 'CGT1440', 'CGT1447', 'CGT1459', 'CGT1473', 'CGT1493', 'CGT1500', 'CGT1511', 'CGT1519', 'CGT1531', 'CGT1568', 'CGT1600', 'CGT1602', 'CGT1606', 'CGT1615', 'CGT1621', 'CGT1777', 'CGT1778', 'CGT1783', 'CGT1795', 'CGT1808', 'CGT1833', 'CGT1834', 'CGT1837', 'CGT1841', 'CGT1858', 'CGT1946', 'CGT1960', 'CGT1976', 'CGT1985', 'CGT1989', 'CGT1991', 'CGT1992']
isolates_tree=['CGT1837', 'CGT1602', 'CGT1317', 'CGT1447', 'CGT1323', 'CGT1992', 'CGT1976', 'CGT1621', 'CGT1473', 'CGT1327', 'CGT1778', 'CGT1197', 'CGT1238', 'CGT1615', 'CGT1219', 'CGT1020', 'CGT1009', 'CGT1783', 'CGT1394', 'CGT1833', 'CGT1600', 'CGT1408', 'CGT1568', 'CGT1500', 'CGT1342', 'CGT1858', 'CGT1283', 'CGT1991', 'CGT1985', 'CGT1841', 'CGT1459', 'CGT1795', 'CGT1531', 'CGT1436', 'CGT1519', 'CGT1440', 'CGT1834', 'CGT1058', 'CGT1606', 'CGT1511', 'CGT1417', 'CGT1084', 'CGT1777', 'CGT1960', 'CGT1808', 'CGT1428', 'CGT1989', 'CGT1174', 'CGT1946', 'CGT1493']
# Presence/absence matrix: 50 isolates (rows, in phylogenetic-tree order given
# by isolates_tree) x 22 virulence-factor genes (columns, given by genes).
toplot=[[0 for j in range(22)] for i in range(50)]
for i in range(50):
    print(i)
    # Slice this isolate's genes out of the flat VF/Is columns: the rows for
    # one isolate run from its first occurrence up to the first occurrence of
    # the next isolate in the sorted `isolates` list. 'CGT1992' is the last
    # entry of `isolates`, so it slices to the end instead.
    # NOTE(review): assumes the TSV rows are grouped by isolate in the same
    # order as the sorted `isolates` list -- confirm against the input file.
    if isolates_tree[i] !='CGT1992':
        neighbour=isolates[isolates.index(isolates_tree[i])+1]
        setofgenes=VF[Is.index(isolates_tree[i]):Is.index(neighbour)]
    else:
        setofgenes=VF[Is.index(isolates_tree[i]):]
    for j in range(22):
        if genes[j] in setofgenes:
            toplot[i][j]=1
xlabels=genes
ylabels=isolates_tree
# Render the binary matrix: each cell marks whether the isolate carries the gene.
ax = sns.heatmap(toplot, xticklabels=xlabels, yticklabels=ylabels)
plt.show()
| 52.4 | 564 | 0.663645 | import pandas as pd
import numpy as np; np.random.seed(0)
import seaborn as sns; sns.set_theme()
from matplotlib import pyplot as plt
file=pd.read_csv('/home/Documents/Acads/CompGenomics/Proj_ComparitiveGenomics/vf_merged_results.tsv', sep='\t')
VF=list(file['Virulence factor'])
Is=list(file['Isolate'])
genes=['eae', 'iss', 'tir', 'espA', 'espB', 'espJ', 'etpD', 'iha', 'ehxA', 'nleA', 'nleB', 'astA', 'espP', 'gad', 'katP', 'nleC', 'toxB', 'espF','efa1' , 'stx1A', 'stx1B','espI']
isolates=['CGT1009', 'CGT1020', 'CGT1058', 'CGT1084', 'CGT1174', 'CGT1197', 'CGT1219', 'CGT1238', 'CGT1283', 'CGT1317', 'CGT1323', 'CGT1327', 'CGT1342', 'CGT1394', 'CGT1408', 'CGT1417', 'CGT1428', 'CGT1436', 'CGT1440', 'CGT1447', 'CGT1459', 'CGT1473', 'CGT1493', 'CGT1500', 'CGT1511', 'CGT1519', 'CGT1531', 'CGT1568', 'CGT1600', 'CGT1602', 'CGT1606', 'CGT1615', 'CGT1621', 'CGT1777', 'CGT1778', 'CGT1783', 'CGT1795', 'CGT1808', 'CGT1833', 'CGT1834', 'CGT1837', 'CGT1841', 'CGT1858', 'CGT1946', 'CGT1960', 'CGT1976', 'CGT1985', 'CGT1989', 'CGT1991', 'CGT1992']
isolates_tree=['CGT1837', 'CGT1602', 'CGT1317', 'CGT1447', 'CGT1323', 'CGT1992', 'CGT1976', 'CGT1621', 'CGT1473', 'CGT1327', 'CGT1778', 'CGT1197', 'CGT1238', 'CGT1615', 'CGT1219', 'CGT1020', 'CGT1009', 'CGT1783', 'CGT1394', 'CGT1833', 'CGT1600', 'CGT1408', 'CGT1568', 'CGT1500', 'CGT1342', 'CGT1858', 'CGT1283', 'CGT1991', 'CGT1985', 'CGT1841', 'CGT1459', 'CGT1795', 'CGT1531', 'CGT1436', 'CGT1519', 'CGT1440', 'CGT1834', 'CGT1058', 'CGT1606', 'CGT1511', 'CGT1417', 'CGT1084', 'CGT1777', 'CGT1960', 'CGT1808', 'CGT1428', 'CGT1989', 'CGT1174', 'CGT1946', 'CGT1493']
# Presence/absence matrix: 50 isolates (rows, in phylogenetic-tree order given
# by isolates_tree) x 22 virulence-factor genes (columns, given by genes).
toplot=[[0 for j in range(22)] for i in range(50)]
for i in range(50):
    print(i)
    # Slice this isolate's genes out of the flat VF/Is columns: the rows for
    # one isolate run from its first occurrence up to the first occurrence of
    # the next isolate in the sorted `isolates` list. 'CGT1992' is the last
    # entry of `isolates`, so it slices to the end instead.
    # NOTE(review): assumes the TSV rows are grouped by isolate in the same
    # order as the sorted `isolates` list -- confirm against the input file.
    if isolates_tree[i] !='CGT1992':
        neighbour=isolates[isolates.index(isolates_tree[i])+1]
        setofgenes=VF[Is.index(isolates_tree[i]):Is.index(neighbour)]
    else:
        setofgenes=VF[Is.index(isolates_tree[i]):]
    for j in range(22):
        if genes[j] in setofgenes:
            toplot[i][j]=1
xlabels=genes
ylabels=isolates_tree
# Render the binary matrix: each cell marks whether the isolate carries the gene.
ax = sns.heatmap(toplot, xticklabels=xlabels, yticklabels=ylabels)
plt.show()
| 0 | 0 | 0 |
01ff571c686e592ea7e7334e654437ef89ec23b7 | 6,628 | py | Python | python/plugins/processing/gui/Postprocessing.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/processing/gui/Postprocessing.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | python/plugins/processing/gui/Postprocessing.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | # -*- coding: utf-8 -*-
"""
***************************************************************************
Postprocessing.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
import traceback
from qgis.PyQt.QtWidgets import QApplication
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsProject,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMapLayerType,
QgsWkbTypes,
QgsMessageLog,
QgsProviderRegistry,
QgsExpressionContext,
QgsExpressionContextScope)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.RenderingStyles import RenderingStyles
def set_layer_name(layer, context_layer_details):
    """
    Sets the name for the given layer, either using the layer's file name
    (or database layer name), or the name specified by the parameter definition.
    """
    prefer_filename = ProcessingConfig.getSetting(ProcessingConfig.USE_FILENAME_AS_LAYER_NAME)

    # If the user does not want file-based names and the parameter definition
    # carries a name, that name wins outright.
    if not prefer_filename and context_layer_details.name:
        layer.setName(context_layer_details.name)
        return

    uri_parts = QgsProviderRegistry.instance().decodeUri(layer.dataProvider().name(), layer.source())

    # A database-style source exposes an explicit layer name -- prefer it.
    db_layer_name = uri_parts.get('layerName', '')
    if db_layer_name:
        layer.setName(db_layer_name)
        return

    # Otherwise derive the name from the file path's basename (sans extension).
    file_path = uri_parts.get('path', '')
    if file_path:
        layer.setName(os.path.splitext(os.path.basename(file_path))[0])
    elif context_layer_details.name:
        # Fallback to the parameter's name -- shouldn't happen!
        layer.setName(context_layer_details.name)
| 45.39726 | 184 | 0.569855 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Postprocessing.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
import traceback
from qgis.PyQt.QtWidgets import QApplication
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsProject,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMapLayerType,
QgsWkbTypes,
QgsMessageLog,
QgsProviderRegistry,
QgsExpressionContext,
QgsExpressionContextScope)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.RenderingStyles import RenderingStyles
def set_layer_name(layer, context_layer_details):
    """
    Sets the name for the given layer, either using the layer's file name
    (or database layer name), or the name specified by the parameter definition.
    """
    prefer_filename = ProcessingConfig.getSetting(ProcessingConfig.USE_FILENAME_AS_LAYER_NAME)

    # If the user does not want file-based names and the parameter definition
    # carries a name, that name wins outright.
    if not prefer_filename and context_layer_details.name:
        layer.setName(context_layer_details.name)
        return

    uri_parts = QgsProviderRegistry.instance().decodeUri(layer.dataProvider().name(), layer.source())

    # A database-style source exposes an explicit layer name -- prefer it.
    db_layer_name = uri_parts.get('layerName', '')
    if db_layer_name:
        layer.setName(db_layer_name)
        return

    # Otherwise derive the name from the file path's basename (sans extension).
    file_path = uri_parts.get('path', '')
    if file_path:
        layer.setName(os.path.splitext(os.path.basename(file_path))[0])
    elif context_layer_details.name:
        # Fallback to the parameter's name -- shouldn't happen!
        layer.setName(context_layer_details.name)
def handleAlgorithmResults(alg, context, feedback=None, showResults=True, parameters=None):
    """
    Load the output layers of an executed algorithm into their target projects.

    Iterates over ``context.layersToLoadOnCompletion()``: each entry is resolved
    to a map layer, renamed (see :func:`set_layer_name`), styled (per-output
    rendering style, falling back to the globally configured defaults) and added
    to its destination project. Layers that cannot be resolved or that raise
    during loading are collected and reported through ``feedback``.

    :param alg: the algorithm that was executed
    :param context: processing context holding the layers to load
    :param feedback: optional QgsProcessingFeedback; a fresh one is created when None
    :param showResults: unused; kept for backward compatibility with existing callers
    :param parameters: parameter map the algorithm was executed with, used to map a
                       loaded layer back to the (model) output it belongs to.
                       Defaults to an empty dict.
    :return: True when every layer was loaded correctly, False otherwise
    """
    # Fix: the previous signature used the mutable literal ``{}`` as the default
    # value, which is shared between calls; use a None sentinel instead.
    if parameters is None:
        parameters = {}

    wrongLayers = []
    if feedback is None:
        feedback = QgsProcessingFeedback()
    feedback.setProgressText(QCoreApplication.translate('Postprocessing', 'Loading resulting layers'))
    i = 0
    for l, details in context.layersToLoadOnCompletion().items():
        if feedback.isCanceled():
            return False

        if len(context.layersToLoadOnCompletion()) > 2:
            # only show progress feedback if we're loading a bunch of layers
            feedback.setProgress(100 * i / float(len(context.layersToLoadOnCompletion())))
        try:
            layer = QgsProcessingUtils.mapLayerFromString(l, context, typeHint=details.layerTypeHint)
            if layer is not None:
                set_layer_name(layer, details)

                # If running a model, the execution arrives here when an
                # algorithm that is part of that model is executed. Check if its
                # output is a final output of the model, and adapt the output
                # name accordingly so the matching rendering style is found.
                outputName = details.outputName
                expcontext = QgsExpressionContext()
                scope = QgsExpressionContextScope()
                expcontext.appendScope(scope)
                for out in alg.outputDefinitions():
                    if out.name() not in parameters:
                        continue
                    outValue = parameters[out.name()]
                    if hasattr(outValue, "sink"):
                        outValue = outValue.sink.valueAsString(expcontext)[0]
                    else:
                        outValue = str(outValue)
                    if outValue == l:
                        outputName = out.name()
                        break

                # Resolve the style: per-algorithm/per-output style first, then
                # the globally configured default for the layer's type/geometry.
                style = None
                if outputName:
                    style = RenderingStyles.getStyle(alg.id(), outputName)
                if style is None:
                    if layer.type() == QgsMapLayerType.RasterLayer:
                        style = ProcessingConfig.getSetting(ProcessingConfig.RASTER_STYLE)
                    else:
                        if layer.geometryType() == QgsWkbTypes.PointGeometry:
                            style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POINT_STYLE)
                        elif layer.geometryType() == QgsWkbTypes.LineGeometry:
                            style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_LINE_STYLE)
                        else:
                            style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POLYGON_STYLE)
                if style:
                    layer.loadNamedStyle(style)

                # Transfer ownership from the temporary store to the project.
                details.project.addMapLayer(context.temporaryLayerStore().takeMapLayer(layer))

                if details.postProcessor():
                    details.postProcessor().postProcessLayer(layer, context, feedback)
            else:
                wrongLayers.append(str(l))
        except Exception:
            QgsMessageLog.logMessage(QCoreApplication.translate('Postprocessing', "Error loading result layer:") + "\n" + traceback.format_exc(), 'Processing', Qgis.Critical)
            wrongLayers.append(str(l))
        i += 1

    feedback.setProgress(100)

    if wrongLayers:
        msg = QCoreApplication.translate('Postprocessing', "The following layers were not correctly generated.")
        msg += "<ul>" + "".join(["<li>%s</li>" % lay for lay in wrongLayers]) + "</ul>"
        msg += QCoreApplication.translate('Postprocessing', "You can check the 'Log Messages Panel' in QGIS main window to find more information about the execution of the algorithm.")
        feedback.reportError(msg)

    return len(wrongLayers) == 0
| 3,753 | 0 | 23 |
4853713c7a5a4e59600cfcdded4fab0fce1d54b2 | 8,662 | py | Python | Pyrado/pyrado/plotting/policy_parameters.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | Pyrado/pyrado/plotting/policy_parameters.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | Pyrado/pyrado/plotting/policy_parameters.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | """
Functions to plot Pyrado policies
"""
import numpy as np
import torch.nn as nn
from matplotlib import ticker, colorbar
from matplotlib import pyplot as plt
from typing import Any
import pyrado
from pyrado.plotting.utils import AccNorm
from pyrado.policies.adn import ADNPolicy
from pyrado.policies.base import Policy
from pyrado.policies.neural_fields import NFPolicy
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import ensure_no_subscript, ensure_math_mode, print_cbt
def _annotate_img(img,
                  data: [list, np.ndarray] = None,
                  thold_lo: float = None,
                  thold_up: float = None,
                  valfmt: str = '{x:.2f}',
                  textcolors: tuple = ('white', 'black'),
                  **textkw: Any):
    """
    Annotate a given image by writing one formatted value per cell.

    .. note::
        The text color changes based on thresholds which only make sense for symmetric color maps.

    :param img: AxesImage to be labeled.
    :param data: data used to annotate. If None, the image's data is used.
    :param thold_lo: lower threshold for changing the color; defaults to half the data minimum
    :param thold_up: upper threshold for changing the color; defaults to half the data maximum
    :param valfmt: format of the annotations inside the heat map. This should either use the string format method,
                   e.g. '$ {x:.2f}', or be a :class:matplotlib.ticker.Formatter.
    :param textcolors: two color specifications. The first is used for values outside the thresholds,
                       the second for values strictly between them.
    :param textkw: further arguments passed on to the created text labels
    """
    if not isinstance(data, (list, np.ndarray)):
        data = img.get_array()
    # Normalize the threshold to the images color range
    if thold_lo is None:
        thold_lo = data.min()*0.5
    if thold_up is None:
        thold_up = data.max()*0.5
    # Set default alignment to center, but allow it to be overwritten by textkw
    kw = dict(horizontalalignment='center', verticalalignment='center')
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a text for each 'pixel'.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Bool index: True (inside thresholds) selects textcolors[1].
            kw.update(color=textcolors[thold_lo < data[i, j] < thold_up])  # if true then use second color
            text = img.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    # NOTE(review): `texts` is collected but never returned; the Text artists
    # stay alive via img.axes, so this list is effectively unused.
def render_policy_params(policy: Policy,
                         env_spec: EnvSpec,
                         cmap_name: str = 'RdBu',
                         ax_hm: plt.Axes = None,
                         annotate: bool = True,
                         annotation_valfmt: str = '{x:.2f}',
                         colorbar_label: str = '',
                         xlabel: str = None,
                         ylabel: str = None,
                         ) -> plt.Figure:
    """
    Plot the weights and biases as images, and a color bar.

    .. note::
        If you want to have a tight layout, it is best to pass axes of a figure with `tight_layout=True` or
        `constrained_layout=True`.

    :param policy: policy to visualize
    :param env_spec: environment specification
    :param cmap_name: name of the color map, e.g. 'inferno', 'RdBu', or 'viridis'
    :param ax_hm: optional axis the x/y labels are applied to; the heat maps themselves
                  are always drawn into a newly created figure
    :param annotate: select if the heat map should be annotated
    :param annotation_valfmt: format of the annotations inside the heat map, irrelevant if annotate = False
    :param colorbar_label: label for the color bar
    :param xlabel: label for the x axis, only used if ax_hm is given
    :param ylabel: label for the y axis, only used if ax_hm is given
    :return: handles to figures
    """
    if not isinstance(policy, nn.Module):
        raise pyrado.TypeErr(given=policy, expected_type=nn.Module)

    cmap = plt.get_cmap(cmap_name)

    # Create axes and subplots depending on the NN structure
    num_rows = len(list(policy.parameters()))
    fig = plt.figure(figsize=(14, 10), tight_layout=False)
    gs = fig.add_gridspec(num_rows, 2, width_ratios=[14, 1])  # right column is the color bar
    ax_cb = fig.add_subplot(gs[:, 1])

    # Accumulative norm for the colors (scans all images so they share a scale)
    norm = AccNorm()

    for i, (name, param) in enumerate(policy.named_parameters()):
        # Create current axis
        ax = plt.subplot(gs[i, 0])
        ax.set_title(name.replace('_', '\_'))

        # Convert the data and plot the image with the colors proportional to the parameters
        if param.ndim == 3:
            # For example convolution layers
            param = param.flatten(0)
            print_cbt(f'Flattened the first dimension of the {name} parameter tensor.', 'y')
        data = np.atleast_2d(param.detach().numpy())
        img = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', origin='lower')

        if annotate:
            _annotate_img(
                img,
                thold_lo=0.75*min(policy.param_values).detach().numpy(),
                thold_up=0.75*max(policy.param_values).detach().numpy(),
                valfmt=annotation_valfmt
            )

        # Prepare the ticks (named parameters are labeled with the env's
        # observation/action labels where the mapping is known)
        if isinstance(policy, ADNPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['obs_layer.bias', 'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'prev_act_layer.weight':
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['_log_tau', '_log_kappa', '_log_capacity']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        elif isinstance(policy, NFPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name in ['_log_tau', '_potentials_init', 'resting_level', 'obs_layer.bias', 'conv_layer.weight',
                          'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'act_layer.weight':
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        # Add the color bar (call this within the loop to make the AccNorm scan every image)
        colorbar.ColorbarBase(ax_cb, cmap=cmap, norm=norm, label=colorbar_label)

    # Increase the vertical white spaces between the subplots
    plt.subplots_adjust(hspace=.7, wspace=0.1)

    # Set the labels.
    # Fix: ax_hm defaults to None; previously passing xlabel/ylabel without an
    # axis raised AttributeError here. Labels are now only applied when an axis
    # was actually provided.
    if xlabel is not None and ax_hm is not None:
        ax_hm.set_xlabel(xlabel)
    if ylabel is not None and ax_hm is not None:
        ax_hm.set_ylabel(ylabel)
    return fig
| 45.350785 | 119 | 0.634726 | """
Functions to plot Pyrado policies
"""
import numpy as np
import torch.nn as nn
from matplotlib import ticker, colorbar
from matplotlib import pyplot as plt
from typing import Any
import pyrado
from pyrado.plotting.utils import AccNorm
from pyrado.policies.adn import ADNPolicy
from pyrado.policies.base import Policy
from pyrado.policies.neural_fields import NFPolicy
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import ensure_no_subscript, ensure_math_mode, print_cbt
def _annotate_img(img,
                  data: [list, np.ndarray] = None,
                  thold_lo: float = None,
                  thold_up: float = None,
                  valfmt: str = '{x:.2f}',
                  textcolors: tuple = ('white', 'black'),
                  **textkw: Any):
    """
    Annotate a given image by writing one formatted value per cell.

    .. note::
        The text color changes based on thresholds which only make sense for symmetric color maps.

    :param img: AxesImage to be labeled.
    :param data: data used to annotate. If None, the image's data is used.
    :param thold_lo: lower threshold for changing the color; defaults to half the data minimum
    :param thold_up: upper threshold for changing the color; defaults to half the data maximum
    :param valfmt: format of the annotations inside the heat map. This should either use the string format method,
                   e.g. '$ {x:.2f}', or be a :class:matplotlib.ticker.Formatter.
    :param textcolors: two color specifications. The first is used for values outside the thresholds,
                       the second for values strictly between them.
    :param textkw: further arguments passed on to the created text labels
    """
    if not isinstance(data, (list, np.ndarray)):
        data = img.get_array()
    # Normalize the threshold to the images color range
    if thold_lo is None:
        thold_lo = data.min()*0.5
    if thold_up is None:
        thold_up = data.max()*0.5
    # Set default alignment to center, but allow it to be overwritten by textkw
    kw = dict(horizontalalignment='center', verticalalignment='center')
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a text for each 'pixel'.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Bool index: True (inside thresholds) selects textcolors[1].
            kw.update(color=textcolors[thold_lo < data[i, j] < thold_up])  # if true then use second color
            text = img.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    # NOTE(review): `texts` is collected but never returned; the Text artists
    # stay alive via img.axes, so this list is effectively unused.
def render_policy_params(policy: Policy,
                         env_spec: EnvSpec,
                         cmap_name: str = 'RdBu',
                         ax_hm: plt.Axes = None,
                         annotate: bool = True,
                         annotation_valfmt: str = '{x:.2f}',
                         colorbar_label: str = '',
                         xlabel: str = None,
                         ylabel: str = None,
                         ) -> plt.Figure:
    """
    Plot the weights and biases as images, and a color bar.

    .. note::
        If you want to have a tight layout, it is best to pass axes of a figure with `tight_layout=True` or
        `constrained_layout=True`.

    :param policy: policy to visualize
    :param env_spec: environment specification
    :param cmap_name: name of the color map, e.g. 'inferno', 'RdBu', or 'viridis'
    :param ax_hm: axis to draw the heat map onto, if equal to None a new figure is opened
    :param annotate: select if the heat map should be annotated
    :param annotation_valfmt: format of the annotations inside the heat map, irrelevant if annotate = False
    :param colorbar_label: label for the color bar
    :param xlabel: label for the x axis
    :param ylabel: label for the y axis
    :return: handles to figures

    .. note::
        NOTE(review): `xlabel`/`ylabel` are applied to `ax_hm`, which defaults to None —
        passing a label without `ax_hm` would fail on None; confirm callers always pass both.
    """
    if not isinstance(policy, nn.Module):
        raise pyrado.TypeErr(given=policy, expected_type=nn.Module)

    cmap = plt.get_cmap(cmap_name)

    # Create axes and subplots depending on the NN structure
    num_rows = len(list(policy.parameters()))
    fig = plt.figure(figsize=(14, 10), tight_layout=False)
    gs = fig.add_gridspec(num_rows, 2, width_ratios=[14, 1])  # right column is the color bar
    ax_cb = fig.add_subplot(gs[:, 1])

    # Accumulative norm for the colors (collects the value range over all parameter tensors)
    norm = AccNorm()

    for i, (name, param) in enumerate(policy.named_parameters()):
        # Create current axis
        ax = plt.subplot(gs[i, 0])
        ax.set_title(name.replace('_', '\_'))

        # Convert the data and plot the image with the colors proportional to the parameters
        if param.ndim == 3:
            # For example convolution layers
            param = param.flatten(0)
            print_cbt(f'Flattened the first dimension of the {name} parameter tensor.', 'y')
        data = np.atleast_2d(param.detach().numpy())
        img = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', origin='lower')

        if annotate:
            # Thresholds scale with the policy's overall min/max parameter value.
            _annotate_img(
                img,
                thold_lo=0.75*min(policy.param_values).detach().numpy(),
                thold_up=0.75*max(policy.param_values).detach().numpy(),
                valfmt=annotation_valfmt
            )

        # Prepare the ticks (per-parameter labeling depends on the policy type)
        if isinstance(policy, ADNPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['obs_layer.bias', 'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'prev_act_layer.weight':
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['_log_tau', '_log_kappa', '_log_capacity']:
                # Scalar-like parameters: hide all ticks.
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
        elif isinstance(policy, NFPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name in ['_log_tau', '_potentials_init', 'resting_level', 'obs_layer.bias', 'conv_layer.weight',
                          'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'act_layer.weight':
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        # Add the color bar (call this within the loop to make the AccNorm scan every image)
        colorbar.ColorbarBase(ax_cb, cmap=cmap, norm=norm, label=colorbar_label)

    # Increase the vertical white spaces between the subplots
    plt.subplots_adjust(hspace=.7, wspace=0.1)

    # Set the labels
    if xlabel is not None:
        ax_hm.set_xlabel(xlabel)
    if ylabel is not None:
        ax_hm.set_ylabel(ylabel)
    return fig
| 0 | 0 | 0 |
c49d90b25914f2fe4128d65000ed17dc59ea81f1 | 506 | py | Python | openslides_backend/action/actions/topic/delete.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 5 | 2020-01-20T13:57:15.000Z | 2021-03-27T14:14:44.000Z | openslides_backend/action/actions/topic/delete.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 859 | 2020-01-11T22:58:37.000Z | 2022-03-30T14:54:06.000Z | openslides_backend/action/actions/topic/delete.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 16 | 2020-01-04T20:28:57.000Z | 2022-02-10T12:06:54.000Z | from ....models.models import Topic
from ....permissions.permissions import Permissions
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("topic.delete")
class TopicDelete(DeleteAction):
    """
    Action to delete simple topics that can be shown in the agenda.
    """

    # Model the generic DeleteAction operates on.
    model = Topic()
    # JSON schema validating the incoming delete payload.
    schema = DefaultSchema(Topic()).get_delete_schema()
    # Permission required to execute this action.
    permission = Permissions.AgendaItem.CAN_MANAGE
| 29.764706 | 67 | 0.752964 | from ....models.models import Topic
from ....permissions.permissions import Permissions
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("topic.delete")
class TopicDelete(DeleteAction):
    """
    Action to delete simple topics that can be shown in the agenda.
    """

    # Model the generic DeleteAction operates on.
    model = Topic()
    # JSON schema validating the incoming delete payload.
    schema = DefaultSchema(Topic()).get_delete_schema()
    # Permission required to execute this action.
    permission = Permissions.AgendaItem.CAN_MANAGE
| 0 | 0 | 0 |
ef6e758fc0abe0059cc14daad239b97fed80451a | 4,687 | py | Python | getgauge/registry.py | kaleb/py-gauge-python | edd8cf7827bc7d3a516e062521f5cf0a600efcb5 | [
"MIT"
] | null | null | null | getgauge/registry.py | kaleb/py-gauge-python | edd8cf7827bc7d3a516e062521f5cf0a600efcb5 | [
"MIT"
] | null | null | null | getgauge/registry.py | kaleb/py-gauge-python | edd8cf7827bc7d3a516e062521f5cf0a600efcb5 | [
"MIT"
] | null | null | null | import os
import re
import tempfile
from subprocess import call
from colorama import Fore
from getgauge.api import get_step_value
registry = Registry()
| 30.633987 | 108 | 0.65479 | import os
import re
import tempfile
from subprocess import call
from colorama import Fore
from getgauge.api import get_step_value
class StepInfo(object):
    """Immutable record describing one registered step implementation.

    Captures the raw step text, its parsed (parameterized) form, the
    implementing callable, and where the implementation was defined.
    """

    def __init__(self, step_text, parsed_step_text, impl, file_name, line_number, has_alias=False):
        self.__step_text = step_text
        self.__parsed_step_text = parsed_step_text
        self.__impl = impl
        self.__file_name = file_name
        self.__line_number = line_number
        self.__has_alias = has_alias

    @property
    def step_text(self):
        """The step text exactly as the user wrote it."""
        return self.__step_text

    @property
    def parsed_step_text(self):
        """The parsed/parameterized form of the step text."""
        return self.__parsed_step_text

    @property
    def impl(self):
        """The callable implementing this step."""
        return self.__impl

    @property
    def has_alias(self):
        """True when this entry was registered as one of several aliases."""
        return self.__has_alias

    @property
    def file_name(self):
        """Source file in which the implementation lives."""
        return self.__file_name

    @property
    def line_number(self):
        """Line number of the implementation inside ``file_name``."""
        return self.__line_number
class _MessagesStore:
__messages = []
@staticmethod
def pending_messages():
messages = _MessagesStore.__messages
_MessagesStore.__messages = []
return messages
@staticmethod
def write_message(message):
_MessagesStore.__messages.append(message)
@staticmethod
def clear():
_MessagesStore.__messages = []
class Registry(object):
    """Central store for step implementations, execution hooks and the
    screenshot provider.

    Hook accessors (e.g. ``before_step(tags)`` and ``add_before_step(func, tags)``)
    are generated dynamically in ``__init__`` for every name in ``hooks``.
    """

    hooks = ['before_step', 'after_step', 'before_scenario', 'after_scenario', 'before_spec', 'after_spec',
             'before_suite', 'after_suite']

    def __init__(self):
        self.__screenshot_provider, self.__steps_map, self.__continue_on_failures = _take_screenshot, {}, {}
        for hook in Registry.hooks:
            self.__def_hook(hook)

    def __def_hook(self, hook):
        # Installs a getter (`<hook>(tags)`) and a registrar (`add_<hook>(func, tags)`)
        # on the class, plus a per-instance list attribute '__<hook>' holding the
        # registered {'tags': ..., 'func': ...} entries.
        def get(self, tags=None):
            return _filter_hooks(tags, getattr(self, '__{}'.format(hook)))

        def add(self, func, tags=None):
            getattr(self, '__{}'.format(hook)).append({'tags': tags, 'func': func})

        setattr(self.__class__, hook, get)
        setattr(self.__class__, 'add_{}'.format(hook), add)
        setattr(self, '__{}'.format(hook), [])

    def add_step(self, step_text, func, file_name, line_number=-1, has_alias=False):
        """Register ``func`` for ``step_text``; a list registers every entry as an alias."""
        if not isinstance(step_text, list):
            parsed_step_text = get_step_value(step_text)
            info = StepInfo(step_text, parsed_step_text, func, file_name, line_number, has_alias)
            self.__steps_map.setdefault(parsed_step_text, []).append(info)
            return
        for text in step_text:
            self.add_step(text, func, file_name, line_number, True)

    def steps(self):
        """Return the raw step text of every registered step (first alias per entry)."""
        return [value[0].step_text for value in self.__steps_map.values()]

    def is_implemented(self, step_text):
        """True when at least one implementation is registered for ``step_text``."""
        return self.__steps_map.get(step_text) is not None

    def has_multiple_impls(self, step_text):
        # NOTE(review): raises TypeError (len(None)) for an unregistered step_text;
        # callers presumably check is_implemented first — confirm.
        return len(self.__steps_map.get(step_text)) > 1

    def get_info_for(self, step_text):
        """Return the first StepInfo for ``step_text``, or an all-None StepInfo."""
        info = self.__steps_map.get(step_text)
        return info[0] if info is not None else StepInfo(None, None, None, None, None)

    def get_infos_for(self, step_text):
        """Return all StepInfo entries for ``step_text`` (None when unregistered)."""
        return self.__steps_map.get(step_text)

    def set_screenshot_provider(self, func):
        """Replace the default screenshot function."""
        self.__screenshot_provider = func

    def screenshot_provider(self):
        """Return the current screenshot function."""
        return self.__screenshot_provider

    def continue_on_failure(self, func, exceptions=None):
        """Mark ``func`` to continue on the given exception types (default AssertionError)."""
        self.__continue_on_failures[func] = exceptions or [AssertionError]

    def is_continue_on_failure(self, func, exception):
        """True when ``exception`` matches one of ``func``'s continue-on-failure types."""
        if func in self.__continue_on_failures:
            for e in self.__continue_on_failures[func]:
                if issubclass(type(exception), e):
                    return True
        return False

    def clear(self):
        """Reset all registered steps, hooks and continue-on-failure settings."""
        self.__steps_map, self.__continue_on_failures = {}, {}
        for hook in Registry.hooks:
            setattr(self, '__{}'.format(hook), [])
def _filter_hooks(tags, hooks):
filtered_hooks = []
for hook in hooks:
hook_tags = hook['tags']
if hook_tags is None:
filtered_hooks.append(hook['func'])
continue
for tag in tags:
hook_tags = hook_tags.replace('<{}>'.format(tag), 'True')
if eval(re.sub('<[^<]+?>', 'False', hook_tags)):
filtered_hooks.append(hook['func'])
return filtered_hooks
def _take_screenshot():
    """Capture a screenshot via the external ``gauge_screenshot`` tool.

    The tool is asked to write a PNG into the temp directory; its raw bytes
    are returned.  When the tool fails to produce the file, a warning is
    printed and an empty byte string is returned.
    """
    temp_file = os.path.join(tempfile.gettempdir(), 'screenshot.png')
    call(['gauge_screenshot', temp_file])
    if not os.path.exists(temp_file):
        print(Fore.RED + "Failed to take screenshot using gauge_screenshot.")
        return str.encode("")
    # Context manager guarantees the handle is closed even if read() raises
    # (the original open/read/close leaked the handle on error).
    with open(temp_file, 'r+b') as _file:
        return _file.read()
registry = Registry()
| 3,477 | 934 | 115 |
25880f4675b59434aa1de002074f27713ac08456 | 331 | py | Python | mayan/apps/appearance/migrations/0009_remove_theme_logoimage.py | punphatai/Mayan-EDMS | 8f929e427b497817ad0e024cec7f7bec61b172c0 | [
"Apache-2.0"
] | null | null | null | mayan/apps/appearance/migrations/0009_remove_theme_logoimage.py | punphatai/Mayan-EDMS | 8f929e427b497817ad0e024cec7f7bec61b172c0 | [
"Apache-2.0"
] | null | null | null | mayan/apps/appearance/migrations/0009_remove_theme_logoimage.py | punphatai/Mayan-EDMS | 8f929e427b497817ad0e024cec7f7bec61b172c0 | [
"Apache-2.0"
] | 1 | 2022-02-21T13:19:36.000Z | 2022-02-21T13:19:36.000Z | # Generated by Django 2.2.24 on 2022-03-07 10:56
from django.db import migrations
| 18.388889 | 48 | 0.595166 | # Generated by Django 2.2.24 on 2022-03-07 10:56
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``LogoImage`` field from the ``theme`` model (reverts 0008)."""

    dependencies = [
        ('appearance', '0008_theme_logoimage'),
    ]

    operations = [
        # Drops the column added by migration 0008_theme_logoimage.
        migrations.RemoveField(
            model_name='theme',
            name='LogoImage',
        ),
    ]
| 0 | 224 | 23 |
3db2f0f2a2d8cd95d2f808dbfeb6a37ff8c0fa35 | 2,904 | py | Python | matflow/hardwareadministration/Hardware_Controller.py | soerenray/MatFlow | db0c8311262738264f1c525b8266a2bf52a7b7e6 | [
"MIT"
] | null | null | null | matflow/hardwareadministration/Hardware_Controller.py | soerenray/MatFlow | db0c8311262738264f1c525b8266a2bf52a7b7e6 | [
"MIT"
] | null | null | null | matflow/hardwareadministration/Hardware_Controller.py | soerenray/MatFlow | db0c8311262738264f1c525b8266a2bf52a7b7e6 | [
"MIT"
] | null | null | null | # import resource
from typing import List, Tuple
from matflow.database.ServerData import ServerData
from matflow.frontendapi import keys
from matflow.hardwareadministration.Server import Server
import requests
from requests.auth import HTTPBasicAuth
| 29.938144 | 85 | 0.631887 | # import resource
from typing import List, Tuple
from matflow.database.ServerData import ServerData
from matflow.frontendapi import keys
from matflow.hardwareadministration.Server import Server
import requests
from requests.auth import HTTPBasicAuth
class Hardware_Controller:
    """Facade over the stored server configuration.

    Reads and writes the server record through ``ServerData`` and guards
    ``getServer``/``setServer`` by checking the caller's role against the
    Airflow users API.
    """

    # Server object most recently created or read.
    _Server: Server

    # Constructor
    def __init__(self):
        standardServer = Server()
        self._Server = standardServer

    # Methods:
    # getServer method gets the standard server via his ip
    def get_server(self):
        # NOTE(review): constructs ServerData() directly, while getServer/setServer
        # below use ServerData.get_instance() — confirm which is intended.
        tempServerData = ServerData()
        self._Server = tempServerData.get_server()
        return self._Server

    # writeServer
    def writeServer(self, newServer: Server):
        tempServerData = ServerData()
        tempServerData.write_server(newServer)

    # setCPUResources
    # def setResources(self, newResource: resource, newSoft: int, newHard: int):
    #     resource.setrlimit(newResource, newSoft, newHard)

    # Methods
    # method that gets a Server and adds it to the ServerList
    # NOTE(review): missing `self`; the parameter shadows the Server class and is
    # immediately rebound by `Server = Server()` — looks unfinished.
    def addServer(Server):
        Server = Server()
        # stuff that happens

    # method that gets a Server and a containerLimit
    # and sets the Server's containerLimit to the given number
    # NOTE(review): missing `self`; parameter shadows the Server class.
    def changeContainerLimit(Server, containerLimit):
        Server.containerLimit = containerLimit
        # stuff that happens

    # method that gets a Server and sets the Servers bool "isSelectedForExecution"
    # to True
    # NOTE(review): missing `self`; parameter shadows the Server class.
    def selectServer(Server):
        Server.isSelectedForExecution = True
        # stuff that happens

    def getServer(self, username: str, password: str) -> Server:
        """Return the stored server when ``username`` has the Airflow ``Admin``
        role; implicitly returns None otherwise."""
        hardware_auth = HTTPBasicAuth(username, password)
        search_user = username
        search_url = keys.airflow_address + "api/v1/users/" + search_user
        permission = requests.get(search_url, auth=hardware_auth).json()["roles"][0][
            "name"
        ]
        if permission == "Admin":
            tempServerData = ServerData.get_instance()
            # Database rows come back as (address, name) tuples.
            database_resp: List[Tuple[str, str]] = tempServerData.get_server()
            self._Server = Server()
            self._Server.setName(database_resp[0][1])
            self._Server.setAddress(database_resp[0][0])
            return self._Server

    def setServer(self, server: Server, username: str, password: str):
        """Persist ``server`` via ``ServerData`` when the user is an Airflow Admin."""
        hardware_auth = HTTPBasicAuth(username, password)
        search_user = username
        search_url = keys.airflow_address + "api/v1/users/" + search_user
        permission = requests.get(search_url, auth=hardware_auth).json()["roles"][0][
            "name"
        ]
        if permission == "Admin":
            tempServerData = ServerData.get_instance()
            tempServerData.write_server(server)
| 1,824 | 805 | 23 |
971e770d1fbc3d491f7ea1bbe9522bc53ae28e89 | 2,683 | py | Python | programs/function.py | JoeWard7/settler | e521b30dd15bf6512ebf40e6b1b8214557b98ba6 | [
"MIT"
] | null | null | null | programs/function.py | JoeWard7/settler | e521b30dd15bf6512ebf40e6b1b8214557b98ba6 | [
"MIT"
] | null | null | null | programs/function.py | JoeWard7/settler | e521b30dd15bf6512ebf40e6b1b8214557b98ba6 | [
"MIT"
] | null | null | null | """Calculations and Fucntions."""
import setup as set
def player_statcalc(player):
    """Print the roll probability, resource and tile number for each of the
    player's settled tile letters (looked up in the ``setup`` module tables)."""
    player_prop = []
    player_res = []
    player_num = []
    for letter in player:
        prob = set.letter_probs[letter]
        resIndex = set.resource_position[letter]
        res = set.resource_index[resIndex]
        num = set.letter_num[letter]
        player_prop.append(prob)
        player_res.append(res)
        player_num.append(num)
    print(player_prop)
    print(player_res)
    print(player_num)
def player_odds(player):
    """Return the probability (out of the 36 two-dice outcomes) that any of the
    player's tile letters is rolled.

    Each letter group shares one dice number; a group contributes its pip
    weight once no matter how many of its letters the player holds.
    """
    PIP_GROUPS = (
        (1, ('B',)),
        (2, ('D', 'Q')),
        (3, ('J', 'N')),
        (4, ('A', 'O')),
        (5, ('C', 'P')),
        (5, ('E', 'K')),
        (4, ('G', 'M')),
        (3, ('F', 'L')),
        (2, ('I', 'R')),
        (1, ('H',)),
    )
    total = 0.0
    for pips, letters in PIP_GROUPS:
        if any(letter in player for letter in letters):
            total += pips
    return total / 36
def player_resOdds(player, resNum):
    """Return (as a string) the probability that one of the player's tiles
    producing resource ``resNum`` is rolled."""
    resource_list = []
    for letter in player:
        if set.resource_position[letter] == resNum:
            resource_list.append(letter)
    return str(player_odds(resource_list))
def add_settle(player, settles):
    """Log the newly built settlements and add each tile letter to the
    player's settlement list."""
    print(settles)
    player.extend(settles)
def dice_roll(roll):
    """Resolve a dice roll: for every tile letter whose number equals ``roll``,
    append that tile's resource index to the hand of each player colour that
    has settled it (tables and hands live in the ``setup`` module)."""
    for letter in set.letter_num:
        if set.letter_num[letter] == roll:
            # One ownership scan per player colour.
            for ownership in set.red_settle:
                if ownership == letter:
                    set.red_hand.append(set.resource_position[letter])
            for ownership in set.blue_settle:
                if ownership == letter:
                    set.blue_hand.append(set.resource_position[letter])
            for ownership in set.orange_settle:
                if ownership == letter:
                    set.orange_hand.append(set.resource_position[letter])
            for ownership in set.white_settle:
                if ownership == letter:
                    set.white_hand.append(set.resource_position[letter])
def card_remove(player, cards):
    """Log the spent cards and remove one matching copy of each from the
    player's hand."""
    print(cards)
    for spent in cards:
        player.remove(spent)
def game_odds(resNum):
    """Return (as a string) the overall probability that resource ``resNum``
    is produced anywhere on the board; also prints the matching tile letters."""
    resource_list = []
    for letter in set.resource_position:
        if set.resource_position[letter] == resNum:
            resource_list.append(letter)
    print(resource_list)
    return str(player_odds(resource_list))
| 27.10101 | 73 | 0.583675 | """Calculations and Fucntions."""
import setup as set
def player_statcalc(player):
    """Print the roll probability, resource and tile number for each of the
    player's settled tile letters (looked up in the ``setup`` module tables)."""
    player_prop = []
    player_res = []
    player_num = []
    for letter in player:
        prob = set.letter_probs[letter]
        resIndex = set.resource_position[letter]
        res = set.resource_index[resIndex]
        num = set.letter_num[letter]
        player_prop.append(prob)
        player_res.append(res)
        player_num.append(num)
    print(player_prop)
    print(player_res)
    print(player_num)
def player_odds(player):
    """Return the probability (out of the 36 two-dice outcomes) that any of the
    player's tile letters is rolled.

    Each letter group shares one dice number; a group contributes its pip
    weight once no matter how many of its letters the player holds.
    """
    PIP_GROUPS = (
        (1, ('B',)),
        (2, ('D', 'Q')),
        (3, ('J', 'N')),
        (4, ('A', 'O')),
        (5, ('C', 'P')),
        (5, ('E', 'K')),
        (4, ('G', 'M')),
        (3, ('F', 'L')),
        (2, ('I', 'R')),
        (1, ('H',)),
    )
    total = 0.0
    for pips, letters in PIP_GROUPS:
        if any(letter in player for letter in letters):
            total += pips
    return total / 36
def player_resOdds(player, resNum):
    """Return (as a string) the probability that one of the player's tiles
    producing resource ``resNum`` is rolled."""
    resource_list = []
    for letter in player:
        if set.resource_position[letter] == resNum:
            resource_list.append(letter)
    return str(player_odds(resource_list))
def add_settle(player, settles):
    """Log the newly built settlements and add each tile letter to the
    player's settlement list."""
    print(settles)
    player.extend(settles)
def dice_roll(roll):
    """Resolve a dice roll: for every tile letter whose number equals ``roll``,
    append that tile's resource index to the hand of each player colour that
    has settled it (tables and hands live in the ``setup`` module)."""
    for letter in set.letter_num:
        if set.letter_num[letter] == roll:
            # One ownership scan per player colour.
            for ownership in set.red_settle:
                if ownership == letter:
                    set.red_hand.append(set.resource_position[letter])
            for ownership in set.blue_settle:
                if ownership == letter:
                    set.blue_hand.append(set.resource_position[letter])
            for ownership in set.orange_settle:
                if ownership == letter:
                    set.orange_hand.append(set.resource_position[letter])
            for ownership in set.white_settle:
                if ownership == letter:
                    set.white_hand.append(set.resource_position[letter])
def card_remove(player, cards):
    """Log the spent cards and remove one matching copy of each from the
    player's hand."""
    print(cards)
    for spent in cards:
        player.remove(spent)
def game_odds(resNum):
    """Return (as a string) the overall probability that resource ``resNum``
    is produced anywhere on the board; also prints the matching tile letters."""
    resource_list = []
    for letter in set.resource_position:
        if set.resource_position[letter] == resNum:
            resource_list.append(letter)
    print(resource_list)
    return str(player_odds(resource_list))
| 0 | 0 | 0 |
d7f080f17a48c3fcae06eaa5771bc062bdb48047 | 1,216 | py | Python | proj_1/index.py | PardeepBaboria/py-etl | 2af8becf026eb9c83a845b1b1499192bf7c2a570 | [
"Apache-2.0"
] | null | null | null | proj_1/index.py | PardeepBaboria/py-etl | 2af8becf026eb9c83a845b1b1499192bf7c2a570 | [
"Apache-2.0"
] | null | null | null | proj_1/index.py | PardeepBaboria/py-etl | 2af8becf026eb9c83a845b1b1499192bf7c2a570 | [
"Apache-2.0"
] | null | null | null | import petl as etl, psycopg2 as pg, pymysql as mysql
from config import dbConfig
# Connections: source and staging are Postgres, the warehouse is MySQL.
SOURCE_DB_DBO = pg.connect(dbConfig["source"])
STAGING_DB_DBO = pg.connect(dbConfig["staging"])
DW_DB_DBO = mysql.connect(
    host= dbConfig["data_warehouse"]["host"],
    user= dbConfig["data_warehouse"]["user"],
    password= dbConfig["data_warehouse"]["password"],
    database= dbConfig["data_warehouse"]["database"]
)
# Recreate the staging table from scratch on every run.
STAGING_DB_DBO.cursor().execute('DROP TABLE IF EXISTS users;')
# Extract: pull users from the source database.
source_users_table = etl.fromdb(SOURCE_DB_DBO, 'SELECT userid, username, email, role FROM tuser;')
#petl.io.db.todb(table, dbo, tablename, schema=None, commit=True, create=False, drop=False, constraints=True, metadata=None, dialect=None, sample=1000)
etl.todb(source_users_table, STAGING_DB_DBO, "users", create= True)
# Load: project the needed columns from staging and append into the warehouse.
staging_users_table = etl.fromdb(STAGING_DB_DBO, 'SELECT username, email FROM users;')
#for MySQL the statement SET SQL_MODE=ANSI_QUOTES is required to ensure MySQL uses SQL-92 standard quote characters.
DW_DB_DBO.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
#table should be exists in db for append data.
etl.appenddb(staging_users_table, DW_DB_DBO, "user")
#etl.todb(staging_users_table, DW_DB_DBO, "user", create= True)
| 36.848485 | 151 | 0.76727 | import petl as etl, psycopg2 as pg, pymysql as mysql
from config import dbConfig
# Connections: source and staging are Postgres, the warehouse is MySQL.
SOURCE_DB_DBO = pg.connect(dbConfig["source"])
STAGING_DB_DBO = pg.connect(dbConfig["staging"])
DW_DB_DBO = mysql.connect(
    host= dbConfig["data_warehouse"]["host"],
    user= dbConfig["data_warehouse"]["user"],
    password= dbConfig["data_warehouse"]["password"],
    database= dbConfig["data_warehouse"]["database"]
)
# Recreate the staging table from scratch on every run.
STAGING_DB_DBO.cursor().execute('DROP TABLE IF EXISTS users;')
# Extract: pull users from the source database.
source_users_table = etl.fromdb(SOURCE_DB_DBO, 'SELECT userid, username, email, role FROM tuser;')
#petl.io.db.todb(table, dbo, tablename, schema=None, commit=True, create=False, drop=False, constraints=True, metadata=None, dialect=None, sample=1000)
etl.todb(source_users_table, STAGING_DB_DBO, "users", create= True)
# Load: project the needed columns from staging and append into the warehouse.
staging_users_table = etl.fromdb(STAGING_DB_DBO, 'SELECT username, email FROM users;')
#for MySQL the statement SET SQL_MODE=ANSI_QUOTES is required to ensure MySQL uses SQL-92 standard quote characters.
DW_DB_DBO.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
#table should be exists in db for append data.
etl.appenddb(staging_users_table, DW_DB_DBO, "user")
#etl.todb(staging_users_table, DW_DB_DBO, "user", create= True)
| 0 | 0 | 0 |
cb23410e910c2dd8c4fe727004924c1468e60f71 | 3,506 | py | Python | prepare_multi_sources_data.py | FYJNEVERFOLLOWS/nnsslm | a988451277c027f7203614b92b491707024bbbe6 | [
"BSD-3-Clause"
] | null | null | null | prepare_multi_sources_data.py | FYJNEVERFOLLOWS/nnsslm | a988451277c027f7203614b92b491707024bbbe6 | [
"BSD-3-Clause"
] | null | null | null | prepare_multi_sources_data.py | FYJNEVERFOLLOWS/nnsslm | a988451277c027f7203614b92b491707024bbbe6 | [
"BSD-3-Clause"
] | null | null | null | import os
import pickle
import random
import numpy as np
np.set_printoptions(threshold=np.inf)
import torch
from torch.utils.data import Dataset, DataLoader
if __name__ == '__main__':
    # Smoke test: load the frame-level test set and print each batch's shapes.
    test_data_path = "/Work18/2021/fuyanjie/exp_data/exp_nnsslm/test_data_dir/test_data_frame_level_gcc"
    test_data = DataLoader(SSLR_Dataset(test_data_path), batch_size=64, shuffle=True, num_workers=4) # train_data.shape (batch_x, batch_y)
    for (batch_x, batch_y, batch_z) in test_data:
        print(f'batch_x.shape {batch_x.shape}', flush=True)
        print(f'batch_y.shape {batch_y.shape}', flush=True)
        print(f'batch_z.shape {batch_z.shape}', flush=True)
        print(f'batch_z {batch_z}', flush=True)
| 36.14433 | 173 | 0.573588 | import os
import pickle
import random
import numpy as np
np.set_printoptions(threshold=np.inf)
import torch
from torch.utils.data import Dataset, DataLoader
class SSLR_Dataset(Dataset):
    """Frame-level dataset of pickled frames under ``data_path``.

    Each item yields (gcc_fbank features, 360-bin likelihood encoding of the
    source angles, number of sources).  All frames are loaded eagerly at
    construction time.
    """

    def __init__(self, data_path):
        super().__init__()
        self.data_path = data_path
        # Load every frame file once up front.
        self.total_x, self.total_y, self.total_z = self._read_file()

    def __getitem__(self, index):
        return torch.tensor(self.total_x[index], dtype=torch.float), torch.tensor(self.total_y[index], dtype=torch.float), torch.tensor(self.total_z[index], dtype=torch.int)

    def __len__(self):
        return len(self.total_x)

    def encode(self, y):
        """Encode ground-truth angles ``y`` (0-359; NaN = absent source) as a
        360-bin Gaussian likelihood vector, taking the per-bin maximum over sources."""
        def gaussian_func(gt_angle):
            # sigma = beam_width
            # sigma = 3.15
            sigma = 8
            angles = np.arange(360)
            out = np.array(np.exp(-1 * np.square(angles - 180) / sigma ** 2))
            out = np.roll(out, gt_angle - 180)  # roll right by gt_angle - 180 (i.e. left by 180 - gt_angle)
            return out
        mat_out = []
        for gt_angle in y:
            if not np.isnan(gt_angle):
                mat_out.append(gaussian_func(gt_angle))
        if not mat_out:
            # No active source: all-zero likelihood.
            return np.full(360, 0)
        # Take the maximum over sources for each of the 360 angle bins.
        mat_out = np.asarray(mat_out)
        mat_out = mat_out.transpose()
        mat_out = np.max(mat_out, axis=1)
        return mat_out

    def _read_file(self):
        """Load all pickled frames, skipping zero-source frames; per-frame
        exceptions are printed and swallowed so one bad file cannot abort loading."""
        frames = os.listdir(self.data_path)
        total_x = []
        total_y = []
        total_z = []  # num of sources
        print("read_file starts")
        for frame in frames:
            try:
                with open(os.path.join(self.data_path, frame), 'rb') as file:
                    origin_data = pickle.load(file)
                    z = origin_data["num_sources"]
                    if z == 0:
                        continue
                    total_z.append(z)  # [len(frames)]
                    y = []
                    # An angle of 360 wraps to 0 before encoding.
                    if origin_data["label_seg_level"][0] == 360:
                        origin_data["label_seg_level"][0] = 0
                    y.append(origin_data["label_seg_level"][0])
                    if origin_data["label_seg_level"][1] == 360:
                        origin_data["label_seg_level"][1] = 0
                    y.append(origin_data["label_seg_level"][1])
                    likelihood_coding = self.encode(y)  # [360]
                    total_y.append(likelihood_coding)  # [len(frames), 360]
                    x = origin_data["gcc_fbank_seg_level"]
                    total_x.append(x)  # [len(frames), 6, 40, 51]
                    # z = origin_data["num_sources"]
                    # total_z.append(z)  # [len(frames)]
                    if len(total_z) % 10000 == 0:
                        print("{} frames have been processed".format(len(total_z)), flush=True)
            except Exception as e:
                print(f'Exception {e}')
        print(f'total_samples {len(total_z)}')
        return total_x, total_y, total_z
if __name__ == '__main__':
    # Smoke test: load the frame-level test set and print each batch's shapes.
    test_data_path = "/Work18/2021/fuyanjie/exp_data/exp_nnsslm/test_data_dir/test_data_frame_level_gcc"
    test_data = DataLoader(SSLR_Dataset(test_data_path), batch_size=64, shuffle=True, num_workers=4) # train_data.shape (batch_x, batch_y)
    for (batch_x, batch_y, batch_z) in test_data:
        print(f'batch_x.shape {batch_x.shape}', flush=True)
        print(f'batch_y.shape {batch_y.shape}', flush=True)
        print(f'batch_z.shape {batch_z.shape}', flush=True)
        print(f'batch_z {batch_z}', flush=True)
| 2,651 | 7 | 157 |
bc6516c6138b8ffb3066bf322d76535590859e9a | 1,080 | py | Python | tests/mnbi4te7_single_lambdaover2/redo_test_refine_mnbite7.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | tests/mnbi4te7_single_lambdaover2/redo_test_refine_mnbite7.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | tests/mnbi4te7_single_lambdaover2/redo_test_refine_mnbite7.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | [
"MIT"
] | null | null | null | import os
import cryspy
| 33.75 | 55 | 0.663889 | import os
import cryspy
def test_refine():
    """Run a full refinement from main.rcif and check the resulting fit quality."""
    # Renamed from `dir` to avoid shadowing the builtin dir().
    base_dir = os.path.dirname(__file__)
    f_name = os.path.join(base_dir, "main.rcif")
    rhochi = cryspy.file_to_globaln(f_name)
    cryspy.rhochi_run(rhochi)
    d_out = cryspy.rhochi_no_refinement(rhochi)
    chi_sq = d_out["chi_sq"]
    n_points = d_out["n_points"]
    # Thresholds taken from a known-good refinement of this data set.
    assert chi_sq < 429.1
    assert int(n_points) == 142
def test_estimate_fm():
    """Estimate magnetic structure factors from main.rcif and compare values and
    standard deviations of the first diffrn estimation against reference numbers."""
    # Renamed from `dir` to avoid shadowing the builtin dir().
    base_dir = os.path.dirname(__file__)
    f_name = os.path.join(base_dir, "main.rcif")
    rhochi = cryspy.file_to_globaln(f_name)
    l_estimation = cryspy.calc_f_mag_for_diffrn(rhochi)
    estim = l_estimation[0]
    l_f_m = estim.f_m
    l_f_m_sigma = estim.f_m_sigma
    # Reference values from a known-good run on this data set.
    assert abs(float(l_f_m[0])-(-0.7881)) < 0.001
    assert abs(float(l_f_m[1])-(-0.2003)) < 0.001
    assert abs(float(l_f_m[2])-(0.68611)) < 0.001
    assert abs(float(l_f_m[3])-(0.41953)) < 0.001
    assert abs(float(l_f_m_sigma[0]) - 0.16078) < 0.001
    assert abs(float(l_f_m_sigma[1]) - 0.14083) < 0.001
    assert abs(float(l_f_m_sigma[2]) - 0.47949) < 0.001
    assert abs(float(l_f_m_sigma[3]) - 0.34928) < 0.001
65685300b3343f482138490927b230c8293afddd | 1,054 | py | Python | apps/core/views.py | developersociety/commonslibrary | 38a77a8eea9d11e1819535ca98d577430b830c02 | [
"BSD-3-Clause"
] | 4 | 2018-03-20T06:32:16.000Z | 2021-03-25T10:05:25.000Z | apps/core/views.py | developersociety/commonslibrary | 38a77a8eea9d11e1819535ca98d577430b830c02 | [
"BSD-3-Clause"
] | 22 | 2018-03-01T15:16:50.000Z | 2020-04-16T10:51:29.000Z | apps/core/views.py | developersociety/commonslibrary | 38a77a8eea9d11e1819535ca98d577430b830c02 | [
"BSD-3-Clause"
] | 1 | 2019-02-20T09:40:41.000Z | 2019-02-20T09:40:41.000Z | from django.views.generic import TemplateView
from directory.models import Organisation
from resources.models import Resource
from .mixins import ResourcesViewMixin
| 37.642857 | 90 | 0.733397 | from django.views.generic import TemplateView
from directory.models import Organisation
from resources.models import Resource
from .mixins import ResourcesViewMixin
class HomeView(TemplateView, ResourcesViewMixin):
    """Home page: carousel, latest, most-tried, most-liked resources and the
    organisation with most publications this week."""

    template_name = 'core/home.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data()
        context['carousel_resources'] = Resource.get_carousel_resources(self.request.user)
        context['latest_resource'] = Resource.get_latest(self.request.user)
        most_tried_resource = Resource.get_most_tried(self.request.user).first()
        context['most_tried'] = most_tried_resource
        # NOTE: rebinds the **kwargs parameter; the incoming kwargs are discarded here.
        kwargs = {'user': self.request.user}
        if most_tried_resource:
            # Exclude the most-tried resource from the most-liked lookup.
            kwargs.update({'exclude': most_tried_resource.id})
        context['most_liked'] = Resource.get_most_liked(**kwargs).first()
        context['most_published'] = Organisation.get_most_published_this_week()
        return context
class SearchView(TemplateView, ResourcesViewMixin):
    """Search page rendered from 'core/search.html'."""

    template_name = 'core/search.html'
| 678 | 161 | 46 |
c323e0699bd28f7cad27f63044295b3c24f51d87 | 55 | py | Python | sieve/utils/__init__.py | A-N-Other/sieve | eda7b2f048b67d94c032c2d4601f8ab571275a0c | [
"MIT"
] | null | null | null | sieve/utils/__init__.py | A-N-Other/sieve | eda7b2f048b67d94c032c2d4601f8ab571275a0c | [
"MIT"
] | null | null | null | sieve/utils/__init__.py | A-N-Other/sieve | eda7b2f048b67d94c032c2d4601f8ab571275a0c | [
"MIT"
] | null | null | null | # GY171204
from .utils import *
from .needle import *
| 11 | 21 | 0.709091 | # GY171204
from .utils import *
from .needle import *
| 0 | 0 | 0 |
63732fa5a77b988e32e1109c7c40024bbd8e26e1 | 1,077 | py | Python | src/sovereign/views/healthchecks.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | 1 | 2020-07-08T19:37:09.000Z | 2020-07-08T19:37:09.000Z | src/sovereign/views/healthchecks.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | src/sovereign/views/healthchecks.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | import random
from fastapi.routing import APIRouter
from fastapi.responses import PlainTextResponse
from sovereign import XDS_TEMPLATES, __versionstr__
from sovereign import discovery
from sovereign.sources import match_node, extract_node_key
from sovereign.utils.mock import mock_discovery_request
router = APIRouter()
@router.get('/healthcheck', summary='Healthcheck (Does the server respond to HTTP?)')
@router.get('/deepcheck', summary='Deepcheck (Can the server render a random template?)')
@router.get('/version', summary='Display the current version of Sovereign')
| 30.771429 | 89 | 0.760446 | import random
from fastapi.routing import APIRouter
from fastapi.responses import PlainTextResponse
from sovereign import XDS_TEMPLATES, __versionstr__
from sovereign import discovery
from sovereign.sources import match_node, extract_node_key
from sovereign.utils.mock import mock_discovery_request
router = APIRouter()
@router.get('/healthcheck', summary='Healthcheck (Does the server respond to HTTP?)')
async def health_check():
    """Liveness probe: reply with a plain-text ``OK`` body."""
    body = 'OK'
    return PlainTextResponse(body)
@router.get('/deepcheck', summary='Deepcheck (Can the server render a random template?)')
async def deep_check():
    """Readiness probe: render one randomly selected XDS template end-to-end."""
    template_names = list(XDS_TEMPLATES['default'].keys())
    template = random.choice(template_names)
    await discovery.response(mock_discovery_request(), xds_type=template)
    mock_node = mock_discovery_request().node
    match_node(node_value=extract_node_key(mock_node))
    return PlainTextResponse(f'Rendered {template} OK')
@router.get('/version', summary='Display the current version of Sovereign')
async def version_check():
    """Report the running Sovereign version as plain text."""
    banner = f'Sovereign {__versionstr__}'
    return PlainTextResponse(banner)
| 431 | 0 | 66 |
df58ddd7fb70f501e3c7c8b49c6aa8f4c973589b | 1,002 | py | Python | code/utils.py | llord1/DataScienceRecipes | e2ca3f3c57e38900a6cff56f928b7c4b2fbf7a6a | [
"CC0-1.0"
] | null | null | null | code/utils.py | llord1/DataScienceRecipes | e2ca3f3c57e38900a6cff56f928b7c4b2fbf7a6a | [
"CC0-1.0"
] | null | null | null | code/utils.py | llord1/DataScienceRecipes | e2ca3f3c57e38900a6cff56f928b7c4b2fbf7a6a | [
"CC0-1.0"
] | null | null | null | from io import BytesIO
from tarfile import TarInfo
def list_tar_files(tar_ball):
    """
    Lazily yield ``(TarInfo, extracted file object)`` pairs from an open tarfile.

    ``getmembers()`` requires scanning the entire file before returning the
    first value; this generator streams members instead, yielding each one as
    soon as its header has been read.

    Parameters
    ----------
    tar_ball : tarfile.TarFile
        An open archive positioned at its first member.

    Yields
    ------
    tuple
        ``(tarfile.TarInfo, file object or None)``; ``extractfile()`` returns
        ``None`` for members without data (e.g. directories), which is yielded
        unchanged for the caller to handle.
    """
    # Fixed: removed the dead `if tar_file is not None: pass` branch and the
    # stray trailing `pass` from the original.
    tar_info = tar_ball.next()
    while tar_info is not None:
        yield tar_info, tar_ball.extractfile(tar_info)
        tar_info = tar_ball.next()
def read_lines_from_tar_file(tar_file):
    """
    Decode an extracted tar member as UTF-8 and return its lines.
    """
    raw = tar_file.read()
    return raw.decode('utf-8').splitlines()
def write_lines_to_tarball(tar_ball, tar_info, lines):
    """
    Write ``lines`` (newline-joined, UTF-8 encoded) into ``tar_ball`` under
    the member name taken from ``tar_info``.
    """
    encoded = '\n'.join(lines).encode('utf-8')
    member = TarInfo(name = tar_info.name)
    # The member size must match the payload for addfile() to read it fully.
    member.size = len(encoded)
    with BytesIO(encoded) as stream:
        tar_ball.addfile(member, fileobj = stream)
pass | 25.692308 | 85 | 0.638723 | from io import BytesIO
from tarfile import TarInfo
def list_tar_files(tar_ball):
    """
    Lazily yield (TarInfo, extracted file object) pairs from an open tarfile.

    `getmembers()` requires scanning the entire file before returning the first value.
    Avoid that by making a looping iterator.
    """
    tar_info = tar_ball.next()
    while tar_info is not None:
        tar_file = tar_ball.extractfile(tar_info)
        if tar_file is not None:
            # NOTE(review): this branch is a no-op; extractfile() may return
            # None for data-less members (directories), which is yielded as-is.
            pass
        yield tar_info, tar_file
        tar_info = tar_ball.next()
    pass
def read_lines_from_tar_file(tar_file):
    """
    Read an extracted tar member, decode it as UTF-8 and return its lines.
    """
    txt = tar_file.read()
    txt = txt.decode('utf-8')
    return txt.splitlines()
def write_lines_to_tarball(tar_ball, tar_info, lines):
    """
    Write ``lines`` (newline-joined, UTF-8 encoded) to the tar ball under the
    member name taken from ``tar_info``.
    """
    txt = '\n'.join(lines)
    txt = txt.encode('utf-8')
    with BytesIO(txt) as tar_file:
        info = TarInfo(name = tar_info.name)
        # Size must be set explicitly so addfile() reads the right byte count.
        info.size = len(txt)
        tar_ball.addfile(info, fileobj = tar_file)
pass | 0 | 0 | 0 |
830ae2ceef813299429bbfb9a5c0c6f97b8a3f08 | 2,711 | py | Python | microquake/core/util/cli.py | jeanphilippemercier/microquake | 0b9d07be11eddd64619e46939c320487531602a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | microquake/core/util/cli.py | jeanphilippemercier/microquake | 0b9d07be11eddd64619e46939c320487531602a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | microquake/core/util/cli.py | jeanphilippemercier/microquake | 0b9d07be11eddd64619e46939c320487531602a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import time
import sys
class ProgressBar(object):
"""
This object display a simple command line progress bar to wait in style.
"""
class Timer(object):
    """
    This class is a simple timer for the sake of simplicity. This also provides
    simple statistics. It works with the python 'with statement'.
    """
    # Class-level defaults; instances shadow them on first assignment.
    total = 0
    t = None
    n = 0
    def start(self):
        """
        Record the current time
        """
        if self.t is None:
            self.t = time.time()
        else:
            raise RuntimeError("Timer already started")
    def stop(self, *args):
        """
        Stop the timer and record the execution
        """
        if self.t is not None:
            self.total += time.time() - self.t
            self.n += 1
            self.t = None
        else:
            raise RuntimeError("Timer not started")
    # Context-manager protocol: `with Timer():` starts/stops automatically.
    # The *args on stop() absorb the (exc_type, exc, tb) passed to __exit__.
    __enter__ = start
    __exit__ = stop
    def mean(self):
        """
        Return the average runtime of this timer

        NOTE(review): raises ZeroDivisionError if no run has been recorded.
        """
        return self.total / self.n
    def reset(self):
        """
        Reset the statistics
        """
        self.n = self.total = 0
if __name__ == "__main__":
    # Demo: animate a default 100-step bar, one tick every 20 ms.
    a = ProgressBar()
    for i in range(100):
        time.sleep(0.02)
        a()
| 25.575472 | 106 | 0.530063 | import time
import sys
class ProgressBar(object):
    """
    This object displays a simple command line progress bar to wait in style.

    Instantiate with the expected total, then call the instance (or
    ``update()``) once per finished unit of work; the bar redraws in place on
    stderr and prints the total elapsed time once the maximum is reached.
    """
    def __init__(self, max = 100, size = 50, char = "#", caption = "Progress"):
        # `max` shadows the builtin but is kept for interface compatibility.
        self.max = max
        self.value = 0
        self.caption = caption
        self.size = size
        self.start_time = time.time()
        self.char = char
        if self.max == 0:
            # Nothing to track: make drawing a no-op to avoid division by zero.
            self.draw = self.__emptydraw__
        self.draw()
    def draw(self):
        """Redraw the bar, ETA estimate and completion percentage on stderr."""
        now = time.time()
        if self.value != 0:
            # Linear extrapolation of remaining time from progress so far.
            remaining = ((now - self.start_time) / self.value) * (self.max - self.value)
        else:
            remaining = 0
        # Fixed: use integer division -- under Python 3 the original `/`
        # produced a float, and `str * float` raises TypeError below.
        pos = self.size * self.value // self.max
        eta = "%d of %d %3.1fs (%2.0f%%)" % (self.value, self.max, remaining, 100 * self.value / self.max)
        progress = self.char * pos + (self.size - pos) * " "
        progress_string = "[%s]" % (progress)
        eta_string = "ETA %s" % (eta)
        caption_string = " " + self.caption
        sys.stderr.write("%s : %s %s\r" % (caption_string, progress_string, eta_string))
        if self.value >= self.max:
            sys.stderr.write("\n -- TOTAL TIME : %2.4fs -- \n" % (now - self.start_time))
    def __emptydraw__(self):
        """No-op replacement for draw() used when max == 0."""
        pass
    def __call__(self, update = 1):
        """Alias so the bar instance can be called directly to advance it."""
        self.update(update = update)
    def update(self, update = 1):
        """Advance the bar by ``update`` units (clamped to ``max``) and redraw."""
        uvalue = self.value + update
        self.value = min(uvalue, self.max)
        self.draw()
    def set_value(self, value):
        """Set the absolute progress value (clamped to ``max``) and redraw."""
        # Fixed: the original referenced the misspelled name `sefl` (NameError).
        self.value = min(value, self.max)
        self.draw()
class Timer(object):
    """
    Minimal stopwatch with running statistics.

    Supports the ``with`` statement: entering starts the timer, leaving
    stops it and records the elapsed time.
    """
    # Class-level defaults; instances shadow them on first assignment.
    total = 0
    t = None
    n = 0

    def start(self):
        """
        Record the current time
        """
        if self.t is not None:
            raise RuntimeError("Timer already started")
        self.t = time.time()

    def stop(self, *args):
        """
        Stop the timer and record the execution
        """
        if self.t is None:
            raise RuntimeError("Timer not started")
        elapsed = time.time() - self.t
        self.total += elapsed
        self.n += 1
        self.t = None

    # Context-manager hooks; *args on stop() absorbs (exc_type, exc, tb).
    __enter__ = start
    __exit__ = stop

    def mean(self):
        """
        Return the average runtime of this timer
        """
        return self.total / self.n

    def reset(self):
        """
        Reset the statistics
        """
        self.n = self.total = 0
if __name__ == "__main__":
    # Demo: animate a default 100-step bar, one tick every 20 ms.
    a = ProgressBar()
    for i in range(100):
        time.sleep(0.02)
        a()
| 1,287 | 0 | 161 |
92c91ea2bddbddc9816a293ab44c3495cfb8fc07 | 1,529 | py | Python | forumsweats/commands/sendbobux.py | zzzzz28/forum-sweats | 346c281821164ac721a028dee8b726d26374e760 | [
"MIT"
] | 10 | 2020-10-15T18:08:53.000Z | 2021-12-11T13:15:05.000Z | forumsweats/commands/sendbobux.py | zzzzz28/forum-sweats | 346c281821164ac721a028dee8b726d26374e760 | [
"MIT"
] | 59 | 2020-10-06T23:19:25.000Z | 2022-03-06T14:16:31.000Z | forumsweats/commands/sendbobux.py | zzzzz28/forum-sweats | 346c281821164ac721a028dee8b726d26374e760 | [
"MIT"
] | 13 | 2020-10-19T20:46:47.000Z | 2022-03-05T20:17:40.000Z | import forumsweats.discordbot as discordbot
from ..commandparser import Member
import discord
from forumsweats import db
name = 'sendbobux'
aliases = ('sendkromer', 'transmitkromer')
args = '<member> <amount>'
async def run(message, member: Member = None, amount: int = 0):
    'Sends some of your bobux to another user.'
    # Validate the target and the amount before touching any balances.
    if not member:
        return await message.channel.send('Invalid member')
    if not amount or amount <= 0:
        return await message.channel.send('Invalid amount')

    author_id = message.author.id
    sender_bobux = await db.get_bobux(author_id)
    bobux_in_auctions = await db.get_bobux_in_auctions_for_user(author_id)
    # The same handler serves both currencies; the invoked command name decides.
    currency_name = 'kromer' if 'kromer' in message.command_name else 'bobux'

    # Funds committed to auctions cannot be transferred away.
    if sender_bobux - amount < bobux_in_auctions:
        return await message.channel.send(f'You can\'t send {amount} {currency_name}, because you have {bobux_in_auctions:,} in auctions')
    if sender_bobux < amount:
        return await message.channel.send(f'You don\'t have enough {currency_name}')
    if author_id == member.id:
        return await message.channel.send(f'You sent **{amount}** {currency_name} to yourself. Nothing happened.')

    await db.change_bobux(author_id, -amount)
    await db.change_bobux(member.id, amount)
    reciever_bobux = await db.get_bobux(member.id)
    summary = f'Ok, <@{member.id}> now has **{reciever_bobux:,}** {currency_name}. You now have **{sender_bobux-amount:,}** {currency_name}.'
    await message.channel.send(embed=discord.Embed(description=summary))
    await discordbot.check_bobux_roles(member.id, reciever_bobux)
| 38.225 | 142 | 0.752126 | import forumsweats.discordbot as discordbot
from ..commandparser import Member
import discord
from forumsweats import db
name = 'sendbobux'
aliases = ('sendkromer', 'transmitkromer')
args = '<member> <amount>'
async def run(message, member: Member = None, amount: int = 0):
    'Sends some of your bobux to another user.'
    # Guard clauses: a target member and a positive amount are required.
    if not member:
        return await message.channel.send('Invalid member')
    if not amount or amount <= 0:
        return await message.channel.send('Invalid amount')
    sender_bobux = await db.get_bobux(message.author.id)
    bobux_in_auctions = await db.get_bobux_in_auctions_for_user(message.author.id)
    # Same handler serves both currencies; the invoked command name decides.
    currency_name = 'kromer' if 'kromer' in message.command_name else 'bobux'
    # Funds committed to auctions cannot be transferred away.
    if sender_bobux - amount < bobux_in_auctions:
        return await message.channel.send(f'You can\'t send {amount} {currency_name}, because you have {bobux_in_auctions:,} in auctions')
    if sender_bobux < amount:
        return await message.channel.send(f'You don\'t have enough {currency_name}')
    # Self-transfer: nothing changes hands.
    if message.author.id == member.id:
        return await message.channel.send(f'You sent **{amount}** {currency_name} to yourself. Nothing happened.')
    await db.change_bobux(message.author.id, -amount)
    await db.change_bobux(member.id, amount)
    reciever_bobux = await db.get_bobux(member.id)
    await message.channel.send(
        embed=discord.Embed(
            description=f'Ok, <@{member.id}> now has **{reciever_bobux:,}** {currency_name}. You now have **{sender_bobux-amount:,}** {currency_name}.'
        )
    )
    # Balance-based roles may need updating after the transfer.
    await discordbot.check_bobux_roles(member.id, reciever_bobux)
| 0 | 0 | 0 |
c9e2a23269d7a94a222a14b1b0843d4166b796cc | 77 | py | Python | lib/python2.7/site-packages/eventlet/green/http/__init__.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 1 | 2021-07-17T01:39:39.000Z | 2021-07-17T01:39:39.000Z | lib/python2.7/site-packages/eventlet/green/http/__init__.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 8 | 2020-09-26T00:55:16.000Z | 2022-03-12T00:23:07.000Z | lib/python2.7/site-packages/eventlet/green/http/__init__.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | null | null | null | from eventlet.support import six
assert six.PY3, 'This is a Python 3 module'
| 25.666667 | 43 | 0.779221 | from eventlet.support import six
assert six.PY3, 'This is a Python 3 module'
| 0 | 0 | 0 |
523a3b3420226b1dd6b962dc4eebe6c111bdcee2 | 2,770 | py | Python | hoopa/utils/decorators.py | fishtn/hoopa | 1742097c76b4ad4880bd22b87ee89be8490e2b24 | [
"Apache-2.0"
] | 9 | 2021-04-12T03:21:11.000Z | 2022-01-06T07:51:11.000Z | hoopa/utils/decorators.py | fishtn/hoopa | 1742097c76b4ad4880bd22b87ee89be8490e2b24 | [
"Apache-2.0"
] | 3 | 2021-04-14T06:58:00.000Z | 2021-06-17T03:25:34.000Z | hoopa/utils/decorators.py | fishtn/hoopa | 1742097c76b4ad4880bd22b87ee89be8490e2b24 | [
"Apache-2.0"
] | 3 | 2021-04-20T09:03:51.000Z | 2022-01-06T07:51:19.000Z | # -*- coding: utf-8 -*-
"""
装饰器
"""
import asyncio
from asyncio import iscoroutinefunction
from functools import wraps
from loguru import logger
import traceback
from .concurrency import run_function
from .helpers import spider_sleep
from .url import get_location_from_history
from ..request import Request
from ..response import Response
| 29.157895 | 94 | 0.567509 | # -*- coding: utf-8 -*-
"""
装饰器
"""
import asyncio
from asyncio import iscoroutinefunction
from functools import wraps
from loguru import logger
import traceback
from .concurrency import run_function
from .helpers import spider_sleep
from .url import get_location_from_history
from ..request import Request
from ..response import Response
def http_decorator(func):
    """Wrap a fetch method so any exception becomes a failed Response.

    ``func`` may be sync or async; it is awaited when it is a coroutine
    function.  On error the returned Response carries ``ok = 0``, the
    exception class name and the formatted traceback.
    """
    @wraps(func)
    async def log(http_ins, request: Request):
        response: Response = Response()
        try:
            if iscoroutinefunction(func):
                response = await func(http_ins, request)
            else:
                response = func(http_ins, request)
            # NOTE(review): this early return skips the logging below on the
            # success path; the `if response.ok == 1` branch is only reachable
            # after an exception (when ok has been set to 0) -- confirm whether
            # the success/redirect debug logging was meant to run.
            return response
        except Exception as e:
            response.ok = 0
            response.error_type = e.__class__.__name__
            response.debug_msg = traceback.format_exc()
            logger.error(f"{request} fetch error \n {response.debug_msg}")
        if response.ok == 1:
            if response.history:
                last_url = get_location_from_history(response.history)
                logger.debug(f"{request} redirect <{last_url}> success")
            else:
                logger.debug(f"{request} fetch {response}")
        return response
    return log
def handle_download_callback_retry():
    """Decorator factory: retry a download callback until it succeeds or the
    request's retry budget runs out.  A response with ``ok == -1`` is treated
    as fatal and returned immediately."""
    def retry(func):
        @wraps(func)
        async def wrapper(self, request: Request):
            while True:
                response = await func(self, request)
                # Success: return the response
                if response.ok == 1:
                    return response
                # Retry budget exhausted: mark the response as fatal
                if request.retry_times == 0:
                    logger.error(f"{request} too many error, try {request.retry_times} times")
                    response.ok = -1
                # ok == -1 means give up and return immediately
                if response.ok == -1:
                    return response
                # Consume one retry
                request.retry_times -= 1
                # Track retry counts per error type
                await self.stats.inc_value(f"requests/retry_times/{response.error_type}", 1)
                # Optional delay between retries
                if request.retry_delay > 0:
                    await spider_sleep(request.retry_delay)
        return wrapper
    return retry
def timeout_it(timeout=600):
    """Decorator factory: cancel a task coroutine after ``timeout`` seconds.

    Timeouts and other exceptions are logged (not re-raised); the task entry
    is always removed from ``self.task_dict`` in the ``finally`` block.
    """
    def __timeout_it(func):
        async def wrapper(self, request: Request, task_id):
            try:
                await asyncio.wait_for(func(self, request, task_id), timeout)
            except asyncio.TimeoutError:
                logger.error(f"task timeout: {task_id} {self.task_dict[task_id]}")
            except Exception as e:
                logger.error(f"{task_id}: {e}")
            finally:
                self.task_dict.pop(task_id)
        return wrapper
    return __timeout_it
| 2,420 | 0 | 69 |
3ed9f1dab11f91f9db00b4a320c21ed2df7d7ac7 | 1,376 | py | Python | conftest.py | obinnaeye/addMore | 20033a97c9ac044a4cdca4f42687d7cce86dce16 | [
"MIT"
] | null | null | null | conftest.py | obinnaeye/addMore | 20033a97c9ac044a4cdca4f42687d7cce86dce16 | [
"MIT"
] | null | null | null | conftest.py | obinnaeye/addMore | 20033a97c9ac044a4cdca4f42687d7cce86dce16 | [
"MIT"
] | null | null | null | from testing.postgresql import Postgresql
import pytest
from app import create_app
from Model import db as _db
from Model import Client, FeatureRequest
from configTest import SQLALCHEMY_DATABASE_URI as db_url
@pytest.yield_fixture(scope='session')
@pytest.fixture(scope='session')
@pytest.yield_fixture(scope='session')
@pytest.fixture(scope='session', autouse=False)
| 20.537313 | 57 | 0.696221 | from testing.postgresql import Postgresql
import pytest
from app import create_app
from Model import db as _db
from Model import Client, FeatureRequest
from configTest import SQLALCHEMY_DATABASE_URI as db_url
class TestConfig(object):
    """Flask/SQLAlchemy configuration used by the test fixtures."""
    DEBUG = True
    TESTING = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    ENV = 'test'
    # NOTE(review): TESTING is assigned twice; the duplicate is harmless but redundant.
    TESTING = True
    SQLALCHEMY_DATABASE_URI = db_url
@pytest.yield_fixture(scope='session')
def app():
    """Session-scoped Flask app configured for testing, with an app context pushed."""
    _app = create_app(TestConfig)
    # with Postgresql() as postgresql:
    _app.config['SQLALCHEMY_DATABASE_URI'] = db_url
    ctx = _app.app_context()
    ctx.push()
    yield _app
    # Teardown: pop the app context at the end of the session.
    ctx.pop()
@pytest.fixture(scope='session')
def testapp(app):
    """Flask test client bound to the session-scoped app."""
    return app.test_client()
@pytest.yield_fixture(scope='session')
def db(app):
    """Create all tables once per session and yield the SQLAlchemy handle.

    NOTE(review): there is no drop_all() teardown -- tables persist after the run.
    """
    _db.app = app
    _db.create_all()
    yield _db
@pytest.fixture(scope='session', autouse=False)
def session(db):
    """Yield a scoped session bound to a transaction that is rolled back at teardown.

    Seeds three Client rows before handing the session to the test.
    """
    connection = db.engine.connect()
    transaction = connection.begin()
    options = dict(bind=connection, binds={})
    session_ = db.create_scoped_session(options=options)
    clients = [
        Client(ClientName='Client A'),
        Client(ClientName='Client B'),
        Client(ClientName='Client C'),
    ]
    db.session = session_
    db.session.bulk_save_objects(clients)
    yield session_
    # Undo everything done during the session and release resources.
    transaction.rollback()
    connection.close()
    session_.remove()
| 727 | 156 | 111 |
0cc85b793254eddc12dbc1031a61da3e0a0a7188 | 222 | py | Python | tests/test_base_kmeans.py | Gauravsinghal09/mvlearn | 81092120595fadfc3d1f624d0a772594d8bb1578 | [
"Apache-2.0"
] | 1 | 2020-12-29T15:41:29.000Z | 2020-12-29T15:41:29.000Z | tests/test_base_kmeans.py | Gauravsinghal09/mvlearn | 81092120595fadfc3d1f624d0a772594d8bb1578 | [
"Apache-2.0"
] | null | null | null | tests/test_base_kmeans.py | Gauravsinghal09/mvlearn | 81092120595fadfc3d1f624d0a772594d8bb1578 | [
"Apache-2.0"
] | null | null | null | import pytest
from mvlearn.cluster.base_kmeans import BaseKMeans
| 20.181818 | 50 | 0.761261 | import pytest
from mvlearn.cluster.base_kmeans import BaseKMeans
def test_base_kmeans():
    """Smoke-test the BaseKMeans interface methods with ``Xs=None``."""
    base_kmeans = BaseKMeans()
    base_kmeans.fit(Xs=None)
    base_kmeans.predict(Xs=None)
    base_kmeans.fit_predict(Xs=None)
| 133 | 0 | 23 |
ba4a4c94f03df6d3d873730fb0b35815f2f288bf | 4,447 | py | Python | portal_spider.py | cako/portal-investidor-scraper | 8e0f0574477fbe78f58df7b2e4f4248adfe4ecd9 | [
"MIT"
] | null | null | null | portal_spider.py | cako/portal-investidor-scraper | 8e0f0574477fbe78f58df7b2e4f4248adfe4ecd9 | [
"MIT"
] | null | null | null | portal_spider.py | cako/portal-investidor-scraper | 8e0f0574477fbe78f58df7b2e4f4248adfe4ecd9 | [
"MIT"
] | null | null | null | """
Raspagem de dados do Portal Investidor para encontrar detalhes sobre operações
feitas pelo Tesouro Direto.
PRECISA DE UMA LISTA DE PROTOCOLOS PARA RODAR!!!!!!!
Siga as instruções:
1. Navegue a https://portalinvestidor.tesourodireto.com.br/Consulta
2. Preencha os Filtros e clique em Aplicar
3. Vá até a transação mais antiga, no FIM da lista
4. Abaixo de todos os ítens à direita, clique e segure o mouse
5. Segurando o clique, mova o mouse para cima, até ele ficar no espaço em
branco logo acima e ligeiramente à esquerda do primeiro "Investimento".
Você deve ter todo o texto somente das operações selecionado.
7. Copie e cole em um editor de texto.
8. Cada ítem deve ser algo assim:
Investimento
03/01/2020
Nº de protocolo - XXXXXXXX
CORRETORA XXXX
Status
REALIZADO
VER DETALHES
9. Salve o arquivo e edite a varíavel logo abaixo para apontar para ele
Antes de rodar, crie a pasta "td" no local onde vai rodar o scraper.
"""
OPS_FILE = ""
import re
import os
import scrapy
from bs4 import BeautifulSoup
BASE_URL = 'https://portalinvestidor.tesourodireto.com.br'
USER = os.environ["PORTAL_INVESTIDOR_USER"]
PASS = os.environ["PORTAL_INVESTIDOR_PASS"]
REMOTE_PROTOCOLS = []
ALL_PROTOCOLS = []
with open(OPS_FILE, "r") as f:
for line in f:
line = line.split(' - ')
if len(line) > 1:
line = re.search(r'^\d+', line[1])
if line:
ALL_PROTOCOLS.append(line.group())
def authentication_failed(response):
""" Verifica se login falhou """
pass
# soup = BeautifulSoup(response.body, 'html.parser')
# if soup(text=re.compile('Valor líquido total')):
# return True
# return False
class PortalInvestidorSpider(scrapy.Spider):
"""
Spider which crawls Portal Investidor to find all Tesouro Direto \
transactions
"""
name = 'portalinvestidor'
start_urls = [BASE_URL]
| 34.742188 | 78 | 0.610524 | """
Raspagem de dados do Portal Investidor para encontrar detalhes sobre operações
feitas pelo Tesouro Direto.
PRECISA DE UMA LISTA DE PROTOCOLOS PARA RODAR!!!!!!!
Siga as instruções:
1. Navegue a https://portalinvestidor.tesourodireto.com.br/Consulta
2. Preencha os Filtros e clique em Aplicar
3. Vá até a transação mais antiga, no FIM da lista
4. Abaixo de todos os ítens à direita, clique e segure o mouse
5. Segurando o clique, mova o mouse para cima, até ele ficar no espaço em
branco logo acima e ligeiramente à esquerda do primeiro "Investimento".
Você deve ter todo o texto somente das operações selecionado.
7. Copie e cole em um editor de texto.
8. Cada ítem deve ser algo assim:
Investimento
03/01/2020
Nº de protocolo - XXXXXXXX
CORRETORA XXXX
Status
REALIZADO
VER DETALHES
9. Salve o arquivo e edite a varíavel logo abaixo para apontar para ele
Antes de rodar, crie a pasta "td" no local onde vai rodar o scraper.
"""
OPS_FILE = ""
import re
import os
import scrapy
from bs4 import BeautifulSoup
BASE_URL = 'https://portalinvestidor.tesourodireto.com.br'
USER = os.environ["PORTAL_INVESTIDOR_USER"]
PASS = os.environ["PORTAL_INVESTIDOR_PASS"]
REMOTE_PROTOCOLS = []
ALL_PROTOCOLS = []
# Parse the operations dump described in the module docstring: on lines of the
# form "Nº de protocolo - XXXXXXXX ..." keep the leading digits after ' - '.
with open(OPS_FILE, "r") as f:
    for line in f:
        line = line.split(' - ')
        if len(line) > 1:
            line = re.search(r'^\d+', line[1])
            if line:
                ALL_PROTOCOLS.append(line.group())
def authentication_failed(response):
    """Check whether the login failed.

    Currently a stub: always returns None (falsy, i.e. "login OK").  The
    intended body-text check is kept commented out below.
    """
    pass
    # soup = BeautifulSoup(response.body, 'html.parser')
    # if soup(text=re.compile('Valor líquido total')):
    #     return True
    # return False
class PortalInvestidorSpider(scrapy.Spider):
    """
    Spider which crawls Portal Investidor to find all Tesouro Direto \
    transactions
    """
    name = 'portalinvestidor'
    start_urls = [BASE_URL]
    def parse(self, response):
        """Serve cached protocol pages from disk; log in for the remote rest."""
        for pid in ALL_PROTOCOLS:
            filename = 'td/%s.html' % pid
            if os.path.isfile(filename):
                # Local copy exists: parse it straight from disk.
                self.log("Achamos cópia local do protocolo %s" % pid)
                url = r"file://" + os.path.abspath(filename)
                yield scrapy.Request(url=url, callback=self.parse_protocolo)
            else:
                REMOTE_PROTOCOLS.append(pid)
        if REMOTE_PROTOCOLS:
            # Some protocols are missing locally: submit the login form first.
            print("Buscando protocolos remotos")
            yield scrapy.FormRequest.from_response(
                response,
                formdata={'UserCpf': USER, 'UserPassword': PASS},
                callback=self.after_login
            )
        else:
            self.log("Todos protocolos têm cópia local")
    def after_login(self, response):
        """After the login form round-trip, request each missing protocol page."""
        if authentication_failed(response):
            self.logger.error("Login failed")
            return
        for pid in REMOTE_PROTOCOLS:
            url = "%s/Protocolo/%s/1" % (BASE_URL, pid)
            self.log("Buscando protocolo remoto %s" % pid)
            yield scrapy.Request(url=url, callback=self.parse_protocolo)
    def parse_protocolo(self, response):
        """Cache the page to td/<pid>.html and yield a dict of its fields."""
        soup = BeautifulSoup(response.body, 'html.parser')
        pid = soup.find("span", {"class": "td-protocolo-numero"}).text.strip()
        filename = 'td/%s.html' % pid
        if not os.path.exists(filename):
            # Persist a local copy so future runs skip the network.
            with open(filename, 'wb') as f:
                f.write(response.body)
            self.log('Arquivo salvo: %s' % filename)
        info = {'protocolo': int(pid)}
        # Base info fields: label text keyed to the <span> value.
        psoup = soup.find_all("p", {"class": "td-protocolo-info-base"})
        for item in psoup:
            key = item.find(text=True, recursive=False).strip()
            info[key] = item.find("span").get_text().strip()
        # Security title; only the first <h3> is used even if more exist.
        psoup = soup.find_all("h3", {"class": "td-protocolo-info-titulo"})
        if len(psoup) > 1:
            self.log("CUIDADO: Mais de um título presente")
        info["Título"] = psoup[0].text.strip()
        psoup = soup.find_all("p", {"class": "td-protocolo-info"})
        for item in psoup:
            key = item.find(text=True, recursive=False).strip()
            info[key] = item.find("span").get_text().strip()
        # Total net value field.
        item = soup.find("p", {"class": "td-pedido-valor-total"})
        key = item.find(text=True, recursive=False).strip()
        info[key] = item.find("span").get_text().strip()
        self.log(info)
        yield info
| 2,380 | 0 | 81 |
0608a07b986799595d846f90b829c33a0a24ff42 | 3,962 | py | Python | tests/params/string.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | 2 | 2020-02-18T09:13:13.000Z | 2021-06-12T13:16:13.000Z | tests/params/string.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | null | null | null | tests/params/string.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | 1 | 2019-05-05T14:43:53.000Z | 2019-05-05T14:43:53.000Z | """String module test parameters"""
from .helpers import parametrize
LIST_TEST_INT = list(range(1, 6))
LIST_TEST_STR = [str(x) for x in LIST_TEST_INT]
STR_TEST_SPACES = ' '.join(LIST_TEST_STR)
STR_TEST_COMMAS = ','.join(LIST_TEST_STR)
STR_TEST_CSV = '1 2, 3\t4, \t5'
ALPHA_SIMPLE = 'abcdefghijklmnopqrstuvwxyz'
ALPHA_COMPLEX = 'åbçdéfghîjklmnöpqrštùvwxyz'
def pad(val: str, spaces: int = 2) -> str:
    """Return *val* surrounded by ``spaces`` whitespace characters on each side.

    Arguments:
        val {str} -- string to pad

    Keyword Arguments:
        spaces {int} -- number of spaces to pad on either side (default: {2})

    Returns:
        str -- padded string
    """
    margin = ' ' * spaces
    return f'{margin}{val}{margin}'
def param_cast():
    """Parametrize `test_cast` with scalar, string and list conversion cases."""
    names = 'val,expected'
    cases = (
        (1, '1', ),
        ('a', 'a'),
        (LIST_TEST_INT, LIST_TEST_STR),
        ('12345', LIST_TEST_STR),  # sorry!
    )
    labels = ('solo-int', 'solo-str', 'list-int', 'oops', )
    return parametrize(names, cases, labels)
def param_define_split_join():
    """Parametrize `test_define_split` and `test_define_join`.

    Pairs a separator with the joined string and its list form.
    """
    names = 'sep,str_val,list_val'
    vals = (
        (' ', STR_TEST_SPACES, LIST_TEST_STR),
        (',', STR_TEST_COMMAS, LIST_TEST_STR),
    )
    ids = ('spaces', 'commas', )
    return parametrize(names, vals, ids)
def param_strip():
    """Parametrize `test_strip`: padded inputs paired with their stripped form."""
    names = 'val,expected'
    vals = (
        (STR_TEST_SPACES, STR_TEST_SPACES),
        (' {}'.format(STR_TEST_SPACES), STR_TEST_SPACES),
        ('{} '.format(STR_TEST_COMMAS), STR_TEST_COMMAS),
        (' {} '.format(STR_TEST_CSV), STR_TEST_CSV),
        (LIST_TEST_STR, LIST_TEST_STR),
        ([pad(num) for num in LIST_TEST_STR], LIST_TEST_STR),
    )
    # id prefix: '1-' = single string, '[]-' = list of strings.
    ids = ('1-none', '1-left', '1-right', '1-center', '[]-none', '[]-both', )
    return parametrize(names, vals, ids)
def param_filter_empty():
    """Parametrize `test_filter_empty`: lists with and without empty entries."""
    names = 'val,expected'
    vals = (
        (LIST_TEST_STR, LIST_TEST_STR),
        (('1', '', '2', '', '', '3', '4', '5'), LIST_TEST_STR),
    )
    ids = ('none', 'some', )
    return parametrize(names, vals, ids)
def param_split():
    """Parametrize `test_split`.

    Each case is (input value, keyword arguments, expected list); the ids
    below name the option combination each case exercises.
    """
    names = 'val,kwargs,expected'
    vals = (
        (
            LIST_TEST_INT,
            {},
            LIST_TEST_STR,
        ),
        (
            '|'.join(LIST_TEST_STR),
            {},
            ['|'.join(LIST_TEST_STR)],
        ),
        (
            '|'.join(LIST_TEST_STR),
            {'sep': r'\|'},
            LIST_TEST_STR,
        ),
        (
            ' 1| 2 | 3 | 4 | 5 ',
            {'sep': r'\|', },
            LIST_TEST_STR,
        ),
        (
            ' 1| 2 | 3 | 4 | 5 ',
            {'sep': r'\|', 'strip_text': False, },
            [' 1', ' 2 ', ' 3 ', ' 4 ', ' 5 '],
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', },
            LIST_TEST_STR,
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', 'allow_empty': True},
            ['1', '', '2', '', '3', '', '4', '', '5'],
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', 'strip_text': False, 'allow_empty': True},
            [' 1', '', ' 2 ', ' ', ' 3 ', ' ', ' 4 ', ' ', ' 5 '],
        ),
    )
    ids = (
        'defaults',
        'sep_default',
        'sep_custom',
        'strip_default',
        'strip_disabled',
        'allow_default',
        'allow_enabled',
        'all_options',
    )
    return parametrize(names, vals, ids)
def param_simplify():
    """Parametrize `test_simplify`: accented alphabet maps to the plain ASCII one."""
    names = 'val,expected'
    vals = (
        (ALPHA_SIMPLE, ALPHA_SIMPLE),
        (ALPHA_COMPLEX, ALPHA_SIMPLE),
    )
    ids = ('simple', 'complex', )
    return parametrize(names, vals, ids)
| 25.895425 | 77 | 0.481323 | """String module test parameters"""
from .helpers import parametrize
LIST_TEST_INT = list(range(1, 6))
LIST_TEST_STR = [str(x) for x in LIST_TEST_INT]
STR_TEST_SPACES = ' '.join(LIST_TEST_STR)
STR_TEST_COMMAS = ','.join(LIST_TEST_STR)
STR_TEST_CSV = '1 2, 3\t4, \t5'
ALPHA_SIMPLE = 'abcdefghijklmnopqrstuvwxyz'
ALPHA_COMPLEX = 'åbçdéfghîjklmnöpqrštùvwxyz'
def pad(val: str, spaces: int = 2) -> str:
"""Pad val on left and right with `spaces` whitespace chars
Arguments:
val {str} -- string to pad
Keyword Arguments:
spaces {int} -- number of spaces to pad on either side (default: {2})
Returns:
str -- padded string
"""
pad_str = ' ' * spaces
return '{0}{1}{0}'.format(pad_str, val)
def param_cast():
"""Parametrize `test_cast`"""
names = 'val,expected'
vals = (
(1, '1', ),
('a', 'a'),
(LIST_TEST_INT, LIST_TEST_STR),
('12345', LIST_TEST_STR), # sorry!
)
ids = ('solo-int', 'solo-str', 'list-int', 'oops', )
return parametrize(names, vals, ids)
def param_define_split_join():
"""Parametrize `test_define_split` and `test_define_join`"""
names = 'sep,str_val,list_val'
vals = (
(' ', STR_TEST_SPACES, LIST_TEST_STR),
(',', STR_TEST_COMMAS, LIST_TEST_STR),
)
ids = ('spaces', 'commas', )
return parametrize(names, vals, ids)
def param_strip():
"""Parametrize `test_strip`"""
names = 'val,expected'
vals = (
(STR_TEST_SPACES, STR_TEST_SPACES),
(' {}'.format(STR_TEST_SPACES), STR_TEST_SPACES),
('{} '.format(STR_TEST_COMMAS), STR_TEST_COMMAS),
(' {} '.format(STR_TEST_CSV), STR_TEST_CSV),
(LIST_TEST_STR, LIST_TEST_STR),
([pad(num) for num in LIST_TEST_STR], LIST_TEST_STR),
)
ids = ('1-none', '1-left', '1-right', '1-center', '[]-none', '[]-both', )
return parametrize(names, vals, ids)
def param_filter_empty():
"""Parametrize `test_filter_empty`"""
names = 'val,expected'
vals = (
(LIST_TEST_STR, LIST_TEST_STR),
(('1', '', '2', '', '', '3', '4', '5'), LIST_TEST_STR),
)
ids = ('none', 'some', )
return parametrize(names, vals, ids)
def param_split():
"""Parametrize `test_split`"""
names = 'val,kwargs,expected'
vals = (
(
LIST_TEST_INT,
{},
LIST_TEST_STR,
),
(
'|'.join(LIST_TEST_STR),
{},
['|'.join(LIST_TEST_STR)],
),
(
'|'.join(LIST_TEST_STR),
{'sep': r'\|'},
LIST_TEST_STR,
),
(
' 1| 2 | 3 | 4 | 5 ',
{'sep': r'\|', },
LIST_TEST_STR,
),
(
' 1| 2 | 3 | 4 | 5 ',
{'sep': r'\|', 'strip_text': False, },
[' 1', ' 2 ', ' 3 ', ' 4 ', ' 5 '],
),
(
' 1|| 2 | | 3 | | 4 | | 5 ',
{'sep': r'\|', },
LIST_TEST_STR,
),
(
' 1|| 2 | | 3 | | 4 | | 5 ',
{'sep': r'\|', 'allow_empty': True},
['1', '', '2', '', '3', '', '4', '', '5'],
),
(
' 1|| 2 | | 3 | | 4 | | 5 ',
{'sep': r'\|', 'strip_text': False, 'allow_empty': True},
[' 1', '', ' 2 ', ' ', ' 3 ', ' ', ' 4 ', ' ', ' 5 '],
),
)
ids = (
'defaults',
'sep_default',
'sep_custom',
'strip_default',
'strip_disabled',
'allow_default',
'allow_enabled',
'all_options',
)
return parametrize(names, vals, ids)
def param_simplify():
"""Parametrize `test_simplify`"""
names = 'val,expected'
vals = (
(ALPHA_SIMPLE, ALPHA_SIMPLE),
(ALPHA_COMPLEX, ALPHA_SIMPLE),
)
ids = ('simple', 'complex', )
return parametrize(names, vals, ids)
| 0 | 0 | 0 |
4ac872498c1ea9597fa4b1bed79e5231b9320409 | 351 | py | Python | hello/__init__.py | 0x4448/hello | afa8e3a8012f10296ecec48eaf06a5c62adc16be | [
"MIT"
] | null | null | null | hello/__init__.py | 0x4448/hello | afa8e3a8012f10296ecec48eaf06a5c62adc16be | [
"MIT"
] | null | null | null | hello/__init__.py | 0x4448/hello | afa8e3a8012f10296ecec48eaf06a5c62adc16be | [
"MIT"
] | null | null | null | from flask import Flask, jsonify
from . import hello, probes
app = create_app()
| 15.954545 | 58 | 0.726496 | from flask import Flask, jsonify
from . import hello, probes
def internal_server_error(e):
    """Return a JSON error payload with HTTP status 500."""
    payload = jsonify(error=str(e))
    return payload, 500
def create_app():
    """Application factory: build the Flask app, register blueprints and the 500 handler."""
    app = Flask(__name__)
    app.register_blueprint(hello.bp)
    app.register_blueprint(probes.bp)
    app.register_error_handler(500, internal_server_error)
    return app
app = create_app()
| 220 | 0 | 46 |
99dea4a4659ee32ff3a51e49428b04d091e470a5 | 5,592 | py | Python | tfmpl/tests/test_figure.py | cuijianaaa/tf-matplotlib | a197b77f71c32c56e54368d716d9603fd3903f1a | [
"MIT"
] | null | null | null | tfmpl/tests/test_figure.py | cuijianaaa/tf-matplotlib | a197b77f71c32c56e54368d716d9603fd3903f1a | [
"MIT"
] | null | null | null | tfmpl/tests/test_figure.py | cuijianaaa/tf-matplotlib | a197b77f71c32c56e54368d716d9603fd3903f1a | [
"MIT"
] | null | null | null | # Copyright 2018 Christoph Heindl.
#
# Licensed under MIT License
# ============================================================
import tensorflow as tf
import tfmpl
import numpy as np
| 27.278049 | 96 | 0.550429 | # Copyright 2018 Christoph Heindl.
#
# Licensed under MIT License
# ============================================================
import tensorflow as tf
import tfmpl
import numpy as np
def test_arguments():
    """Positional, list and keyword arguments must reach the draw function unchanged."""
    debug = {}
    @tfmpl.figure_tensor
    def draw(a, b, c, d=None, e=None):
        # Record everything the decorator hands through so it can be checked below.
        debug['a'] = a
        debug['b'] = b
        debug['c'] = c
        debug['d'] = d
        debug['e'] = e
        return tfmpl.create_figure()
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.constant(0)
        c = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(a, [0,1], c, d='d', e='e')
        sess.run(tensor, feed_dict={c: np.zeros((2,2))})
    assert debug['a'] == 0
    assert debug['b'] == [0,1]
    np.testing.assert_allclose(debug['c'], np.zeros((2,2)))
    # Fixed: the original ASSIGNED debug['d'] = 'd' / debug['e'] = 'e' here,
    # making the keyword-argument checks no-ops; assert like the sibling tests.
    assert debug['d'] == 'd'
    assert debug['e'] == 'e'
def test_arguments_blittable():
    """Both init_func and the draw function must receive the same arguments."""
    debug = {}
    def init(a, b, c, d=None, e=None):
        debug['init_args'] = [a, b, c, d, e]
        return tfmpl.create_figure(), None
    @tfmpl.blittable_figure_tensor(init_func=init)
    def draw(a, b, c, d=None, e=None):
        debug['args'] = [a, b, c, d, e]
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.constant(0)
        c = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(a, [0,1], c, d='d', e='e')
        sess.run(tensor, feed_dict={c: np.zeros((2,2))})
    # init_func must have seen the evaluated tensor values and keyword args.
    assert debug['init_args'][0] == 0
    assert debug['init_args'][1] == [0,1]
    np.testing.assert_allclose(debug['init_args'][2], np.zeros((2,2)))
    assert debug['init_args'][3] == 'd'
    assert debug['init_args'][4] == 'e'
    # ...and the draw function must have seen the same values.
    assert debug['args'][0] == 0
    assert debug['args'][1] == [0,1]
    np.testing.assert_allclose(debug['args'][2], np.zeros((2,2)))
    assert debug['args'][3] == 'd'
    assert debug['args'][4] == 'e'
def test_callcount():
    """The decorated draw function must run once per session.run call."""
    debug = {}
    debug['called'] = 0
    debug['a'] = []

    @tfmpl.figure_tensor
    def draw(a):
        debug['called'] += 1
        debug['a'].append(a)
        return tfmpl.create_figure()

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(a)
        for i in range(5):
            sess.run(tensor, feed_dict={a: i})
    assert debug['called'] == 5
    np.testing.assert_allclose(debug['a'], [0,1,2,3,4])
def test_callcount_blittable():
    """Blittable variant: init runs exactly once, draw runs on every session.run."""
    debug = {}
    debug['init_called'] = 0
    debug['draw_called'] = 0
    debug['a'] = []
    debug['a_init'] = []

    def init(a):
        debug['init_called'] += 1
        debug['a_init'] = a
        return tfmpl.create_figure(), None

    @tfmpl.blittable_figure_tensor(init_func=init)
    def draw(a):
        debug['draw_called'] += 1
        debug['a'].append(a)

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(a)
        for i in range(5):
            sess.run(tensor, feed_dict={a: i})
    assert debug['init_called'] == 1
    assert debug['draw_called'] == 5
    assert debug['a_init'] == 0
    np.testing.assert_allclose(debug['a'], [0,1,2,3,4])
# NOTE(review): this is a byte-identical duplicate of the preceding
# test_callcount_blittable definition; at import time it shadows the first
# one, so the test effectively runs only once. One of the two copies should
# be removed.
def test_callcount_blittable():
    """Blittable variant: init runs exactly once, draw runs on every session.run."""
    debug = {}
    debug['init_called'] = 0
    debug['draw_called'] = 0
    debug['a'] = []
    debug['a_init'] = []

    def init(a):
        debug['init_called'] += 1
        debug['a_init'] = a
        return tfmpl.create_figure(), None

    @tfmpl.blittable_figure_tensor(init_func=init)
    def draw(a):
        debug['draw_called'] += 1
        debug['a'].append(a)

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(a)
        for i in range(5):
            sess.run(tensor, feed_dict={a: i})
    assert debug['init_called'] == 1
    assert debug['draw_called'] == 5
    assert debug['a_init'] == 0
    np.testing.assert_allclose(debug['a'], [0,1,2,3,4])
def test_draw():
    """Figures render to an NHWC uint8 image batch with the requested facecolors."""
    @tfmpl.figure_tensor
    def draw():
        figs = tfmpl.create_figures(2, figsize=(4,3), dpi=100)
        figs[0].patch.set_facecolor('red')
        figs[1].patch.set_facecolor((0, 1, 0))
        return figs

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        a = tf.compat.v1.placeholder(tf.float32)
        tensor = draw()
        imgs = sess.run(tensor)
        # 4x3 in at dpi 100 -> 400x300 px, two figures, RGB.
        assert imgs.shape == (2, 300, 400, 3)
        np.testing.assert_allclose(imgs[0], np.tile([255, 0, 0], (300, 400, 1)))
        np.testing.assert_allclose(imgs[1], np.tile([0, 255, 0], (300, 400, 1)))
def test_draw_blittable():
    """Blitting must redraw only the rect; its position tracks the tensor input."""
    import matplotlib.patches as patches
    rect = None

    def init(t):
        # One-time figure setup; the rect artist is reused by every draw call.
        nonlocal rect
        fig = tfmpl.create_figure(figsize=(4,4), dpi=100)
        ax = fig.add_axes([0,0,1,1])
        ax.invert_yaxis()
        rect = ax.add_patch(patches.Rectangle((0,0), 0.1, 0.1, facecolor=(0,1,0)))
        return fig, rect

    @tfmpl.blittable_figure_tensor(init_func=init)
    def draw(t):
        rect.set_xy((t,t))
        return rect

    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        t = tf.compat.v1.placeholder(tf.float32)
        tensor = draw(t)
        imgs = sess.run(tensor, feed_dict={t:0})
        assert imgs.shape == (1, 400, 400, 3)
        np.testing.assert_allclose(imgs[0, :40, :40], np.tile([0, 255, 0], (40, 40, 1)))
        imgs = sess.run(tensor, feed_dict={t:0.5})
        assert imgs.shape == (1, 400, 400, 3)
        np.testing.assert_allclose(imgs[0, 200:240, 200:240], np.tile([0, 255, 0], (40, 40, 1)))
| 5,232 | 0 | 173 |
034a9ee2bbf34d27204ec539df1594d5dad26712 | 5,455 | py | Python | missatges/migrations/0001_squashed_0005_v1_0_0.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | null | null | null | missatges/migrations/0001_squashed_0005_v1_0_0.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | 28 | 2016-10-21T16:04:56.000Z | 2018-11-10T20:55:40.000Z | missatges/migrations/0001_squashed_0005_v1_0_0.py | ampafdv/ampadb | 25c804a5cb21afcbe4e222a3b48cca27ff2d9e19 | [
"MIT"
] | 2 | 2016-10-22T19:24:45.000Z | 2017-02-11T10:49:02.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-16 15:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| 43.991935 | 226 | 0.59835 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-16 15:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Squashed migration for the `missatges` app (0001 through 0005).

    Auto-generated by Django 1.10: creates the Conversacio, EstatMissatge,
    GrupDeMissatgeria and Missatge models and replays the later field and
    option changes. Do not hand-edit the operations.
    """

    replaces = [('missatges', '0001_initial'), ('missatges', '0002_grupdemissatgeria_motius'), ('missatges', '0003_edited_missatges'), ('missatges', '0004_change_verbose_name'), ('missatges', '0005_add_creada_rename_tancada')]

    initial = True

    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Conversacio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assumpte', models.CharField(max_length=80)),
                ('tancat', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name_plural': 'conversacions',
                'verbose_name': 'conversació',
            },
        ),
        migrations.CreateModel(
            name='EstatMissatge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vist', models.BooleanField(default=False)),
                ('destinatari', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='GrupDeMissatgeria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(max_length=100)),
                ('usuaris', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
                ('motius', models.TextField(blank=True, help_text='Motius per enviar missatges a aquest grup. Apareixeràn a "Nou missatge". Un per línia.')),
            ],
            options={
                'verbose_name_plural': 'grups de missatgeria',
            },
        ),
        migrations.CreateModel(
            name='Missatge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordre', models.PositiveSmallIntegerField(editable=False)),
                ('contingut', models.TextField(help_text='Suporta <a href="/markdown">Markdown</a>')),
                ('enviat', models.DateTimeField(auto_now_add=True)),
                ('editat', models.DateTimeField(auto_now=True)),
                ('conversacio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='missatges.Conversacio', verbose_name='conversació')),
                ('destinataris', models.ManyToManyField(through='missatges.EstatMissatge', to=settings.AUTH_USER_MODEL)),
                ('per', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ha_enviat', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='estatmissatge',
            name='missatge',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='missatges.Missatge'),
        ),
        migrations.AddField(
            model_name='conversacio',
            name='a',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='missatges.GrupDeMissatgeria'),
        ),
        migrations.AddField(
            model_name='conversacio',
            name='de',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='missatge',
            unique_together=set([('conversacio', 'ordre')]),
        ),
        migrations.AlterModelOptions(
            name='missatge',
            options={'ordering': ['-ordre']},
        ),
        migrations.AddField(
            model_name='missatge',
            name='estat',
            field=models.CharField(blank=True, choices=[('CLOSED', 'Tancat'), ('REOPENED', 'Reobert')], max_length=8),
        ),
        migrations.AlterUniqueTogether(
            name='estatmissatge',
            unique_together=set([('destinatari', 'missatge')]),
        ),
        migrations.AlterField(
            model_name='missatge',
            name='contingut',
            field=models.TextField(blank=True, help_text='Suporta <a href="/markdown">Markdown</a>'),
        ),
        migrations.AlterModelOptions(
            name='estatmissatge',
            options={'verbose_name': 'estat del missatge', 'verbose_name_plural': 'estat dels missatges'},
        ),
        migrations.AlterModelOptions(
            name='conversacio',
            options={'ordering': ['creada', 'tancada'], 'verbose_name': 'conversació', 'verbose_name_plural': 'conversacions'},
        ),
        migrations.RenameField(
            model_name='conversacio',
            old_name='tancat',
            new_name='tancada',
        ),
        migrations.AddField(
            model_name='conversacio',
            name='creada',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
| 0 | 5,188 | 23 |
0e57cccdd90d6befcabb404f93b9ff48782330b4 | 7,770 | py | Python | scripts/build-disk-image.py | mihaip/transparent-mac | 0899205dba8fc3104f6a4f65a1714816720fe67b | [
"Apache-2.0"
] | null | null | null | scripts/build-disk-image.py | mihaip/transparent-mac | 0899205dba8fc3104f6a4f65a1714816720fe67b | [
"Apache-2.0"
] | 23 | 2022-03-31T22:41:20.000Z | 2022-03-31T23:00:45.000Z | scripts/build-disk-image.py | mihaip/transparent-mac | 0899205dba8fc3104f6a4f65a1714816720fe67b | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3
import brotli
import glob
import hashlib
import json
import machfs
import os
import struct
import sys
import typing
import urllib.request
import zipfile
LIBRARY_DIR = os.path.join(os.path.dirname(__file__), "..", "Library")
CACHE_DIR = os.path.join("/tmp", "infinite-mac-cache")
DEBUG = os.getenv("DEBUG", "0") == "1"
input_path = sys.argv[1]
output_dir = sys.argv[2]
manifest_dir = sys.argv[3]
DISK_SIZE = 200 * 1024 * 1024
CHUNK_SIZE = 256 * 1024
chunk_count = 0
total_size = 0
input_file_name = os.path.basename(input_path)
import_folders = get_import_folders()
hash = hashlib.sha256()
sys.stderr.write("Chunking and compressing %s" % input_file_name)
sys.stderr.flush()
with open(input_path, "rb") as input_file:
v = machfs.Volume()
v.read(input_file.read(), preserve_desktopdb=True)
v.name = "Infinite HD"
for folder_path, folder in import_folders.items():
parent_folder_path, folder_name = os.path.split(folder_path)
parent = traverse_folders(v["Library"], parent_folder_path)
parent[folder_name] = folder
flat = v.write(
size=DISK_SIZE,
align=512,
desktopdb=False,
bootable=True,
)
brotli_quality = 0 if DEBUG else 11
for i in range(0, DISK_SIZE, CHUNK_SIZE):
chunk = flat[i:i + CHUNK_SIZE]
total_size += len(chunk)
chunk_compressed = brotli.compress(chunk, quality=brotli_quality)
chunk_path = os.path.join(output_dir,
f"{input_file_name}.{chunk_count}.br")
# Use compressed version for the version hash so that if we change the
# compression quality we can trigger a re-download.
hash.update(chunk_compressed)
with open(chunk_path, "wb+") as chunk_file:
chunk_file.write(chunk_compressed)
chunk_count += 1
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
manifest_path = os.path.join(manifest_dir, f"{input_file_name}.json")
with open(manifest_path, "w+") as manifest_file:
json.dump(
{
"totalSize": total_size,
"chunkCount": chunk_count,
"chunkSize": CHUNK_SIZE,
"version": hash.hexdigest()
},
manifest_file,
indent=4)
| 36.139535 | 80 | 0.587645 | #!/usr/local/bin/python3
import brotli
import glob
import hashlib
import json
import machfs
import os
import struct
import sys
import typing
import urllib.request
import zipfile
LIBRARY_DIR = os.path.join(os.path.dirname(__file__), "..", "Library")
CACHE_DIR = os.path.join("/tmp", "infinite-mac-cache")
DEBUG = os.getenv("DEBUG", "0") == "1"
input_path = sys.argv[1]
output_dir = sys.argv[2]
manifest_dir = sys.argv[3]
DISK_SIZE = 200 * 1024 * 1024
CHUNK_SIZE = 256 * 1024
chunk_count = 0
total_size = 0
input_file_name = os.path.basename(input_path)
def read_url(url: str) -> bytes:
    """Fetch *url* and return the response body, caching it on disk.

    Responses are cached under CACHE_DIR keyed by the SHA-256 of the URL so
    repeated builds do not re-download the same artifacts.
    """
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    cache_key = hashlib.sha256(url.encode()).hexdigest()
    cache_path = os.path.join(CACHE_DIR, cache_key)
    if os.path.exists(cache_path):
        # Bug fix: the handle from open(cache_path, "rb").read() was leaked;
        # use a context manager so it is closed deterministically.
        with open(cache_path, "rb") as f:
            return f.read()
    response = urllib.request.urlopen(url)
    response_body = response.read()
    with open(cache_path, "wb+") as f:
        f.write(response_body)
    return response_body
def get_import_folders() -> typing.Dict[str, machfs.Folder]:
    """Collect all folders to import: first from manifest JSONs, then from zips."""
    folders = {}
    for source in (import_manifests, import_zips):
        folders.update(source())
    return folders
def import_manifests() -> typing.Dict[str, machfs.Folder]:
    """Build folders from the *.json manifests under LIBRARY_DIR.

    Each manifest names a source disk image URL plus either a single folder
    to extract ("src_folder") or a denylist of root items to skip
    ("src_denylist"). Returns a mapping of relative folder path -> folder.
    """
    sys.stderr.write("Importing other images\n")
    import_folders = {}
    for manifest_path in glob.iglob(os.path.join(LIBRARY_DIR, "**", "*.json")):
        # Destination path mirrors the manifest's location inside LIBRARY_DIR.
        folder_path, _ = os.path.splitext(
            os.path.relpath(manifest_path, LIBRARY_DIR))
        sys.stderr.write(" Importing %s\n" % folder_path)
        with open(manifest_path, "r") as manifest:
            manifest_json = json.load(manifest)
            v = machfs.Volume()
            v.read(read_url(manifest_json["src_url"]))
            if "src_folder" in manifest_json:
                folder = v[manifest_json["src_folder"]]
                # Clear x/y position so that the Finder computes a layout for us.
                # usrInfo is the 16-byte Finder DInfo record (big-endian).
                (rect_x, rect_y, rect_width, rect_height, flags, window_x,
                 window_y, view) = struct.unpack(">hhhhHhhH", folder.usrInfo)
                window_x = -1
                window_y = -1
                folder.usrInfo = struct.pack(">hhhhHhhH", rect_x, rect_y,
                                             rect_width, rect_height, flags,
                                             window_x, window_y, view)
            elif "src_denylist" in manifest_json:
                # Copy the whole volume root except the denylisted items.
                denylist = manifest_json["src_denylist"]
                folder = machfs.Folder()
                for name, item in v.items():
                    if name not in denylist:
                        folder[name] = item
            else:
                assert False, "Unexpected manifest format: %s" % json.dumps(
                    manifest_json)
            import_folders[folder_path] = folder
    return import_folders
def import_zips() -> typing.Dict[str, machfs.Folder]:
    """Build folders from the *.zip archives under LIBRARY_DIR.

    Zips are expected in the layout produced by classic-Mac archiving tools:
    data forks as regular entries, resource forks under ".rsrc/", Finder
    metadata under ".finf/", and an optional top-level "DInfo" record for the
    folder itself. Returns a mapping of relative folder path -> folder.
    """
    sys.stderr.write("Importing .zips\n")
    import_folders = {}
    for zip_path in glob.iglob(os.path.join(LIBRARY_DIR, "**", "*.zip")):
        folder_path, _ = os.path.splitext(
            os.path.relpath(zip_path, LIBRARY_DIR))
        sys.stderr.write(" Importing %s\n" % folder_path)
        folder = machfs.Folder()
        # Accumulate forks/metadata per logical path; a file's data fork,
        # resource fork and Finder info arrive as separate zip entries.
        files_by_path = {}
        with zipfile.ZipFile(zip_path, "r") as zip:
            for zip_info in zip.infolist():
                if zip_info.is_dir():
                    continue
                file_data = zip.read(zip_info)
                if zip_info.filename == "DInfo":
                    # Finder info for the imported folder itself.
                    folder.usrInfo = file_data[0:16]
                    folder.fndrInfo = file_data[16:]
                    continue
                path = zip_info.filename
                if ".rsrc/" in path:
                    path = path.replace(".rsrc/", "")
                    files_by_path.setdefault(path,
                                             machfs.File()).rsrc = file_data
                    continue
                if ".finf/" in path:
                    # May actually be the DInfo for a folder, check for that.
                    path = path.replace(".finf/", "")
                    try:
                        # Will throw if there isn't a corresponding directory,
                        # no need to actually do anything with the return value.
                        zip.getinfo(path + "/")
                        nested_folder_path, nested_folder_name = os.path.split(
                            path)
                        parent = traverse_folders(folder, nested_folder_path)
                        nested_folder = machfs.Folder()
                        (nested_folder.usrInfo,
                         nested_folder.fndrInfo) = struct.unpack(
                             '>16s16s', file_data)
                        parent[fix_name(nested_folder_name)] = nested_folder
                        continue
                    except KeyError:
                        pass
                    # Otherwise it is per-file Finder info (type/creator etc.).
                    file = files_by_path.setdefault(path, machfs.File())
                    (file.type, file.creator, file.flags, file.x, file.y, _,
                     file.fndrInfo) = struct.unpack('>4s4sHhhH16s', file_data)
                    continue
                # Plain entry: the file's data fork.
                files_by_path.setdefault(path, machfs.File()).data = file_data
        # Place each assembled file at its (possibly nested) location.
        for path, file in files_by_path.items():
            file_folder_path, file_name = os.path.split(path)
            parent = traverse_folders(folder, file_folder_path)
            parent[fix_name(file_name)] = file
        import_folders[folder_path] = folder
    return import_folders
def traverse_folders(parent: machfs.Folder, folder_path: str) -> machfs.Folder:
    """Descend (creating as needed) nested folders under *parent*; return the innermost."""
    if not folder_path:
        return parent
    node = parent
    for piece in folder_path.split(os.path.sep):
        name = fix_name(piece)
        if name not in node:
            node[name] = machfs.Folder()
        node = node[name]
    return node
def fix_name(name: str) -> str:
    """HFS uses ':' as the path separator; swap it for '/' in item names."""
    return "/".join(name.split(":"))
# Build the disk image: graft all library folders into the base volume, then
# chunk + Brotli-compress the flattened image for incremental download.
import_folders = get_import_folders()

hash = hashlib.sha256()
sys.stderr.write("Chunking and compressing %s" % input_file_name)
sys.stderr.flush()
with open(input_path, "rb") as input_file:
    v = machfs.Volume()
    v.read(input_file.read(), preserve_desktopdb=True)
    v.name = "Infinite HD"

    # Place each imported folder under the volume's Library folder.
    for folder_path, folder in import_folders.items():
        parent_folder_path, folder_name = os.path.split(folder_path)
        parent = traverse_folders(v["Library"], parent_folder_path)
        parent[folder_name] = folder

    flat = v.write(
        size=DISK_SIZE,
        align=512,
        desktopdb=False,
        bootable=True,
    )

    # DEBUG builds skip compression effort (quality 0) for faster iteration.
    brotli_quality = 0 if DEBUG else 11
    for i in range(0, DISK_SIZE, CHUNK_SIZE):
        chunk = flat[i:i + CHUNK_SIZE]
        total_size += len(chunk)
        chunk_compressed = brotli.compress(chunk, quality=brotli_quality)
        chunk_path = os.path.join(output_dir,
                                  f"{input_file_name}.{chunk_count}.br")
        # Use compressed version for the version hash so that if we change the
        # compression quality we can trigger a re-download.
        hash.update(chunk_compressed)
        with open(chunk_path, "wb+") as chunk_file:
            chunk_file.write(chunk_compressed)
        chunk_count += 1
        sys.stderr.write(".")
        sys.stderr.flush()
    sys.stderr.write("\n")

# Emit a manifest describing the chunked image for the loader.
manifest_path = os.path.join(manifest_dir, f"{input_file_name}.json")
with open(manifest_path, "w+") as manifest_file:
    json.dump(
        {
            "totalSize": total_size,
            "chunkCount": chunk_count,
            "chunkSize": CHUNK_SIZE,
            "version": hash.hexdigest()
        },
        manifest_file,
        indent=4)
| 5,336 | 0 | 138 |
53b0d293f6e99d6f3ee2c3581fc057d87a669f7e | 1,630 | py | Python | manila/share/drivers/emc/plugins/vnx/constants.py | nidhimittalhada/access_group_repo | 62f3365bc5fb728fcca692a9b3977690fabcd78f | [
"Apache-2.0"
] | 1 | 2019-05-06T10:33:38.000Z | 2019-05-06T10:33:38.000Z | manila/share/drivers/emc/plugins/vnx/constants.py | nidhimittalhada/access_group_repo | 62f3365bc5fb728fcca692a9b3977690fabcd78f | [
"Apache-2.0"
] | 5 | 2015-08-13T15:17:28.000Z | 2016-08-02T02:55:01.000Z | manila/share/drivers/emc/plugins/vnx/constants.py | nidhimittalhada/access_group_repo | 62f3365bc5fb728fcca692a9b3977690fabcd78f | [
"Apache-2.0"
] | 3 | 2019-05-03T12:32:47.000Z | 2021-01-30T20:26:19.000Z | # Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'
MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'
MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'
MSG_VDM_EXIST = '13421840550'
MSG_SNAP_EXIST = '13690535947'
MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'
MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'
IP_ALLOCATIONS = 2
CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}
XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'
CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'
SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
| 30.754717 | 78 | 0.760736 | # Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'
MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'
MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'
MSG_VDM_EXIST = '13421840550'
MSG_SNAP_EXIST = '13690535947'
MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'
MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'
IP_ALLOCATIONS = 2
CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}
XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'
CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'
SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
| 0 | 0 | 0 |
c06e9e86d98ae8b8f03141dfa78ea8ea2114c0c0 | 60,214 | py | Python | slvel/fitting_functions.py | Liz-Strong/slvel | 018e0ab028adf87c71d694a88f61a5b84c87a2f8 | [
"BSD-3-Clause"
] | 2 | 2021-02-25T22:03:50.000Z | 2022-01-12T17:04:11.000Z | slvel/fitting_functions.py | Liz-Strong/slvel | 018e0ab028adf87c71d694a88f61a5b84c87a2f8 | [
"BSD-3-Clause"
] | null | null | null | slvel/fitting_functions.py | Liz-Strong/slvel | 018e0ab028adf87c71d694a88f61a5b84c87a2f8 | [
"BSD-3-Clause"
] | 1 | 2022-01-12T22:15:37.000Z | 2022-01-12T22:15:37.000Z | import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.signal import find_peaks
from scipy.stats import pearsonr as pearsonr
from scipy.special import erf as erf
"""Fitting functions for multi-Gaussian fitting.
"""
def fit_wrapper(x,*args):
    """
    Adapter that unpacks a flat parameter vector for the multi-Gaussian model.

    scipy.optimize.curve_fit passes fit parameters as a flat argument list;
    this wrapper splits that list into per-Gaussian amplitudes, centers and
    standard deviations plus a shared vertical offset, which allows a variable
    number of Gaussians to be fitted.

    Args:
        x (array): independent variable, such that y=f(x).
        args: flat parameter vector laid out as [amplitudes..., centers...,
            standard deviations..., vertical offset].

    Returns:
        multi_gaussian_fit_function(x, h, mu, sigma, vertical_offset)
    """
    n = (len(args) - 1) // 3  # number of Gaussians being fitted
    amplitudes, centers, widths = (args[i * n:(i + 1) * n] for i in range(3))
    offset = args[-1]
    return multi_gaussian_fit_function(x, amplitudes, centers, widths, offset)
def multi_gaussian_fit_function(x,h,mu,sigma,vertical_offset):
    """
    Evaluate a sum of Gaussians plus a constant vertical offset.

    Args:
        x (array): independent variable, such that y=f(x).
        h (list): Gaussian amplitudes.
        mu (list): Gaussian centers (translational offsets).
        sigma (list): Gaussian standard deviations.
        vertical_offset (float): constant baseline added to the sum.

    Returns:
        fit (array): vertical_offset + sum_i h_i * exp(-(x-mu_i)^2 / (2*sigma_i^2))
    """
    total = np.zeros(len(x)) + vertical_offset
    for amplitude, center, width in zip(h, mu, sigma):
        total = total + amplitude * np.exp(-((x - center) ** 2) / (2 * width ** 2))
    return total
def initial_guess(initial_amplitude,initial_translational_offset,initial_stddev,initial_vertical_offset):
    """
    Pack the per-Gaussian seed values into the flat parameter vector expected
    by fit_wrapper / scipy.optimize.curve_fit.

    Args:
        initial_amplitude (iterable): seed amplitudes of the Gaussians.
        initial_translational_offset (iterable): seed centers of the Gaussians.
        initial_stddev (iterable): seed standard deviations of the Gaussians.
        initial_vertical_offset (float): seed for the constant baseline h.

    Returns:
        p0 (list): [amplitudes..., centers..., stddevs..., vertical offset],
            matching the layout fit_wrapper unpacks.
    """
    # (Dead commented-out packing code removed; layout must match fit_wrapper.)
    p0 = (list(initial_amplitude)
          + list(initial_translational_offset)
          + list(initial_stddev)
          + [initial_vertical_offset])
    return p0
def bound_maker(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_gaussians):
    """
    Build the (lower, upper) bounds tuple for the multi-Gaussian curve fit.

    Args:
        amplitude_bounds (tuple): (low, high) bounds on Gaussian amplitudes.
        translational_offset_bounds (tuple): (low, high) bounds on centers.
        stddev_bounds (tuple): (low, high) bounds on standard deviations.
        vertical_offset_bounds (tuple): (low, high) bounds on the baseline.
        number_gaussians (int): number of Gaussians in the fit.

    Returns:
        bounds (tuple): (lower, upper) lists matching the flat parameter
            layout [amplitudes..., centers..., stddevs..., vertical offset].
    """
    def edge(i):
        # One side (i=0 lower, i=1 upper) of the bounds in parameter order.
        return ([amplitude_bounds[i]] * number_gaussians
                + [translational_offset_bounds[i]] * number_gaussians
                + [stddev_bounds[i]] * number_gaussians
                + [vertical_offset_bounds[i]])
    return (edge(0), edge(1))
def bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg):
    """
    Make the bounds tuple for fits after the first, anchored to the previous fit.

    Previously fitted Gaussians may only move +/-20% in amplitude and center;
    newly added Gaussians get wide bounds derived from the signal itself.

    Args:
        t (array): time grid of burst.
        y (array): burst.
        y_fit (array): previous fit to the burst (currently unused).
        popt (array): parameters of the previous multi-Gaussian fit, laid out
            as [amplitudes..., centers..., widths..., vertical offset].
        num_new_gaussians (int): number of Gaussians to add to the new fit.
        amplitude_bounds (array): amplitude bounds (currently unused; new
            amplitude bounds are derived from max(y) and noise_avg instead).
        stddev_bounds (array): bounds on the standard deviations.
        vertical_offset_bounds (array): bounds on the vertical offset.
        new_translational_offset (array): candidate centers for new Gaussians;
            only the last entry is used, and only when num_new_gaussians == 1.
        noise_avg (float): average noise level, subtracted from max(y) when
            bounding new amplitudes.

    Returns:
        bounds (tuple): (lower, upper) arrays for scipy.optimize.curve_fit.
    """
    # popt layout is [h..., mu..., sigma..., vertical offset].
    num_gaussians_old = int((len(popt)-1)/3)
    amplitudes = popt[:num_gaussians_old]
    translational_offsets = popt[num_gaussians_old:2*num_gaussians_old]
    widths = popt[2*num_gaussians_old:3*num_gaussians_old]
    vert_offset = popt[-1]  # NOTE(review): extracted but not used below
    # Old amplitudes may move +/-20%; new ones may span -75%..120% of the
    # noise-subtracted signal maximum.
    lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [-.75*(np.max(y)-noise_avg)]*num_new_gaussians)
    upper_amp = np.append(amplitudes+np.abs(amplitudes)*.2, [1.2*(np.max(y)-noise_avg)]*num_new_gaussians)
    # limit the movement of the previously fitted gaussians.
    lower_translational = np.append(translational_offsets*.8, [0]*num_new_gaussians)
    upper_translational = np.append(translational_offsets*1.2, [np.max(t)]*num_new_gaussians)
    if num_new_gaussians == 1:
        # A single new Gaussian gets its center bounded around the last
        # suggested seed position (+/-50%).
        lower_translational = np.append(translational_offsets*.8, [new_translational_offset[-1]*.5])
        upper_translational = np.append(translational_offsets*1.2, [new_translational_offset[-1]*1.5])
    # Clamp all centers to the burst's time window.
    lower_translational[lower_translational<0] = 0
    upper_translational[upper_translational>np.max(t)] = .9*np.max(t)
    lower_stddev = np.append([stddev_bounds[0]]*num_gaussians_old, [stddev_bounds[0]]*num_new_gaussians)
    upper_stddev = np.append([stddev_bounds[1]]*num_gaussians_old, [stddev_bounds[1]]*num_new_gaussians)
    # make into array
    lower = np.concatenate((lower_amp, lower_translational, lower_stddev, [vertical_offset_bounds[0]]))
    upper = np.concatenate((upper_amp, upper_translational, upper_stddev, [vertical_offset_bounds[1]]))
    bounds = (lower, upper)
    return bounds
def calculate_r2(y, y_fit):
    """
    Correlation between the data and the fit.

    Note: despite the name, this returns Pearson's r (not r**2). The earlier
    sum-of-squares r^2 implementation was superseded by pearsonr and the dead
    commented-out code has been removed.

    Args:
        y (array): data.
        y_fit (array): fit to the data, evaluated on the same time axis as y.

    Returns:
        r2 (float): Pearson correlation coefficient between y and y_fit.
    """
    return pearsonr(y, y_fit)[0]
def calculate_rmse(targets, predictions):
    """
    Root mean square error (RMSE) between targets and predictions.

    Args:
        targets (array): actual values.
        predictions (array): predicted values.

    Returns:
        rmse (float): ||predictions - targets|| / sqrt(n).
    """
    residuals = predictions - targets
    return np.linalg.norm(residuals) / np.sqrt(len(predictions))
def calculate_max_error(targets, predictions):
    """
    Maximum absolute difference between targets and predictions.

    Args:
        targets (array): actual values.
        predictions (array): predicted values.

    Returns:
        float: max_i |targets_i - predictions_i|.
    """
    return np.abs(targets - predictions).max()
def gaussian_generator(npoints,std):
    """
    Make a Gaussian window npoints long with standard deviation std.

    Uses scipy.signal.windows.gaussian: the scipy.signal.gaussian alias used
    previously was deprecated and removed in SciPy 1.13, so this keeps the
    function working on modern SciPy.

    Args:
        npoints (int): length of Gaussian.
        std (float): standard deviation of Gaussian.

    Returns:
        g (array): Gaussian window with peak value 1 at the center.
    """
    g = signal.windows.gaussian(npoints, std=std)
    return g
def rect_generator(npoints,width,area):
    """
    Make a rect for correlation whose height is chosen so height * width = area.

    Args:
        npoints (int): length of the output array.
        width (float): width of rect (rounded by the indexing below).
        area (float): area of rect; dictates rect height via height = area / width.

    Returns:
        r (array): rect function centered in the array.
    """
    start = int(np.floor(npoints / 2 - width / 2 + 1))
    stop = int(np.ceil(npoints / 2 + width / 2))
    # The width gets rounded by the slicing above; normalise by the rounded
    # width so the area stays constant.
    height = area / (np.floor(width / 2) * 2)
    r = np.zeros(npoints)
    r[start:stop] = height
    return r
def seed_initial_offsets_peaks(y, noise, rect_area=500, prominence_knockdown_factor=0.03):
    """
    Generate the locations of the seeds for the initial fit, one per peak.

    Peak locations are determined from a smoothed version of the signal,
    obtained by cross-correlating it with a rect.

    Args:
        y (array): signal.
        noise (float): noise level of y.
        rect_area (float): area of rect, where area = width * height.
        prominence_knockdown_factor (float): sets the find_peaks prominence as
            a fraction of the smoothed signal's height above the noise floor.
            Honoured as given only when max SNR > 10; overridden for lower SNR.

    Returns:
        peaks (array): indices of the detected peaks in the smoothed signal.
    """
    # Lower-SNR signals need a larger prominence factor so that noise wiggles
    # are not seeded as peaks. (Replaces the original `if snr > 10: pass`
    # branch and removes the dead debug/plotting code.)
    max_snr = np.max(y / noise)
    if max_snr < 5:
        prominence_knockdown_factor = .09
    elif max_snr <= 10:
        prominence_knockdown_factor = .06
    length = len(y)
    # Smooth by cross-correlating with a rect ~1/35 of the signal length,
    # keeping only the central (fully overlapping) part of the correlation.
    r = rect_generator(length, length / 35, rect_area)
    xc_r = signal.correlate(y, r)[length // 2:-length // 2]
    # Prominence threshold is relative to the smoothed signal's height above
    # the rect-scaled noise floor.
    peaks, _ = find_peaks(xc_r, prominence=(np.max(xc_r) - noise * rect_area) * prominence_knockdown_factor)
    return peaks
def initial_seed(t, y, noise, max_num_gaussians=8, rect_area=500):
    """
    Make seeds (initial parameter guesses) for the first multi-Gaussian fit.

    Calls seed_initial_offsets_peaks to locate candidate peaks, keeps at most
    max_num_gaussians of them, and converts them to (center, amplitude) seeds.

    Args:
        t (array): time corresponding to signal.
        y (array): signal.
        noise (float): noise level of y; subtracted from peak heights.
        max_num_gaussians (int): maximum number of initial seeds.
        rect_area (float): rect area (width * height) forwarded to
            seed_initial_offsets_peaks.

    Returns:
        initial_translational_offset (array): initial horizontal offsets, mu.
        initial_amplitude (array): initial amplitudes, A (noise-subtracted;
            non-positive entries are dropped along with their offsets).
    """
    peak_locns = seed_initial_offsets_peaks(y, noise, rect_area=rect_area)  # use as initial mus
    peak_values = y[peak_locns]  # use as initial amplitudes
    if len(peak_values) > max_num_gaussians:
        # Bug fix: argsort is ascending, so the original [:max_num_gaussians]
        # slice kept the *smallest* peaks; keep the tallest ones instead.
        sorted_values = np.argsort(peak_values)[-max_num_gaussians:]
        peak_values = peak_values[sorted_values]
        peak_locns = peak_locns[sorted_values]
    initial_translational_offset = t[peak_locns]
    initial_amplitude = peak_values - noise
    # Subtracting the noise floor can leave non-positive amplitudes; drop
    # those seeds (and their offsets) entirely.
    positive_value_locations = np.argwhere(initial_amplitude > 0)
    initial_amplitude = initial_amplitude[positive_value_locations].flatten()
    initial_translational_offset = initial_translational_offset[positive_value_locations].flatten()
    return initial_translational_offset, initial_amplitude
def calculate_effective_length(model, fitted_vert_offset, delta_t, max_normalized_height=1):
    """
    Compute the effective length of a signal: its baseline-removed area divided
    by its baseline-removed peak height, i.e. the length of a rectangle with
    the same area and the same maximum height as the signal.

    Args:
        model (array): signal
        fitted_vert_offset (float): h in the multigaussian fitting equation (baseline)
        delta_t (float): time discretization
        max_normalized_height (float): maximum height of the signal
    Returns:
        effective_length (float): effective length
    """
    baseline_removed = model - fitted_vert_offset
    area = np.sum(baseline_removed)
    peak = np.max(model) - fitted_vert_offset
    return area / peak * max_normalized_height * delta_t
def calculate_burst_duration(y_fit, fitted_vert_offset, delta_t, lower_thresh=0.1, upper_thresh=0.9):
    """
    Calculate the duration of the burst between the lower and upper thresholds
    of the cumulative sum of the (baseline-removed) signal.

    Args:
        y_fit (array): values of fitted burst
        fitted_vert_offset (float): h in the multigaussian fitting equation
        delta_t (float): time discretization
        lower_thresh (float): lower fraction of integrated signal to exclude
        upper_thresh (float): upper fraction of integrated signal to exclude
    Returns:
        duration (float): time between the indices where the cumulative area
            crosses lower_thresh and upper_thresh of the total area; 0 if the
            calculation fails (e.g. empty input or thresholds never crossed)
    """
    try:
        cs = np.cumsum(y_fit - fitted_vert_offset)
        csm = np.max(cs)
        lower_index = np.argwhere(cs > (lower_thresh * csm))[0]
        upper_index = np.argwhere(cs < (upper_thresh * csm))[-1]
        duration = (upper_index - lower_index) * delta_t
    except (ValueError, IndexError):
        # Bug fix: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt. ValueError covers empty input to np.max;
        # IndexError covers argwhere returning no matches.
        print("problem calculating the duration")
        duration = [0]
    return duration[0]
def make_weights(y, g_length=100):
    """
    Build the weighting vector (sigma) for the curve-fitting step.

    A smoothed copy of the burst is made by cross correlating the signal with a
    Gaussian kernel; the weights bias the fit toward the large-magnitude parts
    of the burst and de-emphasize the tails.

    Args:
        y (array): signal
        g_length (int): length (width parameter) of the Gaussian kernel
    Returns:
        sigma (array): per-sample weights for curve_fit (smaller = trusted more)
    """
    n = len(y)
    kernel = gaussian_generator(n, g_length)
    # keep only the valid central slice of the cross correlation
    smoothed = signal.correlate(y, kernel)[int(np.ceil(n/2-1)):-int(np.floor(n/2))]
    normalized = smoothed / np.max(smoothed)
    return 1 / np.sqrt(normalized)
def eval_fit(y, y_fit, t, popt, delta_t):
    """
    Compute metrics characterizing the quality of a fit.

    Args:
        y (array): signal
        y_fit (array): fitted version of the signal
        t (array): times corresponding to y and y_fit (unused, kept for interface)
        popt (array): results of curve fit; popt[-1] is the vertical offset
        delta_t (float): time discretization
    Returns:
        r2 (float): fraction of burst variability explained by the fit
        rmse (float): root mean square error between fit and signal
        max_error (float): max absolute difference between y and y_fit
        max_error_normalized (float): max_error / max(y)
        duration (float): burst duration from the middle portion of the
            integrated signal area (see calculate_burst_duration)
    """
    vert_offset = popt[-1]
    r2 = calculate_r2(y, y_fit)
    rmse = calculate_rmse(y, y_fit)
    max_error = calculate_max_error(y, y_fit)
    max_error_normalized = max_error / np.max(y)
    duration = calculate_burst_duration(y_fit, vert_offset, delta_t)
    return r2, rmse, max_error, max_error_normalized, duration
def package_fit_data(r2, rmse, max_error, max_error_normalized,
                     time_eff, duration, popt, ang_vel,
                     orbit_radius, x_offsets, y_offsets, number_gaussians,
                     y, noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t):
    """
    Save data from fits for use in later modules.
    Args:
        r2 (float): percentage of variability of the burst that's been accounted for in the fit. (how well the regression predicts the data)
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute value of difference between y and y_fit
        max_error_normalized (float): max_error/max(y)
        time_eff (float): rect effective time of signal. time of a rectangle with same max height as the signal
        duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
        popt (array): output of curve fit, laid out as [h..., mu..., sigma..., vertical_offset]
        ang_vel (float): actual angular velocity Omega
        orbit_radius (float): actual orbit radius R
        x_offsets (float): actual x component of distance between orbit center and light center, D
        y_offsets (float): actual y component of distance between orbit center and light center, D
        number_gaussians (int): number of gaussians used to parameterize burst
        y (array): burst
        noise_avg (float): average noise value
        max_num_gaussians (int): maximum number of gaussians to be included in the fit
        dirctn (int): +1 or -1, clockwise or counter clockwise
        initial_number_gaussians (int): number of Gaussians used in initial fit
        t (array): time corresponding to y
    Returns:
        data (array): flat record of the fit: metrics, then zero-padded
            (h, mu, sigma) arrays of length max_num_gaussians sorted by mu and
            with mu relative to the first Gaussian, then offsets/geometry/SNR
            and direction flags. Returns None if packaging fails.
    """
    try:
        # fit parameters: popt is [amplitudes | offsets | widths | vert_offset]
        h = popt[0:number_gaussians] # amplitude
        mu = popt[number_gaussians:2*number_gaussians] # offset
        sigma = popt[2*number_gaussians:3*number_gaussians] # width
        # to save, we want distances relative to location of first gaussian
        sorted_indices = np.argsort(mu)
        # zero-pad to max_num_gaussians so every record has the same length
        h_save, mu_save, sigma_save = np.zeros(max_num_gaussians), np.zeros(max_num_gaussians), np.zeros(max_num_gaussians)
        h_save[:number_gaussians] = h[sorted_indices]
        mu_save[:number_gaussians] = mu[sorted_indices]-mu[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma_save[:number_gaussians] = sigma[sorted_indices]
        vert_offset_save = popt[-1]
        # geometry: distance and angle between orbit center and light center
        D = np.sqrt(x_offsets**2+y_offsets**2)
        theta = np.arctan2(y_offsets, x_offsets)
        max_SNR = np.max(y)/noise_avg
        avg_SNR = np.mean(y)/noise_avg
        # one-hot encode the rotation direction
        if dirctn == 1:
            clockwise = [0]
            counterclockwise = [1]
        else:
            clockwise = [1]
            counterclockwise = [0]
        # NOTE: downstream modules depend on this exact field order — do not reorder.
        data = np.concatenate([[ang_vel],[rmse],
                               [r2],[max_error],[max_error_normalized],[time_eff],[duration],
                               h_save,mu_save,sigma_save,[vert_offset_save],[t[1]-t[0]],[orbit_radius], [x_offsets],
                               [y_offsets], [D], [theta], [max_SNR], [avg_SNR], clockwise, counterclockwise, [int(initial_number_gaussians)]])
        return data
    except Exception as excptn:
        # best-effort: a malformed popt/number_gaussians combination is logged,
        # and None is returned so the caller can skip this burst
        print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
        print(excptn)
        return
def seed_later_offsets_peaks(y, noise, rect_area=100, prominence_knockdown_factor=0.03):
    """
    Locate seed positions for fits after the first fit.

    The (residual) signal is smoothed by cross correlation with a rect kernel
    and peaks of the smoothed signal are found with a prominence threshold.

    Args:
        y (array): signal (typically the absolute residual of a previous fit)
        noise (float): noise level of y
        rect_area (float): area of the smoothing rect (area = width * height)
        prominence_knockdown_factor (float): scales the find_peaks prominence
            relative to the max of the smoothed signal
    Returns:
        peaks (array): candidate positions for new horizontal offsets, mu
    """
    n = len(y)
    smoothing_rect = rect_generator(n, n / 25, rect_area)
    smoothed = signal.correlate(y, smoothing_rect)[n // 2:-n // 2]
    min_prominence = (np.max(smoothed) - noise * rect_area) * prominence_knockdown_factor
    peaks, _ = find_peaks(smoothed, prominence=min_prominence)
    return peaks
def subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg):
    """
    Build the seeds for fits after the first fit.

    The previously fitted offsets and amplitudes are reused as-is, and one new
    Gaussian is seeded at the location of the largest peak of the absolute
    residual between the data and the previous fit.

    Args:
        t (array): times corresponding to y
        y (array): burst
        y_fit (array): fitted burst
        popt (array): output of previous curve fit
        number_gaussians (int): number of gaussians in the previous fit
        noise_avg (float): average value of the noise
    Returns:
        new_translational_offset (array): initial guesses for the translational offsets
        new_amplitude (array): initial guesses for the amplitudes
    """
    residual = np.abs(y - y_fit)
    # seed the new Gaussian where the residual peaks highest
    candidate_peaks = seed_later_offsets_peaks(residual, noise_avg)
    worst = candidate_peaks[np.argmax(residual[candidate_peaks])]
    # reuse the previously fitted parameters as starting points
    prev_offsets = popt[number_gaussians:2 * number_gaussians]
    prev_amplitudes = popt[:number_gaussians]
    new_translational_offset = np.append(prev_offsets, t[worst])
    new_amplitude = np.append(prev_amplitudes, residual[worst])
    return new_translational_offset, new_amplitude
def fitting_function(selxn, t, y, noise_avg, noise_thresh, ang_vel, orbit_radius, x_offsets, y_offsets, dirctn, max_num_gaussians=8):
    """
    Performs multi Gaussian fits. Initializes first fit based on number of peaks in smoothed copy of burst. The residual of this fit is compared to the noise threshold. Until the absolute value of the residual is smaller than the noise threshold or until more than max_num_gaussians Gaussians are needed to parameterize the fit, subsequent fits place new Gaussians at locations which have large residuals. A great deal of care is taken in this function to standardize the weighting and initial conditions of the fits since the Gaussians inherently are not orthogonal. The goal is to produce fits with Gaussians which appear physical (aren't extremely tall and narrow or short and wide). The fits may not converge, or more gaussians than max_num_gaussians may be required to fit the function. In such cases, the fitting function passes the burst without returning a fit.
    Args:
        selxn (int): burst number being fitted
        t (array): times corresponding to y
        y (array): burst being fitted
        noise_avg (float): average value of the noise
        noise_thresh (float): average value of the noise + standard deviation of noise
        ang_vel (float): actual angular velocity of underlying simulation Omega
        orbit_radius (float): actual orbit radius R
        x_offsets (float): actual x component of distance between orbit center and light center, D
        y_offsets (float): actual y component of distance between orbit center and light center, D
        dirctn (int): +1 or -1 corresponding to direction of rotation
        max_num_gaussians (int): maximum number of gaussians used to fit the burst
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module. None if the fit fails or does not reach the noise threshold.
    """
    # for initial fit, use peak finding to determine the number,
    # location, and initial amplitudes of the Gaussians.
    initial_translational_offset, initial_amplitude = initial_seed(t, y, noise_avg)
    number_gaussians = len(initial_translational_offset)
    initial_number_gaussians = len(initial_translational_offset)
    if number_gaussians > max_num_gaussians:
        print("too many peaks were found initially: number_gaussians>max_number_gaussians.")
        return
    #calculate rect effective time to be used in the initial standard dev. condition.
    delta_t = t[1]-t[0]
    time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
    #print("rect effective time: ", time_eff)
    initial_stddev_denominator = 1#np.random.randint(40, 60, 1)
    # seed every Gaussian with the same width, derived from the effective time
    initial_stddev = [time_eff/9] * number_gaussians#[np.max(t)/initial_stddev_denominator] * number_gaussians
    initial_vertical_offset = noise_avg
    p0 = initial_guess(initial_amplitude,
                       initial_translational_offset,
                       initial_stddev,
                       initial_vertical_offset)
    #print("initial guesses: current time_eff-based stddev is ", initial_stddev[0], 'previous one was ', np.max(t)/50)
    # initialize curve fitting bounds
    amplitude_bounds = (0,np.max(y)-noise_avg*.25)
    translational_offset_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
    stddev_bounds = (np.max(t)/150,np.max(t)/2)
    vertical_offset_bounds = (2*noise_avg-noise_thresh,noise_thresh)# noise_thresh=mean+std, noise_avg=mean. so mean-std=2*noise_avg-noise_thresh
    bounds = bound_maker(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_gaussians)
    # make weights for fit
    sigma = make_weights(y)
    # limit the max number of function evaluations
    max_nfev = int(30*len(t))
    # try first fit
    try:
        popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
    except Exception as e:
        """plt.figure()
        plt.plot(t,y)"""
        print('p0:', p0)
        print('bounds', bounds)
        print('problem in first fit:', e)
        return
    ##### function will only reach this location if initial fit converged.
    # calculate residual
    y_fit = fit_wrapper(t,*popt)
    residual = y-y_fit
    """
    plt.figure()
    plt.plot(t, y, label="data")
    plt.plot(t, y_fit, label="1st fit")
    plt.plot(t, np.abs(residual), label="|residual|")
    plt.plot([0, np.max(t)], [noise_thresh, noise_thresh], 'k--', label="threshold")
    plt.plot([0, np.max(t)], [noise_avg, noise_avg], 'k--', label="mean noise")
    plt.legend()
    """
    """
    print(noise_thresh)
    print(np.any(np.abs(residual)>noise_thresh))
    print(number_gaussians<max_num_gaussians)
    """
    # compare residual to noise threshold to determine whether or not
    # another Gaussian should be added. Only add another Gaussian if
    # there are no more than max_num_gaussians Gaussians already.
    std_dev_residual_previous = np.std(y)
    std_dev_residual_new = np.std(residual)
    #print('std dev of residual is: ', std_dev_residual_new)
    # NOTE(review): `&` binds tighter than `|`, so this condition groups as
    # ((residual above threshold) AND (room for more Gaussians)) OR
    # (residual std still dropping) — confirm the OR branch is intended to
    # ignore the max_num_gaussians cap.
    while (np.any(np.abs(residual)>noise_thresh*1.1)) & (number_gaussians<max_num_gaussians) | (std_dev_residual_new<std_dev_residual_previous*.8):
        # try subsequent fit
        # add in another gausian
        new_translational_offset, new_amplitude = subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg)
        old_stddev = popt[number_gaussians*2:number_gaussians*3]
        initial_stddev = np.append(old_stddev, time_eff/8)
        initial_vertical_offset = popt[-1]
        p0 = initial_guess(new_amplitude,
                           new_translational_offset,
                           initial_stddev,
                           initial_vertical_offset)
        # initialize curve fitting bounds
        num_new_gaussians = 1
        bounds = bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg)
        # try curve fit again
        try:
            popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
        except: # if first fit fails to converge, end fitting
            print(selxn, "one of the subsequent fits failed to converge")
            return
        y_fit = fit_wrapper(t,*popt)
        residual = y-y_fit
        number_gaussians += 1
        """
        plt.plot(t, y_fit, label="new fit")
        plt.plot(t, np.abs(residual), label="|new residual|")
        plt.legend()
        """
        std_dev_residual_previous = std_dev_residual_new
        std_dev_residual_new = np.std(residual)
        #print('std dev of residual is: ', std_dev_residual_new)
    if (np.any(np.abs(residual)<noise_thresh*1.1) & (number_gaussians<=max_num_gaussians)):
        print(selxn, "WORKED")
        # package data for ML input.
        r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
        data = package_fit_data(r2, rmse, max_error, max_error_normalized,
                                time_eff, duration, popt, ang_vel,
                                orbit_radius, x_offsets, y_offsets, number_gaussians, y,
                                noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t)
        return data
    else:
        print(selxn, "max number of gaussians reached, but fit not within noise threshold")
        return
    # unreachable: both branches above return
    return
"""
**********************
Fitting functions for erf-rect-erfs (use these when features within the illuminating beam have top hat intensity profiles)
**********************
"""
def error_function(x, x0, w):
    """
    Rising error-function step: y = 0.5*(1 + erf(sqrt(2)*(x - x0)/w)).

    Args:
        x: independent variable values
        x0: horizontal offset of the step
        w: width of the step
    Returns:
        y: the evaluated error function (0 far below x0, 1 far above)
    """
    return 0.5 * (1 + erf(np.sqrt(2) * (x - x0) / w))
def error_function_complimentary(x, x0, w):
    """
    Falling (complementary) error-function step:
    y = 0.5*(1 - erf(sqrt(2)*(x - x0)/w)).

    Args:
        x: independent variable values
        x0: horizontal offset of the step
        w: width of the step
    Returns:
        y: the evaluated complementary error function (1 far below x0, 0 far above)
    """
    return 0.5 * (1 - erf(np.sqrt(2) * (x - x0) / w))
def fit_wrapper_erfrecterf(x, *args):
    """
    Unpack a flat curve_fit parameter vector into the erf-rect-erf model.

    The flat vector holds, for n features: n amplitudes, n rising-edge offsets,
    n rising-edge widths, n falling-edge offsets, n falling-edge widths, and a
    single trailing vertical offset. This allows a variable number of
    erf-rect-erf features to be fitted with curve_fit.

    Args:
        x (array): independent variable, such that y = f(x)
        args: flat parameter vector as described above
    Returns:
        the evaluated erf_rect_fit_function(x, ...)
    """
    n = (len(args) - 1) // 5  # number of erf-rect-erf features being fitted
    amplitudes = args[0:n]
    rise_offsets = args[n:2 * n]
    rise_widths = args[2 * n:3 * n]
    fall_offsets = args[3 * n:4 * n]
    fall_widths = args[4 * n:5 * n]
    baseline = args[-1]
    return erf_rect_fit_function(x, amplitudes, rise_offsets, rise_widths,
                                 fall_offsets, fall_widths, baseline)
def erf_rect_fit_function(x,a,mu0,sigma0,mu1,sigma1,vertical_offset):
    """
    Returns a function that is comprised of erf-rect-erf features. Each feature has a top-hat profile
    generated as the sum of an error function at one time and a complimentary error function at a later time.
    Args:
        x (array): independent variable, such that y=f(x).
        a (list): amplitudes of the erf-rect-erf features
        mu0 (list): translational offsets of the erfs (rising edges)
        sigma0 (list): widths of the erfs (rising edges)
        mu1 (list): translational offsets of the complimentary erfs (falling edges)
        sigma1 (list): widths of the complimentary erfs (falling edges)
        vertical_offset (float): vertical offset added to the whole model
    Returns:
        fit (array): the sum of the erf-rect-erf features plus the vertical offset
    """
    # initialize fit function & add the vertical offset to the fit function
    fit = np.zeros(len(x))+vertical_offset
    # iterate through each erf-rect-erf and add it to the fit function;
    # erf + erfc - 1 forms a top hat between offset0 and offset1
    for amp, offset0, std0, offset1, std1 in zip(a, mu0, sigma0, mu1, sigma1):
        fit += amp*(error_function(x, offset0, std0) + error_function_complimentary(x, offset1, std1) -1 )
    return fit
def initial_guess_erfrecterf(initial_amplitude,initial_translational_offset0,initial_stddev0,initial_translational_offset1,initial_stddev1,initial_vertical_offset):
    """
    Assemble the flat initial-guess vector for the erf-rect-erf curve fit.

    Args:
        initial_amplitude (array): initial amplitudes of the erf-rect-erf features
        initial_translational_offset0 (array): initial offsets of the erfs
        initial_stddev0 (array): initial widths of the erfs
        initial_translational_offset1 (array): initial offsets of the complimentary erfs
        initial_stddev1 (array): initial widths of the complimentary erfs
        initial_vertical_offset (float): initial vertical offset
    Returns:
        p0 (list): [amplitudes..., erf offsets..., erf widths...,
            complimentary-erf offsets..., complimentary-erf widths...,
            vertical offset] in the order fit_wrapper_erfrecterf expects.
    """
    p0 = list(initial_amplitude)
    p0 += list(initial_translational_offset0)
    p0 += list(initial_stddev0)
    p0 += list(initial_translational_offset1)
    p0 += list(initial_stddev1)
    p0.append(initial_vertical_offset)
    return p0
def bound_maker_erfrecterf(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_erfs):
    """
    Build the (lower, upper) bounds tuple for the erf-rect-erf curve fit.

    The layout matches the flat parameter vector: amplitudes, erf offsets, erf
    widths, complimentary-erf offsets, complimentary-erf widths, then the
    single vertical offset.

    Args:
        amplitude_bounds (tuple): (lo, hi) bounds on the amplitudes
        translational_offset_bounds (tuple): (lo, hi) bounds on the offsets
        stddev_bounds (tuple): (lo, hi) bounds on the widths
        vertical_offset_bounds (tuple): (lo, hi) bounds on the vertical offset
        number_erfs (int): number of erf-rect-erf features in the fit
    Returns:
        bounds (tuple): (lower, upper) lists for scipy.optimize.curve_fit
    """
    lower, upper = [], []
    for side, out in ((0, lower), (1, upper)):
        out += [amplitude_bounds[side]] * number_erfs
        out += [translational_offset_bounds[side]] * number_erfs
        out += [stddev_bounds[side]] * number_erfs
        out += [translational_offset_bounds[side]] * number_erfs
        out += [stddev_bounds[side]] * number_erfs
        out.append(vertical_offset_bounds[side])
    return (lower, upper)
def bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfs, amplitude_bounds, sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1, noise_avg=0.0):
    """
    Makes the bounds vector for erf-rect-erf fits after the first. Previously
    fitted features are only allowed to move slightly; new features get wide
    bounds (or bounds centered on their seed position when a single feature is
    being added).

    Bug fixes: the original body referenced the undefined names `num_erfs_old`
    and `noise_avg`, raising NameError on every call. `num_erfs_old` is now
    derived from the length of popt (5 parameters per feature + 1 vertical
    offset) and `noise_avg` is a new backward-compatible keyword argument.

    Args:
        t (array): time grid of burst
        y (array): burst
        y_fit (array): previous fit to the burst (unused; kept for interface parity)
        popt (array): results of the previous erf-rect-erf curve fit
        num_new_erfs (int): number of features to be added to the new fit
        amplitude_bounds (array): bounds on the amplitudes (unused; kept for interface parity)
        sigma0_bounds (array): bounds on the widths of the erfs
        sigma1_bounds (array): bounds on the widths of the complimentary erfs
        vertical_offset_bounds (array): bounds on the vertical offset
        new_mu0 (array): seed offsets including the new erf position (last entry)
        new_mu1 (array): seed offsets including the new complimentary erf position (last entry)
        noise_avg (float): average noise level, used to cap the new feature's amplitude
    Returns:
        bounds (tuple): (lower, upper) arrays for scipy.optimize.curve_fit
    """
    # popt layout: [a]*n + [mu0]*n + [sigma0]*n + [mu1]*n + [sigma1]*n + [vert_offset]
    num_erfs_old = (len(popt) - 1) // 5
    amplitudes = popt[0:num_erfs_old]
    mu0 = popt[num_erfs_old:2*num_erfs_old]
    sigma0 = popt[2*num_erfs_old:3*num_erfs_old]
    mu1 = popt[3*num_erfs_old:4*num_erfs_old]
    sigma1 = popt[4*num_erfs_old:5*num_erfs_old]
    vertical_offset = popt[-1]
    # previously fitted amplitudes may shrink 20% / grow 40%; new ones span [0, 1.2*(peak-noise)]
    lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [0]*num_new_erfs)
    upper_amp = np.append(amplitudes+np.abs(amplitudes)*.4, [1.2*(np.max(y)-noise_avg)]*num_new_erfs)
    # limit the movement of the previously fitted erf-rect-erfs.
    lower_mu0 = np.append(mu0*.8, [0]*num_new_erfs)
    upper_mu0 = np.append(mu0*1.2, [np.max(t)]*num_new_erfs)
    lower_mu1 = np.append(mu1*.8, [0]*num_new_erfs)
    upper_mu1 = np.append(mu1*1.2, [np.max(t)]*num_new_erfs)
    if num_new_erfs == 1:
        # a single new feature is confined near its seed position
        lower_mu0 = np.append(mu0*.8, [new_mu0[-1]*.5])
        upper_mu0 = np.append(mu0*1.2, [new_mu0[-1]*1.5])
        lower_mu1 = np.append(mu1*.8, [new_mu1[-1]*.5])
        upper_mu1 = np.append(mu1*1.2, [new_mu1[-1]*1.5])
    # clip offset bounds to the valid time window
    lower_mu0[lower_mu0<0] = 0
    lower_mu1[lower_mu1<0] = 0
    upper_mu0[upper_mu0>np.max(t)] = .9*np.max(t)
    upper_mu1[upper_mu1>np.max(t)] = .9*np.max(t)
    lower_sigma0 = np.append([sigma0_bounds[0]]*num_erfs_old, [sigma0_bounds[0]]*num_new_erfs)
    lower_sigma1 = np.append([sigma1_bounds[0]]*num_erfs_old, [sigma1_bounds[0]]*num_new_erfs)
    upper_sigma0 = np.append([sigma0_bounds[1]]*num_erfs_old, [sigma0_bounds[1]]*num_new_erfs)
    upper_sigma1 = np.append([sigma1_bounds[1]]*num_erfs_old, [sigma1_bounds[1]]*num_new_erfs)
    # make into array
    lower = np.concatenate((lower_amp, lower_mu0, lower_sigma0, lower_mu1, lower_sigma1, [vertical_offset_bounds[0]]))
    upper = np.concatenate((upper_amp, upper_mu0, upper_sigma0, upper_mu1, upper_sigma1, [vertical_offset_bounds[1]]))
    bounds = (lower, upper)
    return bounds
def find_edges(y, trigger_height):
    """
    Simple zero-crossing detector for the rising and falling edges of a signal.

    An edge is recorded at index i when the signal crosses trigger_height
    between samples i and i+1. If the signal is noisy near the threshold,
    multiple spurious edges may be reported; smooth the signal first or
    post-filter the results in that case.

    Args:
        y (array): signal of interest
        trigger_height (float): level at which an edge is detected
    Returns:
        rising (list): indices of rising-edge crossings
        falling (list): indices of falling-edge crossings
    """
    rising, falling = [], []
    for idx in range(len(y) - 1):
        left, right = y[idx], y[idx + 1]
        if left > trigger_height and right < trigger_height:
            falling.append(idx)
        if left < trigger_height and right > trigger_height:
            rising.append(idx)
    return rising, falling
def seed_initial_offsets_edges(y, noise_level):
    """
    Find the rising and falling edges of the signal at twice the mean noise level.

    Args:
        y (array): signal of interest
        noise_level (float): mean noise level; the edge threshold is 2x this value
    Returns:
        rising_edges (list): rising-edge indices at which to seed erf-rect-erfs
        falling_edges (list): falling-edge indices at which to seed erf-rect-erfs
    """
    return find_edges(y, noise_level * 2)
def seed_initial_offsets_edges_smoothed(y, noise):
    """
    Find rising/falling edges of a rect-smoothed copy of the signal.
    The edge threshold is 25% of the signal's maximum (the `noise` argument is
    currently unused by the body).
    Args:
        y (array): signal of interest
        noise (float): mean noise level of the signal (currently unused)
    Returns:
        rising_edges (list): rising edges at which to seed erf-rect-erfs
        falling_edges (list): falling edges at which to seed erf-rect-erfs
        xc_r (array): the rect-smoothed (cross-correlated) signal
    """
    # find the major peaks: threshold at a quarter of the signal maximum
    threshold = np.max(y)*.25
    # Find edges of smoothed signal
    area = 4000
    length = len(y)//50
    width = len(y)
    # NOTE(review): every other call site uses rect_generator(npoints, width, area)
    # with npoints = len(y) and width << npoints; here length and width look
    # swapped (npoints = len(y)//50, width = len(y)) — confirm intended.
    r = rect_generator(length,width,area)
    xc_r = signal.correlate(y,r)[length//2:-length//2]
    # rescale the smoothed signal to the original signal's height before edge finding
    normalized_xc_r = xc_r/np.max(xc_r)*np.max(y)
    rising_edges, falling_edges = find_edges(normalized_xc_r, threshold)
    """ plt.figure()
    plt.plot(y)
    plt.plot([0, len(y)], [threshold,threshold], 'm')
    print(rising_edges)"""
    return rising_edges, falling_edges, xc_r
def initial_seed_erfrecterf(t, y, noise):
    """
    Make seeds for the first erf-rect-erf fit.

    Edge positions come from seed_initial_offsets_edges_smoothed; each
    feature's amplitude is sampled at the midpoint between its rising and
    falling edges, with the noise level subtracted.

    Args:
        t (array): time corresponding to signal
        y (array): signal
        noise (float): noise level of y
    Returns:
        initial_mu0 (array): seed positions of the rising edges (erfs)
        initial_mu1 (array): seed positions of the falling edges (complimentary erfs)
        initial_amplitudes (array): seed amplitudes, one per feature
    """
    rising_edges, falling_edges, _smoothed = seed_initial_offsets_edges_smoothed(y, noise)
    initial_amplitudes = np.asarray(
        [y[int((fall - rise) // 2 + rise)] - noise
         for rise, fall in zip(rising_edges, falling_edges)])
    initial_mu0 = t[rising_edges]
    initial_mu1 = t[falling_edges]
    return initial_mu0, initial_mu1, initial_amplitudes
def seed_later_offsets_peaks_erfrecterf(y, noise_level):
    """
    Find the rising/falling edge pair of the erf-rect-erf feature with the
    largest mid-point value, for seeding fits after the first.

    Edges are found at the noise level; if no complete edge pair is found, the
    threshold is halved and the search is retried once.

    Args:
        y (array): signal of interest (typically an absolute residual)
        noise_level (float): mean noise level; sets the edge threshold
    Returns:
        rising_edge (int): rising edge of the feature with the largest residual
        falling_edge (int): falling edge of the same feature
        (None if no edge pair is found even at the reduced threshold)
    """
    threshold = noise_level
    rising_edges, falling_edges = find_edges(y, threshold)
    # find the location with the largest peak (sampled at each pair's midpoint)
    peak_val = []
    for r,f in zip(rising_edges, falling_edges):
        peak_val.append(np.abs(y[(f-r)//2+r]))
    if not peak_val: #if peak_val is empty
        threshold = noise_level*.5
        rising_edges, falling_edges = find_edges(y, threshold)
        for r,f in zip(rising_edges, falling_edges):
            # Bug fix: this loop referenced the undefined names
            # `falling_edge`/`rising_edge` (NameError); use the loop vars.
            peak_val.append(np.abs(y[(f-r)//2+r]))
    if not peak_val:
        return
    else:
        biggest_residual_location = np.argmax(peak_val)
        return rising_edges[biggest_residual_location], falling_edges[biggest_residual_location]
def subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfs, noise_threshold):
    """
    Make the seeds for erf-rect-erf fits after the first fit.

    The previously fitted parameters are reused as-is, and one new feature is
    seeded at the edge pair of the largest residual between data and fit.

    Args:
        t (array): times corresponding to y
        y (array): burst
        y_fit (array): fitted burst
        popt (array): output of previous curve fit
        number_erfs (int): number of erf-rect-erf features in the previous fit
        noise_threshold (float): threshold used to locate and scale the new seed
    Returns:
        new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 (arrays): previous
        parameters with one new feature appended; None if seeding fails.
    """
    residual = np.abs(y-y_fit)
    # Fix: unconditional debug plotting removed from the production path
    # (sibling seeding functions keep their debug plots commented out).
    #plt.figure()
    #plt.plot(residual)
    #plt.plot(y)
    #plt.plot(y_fit)
    # find spot with largest residual; record its amplitude
    try:
        rising_edge, falling_edge = seed_later_offsets_peaks_erfrecterf(residual, noise_threshold)
        print(rising_edge, falling_edge)
        mu0_new = t[rising_edge]
        mu1_new = t[falling_edge]
        print('falling edge is ',falling_edge)
        # amplitude sampled at the feature midpoint, noise subtracted
        a_new = y[(falling_edge-rising_edge)//2+rising_edge]-noise_threshold
        sigma0_new = 5
        sigma1_new = 5
        # use the previously fitted parameters as initial conditions
        fitted_a = popt[:number_erfs]
        new_a = np.append(fitted_a, a_new)
        fitted_mu0 = popt[number_erfs:2*number_erfs]
        new_mu0 = np.append(fitted_mu0, mu0_new)
        fitted_sigma0 = popt[2*number_erfs:3*number_erfs]
        new_sigma0 = np.append(fitted_sigma0, sigma0_new)
        fitted_mu1 = popt[3*number_erfs:4*number_erfs]
        new_mu1 = np.append(fitted_mu1, mu1_new)
        fitted_sigma1 = popt[4*number_erfs:5*number_erfs]
        new_sigma1 = np.append(fitted_sigma1, sigma1_new)
    except Exception as e:
        # best-effort: seed_later_offsets_peaks_erfrecterf may return None
        # (unpacking fails); log and let the caller stop adding features
        print("Exception in subsequent_seeding", e)
        return
    return new_a, new_mu0, new_sigma0, new_mu1, new_sigma1
def package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                                time_eff, duration, popt, fr,
                                number_erfrecterfs,
                                y, noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs):
    """
    Package the results of an erf-rect-erf fit into a flat array for pickling.

    Fitted features are sorted by their rising-edge offset mu0 and translated
    so the first feature sits at offset zero; parameter groups are zero-padded
    out to max_num_erfrecterfs so every saved record has the same length.

    Args:
        r2 (float): percentage of variability of the burst that's been accounted for in the fit (how well the regression predicts the data)
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute value of difference between y and y_fit
        max_error_normalized (float): max_error/max(y)
        time_eff (float): rect effective time of signal; time of a rectangle with same max height as the signal
        duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
        popt (array): output of curve fit, laid out as [a..., mu0..., sigma0..., mu1..., sigma1..., vert_offset]
        fr (float): flow rate
        number_erfrecterfs (int): number of erf-rect-erf features used to parameterize burst
        y (array): burst
        noise_avg (float): average noise value
        max_num_erfrecterfs (int): maximum number of features allowed in the fit (sets padded record width)
        initial_number_erfrecterfs (int): number of features used in the initial fit

    Returns:
        data (array): flat record of metrics and sorted/padded fit parameters
            for the next module, or None if packaging raised an exception.

    NOTE(review): this function reads the module/script-level name ``t`` for
    the time step ``t[1]-t[0]`` — ``t`` is not a parameter here. If no global
    ``t`` exists, the NameError is swallowed by the broad except below and the
    function silently returns None. Consider passing t (or delta_t) explicitly.
    """
    try:
        # fit parameters, sliced out of the flat popt vector
        a = popt[0:number_erfrecterfs] # amplitude
        mu0 = popt[number_erfrecterfs:2*number_erfrecterfs] # offset
        sigma0 = popt[2*number_erfrecterfs:3*number_erfrecterfs] # width
        mu1 = popt[3*number_erfrecterfs:4*number_erfrecterfs] # offset
        sigma1 = popt[4*number_erfrecterfs:5*number_erfrecterfs] # width
        # to save, we want distances relative to location of first erfrecterf,
        # so sort the features by rising-edge position
        sorted_indices = np.argsort(mu0)
        #print('length of mu:', len(mu), 'length of popt:', len(popt), 'number of gaussians', number_gaussians,'length of h:', len(h))
        # zero-padded output groups so every record has max_num_erfrecterfs slots
        a_save, mu0_save, sigma0_save, mu1_save, sigma1_save = np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs)
        a_save[:number_erfrecterfs] = a[sorted_indices]
        mu0_save[:number_erfrecterfs] = mu0[sorted_indices]-mu0[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma0_save[:number_erfrecterfs] = sigma0[sorted_indices]
        mu1_save[:number_erfrecterfs] = mu1[sorted_indices]-mu0[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma1_save[:number_erfrecterfs] = sigma1[sorted_indices]
        vert_offset_save = popt[-1]
        # signal-to-noise metrics relative to the mean noise level
        max_SNR = np.max(y)/noise_avg
        avg_SNR = np.mean(y)/noise_avg
        data = np.concatenate([[fr],[rmse],
                               [r2],[max_error],[max_error_normalized],[time_eff],[duration],
                               # NOTE(review): t[1]-t[0] relies on a global t (see docstring)
                               a_save,mu0_save,sigma0_save,mu1_save, sigma1_save,[vert_offset_save],[t[1]-t[0]],
                               [max_SNR], [avg_SNR], [int(initial_number_erfrecterfs)]])
        return data
    except Exception as e:
        print('Exception:', e)
        print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
        return
def fitting_function_erfrecterf(selxn, t, y, noise_avg, noise_thresh, fr, max_num_erfrecterfs=4):
    """
    Performs erf-rect-erf fits. Initializes first fit based on number of edges in smoothed copy of burst. The residual of this fit is compared to the noise threshold. Until the absolute value of the residual is smaller than the noise threshold or until more than max_num_erfrecterfs features are needed to parameterize the fit, subsequent fits place new erf-rect-erf features at locations which have large residuals. A great deal of care is taken in this function to standardize the weighting and initial conditions of the fits since the erf-rect-erf features inherently are not orthogonal. The goal is to produce fits with features which appear physical (aren't extremely tall and narrow or short and wide). The fits may not converge, or more features than max_num_erfrecterfs may be required to fit the function. In such cases, the fitting function returns None without a fit.

    Args:
        selxn (int): burst number being fitted (used only in diagnostic prints)
        t (array): times corresponding to y
        y (array): burst being fitted
        noise_avg (float): average value of the noise
        noise_thresh (float): average value of the noise + standard deviation of noise
        fr (float): actual flow rate underlying simulation
        max_num_erfrecterfs (int): maximum number of erf-rect-erf features used to fit the burst

    Returns:
        data (array): packaged metrics and fit parameters for the next module
            (see package_fit_data_erfrecterf), or None on any failure path
            (too little signal, too many initial peaks, non-convergence, or
            fit never reaching the residual threshold).
    """
    # check that there are enough points above the noise threshold to actually do a fit
    # NOTE(review): the condition tests mean + 3*(thresh-mean) but the message
    # says "2x the noise threshold" — message and test disagree; confirm intent.
    if np.shape(np.argwhere(y>noise_avg + 3*(noise_thresh-noise_avg)))[0]<12:
        print("not enough of the burst has an intensity greater than 2x the noise threshold ")
        return
    # for initial fit, use peak finding to determine the number,
    # location, and initaial amplitudes of the erf-rect-erf features.
    initial_mu0, initial_mu1, initial_amplitude = initial_seed_erfrecterf(t, y, noise_thresh)
    number_erfrecterfs = len(initial_mu0)
    if number_erfrecterfs > max_num_erfrecterfs:
        print("too many peaks were found initially: number_erfrecterfs > max_num_erfrecterfs.")
        return
    #calculate rect effective time to be used in the initial standard dev. condition.
    delta_t = t[1]-t[0]
    time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
    #print("rect effective time: ", time_eff)
    # seed the edge widths at a fifth of the effective burst length
    initial_sigma0 = [time_eff/5]*number_erfrecterfs
    initial_sigma1 = [time_eff/5]*number_erfrecterfs
    # initialize vertical offset from the mean of the first and last fifths of the burst
    initial_vertical_offset = noise_avg + np.mean( [np.mean(y[:len(y)//5]), np.mean(y[4*len(y)//5:])] )
    p0 = initial_guess(initial_amplitude,
                       initial_mu0,
                       initial_sigma0,
                       initial_mu1,
                       initial_sigma1,
                       initial_vertical_offset)
    # initialize curve fitting bounds
    amplitude_bounds = (noise_avg,np.max(y)-noise_avg*.25)
    mu_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
    sigma_bounds = (np.max(t)/150,np.max(t)/2)
    # vertical offset bounded by the extremes of the burst's leading/trailing fifths
    vertical_offset_bounds = (.95*np.min( [np.min(y[:len(y)//5]), np.min(y[4*len(y)//5:])]), noise_avg+1.25*np.max( [np.max(y[:len(y)//5]), np.max(y[4*len(y)//5:])]) )
    bounds = bound_maker_erfrecterf(amplitude_bounds,mu_bounds,sigma_bounds,vertical_offset_bounds,number_erfrecterfs)
    initial_number_erfrecterfs = len(initial_sigma0)
    # make weights for fit (biases large-magnitude regions, de-emphasizes tails)
    sigma = make_weights(y, g_length=50)
    # limit the max number of function evaluations
    max_nfev = int(30*len(t))
    # try first fit
    try:
        popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
    except Exception as e:
        print('p0:', p0)
        print('bounds', bounds)
        print('problem in first fit:', e)
        return
    ##### function will only reach this location if initial fit converged.
    # calculate residual
    y_fit = fit_wrapper_erfrecterf(t,*popt)
    residual = y-y_fit
    """plt.figure()
    plt.plot(t, y, label="data")
    plt.plot(t, y_fit, label="1st fit")
    plt.plot(t, np.abs(residual)/sigma**2, label="|residual|/sigma**2")
    #plt.plot([0, np.max(t)], [noise_thresh, noise_thresh], 'k--', label="threshold")
    plt.plot([0, np.max(t)], [750, 750], 'k--', label="threshold")
    #plt.plot([0, np.max(t)], [noise_avg, noise_avg], 'k--', label="mean noise")
    plt.legend()"""
    """
    print(noise_thresh)
    print(np.any(np.abs(residual)>noise_thresh))
    print(number_gaussians<max_num_gaussians)
    """
    # compare residual to noise threshold to determine whether or not
    # another feature should be added. Only add another feature if
    # there are fewer than max_num_erfrecterfs features already.
    # std_dev_residual_previous starts effectively infinite so the first
    # loop iteration is never blocked by the improvement criterion.
    std_dev_residual_previous = 9999999#noise_thresh-noise_avg#np.std(y)
    std_dev_residual_new = np.std(residual)
    fitnum = 1
    # threshold applied to the weight-scaled residual |r|/sigma**2
    noisethresh_to_use = .05
    # refit while: scaled residual still exceeds threshold somewhere, there is
    # room for another feature, and the residual std dev shrank by >20% last time
    while (np.any(np.abs(residual)/sigma**2>noisethresh_to_use)) & (number_erfrecterfs<max_num_erfrecterfs) & (std_dev_residual_new<std_dev_residual_previous*.8):
        # diagnostic plot of the insufficient fit
        plt.figure()
        plt.plot(y, label='y')
        plt.plot(y_fit, label='fitted')
        plt.plot((y-y_fit)/sigma**2, label='scaled residual')
        plt.plot([0,len(y_fit)], [noisethresh_to_use, noisethresh_to_use], label='threshold')
        plt.legend()
        print('initial fit insufficient')
        # try subsequent fit
        # add in another erf-rect-erf feature
        fitnum += 1
        print('fit number', fitnum)
        try:
            # seed the next fit from the residual of the current one
            new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 = subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfrecterfs, noise_thresh)
            initial_vertical_offset = popt[-1]
            p0 = initial_guess(new_a,
                               new_mu0,
                               new_sigma0,
                               new_mu1,
                               new_sigma1,
                               initial_vertical_offset)
            sigma0_bounds = (np.max(t)/150,np.max(t)/2)
            sigma1_bounds = (np.max(t)/150,np.max(t)/2)
            # initialize curve fitting bounds
            num_new_erfrecterfs = 1
            bounds = bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfrecterfs, amplitude_bounds,
                                                       sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1)
            # try curve fit again
        except Exception as e:
            # seeding failed (e.g. subsequent_seeding_erfrecterf returned None)
            print(e)
            print("$$$$$$$$$$$$$")
            return
        try:
            popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
        except: # if first fit fails to converge, end fitting
            print(selxn, "one of the subsequent fits failed to converge")
            return
        y_fit = fit_wrapper_erfrecterf(t,*popt)
        residual = y-y_fit
        number_erfrecterfs += 1
        print('num erfs',number_erfrecterfs)
        std_dev_residual_previous = std_dev_residual_new
        std_dev_residual_new = np.std(residual)
        #print('std dev of residual is: ', std_dev_residual_new)
    # NOTE(review): np.any(... < thresh) is satisfied if ANY point is below
    # threshold, which is almost always true; the else-branch message ("fit not
    # within noise threshold") suggests np.all (all residuals below threshold)
    # was intended — confirm before changing acceptance behavior.
    if (np.any(np.abs(residual/sigma**2)<noisethresh_to_use) & (number_erfrecterfs<=max_num_erfrecterfs)):
        print(selxn, "WORKED")
        # package data for ML input.
        r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
        data = package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                                           time_eff, duration, popt, fr,
                                           number_erfrecterfs, y,
                                           noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs)
        """ plt.figure()
        plt.plot(t, y, label='signal')
        plt.plot(t, y_fit, label="fit")
        #plt.plot(t, np.abs(residual), label="|new residual|")
        plt.legend()"""
        #print(number_erfrecterfs)
        return data
    else:
        print(selxn, "max number of erfrecterfs reached, but fit not within noise threshold")
        return
    # NOTE(review): unreachable — both branches above return.
    return
| 44.602963 | 881 | 0.677949 | import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.signal import find_peaks
from scipy.stats import pearsonr as pearsonr
from scipy.special import erf as erf
"""Fitting functions for multi-Gaussian fitting.
"""
def fit_wrapper(x,*args):
"""
This wrapper sets up the variables for the fit function.
It allows for a variable numbers of Gaussians to be fitted.
Calls multi_gaussian_fit_function
Args:
x (array): x is independent variable x, such that y=f(x).
args: variable length argument list. args[0:n_gauss] are the amplitudes of the gaussians to be fitted. args[n_gauss:2*n_gauss] are the horizontal offsets of the gaussians to be fitted. args[2*n_gauss:3*n_gauss] are the standard deviations of the gaussians to be fitted. args[-1] is the vertical offset parameter
Returns
multi_gaussian_fit_function(x,h,mu,sigma,vertical_offset)
"""
n_gauss = (len(args)-1)//3 # number of gaussians that we're fitting
h = args[0:n_gauss]
mu = args[n_gauss:2*n_gauss]
sigma = args[2*n_gauss:3*n_gauss]
vertical_offset = args[-1]
return multi_gaussian_fit_function(x,h,mu,sigma,vertical_offset)
def multi_gaussian_fit_function(x,h,mu,sigma,vertical_offset):
"""
Returns a function that is comprised of an offset h and the
sum of gaussians with variable amplitudes, offsets, and standard
deviations (widths)
Args:
x (array): independent variable, such that y=f(x).
h (list): initial guesses for the amplitudes of the gaussians
mu (list): initial guesses for the translational offsets of the gaussians
sigma (list): initial guesses for standard deviations of gaussians
vertical_offset (list): initial guess for vertical offset h
Returns:
fit (array): a function which consists of the sum of multiple gaussians and a vertical offset
"""
# fit function starts with vertical offset
fit = np.zeros(len(x)) + vertical_offset
# iterate through each amplitude/translational offset/standard deviation set & add them to the fit function
for amp,offset,std in zip(h,mu,sigma):
fit += amp*np.exp( -(x-offset)**2 / (2*std**2) )
return fit
def initial_guess(initial_amplitude,initial_translational_offset,initial_stddev,initial_vertical_offset):
"""
Create array with amplitude, phase and initial offset to be used in the curve fit
Args:
initial_amplitude (array): guess for the initial values of the amplitudes of the gaussians
initial_translational_offset (array): guess for the initial values of the translational offsets of the gaussians
initial_stddev (array): guess for the initial values of the standard deviations of the gaussians
initial_vertical_offset (float): guess for the initial values of the vertical offset
Returns:
p0 (array): lists the initial_amplitude, initial_translational_offset, initial_stddev, initial_vertical_offset in the correct format for the curve fit.
"""
#p0=[]
#for a,mu,stddev in zip(initial_amplitude,initial_translational_offset,initial_stddev):
# p0.append([a,mu,stddev])
#p0.append(initial_vertical_offset)
p0 = [i for i in initial_amplitude]\
+ [i for i in initial_translational_offset]\
+ [i for i in initial_stddev]\
+ [initial_vertical_offset]
return p0
def bound_maker(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_gaussians):
"""
Create tuple with lower and upper bounds to be used in the curve fit
Args:
amplitude_bounds (tuple): bounds on the amplitudes of the gaussians
translational_offset_bounds (tuple): bounds on the translational offsets of the gaussians
stddev_bounds (tuple): bounds on the standard deviations of the gaussians
vertical_offset_bounds (tuple): bounds on the vertical offset of the gaussians
number_gaussians (int): the number of gaussians in the fit
Returns:
bounds (tuple): lists the bounds on the parameters used in the multigaussian fits
"""
lower = [amplitude_bounds[0]]*number_gaussians + [translational_offset_bounds[0]]*number_gaussians + [stddev_bounds[0]]*number_gaussians + [vertical_offset_bounds[0]]
upper = [amplitude_bounds[1]]*number_gaussians + [translational_offset_bounds[1]]*number_gaussians + [stddev_bounds[1]]*number_gaussians + [vertical_offset_bounds[1]]
bounds = (lower, upper)
return bounds
def bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg):
"""
Makes the bounds vector for fits after the first. Takes into account the previous fitted values
Args:
t (array): time grid of burst
y (array): burst
y_fit (array): previous fit to the burst
popt (arrapy): the results from the multi gaussian curve fit of the previous fit
num_new_gaussians (int): the number of gaussians to be added to the new fit
amplitude_bounds (array): bounds on the amplitudes of the gaussians
stddev_bounds (array): bounds on the standard deviations of the gaussians
vertical_offset_bounds (array): bounds on the vertical offset
new_translational_offset (tuple):
Returns:
bounds (tuple): lists the bounds on the parameters used in the multigaussian fits
"""
num_gaussians_old = int((len(popt)-1)/3)
amplitudes = popt[:num_gaussians_old]
translational_offsets = popt[num_gaussians_old:2*num_gaussians_old]
widths = popt[2*num_gaussians_old:3*num_gaussians_old]
vert_offset = popt[-1]
lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [-.75*(np.max(y)-noise_avg)]*num_new_gaussians)
upper_amp = np.append(amplitudes+np.abs(amplitudes)*.2, [1.2*(np.max(y)-noise_avg)]*num_new_gaussians)
# limit the movement of the previously fitted gaussians.
lower_translational = np.append(translational_offsets*.8, [0]*num_new_gaussians)
upper_translational = np.append(translational_offsets*1.2, [np.max(t)]*num_new_gaussians)
if num_new_gaussians == 1:
lower_translational = np.append(translational_offsets*.8, [new_translational_offset[-1]*.5])
upper_translational = np.append(translational_offsets*1.2, [new_translational_offset[-1]*1.5])
lower_translational[lower_translational<0] = 0
upper_translational[upper_translational>np.max(t)] = .9*np.max(t)
lower_stddev = np.append([stddev_bounds[0]]*num_gaussians_old, [stddev_bounds[0]]*num_new_gaussians)
upper_stddev = np.append([stddev_bounds[1]]*num_gaussians_old, [stddev_bounds[1]]*num_new_gaussians)
# make into array
lower = np.concatenate((lower_amp, lower_translational, lower_stddev, [vertical_offset_bounds[0]]))
upper = np.concatenate((upper_amp, upper_translational, upper_stddev, [vertical_offset_bounds[1]]))
bounds = (lower, upper)
return bounds
def calculate_r2(y, y_fit):
"""
Calculates r2, the percentage of variability of the dependent variable that's
been accounted for. (how well the regression predicts the data)
Args:
y (array): data
yfit (array): is the fit to the data, evaluated using the same time axis as y
Returns:
r2 (float): characterizes how well y_fit predicts y
"""
#ss_res = np.sum((y-y_fit)**2) #residual sum of squares
#ss_tot = np.sum((y-np.mean(y))**2) #total sum of squares
#r2 = 1-(ss_res/ss_tot) #r squared
r2 = pearsonr(y, y_fit)[0]
return r2
def calculate_rmse(targets, predictions):
"""
Calculates root mean square error (RMSE) between targets and predictions
Args:
targets (array): actual values
predictions (array): predicted values
Returns:
rmse (float): root mean square error
"""
n = len(predictions)
return np.linalg.norm(predictions - targets) / np.sqrt(n)
def calculate_max_error(targets, predictions):
"""
Returns maximum absolute value of difference between target and predictions.
Args:
targets (array): actual values
predictions (array): predicted values
Returns:
rmse (float): root mean square error
"""
return np.max(np.abs(targets-predictions))
def gaussian_generator(npoints,std):
"""
Make a gaussian f npoints long with standard deviation std
Args:
npoints (int): length of Gaussian
std (float): standard deviation of Gaussian
Returns:
g (array): Gaussian
"""
g = signal.gaussian(npoints,std=std)
return g
def rect_generator(npoints,width,area):
"""
Make rect for correlation that has a height such that the height*width=area.
Args:
npoints (int): length of Gaussian
width (float): width of rect
area (float): area of rect. Dictates rect height via height = area/width
Returns:
r (array): rect function
"""
r = np.zeros(npoints)
r[int(np.floor(npoints/2-width/2+1)):int(np.ceil(npoints/2+width/2))] = area/(np.floor(width/2)*2) # do this flooring thing because the width gets rounded and we want the area constant always.
return r
def seed_initial_offsets_peaks(y, noise, rect_area=500, prominence_knockdown_factor=0.03):
"""
Generate the locations of the seeds for the initial fit. Place a seed at each of the peaks.
Determine peak location from smoothed version of signal. Smooth signal by cross correlating it with rect.
Args:
y (array): signal
noise (float): noise level of y
rect_area (float): area of rect, where area=width * height
prominence_knockdown_factor (float): used to set the prominence for find_peaks as a function of the max height of xc_r
Returns:
peaks (array): list of the initial peaks
"""
max_snr = np.max(y/noise)
if max_snr>10:
pass
elif max_snr<5:
prominence_knockdown_factor = .09
else:
prominence_knockdown_factor = .06
length = len(y)
r = rect_generator(length,length/35, rect_area)
xc_r = signal.correlate(y,r)[length//2:-length//2] # cross correlation of signal and rect
peaks, _ = find_peaks(xc_r, prominence=(np.max(xc_r)-noise*rect_area)*prominence_knockdown_factor)
#plt.figure()
#plt.plot(y, label='y')
#plt.plot(xc_r/100, label='resid')
#plt.plot(peaks, xc_r[15]/100*np.ones(len(peaks)), 'kx')
#print(peaks)
#plt.legend()
return peaks
def initial_seed(t, y, noise, max_num_gaussians=8, rect_area=500):
"""
Makes seeds for the first fit.
Calls seed_initial_offsets_peaks
Args:
t (array): time corresponding to signal
y (array): signal
noise (float): noise level of y
max_num_gaussians (int): the maximum number of initial seeds
rect_area (float): area of rect, where area=width * height
prominence_knockdown_factor (float): used to set the prominence for find_peaks as a function of the max height of xc_r
Returns:
initial_translational_offset (array): a list of the initial conditions for the horizontal offsets, mu
initial_amplitude (array): a list of the initial conditions for the amplitudes, A
"""
peak_locns = seed_initial_offsets_peaks(y, noise, rect_area=rect_area) # use as initial mus
peak_values = y[peak_locns] # use as initial amplitudes
#plt.figure()
#plt.plot(y)
#plt.plot(peak_locns, peak_values, 'x')
if len(peak_values)>max_num_gaussians:
sorted_values = np.argsort(peak_values)[:max_num_gaussians]
peak_values = peak_values[sorted_values]
peak_locns = peak_locns[sorted_values]
initial_translational_offset = t[peak_locns]
initial_amplitude = peak_values-noise
#because we subtract the noise from the initial amplitudes, some might be negative. get rid of those.
positive_value_locations = np.argwhere(initial_amplitude>0)
initial_amplitude = initial_amplitude[positive_value_locations].flatten()
initial_translational_offset = initial_translational_offset[positive_value_locations].flatten()
return initial_translational_offset, initial_amplitude
def calculate_effective_length(model, fitted_vert_offset, delta_t, max_normalized_height=1):
"""
Effective length is area divided by max height.
Here, this is the length of a rectangle with same max height as the signal
Args:
model (array): signal
fitted_vert_offset (float): h in the multigaussian fitting equation
delta_t (float): time discretization
max_normalized_height (float): maximum height of the signal
Returns:
effective_length (float): effective length
"""
area = np.sum(model-fitted_vert_offset)
effective_length = area/(np.max(model)-fitted_vert_offset)*max_normalized_height*delta_t
return effective_length
def calculate_burst_duration(y_fit, fitted_vert_offset, delta_t, lower_thresh=0.1, upper_thresh=0.9):
"""
calculate the duration of the burst between the lower and upper
thresholds of the cumulative sum of the signal
Args:
y_fit (array): values of fitted burst
fitted_vert_offset (float): h in the multigaussian fitting equation
delta_t (float): time discretization
lower_thresh (float): lower fraction of signal to include in calculation
upper_thresh (float): upper fraction of signal to include in calculation
Returns:
duration (float): time of signal between indices set by lower_thresh and upper_thresh operating on the integrated area of the signal
"""
try:
cs = np.cumsum(y_fit-fitted_vert_offset)
csm = np.max(cs)
lower_index = np.argwhere(cs>(lower_thresh*csm))[0]
upper_index = np.argwhere(cs<(upper_thresh*csm))[-1]
duration = (upper_index-lower_index) * delta_t
except:
print("problem calculating the duration")
duration = [0]
return duration[0]
def make_weights(y, g_length=100):
"""
Makes the weighting function for the curve fitting operation.
Weights the signal to bias its larger magnitude components and to diminish the effect of the small components (i.e. the tails)
Generates the weights from a smoothed copy of the burst, where this smoothed copy is made by cross correlating the signal with a gaussian.
Args:
y (array): signal
g_length (int): length of Gaussian
Returns:
sigma (array): weights for the curve fitting scheme
"""
# make weights
length = len(y)
g = gaussian_generator(length,g_length)
xc_g = signal.correlate(y,g)[int(np.ceil(length/2-1)):-int(np.floor(length/2))]#[int(np.ceil(length/2)):-int(np.ceil(length/2))]
weight = xc_g/np.max(xc_g)
sigma = 1/np.sqrt(weight)
return sigma
def eval_fit(y, y_fit, t, popt, delta_t):
"""
Calculates metrics which characterize the efficacy of the fit.
Args:
y (array): signal
y_fit (array): fitted version of the signal
t (array): times corresponding to y and y_fit
popt (array): results of curve fit
delta_t float): time discretization
Returns:
r2 (float): percentage of variability of the burst that's been accounted for in the fit. (how well the regression predicts the data)
rmse (float): root mean square error between fit and signal
max_error (float): maximum absolute value of difference between y and y_fit
max_error_normalized (float): max_error/max(y)
duration (float): time of signal between indices set by lower_thresh and upper_thresh operating on the integrated area of the signal
"""
fitted_vert_offset = popt[-1]
#delta_t = t[1]-t[0]
# calculate r^2
r2 = calculate_r2(y,y_fit)
# calculate rmse
rmse = calculate_rmse(y,y_fit)
# calculate max error
max_error = calculate_max_error(y,y_fit)
max_error_normalized = max_error/np.max(y)
# calculate duration of burst in the middle 80% of it
duration = calculate_burst_duration(y_fit, fitted_vert_offset, delta_t)
return r2, rmse, max_error, max_error_normalized, duration
def package_fit_data(r2, rmse, max_error, max_error_normalized,
time_eff, duration, popt, ang_vel,
orbit_radius, x_offsets, y_offsets, number_gaussians,
y, noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t):
"""
Save data from fits for use in later modules.
Args:
r2 (float): percentage of variability of the burst that's been accounted for in the fit. (how well the regression predicts the data)
rmse (float): root mean square error between fit and signal
max_error (float): maximum absolute value of difference between y and y_fit
max_error_normalized (float): max_error/max(y)
time_eff (float): rect effective time of signal. time of a rectangle with same max height as the signal
duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
popt (array): output of curve fit
ang_vel (float): actual angular velocity Omega
orbit_radius (float): actual orbit radius R
x_offsets (float): actual x component of distance between orbit center and light center, D
y_offsets (float): actual y component of distance between orbit center and light center, D
number_gaussians (int): number of gaussians used to parameterize burst
y (array): burst
noise_avg (float): average noise value
max_num_gaussians (int): maximum number of gaussians to be included in the fit
dirctn (int): +1 or -1, clockwise or counter clockwise
initial_number_gaussians (int): number of Gaussians used in initial fit
t (array): time corresponding to y
Returns:
data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
"""
try:
# fit parameters
h = popt[0:number_gaussians] # amplitude
mu = popt[number_gaussians:2*number_gaussians] # offset
sigma = popt[2*number_gaussians:3*number_gaussians] # width
# to save, we want distances relative to location of first gaussian
sorted_indices = np.argsort(mu)
#print('length of mu:', len(mu), 'length of popt:', len(popt), 'number of gaussians', number_gaussians,'length of h:', len(h))
h_save, mu_save, sigma_save = np.zeros(max_num_gaussians), np.zeros(max_num_gaussians), np.zeros(max_num_gaussians)
h_save[:number_gaussians] = h[sorted_indices]
mu_save[:number_gaussians] = mu[sorted_indices]-mu[sorted_indices[0]] # subtract smalles from all to get relative offsets
sigma_save[:number_gaussians] = sigma[sorted_indices]
vert_offset_save = popt[-1]
D = np.sqrt(x_offsets**2+y_offsets**2)
theta = np.arctan2(y_offsets, x_offsets)
max_SNR = np.max(y)/noise_avg
avg_SNR = np.mean(y)/noise_avg
if dirctn == 1:
clockwise = [0]
counterclockwise = [1]
else:
clockwise = [1]
counterclockwise = [0]
data = np.concatenate([[ang_vel],[rmse],
[r2],[max_error],[max_error_normalized],[time_eff],[duration],
h_save,mu_save,sigma_save,[vert_offset_save],[t[1]-t[0]],[orbit_radius], [x_offsets],
[y_offsets], [D], [theta], [max_SNR], [avg_SNR], clockwise, counterclockwise, [int(initial_number_gaussians)]])
return data
except Exception as excptn:
print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
print(excptn)
return
def seed_later_offsets_peaks(y, noise, rect_area=100, prominence_knockdown_factor=0.03):
"""
Makes seeds for fits following the first fit.
Args:
y (array): signal
noise (float): noise level of y
rect_area (float): area of rect, where area=width * height
prominence_knockdown_factor (float): used to set the prominence for find_peaks as a function of the max height of xc_r
Returns:
peaks (array): a list of positions with which to seed the horizontal offsets, mu
"""
length = len(y)
r = rect_generator(length,length/25, rect_area)
xc_r = signal.correlate(y,r)[length//2:-length//2]
peaks, _ = find_peaks(xc_r, prominence=(np.max(xc_r)-noise*rect_area)*prominence_knockdown_factor)
"""
plt.figure()
plt.plot(y, label='y')
plt.plot(xc_r/100, label='resid')
plt.legend()
"""
return peaks
def subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg):
"""
Make the seeds for fits after the first fit.
Args:
t (array): times corresponding to y
y (array): burst
y_fit (array): fitted burst
popt (arary): output of previous curve fit
number_gaussians (array): number of gaussians in the fit
noise_avg (float): average value of the noise
Returns:
new_translational_offset (array): initial guesses for the translational offsets of the gaussians
new_amplitude (array): initial guesses for the amplitudes of the gaussians
"""
residual = np.abs(y-y_fit)
# find spot with largest residual; record its amplitude
peaks = seed_later_offsets_peaks(residual, noise_avg)
peak_to_use = peaks[np.argmax(residual[peaks])]
"""
plt.figure(78)
plt.plot(y, label='data')
plt.plot(y_fit, label='fitted')
plt.plot(residual, label="|residual|")
plt.plot(peak_to_use, residual[peak_to_use], 'x')
"""
new_gaussian_translational_offset = t[peak_to_use]
new_gaussian_amplitude = residual[peak_to_use]
#if new_gaussian_amplitude < 30:
# new_gaussian_amplitude = 100
#new_translational_offset = np.append(initial_translational_offset, new_gaussian_translational_offset)
# use the previously fitted peaks as initial conditions
fitted_translational_offsets = popt[number_gaussians:number_gaussians*2]
new_translational_offset = np.append(fitted_translational_offsets, new_gaussian_translational_offset)
fitted_amplitudes = popt[:number_gaussians]
new_amplitude = np.append(fitted_amplitudes, new_gaussian_amplitude)
return new_translational_offset, new_amplitude
def fitting_function(selxn, t, y, noise_avg, noise_thresh, ang_vel, orbit_radius, x_offsets, y_offsets, dirctn, max_num_gaussians=8):
"""
Performs multi Gaussian fits. Initializes first fit based on number of peaks in smoothed copy of burst. The residual of this fit is compared to the noise threshold. Until the absolute value of the residual is smaller than the noise threshold or until more than max_num_gaussians Gaussians are needed to parameterize the fit, subsequent fits place new Gaussians at locations which have large residuals. A great deal of care is taken in this function to standardize the weighting and initial conditions of the fits since the Gaussians inherently are not orthogonal. The goal is to produce fits with Gaussians which appear physical (aren't extremely tall and narrow or short and wide). The fits may not converge, or more gaussians than max_num_gaussians may be required to fit the function. In such cases, the fitting function passes the burst without returning a fit.
Args:
selxn (int): burst number being fitted
t (array): times corresponding to y
y (array): burst being fitted
noise_avg (float): average value of the noise
noise_thresh (float): average value of the noise + standard deviation of noise
ang_vel (float): actual angular velocity of underlying simulation Omega
orbit_radius (float): actual orbit radius R
x_offsets (float): actual x component of distance between orbit center and light center, D
y_offsets (float): actual y component of distance between orbit center and light center, D
dirctn (int): +1 or -1 corresponding to direction of rotatoin
max_num_gaussians (int): maximum number of gaussians used to fit the burst
Returns:
data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
"""
# for initial fit, use peak finding to determine the number,
# location, and initaial amplitudes of the Gaussians.
initial_translational_offset, initial_amplitude = initial_seed(t, y, noise_avg)
number_gaussians = len(initial_translational_offset)
initial_number_gaussians = len(initial_translational_offset)
if number_gaussians > max_num_gaussians:
print("too many peaks were found initially: number_gaussians>max_number_gaussians.")
return
#calculate rect effective time to be used in the initial standard dev. condition.
delta_t = t[1]-t[0]
time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
#print("rect effective time: ", time_eff)
initial_stddev_denominator = 1#np.random.randint(40, 60, 1)
initial_stddev = [time_eff/9] * number_gaussians#[np.max(t)/initial_stddev_denominator] * number_gaussians
initial_vertical_offset = noise_avg
p0 = initial_guess(initial_amplitude,
initial_translational_offset,
initial_stddev,
initial_vertical_offset)
#print("initial guesses: current time_eff-based stddev is ", initial_stddev[0], 'previous one was ', np.max(t)/50)
# initialize curve fitting bounds
amplitude_bounds = (0,np.max(y)-noise_avg*.25)
translational_offset_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
stddev_bounds = (np.max(t)/150,np.max(t)/2)
vertical_offset_bounds = (2*noise_avg-noise_thresh,noise_thresh)# noise_thresh=mean+std, noise_avg=mean. so mean-std=2*noise_avg-noise_thresh
bounds = bound_maker(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_gaussians)
# make weights for fit
sigma = make_weights(y)
# limit the max number of function evaluations
max_nfev = int(30*len(t))
# try first fit
try:
popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
except Exception as e:
"""plt.figure()
plt.plot(t,y)"""
print('p0:', p0)
print('bounds', bounds)
print('problem in first fit:', e)
return
##### function will only reach this location if initial fit converged.
# calculate residual
y_fit = fit_wrapper(t,*popt)
residual = y-y_fit
"""
plt.figure()
plt.plot(t, y, label="data")
plt.plot(t, y_fit, label="1st fit")
plt.plot(t, np.abs(residual), label="|residual|")
plt.plot([0, np.max(t)], [noise_thresh, noise_thresh], 'k--', label="threshold")
plt.plot([0, np.max(t)], [noise_avg, noise_avg], 'k--', label="mean noise")
plt.legend()
"""
"""
print(noise_thresh)
print(np.any(np.abs(residual)>noise_thresh))
print(number_gaussians<max_num_gaussians)
"""
# compare residual to noise threshold to determine whether or not
# another Gaussian should be added. Only add another Gaussian if
# there are no more than max_num_gaussians Gaussians already.
std_dev_residual_previous = np.std(y)
std_dev_residual_new = np.std(residual)
#print('std dev of residual is: ', std_dev_residual_new)
while (np.any(np.abs(residual)>noise_thresh*1.1)) & (number_gaussians<max_num_gaussians) | (std_dev_residual_new<std_dev_residual_previous*.8):
# try subsequent fit
# add in another gausian
new_translational_offset, new_amplitude = subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg)
old_stddev = popt[number_gaussians*2:number_gaussians*3]
initial_stddev = np.append(old_stddev, time_eff/8)
initial_vertical_offset = popt[-1]
p0 = initial_guess(new_amplitude,
new_translational_offset,
initial_stddev,
initial_vertical_offset)
# initialize curve fitting bounds
num_new_gaussians = 1
bounds = bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg)
# try curve fit again
try:
popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
except: # if first fit fails to converge, end fitting
print(selxn, "one of the subsequent fits failed to converge")
return
y_fit = fit_wrapper(t,*popt)
residual = y-y_fit
number_gaussians += 1
"""
plt.plot(t, y_fit, label="new fit")
plt.plot(t, np.abs(residual), label="|new residual|")
plt.legend()
"""
std_dev_residual_previous = std_dev_residual_new
std_dev_residual_new = np.std(residual)
#print('std dev of residual is: ', std_dev_residual_new)
if (np.any(np.abs(residual)<noise_thresh*1.1) & (number_gaussians<=max_num_gaussians)):
print(selxn, "WORKED")
# package data for ML input.
r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
data = package_fit_data(r2, rmse, max_error, max_error_normalized,
time_eff, duration, popt, ang_vel,
orbit_radius, x_offsets, y_offsets, number_gaussians, y,
noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t)
return data
else:
print(selxn, "max number of gaussians reached, but fit not within noise threshold")
return
return
"""
**********************
Fitting functions for erf-rect-erfs (use these when features within the illuminating beam have top hat intensity profiles)
**********************
"""
def error_function(x, x0, w):
    """Rising sigmoid built from the error function.

    Implements y = 0.5 * (1 + erf(sqrt(2) * (x - x0) / w)), which rises
    from 0 to 1 around ``x0`` with a transition of width ``w``.

    Args:
        x: independent variable value(s); scalar or numpy array
        x0: horizontal offset of the transition
        w: width of the transition

    Returns:
        The error-function sigmoid evaluated at ``x``.
    """
    scaled_arg = np.sqrt(2) * (x - x0) / w
    return 0.5 * (1 + erf(scaled_arg))
def error_function_complimentary(x, x0, w):
    """Falling sigmoid built from the error function.

    Implements y = 0.5 * (1 - erf(sqrt(2) * (x - x0) / w)), which falls
    from 1 to 0 around ``x0`` with a transition of width ``w``.

    Args:
        x: independent variable value(s); scalar or numpy array
        x0: horizontal offset of the transition
        w: width of the transition

    Returns:
        The complementary error-function sigmoid evaluated at ``x``.
    """
    scaled_arg = np.sqrt(2) * (x - x0) / w
    return 0.5 * (1 - erf(scaled_arg))
def fit_wrapper_erfrecterf(x, *args):
    """Adapter that unpacks a flat parameter vector for curve_fit.

    The flat vector layout is: n amplitudes, n rising-edge offsets,
    n rising-edge widths, n falling-edge offsets, n falling-edge widths,
    followed by a single vertical offset, where n is the number of
    erf-rect-erf features being fitted.

    Args:
        x (array): independent variable, such that y = f(x)
        args: flat parameter vector as described above

    Returns:
        erf_rect_fit_function evaluated with the unpacked parameters.
    """
    # Five parameter groups per feature plus one trailing vertical offset.
    n = (len(args) - 1) // 5
    amplitudes, rise_offsets, rise_widths, fall_offsets, fall_widths = (
        args[k * n:(k + 1) * n] for k in range(5)
    )
    baseline = args[-1]
    return erf_rect_fit_function(x, amplitudes, rise_offsets, rise_widths,
                                 fall_offsets, fall_widths, baseline)
def erf_rect_fit_function(x,a,mu0,sigma0,mu1,sigma1,vertical_offset):
    """
    Returns a function that is comprised of erf-rect-erf features plus a constant
    vertical offset. Each feature has a top-hat profile generated as the sum of an
    error function (rising edge) and a complimentary error function (falling edge),
    minus 1, scaled by the feature's amplitude.
    Args:
        x (array): independent variable, such that y=f(x).
        a (sequence): amplitudes of the erf-rect-erf features
        mu0 (sequence): translational offsets of the rising-edge erfs
        sigma0 (sequence): widths of the rising-edge erfs
        mu1 (sequence): translational offsets of the falling-edge (complimentary) erfs
        sigma1 (sequence): widths of the falling-edge (complimentary) erfs
        vertical_offset (float): constant baseline added to the sum
    Returns:
        fit (array): the sum of all erf-rect-erf features and the vertical offset
    """
    # initialize fit function & add the vertical offset to the fit function
    fit = np.zeros(len(x))+vertical_offset
    # iterate through each erf-rect-erf and add it to the fit function
    for amp, offset0, std0, offset1, std1 in zip(a, mu0, sigma0, mu1, sigma1):
        fit += amp*(error_function(x, offset0, std0) + error_function_complimentary(x, offset1, std1) -1 )
    return fit
def initial_guess_erfrecterf(initial_amplitude,initial_translational_offset0,initial_stddev0,initial_translational_offset1,initial_stddev1,initial_vertical_offset):
    """
    Flatten the seed parameters into the single vector expected by curve_fit.
    Layout: amplitudes, rising-edge offsets, rising-edge widths, falling-edge
    offsets, falling-edge widths, then the scalar vertical offset.
    Args:
        initial_amplitude (sequence): seed amplitudes of the erf-rect-erf features
        initial_translational_offset0 (sequence): seed offsets of the rising-edge erfs
        initial_stddev0 (sequence): seed widths of the rising-edge erfs
        initial_translational_offset1 (sequence): seed offsets of the falling-edge erfs
        initial_stddev1 (sequence): seed widths of the falling-edge erfs
        initial_vertical_offset (float): seed for the constant vertical offset
    Returns:
        p0 (list): flat seed vector in the order required by fit_wrapper_erfrecterf
    """
    p0 = list(initial_amplitude)
    p0 += list(initial_translational_offset0)
    p0 += list(initial_stddev0)
    p0 += list(initial_translational_offset1)
    p0 += list(initial_stddev1)
    p0.append(initial_vertical_offset)
    return p0
def bound_maker_erfrecterf(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_erfs):
    """
    Create the (lower, upper) bounds tuple used by curve_fit.
    The parameter order mirrors the flat vector built by
    initial_guess_erfrecterf: amplitudes, rising-edge offsets, rising-edge
    widths, falling-edge offsets, falling-edge widths, vertical offset.
    Args:
        amplitude_bounds (tuple): (low, high) bounds on feature amplitudes
        translational_offset_bounds (tuple): (low, high) bounds on edge offsets
        stddev_bounds (tuple): (low, high) bounds on edge widths
        vertical_offset_bounds (tuple): (low, high) bounds on the vertical offset
        number_erfs (int): the number of erf-rect-erf features in the fit
    Returns:
        bounds (tuple): (lower, upper) bound lists for the flat parameter vector
    """
    # One (low, high) pair per parameter group, repeated per feature;
    # the offset and width bounds apply to both the rising and falling edges.
    per_feature_bounds = [amplitude_bounds, translational_offset_bounds, stddev_bounds,
                          translational_offset_bounds, stddev_bounds]
    lower, upper = [], []
    for low, high in per_feature_bounds:
        lower += [low] * number_erfs
        upper += [high] * number_erfs
    # A single trailing bound pair for the vertical offset.
    lower.append(vertical_offset_bounds[0])
    upper.append(vertical_offset_bounds[1])
    return (lower, upper)
def bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfs, amplitude_bounds, sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1, noise_avg=0.0):
    """
    Makes the bounds vector for fits after the first. Previously fitted features
    are constrained to stay near their fitted values; the newly added feature is
    given wide bounds (or bounds centered on its seed when num_new_erfs == 1).
    Args:
        t (array): time grid of burst
        y (array): burst
        y_fit (array): previous fit to the burst (unused; kept for interface compatibility)
        popt (array): results from the previous erf-rect-erf curve fit
        num_new_erfs (int): the number of features to be added to the new fit
        amplitude_bounds (tuple): bounds on feature amplitudes (unused; kept for interface compatibility)
        sigma0_bounds (tuple): bounds on the widths of the rising-edge erfs
        sigma1_bounds (tuple): bounds on the widths of the falling-edge erfs
        vertical_offset_bounds (tuple): bounds on the vertical offset
        new_mu0 (array): seed positions for the new rising edges
        new_mu1 (array): seed positions for the new falling edges
        noise_avg (float): average noise level, used to cap new-feature
            amplitudes. Defaults to 0.0 (i.e. cap at 1.2 * max(y)).
    Returns:
        bounds (tuple): (lower, upper) bound arrays for the flat parameter vector
    """
    # BUGFIX: num_erfs_old was previously referenced without being defined
    # (always raised NameError). Infer it from the parameter-vector layout:
    # 5 parameters per feature plus one trailing vertical offset.
    num_erfs_old = (len(popt) - 1) // 5
    amplitudes = popt[0:num_erfs_old]
    mu0 = popt[num_erfs_old:2*num_erfs_old]
    sigma0 = popt[2*num_erfs_old:3*num_erfs_old]
    mu1 = popt[3*num_erfs_old:4*num_erfs_old]
    sigma1 = popt[4*num_erfs_old:5*num_erfs_old]
    vertical_offset = popt[-1]
    # Old amplitudes may move a little; new amplitudes get the full range.
    # BUGFIX: noise_avg was previously referenced without being a parameter.
    lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [0]*num_new_erfs)
    upper_amp = np.append(amplitudes+np.abs(amplitudes)*.4, [1.2*(np.max(y)-noise_avg)]*num_new_erfs)
    # limit the movement of the previously fitted erf-rect-erfs.
    lower_mu0 = np.append(mu0*.8, [0]*num_new_erfs)
    upper_mu0 = np.append(mu0*1.2, [np.max(t)]*num_new_erfs)
    lower_mu1 = np.append(mu1*.8, [0]*num_new_erfs)
    upper_mu1 = np.append(mu1*1.2, [np.max(t)]*num_new_erfs)
    if num_new_erfs == 1:
        # With a single new feature, center its edge bounds on its seed.
        lower_mu0 = np.append(mu0*.8, [new_mu0[-1]*.5])
        upper_mu0 = np.append(mu0*1.2, [new_mu0[-1]*1.5])
        lower_mu1 = np.append(mu1*.8, [new_mu1[-1]*.5])
        upper_mu1 = np.append(mu1*1.2, [new_mu1[-1]*1.5])
    # Clamp the edge bounds to the valid time window.
    lower_mu0[lower_mu0<0] = 0
    lower_mu1[lower_mu1<0] = 0
    upper_mu0[upper_mu0>np.max(t)] = .9*np.max(t)
    upper_mu1[upper_mu1>np.max(t)] = .9*np.max(t)
    # Width bounds are the same for old and new features.
    lower_sigma0 = np.append([sigma0_bounds[0]]*num_erfs_old, [sigma0_bounds[0]]*num_new_erfs)
    lower_sigma1 = np.append([sigma1_bounds[0]]*num_erfs_old, [sigma1_bounds[0]]*num_new_erfs)
    upper_sigma0 = np.append([sigma0_bounds[1]]*num_erfs_old, [sigma0_bounds[1]]*num_new_erfs)
    upper_sigma1 = np.append([sigma1_bounds[1]]*num_erfs_old, [sigma1_bounds[1]]*num_new_erfs)
    # Assemble in the flat-vector order used by fit_wrapper_erfrecterf.
    lower = np.concatenate((lower_amp, lower_mu0, lower_sigma0, lower_mu1, lower_sigma1, [vertical_offset_bounds[0]]))
    upper = np.concatenate((upper_amp, upper_mu0, upper_sigma0, upper_mu1, upper_sigma1, [vertical_offset_bounds[1]]))
    bounds = (lower, upper)
    return bounds
def find_edges(y, trigger_height):
    """
    Simple zero-crossing algorithm to locate the rising and falling edges of a
    signal. An edge is detected between two consecutive samples that straddle
    trigger_height strictly (samples exactly equal to the threshold do not
    trigger). Noisy signals may yield spurious duplicate edges near the
    threshold; smooth beforehand if that is a problem.
    Args:
        y (array): signal of interest
        trigger_height (float): the height at which a rising or falling edge is detected
    Returns:
        rising (list): indices of rising edges at which to seed erf-rect-erfs
        falling (list): indices of falling edges at which to seed erf-rect-erfs
    """
    rising, falling = [], []
    for idx, (left, right) in enumerate(zip(y[:-1], y[1:])):
        # The two crossing directions are mutually exclusive for a given pair.
        if left < trigger_height and right > trigger_height:
            rising.append(idx)
        elif left > trigger_height and right < trigger_height:
            falling.append(idx)
    return rising, falling
def seed_initial_offsets_edges(y, noise_level):
    """
    Seed the rising and falling edges of a signal using a noise-based threshold.
    Args:
        y (array): signal of interest
        noise_level (float): mean noise level of the signal; edges are
            detected at twice this value
    Returns:
        rising_edges (list): rising-edge indices at which to seed erf-rect-erfs
        falling_edges (list): falling-edge indices at which to seed erf-rect-erfs
    """
    # Trigger edge detection at twice the mean noise level.
    return find_edges(y, noise_level * 2)
def seed_initial_offsets_edges_smoothed(y, noise):
    """
    Seed the rising and falling edges from a rect-smoothed copy of the signal.
    The signal is cross-correlated with a rectangle to suppress noise before
    edge detection; edges are found where the normalized smoothed signal
    crosses 25% of the raw signal's maximum.
    Args:
        y (array): signal of interest
        noise (float): mean noise level of the signal (currently unused; the
            threshold is based on the signal maximum instead)
    Returns:
        rising_edges (list): rising-edge indices at which to seed erf-rect-erfs
        falling_edges (list): falling-edge indices at which to seed erf-rect-erfs
        xc_r (array): raw (unnormalized) cross-correlation of y with the rect
    """
    # Edge threshold: a quarter of the raw signal's peak height.
    edge_threshold = np.max(y)*.25
    # Build the smoothing rectangle and correlate it with the signal,
    # trimming the correlation tails introduced by the rect's length.
    rect_length = len(y)//50
    rect = rect_generator(rect_length, len(y), 4000)
    xc_r = signal.correlate(y, rect)[rect_length//2:-rect_length//2]
    # Rescale the smoothed trace to the amplitude range of the raw signal.
    smoothed = xc_r/np.max(xc_r)*np.max(y)
    rising_edges, falling_edges = find_edges(smoothed, edge_threshold)
    return rising_edges, falling_edges, xc_r
def initial_seed_erfrecterf(t, y, noise):
    """
    Build the seeds for the first erf-rect-erf fit.
    Edge positions come from a smoothed copy of the signal; each feature's
    amplitude is seeded from the signal value at the midpoint between its
    rising and falling edges, minus the noise level.
    Args:
        t (array): time corresponding to signal
        y (array): signal
        noise (float): noise level of y
    Returns:
        initial_mu0 (array): seed offsets for the rising edges
        initial_mu1 (array): seed offsets for the falling edges
        initial_amplitudes (array): seed amplitudes for the features
    """
    rising_edges, falling_edges, _xc = seed_initial_offsets_edges_smoothed(y, noise)
    # Amplitude seed: signal height halfway across each top hat, above noise.
    initial_amplitudes = np.asarray(
        [y[int((fall - rise)//2 + rise)] - noise
         for rise, fall in zip(rising_edges, falling_edges)])
    return t[rising_edges], t[falling_edges], initial_amplitudes
def seed_later_offsets_peaks_erfrecterf(y, noise_level):
    """
    Locate the rising/falling edge pair with the largest peak between its edges.
    All edge pairs in y above noise_level are found; if none exist, the search
    is retried with half the threshold. The pair whose midpoint value has the
    largest magnitude is returned.
    Args:
        y (array): signal of interest (typically the |residual| of a fit)
        noise_level (float): mean noise level; used as the edge-detection threshold
    Returns:
        rising_edge (int): rising-edge index of the strongest feature
        falling_edge (int): falling-edge index of the strongest feature
        (returns None when no edge pair is found even at the reduced threshold)
    """
    threshold = noise_level
    rising_edges, falling_edges = find_edges(y, threshold)
    # Peak magnitude at the midpoint of each rising/falling pair.
    peak_val = [np.abs(y[(f-r)//2+r]) for r, f in zip(rising_edges, falling_edges)]
    if not peak_val: #if peak_val is empty
        # Retry with a more permissive threshold.
        threshold = noise_level*.5
        rising_edges, falling_edges = find_edges(y, threshold)
        # BUGFIX: this loop previously indexed with the undefined singular
        # names rising_edge/falling_edge, raising NameError whenever it ran.
        peak_val = [np.abs(y[(f-r)//2+r]) for r, f in zip(rising_edges, falling_edges)]
    if not peak_val:
        return
    biggest_residual_location = np.argmax(peak_val)
    return rising_edges[biggest_residual_location], falling_edges[biggest_residual_location]
def subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfs, noise_threshold):
    """
    Make the seeds for erf-rect-erf fits after the first fit.
    The previously fitted parameters are carried forward as seeds and one new
    feature is appended at the location of the largest residual between the
    data and the previous fit.
    Args:
        t (array): times corresponding to y
        y (array): burst
        y_fit (array): fitted burst from the previous iteration
        popt (array): output of previous curve fit
        number_erfs (int): number of erf-rect-erf features in the previous fit
        noise_threshold (float): noise threshold used when locating residual features
    Returns:
        new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 (arrays): seed
        parameters for the next fit (previous values plus one new feature),
        or None if no residual feature could be located.
    """
    residual = np.abs(y-y_fit)
    # NOTE: unconditional debug plotting and prints were removed here; the
    # rest of this module keeps its plot calls commented out.
    try:
        # Seed the new feature at the residual's strongest edge pair, with
        # its amplitude taken from the data at the midpoint between them.
        rising_edge, falling_edge = seed_later_offsets_peaks_erfrecterf(residual, noise_threshold)
        mu0_new = t[rising_edge]
        mu1_new = t[falling_edge]
        a_new = y[(falling_edge-rising_edge)//2+rising_edge]-noise_threshold
        sigma0_new = 5
        sigma1_new = 5
        # use the previously fitted parameters as initial conditions
        fitted_a = popt[:number_erfs]
        new_a = np.append(fitted_a, a_new)
        fitted_mu0 = popt[number_erfs:2*number_erfs]
        new_mu0 = np.append(fitted_mu0, mu0_new)
        fitted_sigma0 = popt[2*number_erfs:3*number_erfs]
        new_sigma0 = np.append(fitted_sigma0, sigma0_new)
        fitted_mu1 = popt[3*number_erfs:4*number_erfs]
        new_mu1 = np.append(fitted_mu1, mu1_new)
        fitted_sigma1 = popt[4*number_erfs:5*number_erfs]
        new_sigma1 = np.append(fitted_sigma1, sigma1_new)
    except Exception as e:
        # A None from the seeder (no feature found) lands here as TypeError.
        print("Exception in subsequent_seeding", e)
        return
    return new_a, new_mu0, new_sigma0, new_mu1, new_sigma1
def package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                                time_eff, duration, popt, fr,
                                number_erfrecterfs,
                                y, noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs,
                                t=None):
    """
    Save data from erf-rect-erf fits for use in later modules.
    Args:
        r2 (float): percentage of variability of the burst accounted for by the fit
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute value of difference between y and y_fit
        max_error_normalized (float): max_error/max(y)
        time_eff (float): rect effective time of signal
        duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
        popt (array): output of curve fit
        fr (float): flow rate
        number_erfrecterfs (int): number of erf-rect-erf features used to parameterize burst
        y (array): burst
        noise_avg (float): average noise value
        max_num_erfrecterfs (int): maximum number of erf-rect-erf features allowed in the fit
        initial_number_erfrecterfs (int): number of features used in the initial fit
        t (array, optional): time grid of the burst; used for the sample
            spacing entry. If omitted, NaN is stored in that slot.
    Returns:
        data (array): contains many of the arguments and several other metrics,
        packaged for pickling for use in next module, or None on failure.
    """
    try:
        # fit parameters
        a = popt[0:number_erfrecterfs] # amplitude
        mu0 = popt[number_erfrecterfs:2*number_erfrecterfs] # rising-edge offset
        sigma0 = popt[2*number_erfrecterfs:3*number_erfrecterfs] # rising-edge width
        mu1 = popt[3*number_erfrecterfs:4*number_erfrecterfs] # falling-edge offset
        sigma1 = popt[4*number_erfrecterfs:5*number_erfrecterfs] # falling-edge width
        # to save, we want distances relative to location of first erfrecterf,
        # so sort features by rising-edge position.
        sorted_indices = np.argsort(mu0)
        # Zero-padded to max_num_erfrecterfs so saved rows have fixed width.
        a_save, mu0_save, sigma0_save, mu1_save, sigma1_save = np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs)
        a_save[:number_erfrecterfs] = a[sorted_indices]
        mu0_save[:number_erfrecterfs] = mu0[sorted_indices]-mu0[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma0_save[:number_erfrecterfs] = sigma0[sorted_indices]
        mu1_save[:number_erfrecterfs] = mu1[sorted_indices]-mu0[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma1_save[:number_erfrecterfs] = sigma1[sorted_indices]
        vert_offset_save = popt[-1]
        max_SNR = np.max(y)/noise_avg
        avg_SNR = np.mean(y)/noise_avg
        # BUGFIX: `t` was previously referenced here without being a
        # parameter, so this function always raised NameError (silently
        # swallowed by the except below) and returned None.
        delta_t = t[1]-t[0] if t is not None else np.nan
        data = np.concatenate([[fr],[rmse],
                               [r2],[max_error],[max_error_normalized],[time_eff],[duration],
                               a_save,mu0_save,sigma0_save,mu1_save, sigma1_save,[vert_offset_save],[delta_t],
                               [max_SNR], [avg_SNR], [int(initial_number_erfrecterfs)]])
        return data
    except Exception as e:
        print('Exception:', e)
        print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
        return
def fitting_function_erfrecterf(selxn, t, y, noise_avg, noise_thresh, fr, max_num_erfrecterfs=4):
    """
    Performs erf-rect-erf fits. Initializes the first fit from the edges found in a smoothed copy of the burst. The scaled residual of each fit is compared to an acceptance threshold; while it is exceeded, fewer than max_num_erfrecterfs features are in use, and the residual is still shrinking, a new feature is seeded at the location of the largest residual and the fit is repeated. The fits may not converge, or more features than max_num_erfrecterfs may be required to fit the burst; in such cases the burst is passed over without returning a fit.
    Args:
        selxn (int): burst number being fitted
        t (array): times corresponding to y
        y (array): burst being fitted
        noise_avg (float): average value of the noise
        noise_thresh (float): average value of the noise + standard deviation of noise
        fr (float): actual flow rate underlying simulation
        max_num_erfrecterfs (int): maximum number of erf-rect-erf features used to fit the burst
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
    """
    # check that there are enough points above the noise threshold to actually do a fit
    if np.shape(np.argwhere(y>noise_avg + 3*(noise_thresh-noise_avg)))[0]<12:
        print("not enough of the burst has an intensity greater than 2x the noise threshold ")
        return
    # for the initial fit, use edge finding on a smoothed copy of the burst
    # to determine the number, locations, and amplitudes of the features.
    initial_mu0, initial_mu1, initial_amplitude = initial_seed_erfrecterf(t, y, noise_thresh)
    number_erfrecterfs = len(initial_mu0)
    if number_erfrecterfs > max_num_erfrecterfs:
        print("too many peaks were found initially: number_erfrecterfs > max_num_erfrecterfs.")
        return
    # calculate rect effective time to be used in the initial width seeds.
    delta_t = t[1]-t[0]
    time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
    initial_sigma0 = [time_eff/5]*number_erfrecterfs
    initial_sigma1 = [time_eff/5]*number_erfrecterfs
    # initialize the vertical offset from the mean level of the burst's tails
    initial_vertical_offset = noise_avg + np.mean( [np.mean(y[:len(y)//5]), np.mean(y[4*len(y)//5:])] )
    # BUGFIX: previously called initial_guess (the 4-argument Gaussian
    # seeder) with 6 arguments, which raised TypeError.
    p0 = initial_guess_erfrecterf(initial_amplitude,
                                  initial_mu0,
                                  initial_sigma0,
                                  initial_mu1,
                                  initial_sigma1,
                                  initial_vertical_offset)
    # initialize curve fitting bounds
    amplitude_bounds = (noise_avg,np.max(y)-noise_avg*.25)
    mu_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
    sigma_bounds = (np.max(t)/150,np.max(t)/2)
    vertical_offset_bounds = (.95*np.min( [np.min(y[:len(y)//5]), np.min(y[4*len(y)//5:])]), noise_avg+1.25*np.max( [np.max(y[:len(y)//5]), np.max(y[4*len(y)//5:])]) )
    bounds = bound_maker_erfrecterf(amplitude_bounds,mu_bounds,sigma_bounds,vertical_offset_bounds,number_erfrecterfs)
    initial_number_erfrecterfs = len(initial_sigma0)
    # make weights for fit
    sigma = make_weights(y, g_length=50)
    # limit the max number of function evaluations
    max_nfev = int(30*len(t))
    # try first fit
    try:
        popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
    except Exception as e:
        print('p0:', p0)
        print('bounds', bounds)
        print('problem in first fit:', e)
        return
    # the function only reaches this point if the initial fit converged.
    y_fit = fit_wrapper_erfrecterf(t,*popt)
    residual = y-y_fit
    # compare the weighted residual to the acceptance threshold to decide
    # whether another feature should be added. Keep refitting while the fit
    # is improving and fewer than max_num_erfrecterfs features are in use.
    std_dev_residual_previous = 9999999  # large sentinel so the first comparison always passes
    std_dev_residual_new = np.std(residual)
    fitnum = 1
    noisethresh_to_use = .05
    while (np.any(np.abs(residual)/sigma**2>noisethresh_to_use)) & (number_erfrecterfs<max_num_erfrecterfs) & (std_dev_residual_new<std_dev_residual_previous*.8):
        # NOTE(review): unconditional debug plotting that ran here on every
        # pass was removed; re-enable locally if needed for diagnosis.
        print('initial fit insufficient')
        # try subsequent fit: add in another feature
        fitnum += 1
        print('fit number', fitnum)
        try:
            new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 = subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfrecterfs, noise_thresh)
            initial_vertical_offset = popt[-1]
            # BUGFIX: previously called initial_guess (the 4-argument
            # Gaussian seeder) with 6 arguments, which raised TypeError.
            p0 = initial_guess_erfrecterf(new_a,
                                          new_mu0,
                                          new_sigma0,
                                          new_mu1,
                                          new_sigma1,
                                          initial_vertical_offset)
            sigma0_bounds = (np.max(t)/150,np.max(t)/2)
            sigma1_bounds = (np.max(t)/150,np.max(t)/2)
            # initialize curve fitting bounds
            num_new_erfrecterfs = 1
            bounds = bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfrecterfs, amplitude_bounds,
                                                       sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1)
        except Exception as e:
            print(e)
            print("$$$$$$$$$$$$$")
            return
        # try curve fit again
        try:
            popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
        except: # if the refit fails to converge, end fitting
            print(selxn, "one of the subsequent fits failed to converge")
            return
        y_fit = fit_wrapper_erfrecterf(t,*popt)
        residual = y-y_fit
        number_erfrecterfs += 1
        print('num erfs',number_erfrecterfs)
        std_dev_residual_previous = std_dev_residual_new
        std_dev_residual_new = np.std(residual)
    # NOTE(review): np.any(...) accepts the fit if ANY sample's weighted
    # residual is below the threshold; np.all may have been intended —
    # confirm before changing (the Gaussian path uses the same construct).
    if (np.any(np.abs(residual/sigma**2)<noisethresh_to_use) & (number_erfrecterfs<=max_num_erfrecterfs)):
        print(selxn, "WORKED")
        # package data for ML input.
        r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
        # NOTE(review): package_fit_data_erfrecterf references t internally
        # but t is not passed here — confirm against its signature.
        data = package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                                           time_eff, duration, popt, fr,
                                           number_erfrecterfs, y,
                                           noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs)
        return data
    else:
        print(selxn, "max number of erfrecterfs reached, but fit not within noise threshold")
        return
| 0 | 0 | 0 |
f31b44507288c917bf150f4d3293c1fa4e34ff56 | 979 | py | Python | GloVe_TFID_GoogleNews/org/wso2/tfid.py | wso2-incubator/twitter-sentiment-analysis | e9b2f55f6309d9652d01f84a37115e21fb1367f5 | [
"Apache-2.0"
] | 4 | 2016-11-27T15:20:07.000Z | 2021-03-26T06:38:51.000Z | GloVe_TFID_GoogleNews/org/wso2/tfid.py | wso2-incubator/twitter-sentiment-analysis | e9b2f55f6309d9652d01f84a37115e21fb1367f5 | [
"Apache-2.0"
] | null | null | null | GloVe_TFID_GoogleNews/org/wso2/tfid.py | wso2-incubator/twitter-sentiment-analysis | e9b2f55f6309d9652d01f84a37115e21fb1367f5 | [
"Apache-2.0"
] | 3 | 2017-11-20T05:29:20.000Z | 2022-02-16T14:19:20.000Z | from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
outputFile = open('/home/anoukh/SentimentAnalysis/0.3uniqueTFIDFTweetVectorsSize200.csv', "wb")
writer = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, escapechar=',')
with open('/home/anoukh/SentimentAnalysis/GloVe-1.2/data/0.3uniqueTweets.csv', 'rb') as f:
reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
corpus = []
for row in reader:
for sentence in row:
corpus.append(sentence)
vectorizer = TfidfVectorizer(analyzer='word', min_df=0, max_features=500, stop_words='english',
use_idf=True)
X = vectorizer.fit_transform(corpus)
print("n_samples: %d, n_features: %d" % X.shape)
i = 0
print "Using PCA to reduce dimensions"
pca = PCA(n_components=20)
reducedDimensions = pca.fit_transform(X.toarray())
for tweet in reducedDimensions:
writer.writerow(tweet)
| 42.565217 | 104 | 0.723187 | from sklearn.decomposition import PCA
# Python 2 script: builds TF-IDF vectors for a corpus of tweets, reduces
# them to 20 dimensions with PCA and writes one vector per tweet to a CSV.
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
# NOTE(review): outputFile is never explicitly closed/flushed; the script
# relies on interpreter exit to flush the csv writer.
outputFile = open('/home/anoukh/SentimentAnalysis/0.3uniqueTFIDFTweetVectorsSize200.csv', "wb")
writer = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, escapechar=',')
with open('/home/anoukh/SentimentAnalysis/GloVe-1.2/data/0.3uniqueTweets.csv', 'rb') as f:
    reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
    # flatten every CSV field into one document list for the vectorizer
    corpus = []
    for row in reader:
        for sentence in row:
            corpus.append(sentence)
    # keep at most 500 word-level TF-IDF features, dropping English stop words
    vectorizer = TfidfVectorizer(analyzer='word', min_df=0, max_features=500, stop_words='english',
                                 use_idf=True)
    X = vectorizer.fit_transform(corpus)
    print("n_samples: %d, n_features: %d" % X.shape)
    i = 0  # NOTE(review): unused variable
    print "Using PCA to reduce dimensions"
    # densify the sparse TF-IDF matrix and project to 20 principal components
    pca = PCA(n_components=20)
    reducedDimensions = pca.fit_transform(X.toarray())
    # one CSV row per reduced tweet vector
    for tweet in reducedDimensions:
        writer.writerow(tweet)
| 0 | 0 | 0 |
faf55276bdeeba4a2ce7543e5a7379ed72944c65 | 8,108 | py | Python | samples/vmc/sddc/deploy_ovf_template.py | restapicoding/VMware-SDK | edc387a76227be1ad7c03e5eeaf603351574f70c | [
"MIT"
] | 1 | 2018-08-10T20:31:20.000Z | 2018-08-10T20:31:20.000Z | samples/vmc/sddc/deploy_ovf_template.py | restapicoding/VMware-SDK | edc387a76227be1ad7c03e5eeaf603351574f70c | [
"MIT"
] | 1 | 2017-08-17T12:46:46.000Z | 2017-08-17T12:46:46.000Z | samples/vmc/sddc/deploy_ovf_template.py | restapicoding/VMware-SDK | edc387a76227be1ad7c03e5eeaf603351574f70c | [
"MIT"
] | 1 | 2018-07-03T23:36:33.000Z | 2018-07-03T23:36:33.000Z | #!/usr/bin/env python
"""
* *******************************************************
* Copyright VMware, Inc. 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = 'VMware Cloud on AWS'
from pprint import pprint as pp
from com.vmware.content.library_client import Item
from com.vmware.vcenter.ovf_client import LibraryItem
from com.vmware.vcenter_client import ResourcePool, Folder, Network
from com.vmware.vcenter.vm.hardware_client import Ethernet
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import sample_cli, sample_util
from samples.vsphere.common.id_generator import generate_random_uuid
class DeployOvfTemplate:
    """
    Demonstrates the workflow to deploy an OVF library item to
    a resource pool in VMware Cloud on AWS.
    Note: the sample needs an existing library item with an OVF template
    and an existing resource pool with resources for deploying the VM.
    """
    # NOTE(review): the method bodies (__init__, deploy_ovf_template,
    # delete_vm) are not present in this copy of the file.
if __name__ == '__main__':
    main()  # NOTE(review): main() is not defined in this copy of the file -- confirm against the full module
| 41.367347 | 94 | 0.607795 | #!/usr/bin/env python
"""
* *******************************************************
* Copyright VMware, Inc. 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = 'VMware Cloud on AWS'
from pprint import pprint as pp
from com.vmware.content.library_client import Item
from com.vmware.vcenter.ovf_client import LibraryItem
from com.vmware.vcenter_client import ResourcePool, Folder, Network
from com.vmware.vcenter.vm.hardware_client import Ethernet
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import sample_cli, sample_util
from samples.vsphere.common.id_generator import generate_random_uuid
class DeployOvfTemplate:
    """
    Demonstrates the workflow to deploy an OVF library item to
    a resource pool in VMware Cloud on AWS.
    Note: the sample needs an existing library item with an OVF template
    and an existing resource pool with resources for deploying the VM.
    """
    def __init__(self):
        """Parse the command line arguments and connect to the vAPI endpoint."""
        self.vm_name = 'deployed-vm-' + str(generate_random_uuid())
        parser = sample_cli.build_arg_parser()
        parser.add_argument('--libitemname',
                            required=True,
                            help='(Required) The name of the library item to deploy. '
                                 'The library item should contain an OVF package.')
        parser.add_argument('--vmname',
                            default=self.vm_name,
                            help='(Optional) The name of the Virtual Machine to be deployed. '
                                 'Default: "{}"'.format(self.vm_name))
        parser.add_argument('--resourcepoolname',
                            default='Compute-ResourcePool',
                            help='(Optional) The name of the resource pool to be used. '
                                 'Default: "Compute-ResourcePool"')
        parser.add_argument('--foldername',
                            default='Workloads',
                            help='(Optional) The name of the folder to be used. '
                                 'Default: "Workloads"')
        parser.add_argument('--opaquenetworkname',
                            help='(Optional) The name of the opaque network to be added '
                                 'to the deployed vm')
        args = sample_util.process_cli_args(parser.parse_args())
        self.vm_id = None
        self.lib_item_name = args.libitemname
        self.vm_name = args.vmname
        self.resourcepoolname = args.resourcepoolname
        self.foldername = args.foldername
        self.opaquenetworkname = args.opaquenetworkname
        self.cleardata = args.cleardata
        # Connect to vAPI Endpoint on vCenter Server
        self.client = create_vsphere_client(server=args.server,
                                            username=args.username,
                                            password=args.password)
    def deploy_ovf_template(self):
        """Resolve the target resource pool/folder and library item, deploy
        the OVF, and optionally attach an opaque-network NIC to the new VM."""
        # Build the deployment target with resource pool ID and folder ID
        rp_filter_spec = ResourcePool.FilterSpec(names=set([self.resourcepoolname]))
        resource_pool_summaries = self.client.vcenter.ResourcePool.list(rp_filter_spec)
        if not resource_pool_summaries:
            raise ValueError("Resource pool with name '{}' not found".
                             format(self.resourcepoolname))
        resource_pool_id = resource_pool_summaries[0].resource_pool
        print('Resource pool ID: {}'.format(resource_pool_id))
        folder_filter_spec = Folder.FilterSpec(names=set([self.foldername]))
        folder_summaries = self.client.vcenter.Folder.list(folder_filter_spec)
        if not folder_summaries:
            raise ValueError("Folder with name '{}' not found".
                             format(self.foldername))
        folder_id = folder_summaries[0].folder
        print('Folder ID: {}'.format(folder_id))
        deployment_target = LibraryItem.DeploymentTarget(
            resource_pool_id=resource_pool_id,
            folder_id=folder_id
        )
        # Find the library item
        find_spec = Item.FindSpec(name=self.lib_item_name)
        lib_item_ids = self.client.content.library.Item.find(find_spec)
        if not lib_item_ids:
            raise ValueError("Library item with name '{}' not found".
                             format(self.lib_item_name))
        lib_item_id = lib_item_ids[0]
        print('Library item ID: {}'.format(lib_item_id))
        ovf_summary = self.client.vcenter.ovf.LibraryItem.filter(
            ovf_library_item_id=lib_item_id,
            target=deployment_target)
        print('Found an OVF template: {} to deploy.'.format(ovf_summary.name))
        # Build the deployment spec
        deployment_spec = LibraryItem.ResourcePoolDeploymentSpec(
            name=self.vm_name,
            annotation=ovf_summary.annotation,
            accept_all_eula=True,
            network_mappings=None,
            storage_mappings=None,
            storage_provisioning=None,
            storage_profile_id=None,
            locale=None,
            flags=None,
            additional_parameters=None,
            default_datastore_id=None)
        # Deploy the ovf template
        result = self.client.vcenter.ovf.LibraryItem.deploy(
            lib_item_id,
            deployment_target,
            deployment_spec,
            client_token=generate_random_uuid())
        # The type and ID of the target deployment is available in the deployment result.
        if result.succeeded:
            print('Deployment successful. VM Name: "{}", ID: "{}"'
                  .format(self.vm_name, result.resource_id.id))
            self.vm_id = result.resource_id.id
            error = result.error
            if error is not None:
                # a successful deployment may still carry non-fatal warnings
                for warning in error.warnings:
                    print('OVF warning: {}'.format(warning.message))
        else:
            print('Deployment failed.')
            for error in result.error.errors:
                print('OVF error: {}'.format(error.message))
        # Add an opaque network portgroup to the deployed VM
        if self.opaquenetworkname:
            # NOTE(review): 'filter' shadows the builtin of the same name
            filter = Network.FilterSpec(
                names=set([self.opaquenetworkname]),
                types=set([Network.Type.OPAQUE_NETWORK]))
            network_summaries = self.client.vcenter.Network.list(filter=filter)
            if not network_summaries:
                raise ValueError("Opaque network {} can not find".format(
                    self.opaquenetworkname))
            network = network_summaries[0].network
            nic_create_spec = Ethernet.CreateSpec(
                start_connected=True,
                mac_type=Ethernet.MacAddressType.GENERATED,
                backing=Ethernet.BackingSpec(
                    type=Ethernet.BackingType.OPAQUE_NETWORK,
                    network=network))
            print('vm.hardware.Ethernet.create({}, {}) -> {}'.format(
                self.vm_id, nic_create_spec, network))
            nic = self.client.vcenter.vm.hardware.Ethernet.create(
                self.vm_id, nic_create_spec)
            nic_info = self.client.vcenter.vm.hardware.Ethernet.get(self.vm_id, nic)
            print('vm.hardware.Ethernet.get({}, {}) -> {}'.format(
                self.vm_id, nic, pp(nic_info)))
    def delete_vm(self):
        """Delete the deployed VM if --cleardata was passed on the command line."""
        if self.cleardata:
            self.client.vcenter.VM.delete(self.vm_id)
            print('VM ({}) is deleted'.format(self.vm_id))
def main():
    """Run the sample end to end: deploy the OVF template, then clean up."""
    sample = DeployOvfTemplate()
    sample.deploy_ovf_template()
    sample.delete_vm()
if __name__ == '__main__':
    main()  # standard script entry point
| 6,587 | 0 | 104 |
1dad30a3ebffdce3381c35fb02b7c2a4bfe42e4f | 565 | py | Python | tests/aws/test_libsm.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | null | null | null | tests/aws/test_libsm.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | 5 | 2021-11-04T03:15:37.000Z | 2021-11-04T03:32:32.000Z | tests/aws/test_libsm.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
from libumccr.aws import libsm
| 21.730769 | 85 | 0.688496 | from unittest import TestCase, skip
from libumccr.aws import libsm
class LibSmUnitTests(TestCase):
# TODO https://github.com/umccr/libumccr/issues/2
pass
class LibSmIntegrationTests(TestCase):
@skip
def test_get_secret(self):
"""
python -m unittest tests.aws.test_libsm.LibSmIntegrationTests.test_get_secret
"""
lookup_name = "IcaSecretsPortal"
secret = libsm.get_secret(secret_name=lookup_name)
self.assertIsNotNone(secret)
self.assertIsInstance(secret, str)
# print(secret)
| 0 | 449 | 46 |
6bd1ae2db40b5dd029887c47b7e52b48d28e22de | 2,612 | py | Python | src/datapane/client/api/__init__.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | null | null | null | src/datapane/client/api/__init__.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | 7 | 2021-11-19T21:40:47.000Z | 2021-12-24T16:10:06.000Z | src/datapane/client/api/__init__.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | null | null | null | """# API docs for Datapane Client
These docs describe the Python API for building Datapane documents, along with additional information on the Datapane Teams API.
Usage docs for Datapane can be found at https://docs.datapane.com
These objects are all available under the `datapane` module, via `import datapane as dp` (they are re-exported from `datapane.client.api`).
### Datapane Reports API
The core document APIs are available for both Datapane Community and Datapane Teams, these are found in `datapane.client.api.report`, including,
- `datapane.client.api.report.core.Report`
- Layout Blocks
- `datapane.client.api.report.blocks.Page`
- `datapane.client.api.report.blocks.Group`
- `datapane.client.api.report.blocks.Select`
- Data Blocks
- `datapane.client.api.report.blocks.Plot`
- `datapane.client.api.report.blocks.Table`
- `datapane.client.api.report.blocks.DataTable`
- `datapane.client.api.report.blocks.Media`
- `datapane.client.api.report.blocks.Formula`
- `datapane.client.api.report.blocks.BigNumber`
- `datapane.client.api.report.blocks.Text`
- `datapane.client.api.report.blocks.Code`
- `datapane.client.api.report.blocks.HTML`
### Datapane Teams
Additional API docs are found in `datapane.client.api.teams` that provide building, deployment, and sharing of data analytics apps and workflows
- `datapane.client.api.teams.File`
- `datapane.client.api.teams.Environment`
- `datapane.client.api.teams.App`
- `datapane.client.api.teams.Schedule`
..note:: These docs describe the latest version of the datapane API available on [pypi](https://pypi.org/project/datapane/)
<a href="https://pypi.org/project/datapane/">
<img src="https://img.shields.io/pypi/v/datapane?color=blue" alt="Latest release" />
</a>
"""
# flake8: noqa F401
# Internal API re-exports
import warnings
from .common import HTTPError, Resource
from .dp_object import DPObjectRef
from .report.blocks import (
Attachment,
BigNumber,
Code,
Group,
DataTable,
Divider,
Embed,
Empty,
Media,
Formula,
HTML,
Media,
Page,
Plot,
Select,
SelectType,
Text,
Table,
Toggle,
)
from .report.core import FontChoice, PageLayout, Report, ReportFormatting, ReportWidth, TextAlignment, Visibility
from .runtime import Params, Result, by_datapane, _reset_runtime, _report
from .teams import App, Environment, File, Run, Schedule
from .user import hello_world, login, logout, ping, signup
from ..utils import IncompatibleVersionError
from ..config import init
from . import builtins
| 32.246914 | 144 | 0.72856 | """# API docs for Datapane Client
These docs describe the Python API for building Datapane documents, along with additional information on the Datapane Teams API.
Usage docs for Datapane can be found at https://docs.datapane.com
These objects are all available under the `datapane` module, via `import datapane as dp` (they are re-exported from `datapane.client.api`).
### Datapane Reports API
The core document APIs are available for both Datapane Community and Datapane Teams, these are found in `datapane.client.api.report`, including,
- `datapane.client.api.report.core.Report`
- Layout Blocks
- `datapane.client.api.report.blocks.Page`
- `datapane.client.api.report.blocks.Group`
- `datapane.client.api.report.blocks.Select`
- Data Blocks
- `datapane.client.api.report.blocks.Plot`
- `datapane.client.api.report.blocks.Table`
- `datapane.client.api.report.blocks.DataTable`
- `datapane.client.api.report.blocks.Media`
- `datapane.client.api.report.blocks.Formula`
- `datapane.client.api.report.blocks.BigNumber`
- `datapane.client.api.report.blocks.Text`
- `datapane.client.api.report.blocks.Code`
- `datapane.client.api.report.blocks.HTML`
### Datapane Teams
Additional API docs are found in `datapane.client.api.teams` that provide building, deployment, and sharing of data analytics apps and workflows
- `datapane.client.api.teams.File`
- `datapane.client.api.teams.Environment`
- `datapane.client.api.teams.App`
- `datapane.client.api.teams.Schedule`
..note:: These docs describe the latest version of the datapane API available on [pypi](https://pypi.org/project/datapane/)
<a href="https://pypi.org/project/datapane/">
<img src="https://img.shields.io/pypi/v/datapane?color=blue" alt="Latest release" />
</a>
"""
# flake8: noqa F401
# Internal API re-exports
import warnings
from .common import HTTPError, Resource
from .dp_object import DPObjectRef
from .report.blocks import (
Attachment,
BigNumber,
Code,
Group,
DataTable,
Divider,
Embed,
Empty,
Media,
Formula,
HTML,
Media,
Page,
Plot,
Select,
SelectType,
Text,
Table,
Toggle,
)
from .report.core import FontChoice, PageLayout, Report, ReportFormatting, ReportWidth, TextAlignment, Visibility
from .runtime import Params, Result, by_datapane, _reset_runtime, _report
from .teams import App, Environment, File, Run, Schedule
from .user import hello_world, login, logout, ping, signup
from ..utils import IncompatibleVersionError
from ..config import init
from . import builtins
| 0 | 0 | 0 |
c162162c37ecae91086f125ebcfe055202b2d087 | 17,764 | py | Python | src/tools/antool/converter.py | krzycz/pmemfile | a1b9897a90cd223e24c10c4a7558235986f0fad3 | [
"BSD-3-Clause"
] | 82 | 2017-06-30T13:54:44.000Z | 2022-03-13T02:51:28.000Z | src/tools/antool/converter.py | krzycz/pmemfile | a1b9897a90cd223e24c10c4a7558235986f0fad3 | [
"BSD-3-Clause"
] | 40 | 2017-05-12T13:27:14.000Z | 2017-11-16T19:47:40.000Z | src/tools/antool/converter.py | krzycz/pmemfile | a1b9897a90cd223e24c10c4a7558235986f0fad3 | [
"BSD-3-Clause"
] | 15 | 2017-05-12T12:32:01.000Z | 2022-02-28T14:09:11.000Z | #!/usr/bin/python3
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os import stat
from syscall import *
from syscalltable import *
# minimum required version of vltrace log
VLTRACE_VMAJOR = 0
VLTRACE_VMINOR = 1
VLTRACE_TAB_SIGNATURE = "VLTRACE_TAB" # signature of vltrace syscall table
VLTRACE_LOG_SIGNATURE = "VLTRACE_LOG" # signature of vltrace log
# currently only the x86_64 architecture is supported
ARCH_x86_64 = 1
Archs = ["Unknown", "x86_64"]  # index 0 is the fallback for unknown arch values
# actions returned by the log-parsing state machine
DO_GO_ON = 0    # keep processing the current syscall record
DO_REINIT = 1   # start over with a fresh syscall record
SYSCALL_NOT_FOUND = -1  # sentinel: no matching record found in a pending list
########################################################################################################################
# Converter
########################################################################################################################
| 43.970297 | 120 | 0.51655 | #!/usr/bin/python3
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os import stat
from syscall import *
from syscalltable import *
# minimum required version of vltrace log
VLTRACE_VMAJOR = 0
VLTRACE_VMINOR = 1
VLTRACE_TAB_SIGNATURE = "VLTRACE_TAB" # signature of vltrace syscall table
VLTRACE_LOG_SIGNATURE = "VLTRACE_LOG" # signature of vltrace log
# currently only the x86_64 architecture is supported
ARCH_x86_64 = 1
Archs = ["Unknown", "x86_64"]  # index 0 is the fallback for unknown arch values
# actions returned by the log-parsing state machine
DO_GO_ON = 0    # keep processing the current syscall record
DO_REINIT = 1   # start over with a fresh syscall record
SYSCALL_NOT_FOUND = -1  # sentinel: no matching record found in a pending list
########################################################################################################################
# Converter
########################################################################################################################
class Converter:
    """Converts a vltrace binary log into lists of parsed syscall records.

    Completed syscalls are collected in self.list_ok; packets that cannot
    be matched yet are parked in list_no_exit / list_no_entry / list_others.
    """
    def __init__(self, fileout, max_packets, offline_mode, script_mode, debug_mode, verbose_mode):
        """Set up output modes, logging and the (initially empty) syscall lists."""
        self.script_mode = script_mode
        self.debug_mode = debug_mode
        self.offline_mode = offline_mode
        self.verbose_mode = verbose_mode
        # progress is printed only in quiet offline mode
        self.print_progress = not (self.debug_mode or self.script_mode or not self.offline_mode)
        self.print_single_record = not self.offline_mode
        self.syscall_table = SyscallTable()
        self.syscall = Syscall(0, 0, SyscallInfo("", 0, 0, 0), 0, 0)
        self.buf_size = 0
        if max_packets:
            self.max_packets = int(max_packets)
        else:
            self.max_packets = -1  # -1 means: no packet limit
        log_format = '%(levelname)s(%(name)s): %(message)s'
        if debug_mode:
            level = logging.DEBUG
        elif verbose_mode:
            level = logging.INFO
        else:
            level = logging.WARNING
        if fileout:
            logging.basicConfig(format=log_format, level=level, filename=fileout)
        else:
            logging.basicConfig(format=log_format, level=level)
        self.log_conv = logging.getLogger("converter")
        self.list_ok = list()        # completed syscalls
        self.list_no_exit = list()   # entry seen, exit still missing
        self.list_no_entry = list()  # exit seen, entry still missing
        self.list_others = list()    # non-first packets waiting for a match
    ####################################################################################################################
    def read_syscall_table(self, fh):
        """Read the syscall table from the open log file handle."""
        self.syscall_table.read_syscall_table(fh)
    ####################################################################################################################
    @staticmethod
    def print_always(alist):
        """Unconditionally print every syscall in the given list."""
        for syscall in alist:
            syscall.print_always()
    ####################################################################################################################
    def print_other_lists(self):
        """Warn about and dump any non-empty lists of unmatched syscalls."""
        if len(self.list_no_entry):
            print("\nWARNING: list 'list_no_entry' is not empty!")
            self.print_always(self.list_no_entry)
        if len(self.list_no_exit):
            print("\nWARNING: list 'list_no_exit' is not empty!")
            self.print_always(self.list_no_exit)
        if len(self.list_others):
            print("\nWARNING: list 'list_others' is not empty!")
            self.print_always(self.list_others)
    ####################################################################################################################
    def print_log(self):
        """Print all completed syscalls."""
        self.print_always(self.list_ok)
    ####################################################################################################################
    def process_complete_syscall(self, syscall): # pragma: no cover - overloaded method cannot be tested
        """Handle a completed syscall; in offline mode collect it in list_ok."""
        assert(syscall.is_complete())
        if self.offline_mode:
            self.list_ok.append(syscall)
        return syscall
    ####################################################################################################################
    # look_for_matching_record -- look for matching record in a list of incomplete syscalls
    ####################################################################################################################
    @staticmethod
    def look_for_matching_record(alist, info_all, pid_tid, sc_id, name, retval):
        """Return (and remove) the first matching record in 'alist',
        or SYSCALL_NOT_FOUND when there is none."""
        for syscall in alist:
            check = syscall.check_read_data(info_all, pid_tid, sc_id, name, retval, DEBUG_OFF)
            if check == CHECK_OK:
                alist.remove(syscall)
                return syscall
        return SYSCALL_NOT_FOUND
    ####################################################################################################################
    # decide_what_to_do_next - decide what to do next basing on the check done
    ####################################################################################################################
    def decide_what_to_do_next(self, check, info_all, pid_tid, sc_id, name, retval, timestamp):
        """Map the result of check_read_data onto the next parser action.

        Returns DO_GO_ON to keep filling the current record or DO_REINIT to
        start a fresh one; may move records between the pending lists."""
        if CHECK_NO_EXIT == check:
            syscall = self.look_for_matching_record(self.list_no_entry, self.syscall.info_all, self.syscall.pid_tid,
                                                    self.syscall.sc_id, self.syscall.name, self.syscall.ret)
            if syscall == SYSCALL_NOT_FOUND:
                self.list_no_exit.append(self.syscall)
                self.syscall.log_parse.debug("Notice: no exit info found, packet saved to 'list_no_exit': "
                                             "{0:016X} {1:s}".format(self.syscall.pid_tid, self.syscall.name))
                return DO_REINIT
            self.syscall.log_parse.debug("Notice: found matching exit for syscall: {0:016X} {1:s}"
                                         .format(self.syscall.pid_tid, self.syscall.name))
            self.syscall.save_exit(syscall.ret, syscall.time_end)
            self.syscall.state = STATE_COMPLETED
            self.syscall = self.process_complete_syscall(self.syscall)
            return DO_REINIT
        if CHECK_NO_ENTRY == check:
            syscall = self.look_for_matching_record(self.list_no_exit, info_all, pid_tid, sc_id, name, retval)
            if syscall == SYSCALL_NOT_FOUND:
                self.syscall.log_parse.debug("WARNING: no entry found: exit without entry info found: {0:016X} {1:s}"
                                             .format(pid_tid, name))
                self.syscall.save_exit(retval, timestamp)
                self.list_no_entry.append(self.syscall)
                self.syscall.log_parse.debug("Notice: packet saved (to 'list_no_entry'): {0:016X} {1:s}"
                                             .format(self.syscall.pid_tid, self.syscall.name))
                return DO_REINIT
            self.syscall = syscall
            self.syscall.log_parse.debug("Notice: found matching entry for syscall: {0:016X} {1:s}"
                                         .format(pid_tid, name))
            return DO_GO_ON
        if CHECK_NOT_FIRST_PACKET == check:
            syscall = self.look_for_matching_record(self.list_others, info_all, pid_tid, sc_id, name, retval)
            if syscall == SYSCALL_NOT_FOUND:
                self.syscall.log_parse.debug("WARNING: no matching first packet found: {0:016X} {1:s}"
                                             .format(pid_tid, name))
                return DO_REINIT
            self.syscall = syscall
            self.syscall.log_parse.debug("Notice: found matching first packet for syscall: {0:016X} {1:s}"
                                         .format(pid_tid, name))
            return DO_GO_ON
        if check in (CHECK_SAVE_IN_ENTRY, CHECK_WRONG_ID):
            self.list_others.append(self.syscall)
            self.syscall.log_parse.debug("Notice: packet saved (to 'list_others'): {0:016X} {1:s}"
                                         .format(self.syscall.pid_tid, self.syscall.name))
            return DO_REINIT
        return DO_GO_ON
    ####################################################################################################################
    # analyse_read_data - analyse the read data
    ####################################################################################################################
    def analyse_read_data(self, state, info_all, pid_tid, sc_id, bdata, timestamp):
        """Analyse one packet; return the (new state, current syscall record)."""
        sc_info = self.syscall_table.get(sc_id)
        name = self.syscall_table.name(sc_id)
        retval = self.syscall.get_return_value(bdata)
        result = DO_REINIT
        while result != DO_GO_ON:
            if state == STATE_COMPLETED:
                state = STATE_INIT
            if state == STATE_INIT:
                self.syscall = Syscall(pid_tid, sc_id, sc_info, self.buf_size, self.debug_mode)
            check = self.syscall.check_read_data(info_all, pid_tid, sc_id, name, retval, DEBUG_ON)
            result = self.decide_what_to_do_next(check, info_all, pid_tid, sc_id, name, retval, timestamp)
            if result == DO_REINIT:
                if state == STATE_INIT:
                    self.syscall = Syscall(pid_tid, sc_id, sc_info, self.buf_size, self.debug_mode)
                    return state, self.syscall
                state = STATE_INIT
        return state, self.syscall
    ####################################################################################################################
    # check_signature -- check signature
    ####################################################################################################################
    @staticmethod
    def check_signature(fh, signature):
        """Read 12 bytes from the log and verify they match the expected signature."""
        sign, = read_fmt_data(fh, '12s')
        bsign = bytes(sign)
        sign = str(bsign.decode(errors="ignore"))
        sign = sign.split('\0')[0]
        if sign != signature:
            raise CriticalError("wrong signature of vltrace log: {0:s} (expected: {1:s})".format(sign, signature))
    ####################################################################################################################
    # check_version -- check version
    ####################################################################################################################
    @staticmethod
    def check_version(fh, major, minor):
        """Verify the log's version is at least major.minor."""
        vmajor, vminor, vpatch = read_fmt_data(fh, 'III')
        if vmajor < major or (vmajor == major and vminor < minor):
            raise CriticalError("wrong version of vltrace log: {0:d}.{1:d}.{2:d} (required: {3:d}.{4:d}.0 or later)"
                                .format(vmajor, vminor, vpatch, major, minor))
    ####################################################################################################################
    # check_architecture -- check hardware architecture
    ####################################################################################################################
    @staticmethod
    def check_architecture(fh, architecture):
        """Verify the log was produced on the expected hardware architecture."""
        arch, = read_fmt_data(fh, 'I')
        if arch != architecture:
            if arch in range(len(Archs)):
                iarch = arch
            else:
                iarch = 0
            raise CriticalError("wrong architecture of vltrace log: {0:s} ({1:d}) (required: {2:s})"
                                .format(Archs[iarch], arch, Archs[architecture]))
    ####################################################################################################################
    def process_cwd(self, cwd): # pragma: no cover - overloaded method cannot be tested
        """Handle the current working directory read from the log (no-op here)."""
        return
    ####################################################################################################################
    # read_and_parse_data - read and parse data from a vltrace binary log file
    ####################################################################################################################
    # noinspection PyUnboundLocalVariable
    def read_and_parse_data(self, path_to_trace_log):
        """Read the vltrace binary log, verify its header and parse all packets."""
        sizei = struct.calcsize('i')
        sizeI = struct.calcsize('I')
        sizeQ = struct.calcsize('Q')
        sizeIQQQ = sizeI + 3 * sizeQ
        file_size = 0
        fh = open_file(path_to_trace_log, 'rb')
        try:
            statinfo = stat(path_to_trace_log)
            file_size = statinfo.st_size
            self.check_signature(fh, VLTRACE_TAB_SIGNATURE)
            self.check_version(fh, VLTRACE_VMAJOR, VLTRACE_VMINOR)
            self.check_architecture(fh, ARCH_x86_64)
            self.read_syscall_table(fh)
            self.check_signature(fh, VLTRACE_LOG_SIGNATURE)
            # read and init global buf_size
            self.buf_size, = read_fmt_data(fh, 'i')
            # read length of CWD
            cwd_len, = read_fmt_data(fh, 'i')
            # read CWD
            bdata = read_bdata(fh, cwd_len)
            # decode and set CWD
            cwd = str(bdata.decode(errors="ignore"))
            cwd = cwd.split('\0')[0]
            self.process_cwd(cwd)
            # read header = command line
            data_size, argc = read_fmt_data(fh, 'ii')
            data_size -= sizei # subtract size of argc only, because 'data_size' does not include its own size
            bdata = read_bdata(fh, data_size)
            argv = str(bdata.decode(errors="ignore"))
            argv = argv.replace('\0', ' ')
        except EndOfFile:
            print("ERROR: log file is truncated: {0:s}".format(path_to_trace_log), file=stderr)
            exit(-1)
        except CriticalError as err:
            print("ERROR: {0:s}".format(err.message), file=stderr)
            exit(-1)
        except: # pragma: no cover
            print("ERROR: unexpected error", file=stderr)
            raise
        if not self.script_mode:
            # noinspection PyTypeChecker
            self.log_conv.info("Command line: {0:s}".format(argv))
            # noinspection PyTypeChecker
            self.log_conv.info("Current working directory: {0:s}\n".format(cwd))
            print("Reading packets:")
        n = 0
        state = STATE_INIT
        while True:
            try:
                if n >= self.max_packets > 0:
                    if not self.script_mode:
                        print("done (read maximum number of packets: {0:d})".format(n))
                    break
                # read data from the file
                data_size, info_all, pid_tid, sc_id, timestamp = read_fmt_data(fh, 'IIQQQ')
                # subtract size of all read fields except of 'data_size' itself,
                # because 'data_size' does not include its own size
                data_size -= sizeIQQQ
                bdata = read_bdata(fh, data_size)
                # print progress
                n += 1
                if self.print_progress:
                    print("\r{0:d} ({1:d}% bytes) ".format(n, int((100 * fh.tell()) / file_size)), end=' ')
                # analyse the read data and assign 'self.syscall' appropriately
                state, self.syscall = self.analyse_read_data(state, info_all, pid_tid, sc_id, bdata, timestamp)
                # add the read data to the syscall record
                state = self.syscall.add_data(info_all, bdata, timestamp)
                if state == STATE_COMPLETED and self.syscall.is_complete():
                    self.syscall = self.process_complete_syscall(self.syscall)
                    if self.print_single_record:
                        self.syscall.print_single_record(DEBUG_OFF)
                    elif self.debug_mode:
                        self.syscall.print_single_record(DEBUG_ON)
                if self.syscall.truncated:
                    string = self.syscall.strings[self.syscall.truncated - 1]
                    self.syscall.log_parse.error("string argument is truncated: {0:s}".format(string))
            except CriticalError as err:
                print("ERROR: {0:s}".format(err.message), file=stderr)
                exit(-1)
            except EndOfFile:
                break
            except: # pragma: no cover
                print("ERROR: unexpected error", file=stderr)
                raise
        fh.close()
        if self.print_progress:
            print("\rDone (read {0:d} packets).".format(n))
        # fold whatever is still unmatched into the final, sorted list
        if len(self.list_no_entry):
            self.list_ok += self.list_no_entry
        if len(self.list_no_exit):
            self.list_ok += self.list_no_exit
        if len(self.list_others):
            self.list_ok += self.list_others
        self.list_ok.sort()
| 12,101 | 3,356 | 22 |
dae5b47117a440a5614a8fa262deb3eedab13cd2 | 3,395 | py | Python | python-playground/images-to-tfrecords-shuffled.py | ericdoerheit/playgrounds | 3a2a238496ef21257174097dba6de848aa3e737b | [
"MIT"
] | null | null | null | python-playground/images-to-tfrecords-shuffled.py | ericdoerheit/playgrounds | 3a2a238496ef21257174097dba6de848aa3e737b | [
"MIT"
] | null | null | null | python-playground/images-to-tfrecords-shuffled.py | ericdoerheit/playgrounds | 3a2a238496ef21257174097dba6de848aa3e737b | [
"MIT"
] | null | null | null | """
Create tensorflow TFRecord files from images from the following directory structure:
<input-directory>
<class1>
<image1>
<image2>
...
<class2>
...
"""
import getopt
import os
import sys
import tensorflow as tf
import numpy as np
import png
from tqdm import tqdm
try:
from itertools import imap
except ImportError:
# For Python 3
imap=map
if __name__ == "__main__":
main(sys.argv[1:])
| 29.267241 | 119 | 0.62975 | """
Create tensorflow TFRecord files from images from the following directory structure:
<input-directory>
<class1>
<image1>
<image2>
...
<class2>
...
"""
import getopt
import os
import sys
import tensorflow as tf
import numpy as np
import png
from tqdm import tqdm
try:
from itertools import imap
except ImportError:
# For Python 3
imap=map
def _int64_feature(value):
    """Wrap a single integer in a ``tf.train.Feature`` holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a raw byte string in a ``tf.train.Feature`` holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def main(argv):
    """Convert a directory tree of labelled PNG images into one TFRecord file.

    Expects ``-i/--images`` (a directory containing one sub-folder per class,
    each holding ``.png`` files; folder names must be integer labels — TODO
    confirm with callers) and ``-o/--output`` (destination directory).
    Writes every image as an Example with height/width/label/image features
    to ``<output>/data.tfrecords``, in random order.

    Exits with status 2 on bad options, 1 on missing options; raises
    IOError if the input directory does not exist.
    """
    print("Start converting images into TFRecords.")
    # Parse command line arguments
    usage = """usage: images-to-tfrecords.py -i <images> -o <output-directory>"""
    input_dir_images = None
    output_dir = None
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["help", "images=", "output="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif opt in ("-i", "--images"):
            input_dir_images = arg
        elif opt in ("-o", "--output"):
            output_dir = arg
    if input_dir_images is None or output_dir is None:
        print(usage)
        sys.exit(1)
    if not os.path.exists(input_dir_images):
        raise IOError('Input directory does not exist!')
    # Create output directory if not exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Collect (path, label-folder-name) pairs: one sub-folder per class,
    # keeping only files with a .png extension.
    input_files = []
    for folder in os.listdir(input_dir_images):
        input_files_sub = os.listdir(os.path.join(input_dir_images, folder))
        input_files_sub = list(map(lambda f: os.path.join(os.path.join(input_dir_images, folder), f), input_files_sub))
        input_files_sub = list(filter(lambda f: f.lower().endswith('.png'), input_files_sub))
        input_files_sub = list(map(lambda f: (f, folder), input_files_sub))
        input_files.extend(input_files_sub)
    print("{} files founds.".format(len(input_files)))
    # Shuffle in place so the record file is not ordered by class.
    np.random.shuffle(input_files)
    # Process all images in label folders
    output_file_path = os.path.join(output_dir, 'data.tfrecords')
    writer = tf.python_io.TFRecordWriter(output_file_path)
    for i in tqdm(range(len(input_files))):
        image_file = input_files[i][0]
        label = input_files[i][1]
        reader = png.Reader(image_file)
        width, height, pngdata, meta = reader.asDirect()
        planes = meta['planes']
        # Stack the per-row iterator into (height, width*planes), then
        # reshape to (height, width, planes).
        image_2d = np.vstack(imap(np.uint8, pngdata))
        image = np.reshape(image_2d, (height, width, planes))
        # write label, shape, and image content to the TFRecord file
        example = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(height),
            'width': _int64_feature(width),
            'label': _int64_feature(int(label)),
            # BUGFIX: ndarray.tostring() was deprecated and then removed from
            # NumPy; tobytes() returns the identical raw buffer.
            'image': _bytes_feature(image.tobytes())
        }))
        writer.write(example.SerializeToString())
    writer.close()
    print("Finished.")
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 2,876 | 0 | 69 |
ced3e8ff722a9295036c8a92de0aaf9b10ef51b8 | 5,336 | py | Python | v3/gpu/mpi/common.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | 1 | 2022-02-25T14:17:55.000Z | 2022-02-25T14:17:55.000Z | v3/gpu/mpi/common.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | null | null | null | v3/gpu/mpi/common.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | 1 | 2022-02-20T02:57:10.000Z | 2022-02-20T02:57:10.000Z | import os
import numpy as np
import scipy
import cupy as cp
from cupy.cuda import Device
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
# import socket
from ..common import _start, _finish
# Start timing
# Stop timing
# Initialize solver parameters
| 31.023256 | 171 | 0.579273 | import os
import numpy as np
import scipy
import cupy as cp
from cupy.cuda import Device
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
# import socket
from ..common import _start, _finish
# Start timing
def start(method_name='', k=None):
    """Announce the start of a run and return the current MPI wall-clock time."""
    _start(method_name, k)
    started_at = MPI.Wtime()
    return started_at
# Stop timing
def finish(start_time, isConverged, num_of_iter, final_residual, final_k=None):
    """Report run results and return the elapsed MPI wall-clock time."""
    now = MPI.Wtime()
    elapsed_time = now - start_time
    _finish(elapsed_time, isConverged, num_of_iter, final_residual, final_k)
    return elapsed_time
# Initialize solver parameters
def init(b, x=None, maxiter=None) -> tuple:
    """Prepare solver state on the GPU.

    Moves ``b`` (and ``x``, if given as a NumPy array) to the device, and
    allocates the residual-history and solution-update-count buffers.

    Returns (b, x, maxiter, b_norm, N, residual, num_of_solution_updates).
    """
    T = np.float64
    b = cp.array(b)
    b_norm = cp.linalg.norm(b)
    N = b.size
    if isinstance(x, np.ndarray):
        x = cp.array(x)
    else:
        # No initial guess supplied: start from the zero vector.
        x = cp.zeros(N, dtype=T)
    if maxiter is None:  # idiom fix: compare to None with `is`
        maxiter = N
    residual = cp.zeros(maxiter+1, T)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int yields the same default integer dtype.
    num_of_solution_updates = cp.zeros(maxiter+1, int)
    return b, x, maxiter, b_norm, N, residual, num_of_solution_updates
class MultiGpu(object):
    """Class-level manager that shards a matrix-vector product across the
    local GPUs of one MPI process, then Allgathers the result.

    All state is kept in class attributes; call init() once per process,
    joint_mpi() to attach the communicator, alloc() per matrix, then dot().
    """
    # GPU index bookkeeping
    begin: int = 0            # first GPU id used by this process
    end: int = 0              # last GPU id used by this process
    num_of_gpu: int = 0
    num_of_process: int = 0   # MPI world size (set in joint_mpi)
    # dimensional size
    N: int = 0                # full problem dimension
    local_N: int = 0          # rows owned by this MPI process
    local_local_N: int = 0    # rows owned by one GPU of this process
    # matrix (one shard per GPU)
    A: list = []
    # vector (per-GPU input/output buffers)
    x: list = []
    y: list = []
    out: np.ndarray = None    # per-process result buffer (allocated in alloc)
    # byte size
    nbytes: int = 0
    local_nbytes: int = 0
    local_local_nbytes: int = 0
    # mpi
    comm = None
    # gpu stream
    streams = None
    # Initialize the GPUs
    @classmethod
    def init(cls):
        # ip = socket.gethostbyname(socket.gethostname())
        # rank = os.environ['MV2_COMM_WORLD_RANK']
        # local_rank = os.environ['MV2_COMM_WORLD_LOCAL_RANK']
        # GPU_IDS (e.g. "0,1,2,3") selects a contiguous id range; otherwise
        # every visible device is used.
        if os.environ.get('GPU_IDS') != None:
            ids = os.environ['GPU_IDS'].split(',')
            cls.begin = int(ids[0])
            cls.end = int(ids[-1])
        else:
            cls.begin = 0
            cls.end = getDeviceCount() - 1
        cls.num_of_gpu = cls.end - cls.begin + 1
        cls.streams = [None] * cls.num_of_gpu
        # init memory allocator (managed memory) and one stream per device
        for i in range(cls.begin, cls.end+1):
            Device(i).use()
            pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed)
            cp.cuda.set_allocator(pool.malloc)
            cls.streams[i-cls.begin] = cp.cuda.Stream(non_blocking=False)
            # Enable P2P
            # NOTE(review): hard-coded range(4) assumes at most 4 devices —
            # TODO confirm against deployment.
            for j in range(4):
                if i == j:
                    continue
                cp.cuda.runtime.deviceEnablePeerAccess(j)
    # Allocate memory regions
    @classmethod
    def alloc(cls, local_A, b, T):
        # dimensional size
        cls.local_N, cls.N = local_A.shape
        cls.local_local_N = cls.local_N // cls.num_of_gpu
        # byte size
        cls.nbytes = b.nbytes
        cls.local_nbytes = cls.nbytes // cls.num_of_process
        cls.local_local_nbytes = cls.local_nbytes // cls.num_of_gpu
        # init list
        cls.A = [None] * cls.num_of_gpu
        cls.x = [None] * cls.num_of_gpu
        cls.y = [None] * cls.num_of_gpu
        # divide single A -> multi local_A
        # allocate x, y
        for i in range(cls.begin, cls.end+1):
            Device(i).use()
            index = i-cls.begin
            # row slice of local_A owned by this GPU
            begin, end = index*cls.local_local_N, (index+1)*cls.local_local_N
            # dense (npy) input
            if isinstance(local_A, np.ndarray):
                cls.A[index] = cp.array(local_A[begin:end], T)
            # sparse CSR (npz) input
            elif isinstance(local_A, scipy.sparse.csr.csr_matrix):
                from cupyx.scipy.sparse import csr_matrix
                cls.A[index] = csr_matrix(local_A[begin:end])
            cls.x[index] = cp.zeros(cls.N, T)
            cls.y[index] = cp.zeros(cls.local_local_N, T)
        # init out vector
        cls.out = cp.zeros(cls.local_N, T)
    # Matrix-vector product using multiple GPUs
    @classmethod
    def dot(cls, local_A, x, out):
        # Copy vector data to All devices (x lives on device cls.end)
        for i in range(cls.begin, cls.end+1):
            # Device(i).use()
            index = i-cls.begin
            # cp.cuda.runtime.memcpyPeerAsync(cls.x[index].data.ptr, i, x.data.ptr, cls.end, cls.nbytes, cls.streams[index].ptr)
            cp.cuda.runtime.memcpyPeer(cls.x[index].data.ptr, i, x.data.ptr, cls.end, cls.nbytes)
        # per-GPU partial products
        for i in range(cls.begin, cls.end+1):
            index = i-cls.begin
            Device(i).use()
            # cls.streams[index].synchronize()
            cls.y[index] = cls.A[index].dot(cls.x[index])
        # Gather caculated element from All devices back onto device cls.end
        for i in range(cls.begin, cls.end+1):
            Device(i).synchronize()
            index = i-cls.begin
            # cp.cuda.runtime.memcpyPeerAsync(cls.out[index*cls.local_local_N].data.ptr, cls.end, cls.y[index].data.ptr, i, cls.local_local_nbytes, cls.streams[index].ptr)
            cp.cuda.runtime.memcpyPeer(cls.out[index*cls.local_local_N].data.ptr, cls.end, cls.y[index].data.ptr, i, cls.y[index].nbytes)
        # # sync
        # for i in range(cls.begin, cls.end+1):
        #     index = i-cls.begin
        #     cls.streams[index].synchronize()
        # exchange local results across MPI ranks into the full vector
        cls.comm.Allgather(cls.out, out)
        # return
        return out
    # attach the MPI communicator and record the world size
    @classmethod
    def joint_mpi(cls, comm):
        cls.comm = comm
        cls.num_of_process = comm.Get_size()
| 4,300 | 758 | 89 |
f8ca889431a4ebf34d3e90527fa88b3a952172da | 1,090 | py | Python | dbutils.py | Keda87/odoa-telegram-bot | 8f0d1fc8880d258a42e41203239b3f56ea078260 | [
"MIT"
] | 1 | 2020-10-04T07:02:40.000Z | 2020-10-04T07:02:40.000Z | dbutils.py | Keda87/odoa-telegram-bot | 8f0d1fc8880d258a42e41203239b3f56ea078260 | [
"MIT"
] | null | null | null | dbutils.py | Keda87/odoa-telegram-bot | 8f0d1fc8880d258a42e41203239b3f56ea078260 | [
"MIT"
] | null | null | null | import sqlite3
from skylark import Model, Field, Database
| 24.222222 | 67 | 0.599083 | import sqlite3
from skylark import Model, Field, Database
class Subscriber(Model):
    # skylark ORM model: one row per subscribed Telegram user.
    # telegram_id is presumably unique — DBUtils.insert treats
    # sqlite3.IntegrityError as "already subscribed"; TODO confirm schema.
    telegram_id = Field()
    username = Field()
    first_name = Field()
    last_name = Field()
class DBUtils(object):
    """Thin data-access layer over the Subscriber model (SQLite backend)."""

    def __init__(self, db_name):
        """Bind skylark to the sqlite3 driver and the given database file."""
        Database.set_dbapi(sqlite3)
        Database.config(db=db_name)

    def insert(self, telegram_id, username, first_name, last_name):
        """Create a subscriber row; return False on a duplicate, True otherwise."""
        try:
            Subscriber.create(
                telegram_id=telegram_id,
                username=username,
                first_name=first_name,
                last_name=last_name,
            )
        except sqlite3.IntegrityError:
            return False
        return True

    def get(self, telegram_id):
        """Return the subscriber row for telegram_id, if any."""
        return Subscriber.findone(telegram_id=telegram_id)

    def delete(self, telegram_id):
        """Remove the subscriber; return True if a row was deleted."""
        row = self.get(telegram_id=telegram_id)
        if not row:
            return False
        row.destroy()
        return True

    def fetch_all(self):
        """Return a query over all subscriber telegram_ids."""
        return Subscriber.select(Subscriber.telegram_id)
| 745 | 102 | 181 |
bdbb59d5544b3fd4db7250030fd714153b0057ff | 320 | py | Python | BasicPythonPrograms/PythonInheritance8.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonInheritance8.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonInheritance8.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | #Python Program to demonstrate single inheritance
#Base class
#Derived class
#Driver's code
object=Child()
object.fun1()
object.fun2() | 24.615385 | 52 | 0.6875 | #Python Program to demonstrate single inheritance
#Base class
class Parent:
    """Base class for the single-inheritance demonstration."""

    def fun1(self):
        """Print a message identifying this method's defining class."""
        message = "This is function is in parent class"
        print(message)
#Derived class
class Child(Parent):
    """Derived class: inherits fun1 from Parent and adds fun2."""

    def fun2(self):
        """Print a message identifying this method's defining class."""
        text = "This function is in child class"
        print(text)
#Driver's code
# Driver's code: exercise the inherited method and the child's own method.
# BUGFIX: the original variable was named `object`, shadowing the builtin.
child = Child()
child.fun1()
child.fun2()
b60e94cd67d6801b23ed777d125e48d3e7fb96ad | 10,639 | py | Python | networks.py | kemingzeng/BBR-Net | b5372f6e666433b3554ccbca589da12a60e4a71f | [
"MIT"
] | null | null | null | networks.py | kemingzeng/BBR-Net | b5372f6e666433b3554ccbca589da12a60e4a71f | [
"MIT"
] | null | null | null | networks.py | kemingzeng/BBR-Net | b5372f6e666433b3554ccbca589da12a60e4a71f | [
"MIT"
] | 1 | 2018-10-22T13:53:22.000Z | 2018-10-22T13:53:22.000Z | import numpy as np
import random
import tensorflow as tf
class QNetwork(object):
"""
Base class for QNetworks.
"""
def copy_to(self, dst_net):
"""
mn = ModelNetwork(2, 3, 0, "actor")
mn_target = ModelNetwork(2, 3, 0, "target_actor")
s=tf.InteractiveSession()
s.run( tf.initialize_all_variables() )
mn.copy_to(mn_target)
"""
v1 = self.variables()
v2 = dst_net.variables()
for i in range(len(v1)):
v2[i].assign( v1[i] ).eval()
class QNetworkNIPS(QNetwork):
"""
QNetwork used in ``Playing Atari with Deep Reinforcement Learning'', [Mnih et al., 2013].
It's a Convolutional Neural Network with the following specs:
L1: 16 8x8 filters with stride 4 + RELU
L2: 32 4x4 filters with stride 2 + RELU
L3: 256 unit Fully-Connected layer + RELU
L4: [output_size] output units, Fully-Connected
"""
class QNetworkNature(QNetwork):
"""
QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
It's a Convolutional Neural Network with the following specs:
L1: 32 8x8 filters with stride 4 + RELU
L2: 64 4x4 filters with stride 2 + RELU
L3: 64 3x3 fitlers with stride 1 + RELU
L4: 512 unit Fully-Connected layer + RELU
L5: [output_size] output units, Fully-Connected
"""
class QNetworkDueling(QNetwork):
"""
QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
It's a Convolutional Neural Network with the following specs:
L1: 32 8x8 filters with stride 4 + RELU
L2: 64 4x4 filters with stride 2 + RELU
L3: 64 3x3 fitlers with stride 1 + RELU
L4a: 512 unit Fully-Connected layer + RELU
L4b: 512 unit Fully-Connected layer + RELU
L5a: 1 unit FC + RELU (State Value)
L5b: #actions FC + RELU (Advantage Value)
L6: Aggregate V(s)+A(s,a)
"""
| 31.017493 | 148 | 0.696306 | import numpy as np
import random
import tensorflow as tf
class QNetwork(object):
    """
    Base class for QNetworks.

    Provides weight/bias initializers, per-scope variable listing, hard
    parameter copies between networks, and a parameter-count printout.
    """

    def __init__(self, input_size, output_size, name):
        # `name` doubles as the tf variable scope for this network.
        self.name = name

    def weight_variable(self, shape, fanin=0):
        """Create a weight Variable; fan-in-scaled uniform init when fanin > 0,
        otherwise a small truncated normal."""
        if fanin == 0:
            initial = tf.truncated_normal(shape, stddev=0.01)
        else:
            # BUGFIX: the original called math.sqrt, but `math` is never
            # imported in this module (NameError at runtime); np.sqrt is
            # equivalent for a scalar.
            mod_init = 1.0 / np.sqrt(fanin)
            initial = tf.random_uniform(shape, minval=-mod_init, maxval=mod_init)
        return tf.Variable(initial)

    def bias_variable(self, shape, fanin=0):
        """Create a bias Variable; fan-in-scaled uniform init when fanin > 0,
        otherwise a small constant."""
        if fanin == 0:
            initial = tf.constant(0.01, shape=shape)
        else:
            # BUGFIX: math.sqrt -> np.sqrt (`math` was not imported).
            mod_init = 1.0 / np.sqrt(fanin)
            initial = tf.random_uniform(shape, minval=-mod_init, maxval=mod_init)
        return tf.Variable(initial)

    def variables(self):
        """Return the trainable variables scoped under this network's name."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)

    def copy_to(self, dst_net):
        """
        mn = ModelNetwork(2, 3, 0, "actor")
        mn_target = ModelNetwork(2, 3, 0, "target_actor")
        s=tf.InteractiveSession()
        s.run( tf.initialize_all_variables() )
        mn.copy_to(mn_target)
        """
        v1 = self.variables()
        v2 = dst_net.variables()
        # Hard copy: assign each of our variables into the destination
        # network, relying on matching creation order within the scopes.
        for i in range(len(v1)):
            v2[i].assign(v1[i]).eval()

    def print_num_of_parameters(self):
        """Print the total parameter count of this network (also in millions)."""
        list_vars = self.variables()
        total_parameters = 0
        for variable in list_vars:
            # shape is an array of tf.Dimension; multiply the dims out.
            shape = variable.get_shape()
            variable_parameters = 1  # typo fix: was `variable_parametes`
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('# of parameters in network ', self.name, ': ', total_parameters, ' -> ', np.round(float(total_parameters)/1000000.0, 2), 'M')
class QNetworkNIPS(QNetwork):
    """
    QNetwork used in ``Playing Atari with Deep Reinforcement Learning'', [Mnih et al., 2013].
    It's a Convolutional Neural Network with the following specs:
        L1: 16 8x8 filters with stride 4 + RELU
        L2: 32 4x4 filters with stride 2 + RELU
        L3: 256 unit Fully-Connected layer + RELU
        L4: [output_size] output units, Fully-Connected
    """
    def __init__(self, input_size, output_size, name):
        self.name = name
        self.input_size = input_size
        self.output_size = output_size
        # Build all parameters under this network's variable scope.
        with tf.variable_scope(self.name):
            ## 16 8x8 filters, stride=4; 32 4x4 filters, stride=2; 256 fc layer, output layer
            self.W_conv1 = self.weight_variable([8, 8, 4, 16]) # 16 8x8 filters over 4 channels (frames)
            self.B_conv1 = self.bias_variable([16])
            self.stride1 = 4
            self.W_conv2 = self.weight_variable([4, 4, 16, 32])
            self.B_conv2 = self.bias_variable([32])
            self.stride2 = 2
            # FC layer (9*9*32 matches the conv output for 84x84 input with VALID padding)
            self.W_fc4 = self.weight_variable([9*9*32, 256])#, fanin=11*11*32)
            self.B_fc4 = self.bias_variable([256])#, fanin=11*11*32)
            # FC output layer (one Q-value per action)
            self.W_fc5 = self.weight_variable([256, self.output_size])#, fanin=256)
            self.B_fc5 = self.bias_variable([self.output_size])#, fanin=256)
            # Print number of parameters in the network
            self.print_num_of_parameters()
    def __call__(self, input_tensor):
        # A list of tensors is concatenated along axis 1 (TF<1.0 arg order).
        if type(input_tensor) == list:
            input_tensor = tf.concat(1, input_tensor)
        with tf.variable_scope(self.name):
            # input_tensor is (84, 84, 4)
            self.h_conv1 = tf.nn.relu( tf.nn.conv2d(input_tensor, self.W_conv1, strides=[1, self.stride1, self.stride1, 1], padding='VALID') + self.B_conv1 )
            # max pooling: self.h_pool1 = tf.nn.max_pool(self.h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            self.h_conv2 = tf.nn.relu( tf.nn.conv2d(self.h_conv1, self.W_conv2, strides=[1, self.stride2, self.stride2, 1], padding='VALID') + self.B_conv2 )
            #self.h_conv3 = tf.nn.relu( tf.nn.conv2d(self.h_conv2, self.W_conv3, strides=[1, self.stride3, self.stride3, 1], padding='SAME') + self.B_conv3 )
            # Flatten the conv output, then two fully-connected layers.
            self.h_conv2_flat = tf.reshape(self.h_conv2, [-1, 9*9*32])
            self.h_fc4 = tf.nn.relu(tf.matmul(self.h_conv2_flat, self.W_fc4) + self.B_fc4)
            self.h_fc5 = tf.identity(tf.matmul(self.h_fc4, self.W_fc5) + self.B_fc5)
            return self.h_fc5
class QNetworkNature(QNetwork):
    """
    QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
    It's a Convolutional Neural Network with the following specs:
        L1: 32 8x8 filters with stride 4 + RELU
        L2: 64 4x4 filters with stride 2 + RELU
        L3: 64 3x3 fitlers with stride 1 + RELU
        L4: 512 unit Fully-Connected layer + RELU
        L5: [output_size] output units, Fully-Connected
    """
    def __init__(self, input_size, output_size, name):
        self.name = name
        self.input_size = input_size
        self.output_size = output_size
        # Build all parameters under this network's variable scope.
        with tf.variable_scope(self.name):
            ## 32 8x8 filters, stride=4; 64 4x4 filters, stride=2; 64 3x3, stride 1; 512 fc layer, output layer
            self.W_conv1 = self.weight_variable([8, 8, 4, 32]) # 32 8x8 filters over 4 channels (frames)
            self.B_conv1 = self.bias_variable([32])
            self.stride1 = 4
            self.W_conv2 = self.weight_variable([4, 4, 32, 64])
            self.B_conv2 = self.bias_variable([64])
            self.stride2 = 2
            self.W_conv3 = self.weight_variable([3, 3, 64, 64])
            self.B_conv3 = self.bias_variable([64])
            self.stride3 = 1
            # FC layer (7*7*64 matches the conv stack output for 84x84 input)
            self.W_fc4 = self.weight_variable([7*7*64, 512])#, fanin=11*11*32)
            self.B_fc4 = self.bias_variable([512])#, fanin=11*11*32)
            # FC output layer (one Q-value per action)
            self.W_fc5 = self.weight_variable([512, self.output_size])#, fanin=256)
            self.B_fc5 = self.bias_variable([self.output_size])#, fanin=256)
            # Print number of parameters in the network
            self.print_num_of_parameters()
    def __call__(self, input_tensor):
        # A list of tensors is concatenated along axis 1 (TF<1.0 arg order).
        if type(input_tensor) == list:
            input_tensor = tf.concat(1, input_tensor)
        with tf.variable_scope(self.name):
            # input_tensor is (84, 84, 4)
            self.h_conv1 = tf.nn.relu( tf.nn.conv2d(input_tensor, self.W_conv1, strides=[1, self.stride1, self.stride1, 1], padding='VALID') + self.B_conv1 )
            self.h_conv2 = tf.nn.relu( tf.nn.conv2d(self.h_conv1, self.W_conv2, strides=[1, self.stride2, self.stride2, 1], padding='VALID') + self.B_conv2 )
            self.h_conv3 = tf.nn.relu( tf.nn.conv2d(self.h_conv2, self.W_conv3, strides=[1, self.stride3, self.stride3, 1], padding='VALID') + self.B_conv3 )
            # Flatten the conv output, then two fully-connected layers.
            self.h_conv3_flat = tf.reshape(self.h_conv3, [-1, 7*7*64])
            self.h_fc4 = tf.nn.relu(tf.matmul(self.h_conv3_flat, self.W_fc4) + self.B_fc4)
            self.h_fc5 = tf.identity(tf.matmul(self.h_fc4, self.W_fc5) + self.B_fc5)
            return self.h_fc5
class QNetworkDueling(QNetwork):
    """
    QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
    It's a Convolutional Neural Network with the following specs:
        L1: 32 8x8 filters with stride 4 + RELU
        L2: 64 4x4 filters with stride 2 + RELU
        L3: 64 3x3 fitlers with stride 1 + RELU
        L4a: 512 unit Fully-Connected layer + RELU
        L4b: 512 unit Fully-Connected layer + RELU
        L5a: 1 unit FC + RELU (State Value)
        L5b: #actions FC + RELU (Advantage Value)
        L6: Aggregate V(s)+A(s,a)
    """
    def __init__(self, input_size, output_size, name):
        self.name = name
        self.input_size = input_size
        self.output_size = output_size
        # Build all parameters under this network's variable scope.
        with tf.variable_scope(self.name):
            self.W_conv1 = self.weight_variable([8, 8, 4, 32]) # 32 8x8 filters over 4 channels (frames)
            self.B_conv1 = self.bias_variable([32])
            self.stride1 = 4
            self.W_conv2 = self.weight_variable([4, 4, 32, 64])
            self.B_conv2 = self.bias_variable([64])
            self.stride2 = 2
            self.W_conv3 = self.weight_variable([3, 3, 64, 64])
            self.B_conv3 = self.bias_variable([64])
            self.stride3 = 1
            # Two parallel FC streams: 4a feeds the value head, 4b the advantage head.
            self.W_fc4a = self.weight_variable([7*7*64, 512])#, fanin=11*11*32)
            self.B_fc4a = self.bias_variable([512])#, fanin=11*11*32)
            self.W_fc4b = self.weight_variable([7*7*64, 512])#, fanin=11*11*32)
            self.B_fc4b = self.bias_variable([512])#, fanin=11*11*32)
            # Heads: 5a -> scalar state value, 5b -> per-action advantage.
            self.W_fc5a = self.weight_variable([512, 1])#, fanin=256)
            self.B_fc5a = self.bias_variable([1])#, fanin=256)
            self.W_fc5b = self.weight_variable([512, self.output_size])#, fanin=256)
            self.B_fc5b = self.bias_variable([self.output_size])#, fanin=256)
            # Print number of parameters in the network
            self.print_num_of_parameters()
    def __call__(self, input_tensor):
        # A list of tensors is concatenated along axis 1 (TF<1.0 arg order).
        if type(input_tensor) == list:
            input_tensor = tf.concat(1, input_tensor)
        with tf.variable_scope(self.name):
            # input_tensor is (84, 84, 4)
            self.h_conv1 = tf.nn.relu( tf.nn.conv2d(input_tensor, self.W_conv1, strides=[1, self.stride1, self.stride1, 1], padding='VALID') + self.B_conv1 )
            self.h_conv2 = tf.nn.relu( tf.nn.conv2d(self.h_conv1, self.W_conv2, strides=[1, self.stride2, self.stride2, 1], padding='VALID') + self.B_conv2 )
            self.h_conv3 = tf.nn.relu( tf.nn.conv2d(self.h_conv2, self.W_conv3, strides=[1, self.stride3, self.stride3, 1], padding='VALID') + self.B_conv3 )
            self.h_conv3_flat = tf.reshape(self.h_conv3, [-1, 7*7*64])
            self.h_fc4a = tf.nn.relu(tf.matmul(self.h_conv3_flat, self.W_fc4a) + self.B_fc4a)
            self.h_fc4b = tf.nn.relu(tf.matmul(self.h_conv3_flat, self.W_fc4b) + self.B_fc4b)
            self.h_fc5a_value = tf.identity(tf.matmul(self.h_fc4a, self.W_fc5a) + self.B_fc5a)
            self.h_fc5b_advantage = tf.identity(tf.matmul(self.h_fc4b, self.W_fc5b) + self.B_fc5b)
            # Dueling aggregation: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a)).
            self.h_fc6 = self.h_fc5a_value + ( self.h_fc5b_advantage - tf.reduce_mean(self.h_fc5b_advantage, reduction_indices=[1,], keep_dims=True) )
            return self.h_fc6
class ActorCritic_MLP(QNetwork):
    """Two-hidden-layer MLP (200 units each, ReLU) usable as either an actor
    (tanh output) or a critic (linear output)."""
    def __init__(self, input_size, output_size, actor_or_critic, name):
        """
        actor_or_critic=0 for actor, 1 for critic. The only difference is in the output transfer function (tanh for actor, identity for critic)
        """
        self.name = name
        self.actor_or_critic = actor_or_critic
        self.input_size = input_size
        self.output_size = output_size
        # Build all parameters under this network's variable scope.
        with tf.variable_scope(self.name):
            l1 = 200
            l2 = 200
            self.W_fc1 = self.weight_variable([self.input_size, l1])
            self.B_fc1 = self.bias_variable([l1])
            self.W_fc2 = self.weight_variable([l1, l2])
            self.B_fc2 = self.bias_variable([l2])
            self.W_out = self.weight_variable([l2, self.output_size])
            self.B_out = self.bias_variable([self.output_size])
            # Print number of parameters in the network
            self.print_num_of_parameters()
    def __call__(self, input_tensor):
        # A list of tensors is concatenated along axis 1 (TF<1.0 arg order).
        if type(input_tensor) == list:
            input_tensor = tf.concat(1, input_tensor)
        with tf.variable_scope(self.name):
            self.h_fc1 = tf.nn.relu(tf.matmul(input_tensor, self.W_fc1) + self.B_fc1)
            self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1, self.W_fc2) + self.B_fc2)
            if self.actor_or_critic==0: # ACTOR
                self.out = tf.nn.tanh(tf.matmul(self.h_fc2, self.W_out) + self.B_out)
            else: # CRITIC
                self.out = tf.identity(tf.matmul(self.h_fc2, self.W_out) + self.B_out)
            return self.out
| 7,678 | 831 | 283 |
dd95fe45869a58b90f4f796a86e3443cc5bc6a93 | 8,503 | py | Python | plugins/modules/oci_data_safe_grant_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_data_safe_grant_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_data_safe_grant_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_grant_facts
short_description: Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
- Gets a list of grants for a particular user in the specified user assessment. A user grant contains details such as the
privilege name, type, category, and depth level. The depth level indicates how deep in the hierarchy of roles granted to
roles a privilege grant is. The userKey in this operation is a system-generated identifier. Perform the operation ListUsers
to get the userKey for a particular user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
user_assessment_id:
description:
- The OCID of the user assessment.
type: str
required: true
user_key:
description:
- The unique user key. This is a system-generated identifier. ListUsers gets the user key for a user.
type: str
required: true
grant_key:
description:
- A filter to return only items that match the specified user grant key.
type: str
grant_name:
description:
- A filter to return only items that match the specified user grant name.
type: str
privilege_type:
description:
- A filter to return only items that match the specified privilege grant type.
type: str
privilege_category:
description:
- A filter to return only items that match the specified user privilege category.
type: str
depth_level:
description:
- A filter to return only items that match the specified user grant depth level.
type: int
depth_level_greater_than_or_equal_to:
description:
- A filter to return only items that are at a level greater than or equal to the specified user grant depth level.
type: int
depth_level_less_than:
description:
- A filter to return only items that are at a level less than the specified user grant depth level.
type: int
sort_order:
description:
- The sort order to use, either ascending (ASC) or descending (DESC).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. You can specify only one sort order (sortOrder). The default order for grantName is ascending.
type: str
choices:
- "grantName"
- "grantType"
- "privilegeCategory"
- "depthLevel"
- "key"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List grants
oci_data_safe_grant_facts:
# required
user_assessment_id: "ocid1.userassessment.oc1..xxxxxxEXAMPLExxxxxx"
user_key: user_key_example
# optional
grant_key: grant_key_example
grant_name: grant_name_example
privilege_type: privilege_type_example
privilege_category: privilege_category_example
depth_level: 56
depth_level_greater_than_or_equal_to: 56
depth_level_less_than: 56
sort_order: ASC
sort_by: grantName
"""
RETURN = """
grants:
description:
- List of Grant resources
returned: on success
type: complex
contains:
key:
description:
- The unique key of a user grant.
returned: on success
type: str
sample: key_example
grant_name:
description:
- The name of a user grant.
returned: on success
type: str
sample: grant_name_example
privilege_type:
description:
- The type of a user grant.
returned: on success
type: str
sample: SYSTEM_PRIVILEGE
privilege_category:
description:
- The privilege category.
returned: on success
type: str
sample: CRITICAL
depth_level:
description:
- The grant depth level of the indirect grant.
An indirectly granted role/privilege is granted to the user through another role.
The depth level indicates how deep a privilege is within the grant hierarchy.
returned: on success
type: int
sample: 56
sample: [{
"key": "key_example",
"grant_name": "grant_name_example",
"privilege_type": "SYSTEM_PRIVILEGE",
"privilege_category": "CRITICAL",
"depth_level": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeGrantFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
DataSafeGrantFactsHelperCustom = get_custom_class("DataSafeGrantFactsHelperCustom")
if __name__ == "__main__":
main()
| 31.966165 | 130 | 0.629072 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_grant_facts
short_description: Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
- Gets a list of grants for a particular user in the specified user assessment. A user grant contains details such as the
privilege name, type, category, and depth level. The depth level indicates how deep in the hierarchy of roles granted to
roles a privilege grant is. The userKey in this operation is a system-generated identifier. Perform the operation ListUsers
to get the userKey for a particular user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
user_assessment_id:
description:
- The OCID of the user assessment.
type: str
required: true
user_key:
description:
- The unique user key. This is a system-generated identifier. ListUsers gets the user key for a user.
type: str
required: true
grant_key:
description:
- A filter to return only items that match the specified user grant key.
type: str
grant_name:
description:
- A filter to return only items that match the specified user grant name.
type: str
privilege_type:
description:
- A filter to return only items that match the specified privilege grant type.
type: str
privilege_category:
description:
- A filter to return only items that match the specified user privilege category.
type: str
depth_level:
description:
- A filter to return only items that match the specified user grant depth level.
type: int
depth_level_greater_than_or_equal_to:
description:
- A filter to return only items that are at a level greater than or equal to the specified user grant depth level.
type: int
depth_level_less_than:
description:
- A filter to return only items that are at a level less than the specified user grant depth level.
type: int
sort_order:
description:
- The sort order to use, either ascending (ASC) or descending (DESC).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. You can specify only one sort order (sortOrder). The default order for grantName is ascending.
type: str
choices:
- "grantName"
- "grantType"
- "privilegeCategory"
- "depthLevel"
- "key"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List grants
oci_data_safe_grant_facts:
# required
user_assessment_id: "ocid1.userassessment.oc1..xxxxxxEXAMPLExxxxxx"
user_key: user_key_example
# optional
grant_key: grant_key_example
grant_name: grant_name_example
privilege_type: privilege_type_example
privilege_category: privilege_category_example
depth_level: 56
depth_level_greater_than_or_equal_to: 56
depth_level_less_than: 56
sort_order: ASC
sort_by: grantName
"""
RETURN = """
grants:
description:
- List of Grant resources
returned: on success
type: complex
contains:
key:
description:
- The unique key of a user grant.
returned: on success
type: str
sample: key_example
grant_name:
description:
- The name of a user grant.
returned: on success
type: str
sample: grant_name_example
privilege_type:
description:
- The type of a user grant.
returned: on success
type: str
sample: SYSTEM_PRIVILEGE
privilege_category:
description:
- The privilege category.
returned: on success
type: str
sample: CRITICAL
depth_level:
description:
- The grant depth level of the indirect grant.
An indirectly granted role/privilege is granted to the user through another role.
The depth level indicates how deep a privilege is within the grant hierarchy.
returned: on success
type: int
sample: 56
sample: [{
"key": "key_example",
"grant_name": "grant_name_example",
"privilege_type": "SYSTEM_PRIVILEGE",
"privilege_category": "CRITICAL",
"depth_level": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# Probe for the optional OCI Python SDK. Import failure is recorded rather
# than raised so that main() can report a clean module.fail_json() message
# instead of an import-time traceback.
try:
    from oci.data_safe import DataSafeClient
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class DataSafeGrantFactsHelperGen(OCIResourceFactsHelperBase):
    """Facts helper for Data Safe user grants.

    Supported operations: list
    """

    def get_required_params_for_list(self):
        """Return the parameter names that must be present to list grants."""
        return [
            "user_assessment_id",
            "user_key",
        ]

    def list_resources(self):
        """Call list_grants, forwarding only the filters the user supplied."""
        optional_param_names = (
            "grant_key",
            "grant_name",
            "privilege_type",
            "privilege_category",
            "depth_level",
            "depth_level_greater_than_or_equal_to",
            "depth_level_less_than",
            "sort_order",
            "sort_by",
        )
        # Drop unset (None) options so the SDK applies its own defaults.
        filters = {
            name: self.module.params[name]
            for name in optional_param_names
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_grants,
            user_assessment_id=self.module.params.get("user_assessment_id"),
            user_key=self.module.params.get("user_key"),
            **filters
        )
# Resolve an optional user-supplied customization class by name; when none is
# registered, get_custom_class returns a pass-through base class.
DataSafeGrantFactsHelperCustom = get_custom_class("DataSafeGrantFactsHelperCustom")
class ResourceFactsHelper(DataSafeGrantFactsHelperCustom, DataSafeGrantFactsHelperGen):
    """Combines generated list logic with any registered customizations (MRO: custom first)."""
    pass
def main():
    """Ansible entry point: build the argument spec, validate the SDK is
    available, and return the list of Data Safe grants via exit_json."""
    spec = oci_common_utils.get_common_arg_spec()
    # Module-specific parameters layered on top of the common OCI spec.
    spec.update(
        user_assessment_id=dict(type="str", required=True),
        user_key=dict(type="str", required=True, no_log=True),
        grant_key=dict(type="str", no_log=True),
        grant_name=dict(type="str"),
        privilege_type=dict(type="str"),
        privilege_category=dict(type="str"),
        depth_level=dict(type="int"),
        depth_level_greater_than_or_equal_to=dict(type="int"),
        depth_level_less_than=dict(type="int"),
        sort_order=dict(type="str", choices=["ASC", "DESC"]),
        sort_by=dict(
            type="str",
            choices=[
                "grantName",
                "grantType",
                "privilegeCategory",
                "depthLevel",
                "key",
            ],
        ),
    )

    module = AnsibleModule(argument_spec=spec)

    # The SDK import is probed at module load; fail cleanly here if missing.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    helper = ResourceFactsHelper(
        module=module,
        resource_type="grant",
        service_client_class=DataSafeClient,
        namespace="data_safe",
    )

    grants = []
    if helper.is_list():
        grants = helper.list()
    else:
        # Required list parameters are absent; fail() reports and exits.
        helper.fail()
    module.exit_json(grants=grants)
if __name__ == "__main__":
main()
| 2,358 | 75 | 100 |
e3e6a22817ee365d69754e9fc90b75a1cbd42407 | 817 | py | Python | rl_parsers/dpomdp/__init__.py | DavidSlayback/rl-parsers | df33cd1814aaeac05b057330ccf742b3310f36df | [
"MIT"
] | null | null | null | rl_parsers/dpomdp/__init__.py | DavidSlayback/rl-parsers | df33cd1814aaeac05b057330ccf742b3310f36df | [
"MIT"
] | null | null | null | rl_parsers/dpomdp/__init__.py | DavidSlayback/rl-parsers | df33cd1814aaeac05b057330ccf742b3310f36df | [
"MIT"
] | 2 | 2020-09-27T15:02:32.000Z | 2021-08-24T22:43:21.000Z | from ply import lex, yacc
from . import tokrules
from .parser import DPOMDP, Parser
__all__ = ['DPOMDP_Parser', 'parse']
# legacy
| 27.233333 | 77 | 0.656059 | from ply import lex, yacc
from . import tokrules
from .parser import DPOMDP, Parser
__all__ = ['DPOMDP_Parser', 'parse']
class DPOMDP_Parser:
    """Parses Dec-POMDP (.dpomdp) descriptions into DPOMDP objects.

    The lexer is built once at construction and reused; a fresh yacc parser
    is built per parse call.
    """

    def __init__(self, *, debug=False, optimize=True):
        self.debug = debug
        self.optimize = optimize
        # Build the lexer once; it is shared across parse calls.
        self.lexer = lex.lex(module=tokrules, debug=debug, optimize=optimize)

    def parse_file(self, filename: str) -> DPOMDP:
        """Read *filename* and parse its contents."""
        with open(filename) as fp:
            text = fp.read()
        return self.parse_string(text)

    def parse_string(self, string: str) -> DPOMDP:
        """Parse a .dpomdp description supplied as a string."""
        grammar = yacc.yacc(
            module=Parser(), debug=self.debug, optimize=self.optimize
        )
        return grammar.parse(string, lexer=self.lexer)
# legacy
def parse(string: str, **kwargs) -> DPOMDP:
    """Module-level convenience wrapper: build a DPOMDP_Parser with *kwargs*
    and parse *string* with it."""
    return DPOMDP_Parser(**kwargs).parse_string(string)
| 558 | -1 | 125 |
ec524e99e9c8a7435f5b962ac29de88cde7f4591 | 5,412 | py | Python | cgra/CGRAFL.py | pnnl/open-cgra | c19bc3a1baca3da659dc8f3cbfe32dd6003a2c65 | [
"BSD-3-Clause"
] | 13 | 2020-09-01T16:55:42.000Z | 2021-09-01T16:39:44.000Z | cgra/CGRAFL.py | pnnl/open-cgra | c19bc3a1baca3da659dc8f3cbfe32dd6003a2c65 | [
"BSD-3-Clause"
] | 5 | 2020-08-19T05:40:01.000Z | 2021-07-15T22:05:03.000Z | cgra/CGRAFL.py | pnnl/open-cgra | c19bc3a1baca3da659dc8f3cbfe32dd6003a2c65 | [
"BSD-3-Clause"
] | 4 | 2020-09-01T16:44:02.000Z | 2021-07-11T04:40:05.000Z | """
=========================================================================
CGRAFL.py
=========================================================================
CGRAFL -- running DFG nodes one by one.
Author : Cheng Tan
Date : Feb 13, 2020
"""
from pymtl3 import *
from ..lib.opt_type import *
from ..lib.messages import *
#------------------------------------------------------------------------
# Assuming that the elements in FuDFG are already ordered well.
#------------------------------------------------------------------------
| 41 | 122 | 0.576681 | """
=========================================================================
CGRAFL.py
=========================================================================
CGRAFL -- running DFG nodes one by one.
Author : Cheng Tan
Date : Feb 13, 2020
"""
from pymtl3 import *
from ..lib.opt_type import *
from ..lib.messages import *
#------------------------------------------------------------------------
# Assuming that the elements in FuDFG are already ordered well.
#------------------------------------------------------------------------
def CGRAFL( FuDFG, DataType, CtrlType, src_const ):#, data_spm ):
  """Functional-level interpreter for a functional-unit DFG.

  Repeatedly executes FuDFG's nodes in their given order until a branch
  node sets the live-out control predicate, then returns the tuple
  (live_out_val.payload, data_spm).

  Parameters:
    FuDFG     -- dataflow graph; nodes are assumed already topologically
                 ordered (see module header).
    DataType  -- message constructor; used here as DataType(payload,
                 predicate) where predicate is a validity bit.
    CtrlType  -- unused in this function (kept for interface symmetry).
    src_const -- indexable pool of constant operands, selected per node
                 via node.const_index.
  """
  live_out_val = DataType( 0, 0 )
  live_out_ctrl = DataType( 0, 0 )
  # Scratchpad memory is read (OPT_LD) from the DFG itself.
  data_spm = FuDFG.data_spm
  print("data SPM: ", data_spm)
  # One outer iteration == one pass over all DFG nodes; loop until a
  # branch takes the 'TRUE' exit path and raises live_out_ctrl.predicate.
  while live_out_ctrl.predicate == Bits1( 0 ):
    for node in FuDFG.nodes:
      current_input = []
      current_input_predicate = 0
      # print("id: ", node.id, " node.num_const: ", node.num_const, "; node.num_input: ", node.num_input)
      # Assume const goes in first, then the output from predecessor.
      if node.num_const != 0:
        for i in range( node.num_const ):
          current_input.append( src_const[node.const_index[i]] );
      if node.num_input != 0:
        for value in node.input_value:
          current_input.append(value);
      # One result slot per output port; predicate starts valid (1).
      result = [ DataType( 0, 1 ) for _ in node.num_output ]
      if node.opt_predicate == 1:
        current_input_predicate = node.input_predicate
      #current_input_predicate = node.input_predicate
      print( "id: ", node.id, ", current_input: ", current_input, ", current_input_predicate: ", current_input_predicate )
      # Dispatch on opcode. Arithmetic ops consume inputs [0] and [1].
      if node.opt == OPT_ADD:
        result[0].payload = current_input[0].payload + current_input[1].payload
      elif node.opt == OPT_SUB:
        result[0].payload = current_input[0].payload - current_input[1].payload
      elif node.opt == OPT_MUL:
        result[0].payload = current_input[0].payload * current_input[1].payload
      elif node.opt == OPT_PHI:
        # PHI selects whichever input currently carries a valid predicate,
        # preferring input [1].
        if current_input[1].predicate == Bits1( 1 ):
          result[0].payload = current_input[1].payload
        else:
          result[0].payload = current_input[0].payload
      elif node.opt == OPT_LD:
        # Load: input [0] payload is used directly as the SPM address.
        result[0].payload = data_spm[current_input[0].payload]
      elif node.opt == OPT_EQ:
        # if current_input[0].payload == current_input[1].payload:
        # FIXME: need to specify the constant input for each node
        if current_input[0].payload == current_input[1].payload:
          result[0] = DataType( 1, 1)
        else:
          result[0] = DataType( 0, 1)
      elif node.opt == OPT_BRH:
        # Br node does not output any meaningful value but a predication
        result[0].payload = 0
        # Cmp result goes into [0]
        # output_node[0] holds the 'FALSE'-path successors, output_node[1]
        # the 'TRUE'-path ones; exactly one side is enabled per evaluation.
        if current_input[0].payload == 0:
          result[0].predicate = Bits1( 1 )
          for j in range( node.num_output[0] ):
            FuDFG.get_node(node.output_node[0][j]).updatePredicate( 1 )
          for j in range( node.num_output[1] ):
            FuDFG.get_node(node.output_node[1][j]).updatePredicate( 0 )
        else:
          result[0].predicate = Bits1( 0 )
          for j in range( node.num_output[0] ):
            FuDFG.get_node(node.output_node[0][j]).updatePredicate( 0 )
          for j in range( node.num_output[1] ):
            FuDFG.get_node(node.output_node[1][j]).updatePredicate( 1 )
#        if len(node.num_output) > 1:
#          result[1] = DataType( 0, 0 )
#          result[1].payload = 0
#          if current_input[0].payload == 0:
#            result[1].predicate = Bits1( 0 )
#          else:
#            result[1].predicate = Bits1( 1 )
        # Currently, treat BRH node as the exit node that could contain live_out_ctrl
        if node.live_out_ctrl != 0:
          if node.opt_predicate == 1:
            for i in range(len( node.num_output )):
              # NOTE(review): Python 'and' returns an operand, not a bitwise
              # AND; works here assuming predicates are 0/1 — confirm.
              result[i].predicate = result[i].predicate and current_input_predicate
          # case of 'FALSE' ([0]->'FALSE' path; [1]->'TRUE' path)
          if result[0].predicate == 1:
            live_out_ctrl.predicate = Bits1( 0 )
          # Terminate the execution when BRANCH leads to a 'TRUE'
          else:
            live_out_ctrl.predicate = Bits1( 1 )
          if node.opt_predicate == 1:
            live_out_ctrl.predicate = live_out_ctrl.predicate and current_input_predicate
      # We allow single live out value in the DFG.
      if node.live_out_val != 0:
        live_out_val.payload = result[0].payload
        live_out_val.predicate = result[0].predicate
        if node.opt_predicate == 1:
          for i in range(len( node.num_output )):
            result[i].predicate = result[i].predicate and current_input_predicate
      # BRH only updates predicate rather than values.
      if node.opt != OPT_BRH:
        for i in range( len( node.num_output ) ):
          # print( "see node.num_output[i]: ", node.num_output[i] )
          for j in range( node.num_output[i] ):
            node.updateOutput( i, j, result[i] )
            FuDFG.get_node(node.output_node[i][j]).updateInput( result[i] )
      print( "id: ", node.id, " current output: ", result )
      # Stop mid-pass as soon as the exit branch fires.
      if live_out_ctrl.predicate == Bits1( 1 ):
        break
    print( "[ current iteration live_out_val: ", live_out_val, " ]" )
    print( "--------------------------------------" )
  print( "final live_out: ", live_out_val )
  return live_out_val.payload, data_spm
| 4,839 | 0 | 22 |