content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from utils import s3
| [
6738,
3384,
4487,
1330,
264,
18,
628
] | 3.142857 | 7 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
# load a datafile, handle some command line args
smoothing = int(sys.argv[2]) # window size for smoothing
data_file = sys.argv[1]
name = data_file.split('/')[-1].split('.')[0] # used in output
df = pd.read_csv(data_file, index_col=None, sep='\t')
# get the bin size of the windows
window_size = (df['BIN_END'] - df['BIN_START']).values[0] + 1
window_rad = int(window_size / 2)
# calculate ranges for the y axis
pi_arr = df['PI'].values
low_percentile = 1
high_percentile = 100 - low_percentile
mean_low = np.percentile(pi_arr, low_percentile)
mean_high = np.percentile(pi_arr, high_percentile)
# calculate the smoothed lines
# we're going to reject outliers for this part and only keep data that falls
# below the 'high' percentile
mean_vals = pi_arr
mean_vals[mean_vals > mean_high] = mean_high
smooth_radius = int(smoothing / 2)
smoothed_mean = np.zeros(df.shape[0])
for w in range(smooth_radius, df.shape[0] - smooth_radius):
smoothed_mean[w] = np.mean(mean_vals[w - smooth_radius : w + smooth_radius])
fig, ax = plt.subplots()
ax.plot(df['BIN_START'] + window_rad, pi_arr, c='grey', alpha=0.5)
ax.plot(df['BIN_START'][smooth_radius:-smooth_radius] + window_rad, smoothed_mean[smooth_radius:-smooth_radius], c='C0')
ax.set_ylabel(r'Binned $\pi$')
ax.set_ylim((mean_low, mean_high))
# output name
plt.savefig('../plots/pi_plots/' + name + '_pi_region.png') | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
198,
2,
3440,
257,
1366,
7753,
11,
5412,
617,
3141,
1627,
26498,
198,
5796,
102... | 2.608618 | 557 |
from arcade.sprite import Sprite
from src.entities.ennemies.base_enemy import BaseEnemy
from os.path import join
from math import pi
| [
6738,
27210,
13,
34975,
578,
1330,
33132,
198,
6738,
12351,
13,
298,
871,
13,
1697,
5090,
13,
8692,
62,
46970,
1330,
7308,
4834,
3065,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
10688,
1330,
31028,
198,
220,
220,
220,
220,
628... | 3.309524 | 42 |
from bottle import Bottle, TEMPLATE_PATH
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine('sqlite:///database.db', echo = True)
#create_session = sessionmaker(bind = engine)
app = Bottle()
TEMPLATE_PATH.insert(0, 'app/views/')
plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword = 'db',
create = True,
commit = True,
use_kwargs = False)
app.install(plugin)
from app.controllers import default
from app.models import tables | [
6738,
9294,
1330,
33608,
11,
309,
3620,
6489,
6158,
62,
34219,
201,
198,
6738,
9294,
13,
2302,
1330,
44161,
282,
26599,
201,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
201,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
... | 2.617761 | 259 |
# # -*- coding: utf-8 -*-
from polylogyx.celery.tasks import example_task
| [
2,
1303,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
7514,
6404,
28391,
13,
7015,
88,
13,
83,
6791,
1330,
1672,
62,
35943,
628
] | 2.5 | 30 |
import os
from configparser import ConfigParser
from app.domain.errors import NotFoundError
class AppConfig:
"""The configurations of the app at runtime"""
__instance = None
config = None
auth0_config = None
neo4j_config = None
@staticmethod
def _load_config() -> ConfigParser:
"""
Loads the .config file from the root of the project.
:return: the config
"""
env = os.getenv("ENV", ".config")
if env == ".config":
config = ConfigParser()
config.read([".config", ".test.config", "test/.test.config"])
return config
raise NotFoundError("config file not found")
| [
11748,
28686,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
198,
6738,
598,
13,
27830,
13,
48277,
1330,
1892,
21077,
12331,
628,
198,
4871,
2034,
16934,
25,
198,
220,
220,
220,
37227,
464,
25412,
286,
262,
598,
379,
19124,
37811,
198... | 2.571429 | 266 |
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import sys
from tqdm import tqdm
from util import helpers
from util.mahalanobis_lib import get_Mahalanobis_score, sample_estimator, sample_estimator_cifar10
from util.metrics import get_metrics
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
25064,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
7736,
... | 3.108696 | 92 |
from rest_framework import status
from rest_framework.response import Response
from shared.audit_log.viewsets import AuditLoggingModelViewSet
from applications.api.v1.serializers import ApplicationSerializer
from applications.models import Application
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
4888,
13,
3885,
270,
62,
6404,
13,
1177,
28709,
1330,
46450,
11187,
2667,
17633,
7680,
7248,
198,
198,
6738,
5479,
13,
15042,
13,
85,
16... | 4.45614 | 57 |
"""This component provides number entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import Callable, Sequence
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENTITY_CATEGORY_CONFIG
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from pyunifiprotect.data.devices import Camera, Light
from .const import DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_WDR = "wdr_value"
_KEY_MIC_LEVEL = "mic_level"
_KEY_ZOOM_POS = "zoom_position"
_KEY_SENSITIVITY = "sensitivity"
_KEY_DURATION = "duration"
_KEY_CHIME = "chime_duration"
@dataclass
class NumberKeysMixin:
"""Mixin for required keys."""
ufp_max: int
ufp_min: int
ufp_step: int
ufp_set_function: str
@dataclass
class ProtectNumberEntityDescription(
ProtectRequiredKeysMixin, NumberEntityDescription, NumberKeysMixin
):
"""Describes UniFi Protect Number entity."""
CAMERA_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_WDR,
name="Wide Dynamic Range",
icon="mdi:state-machine",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=3,
ufp_step=1,
ufp_required_field="feature_flags.has_wdr",
ufp_value="isp_settings.wdr",
ufp_set_function="set_wdr_level",
),
ProtectNumberEntityDescription(
key=_KEY_MIC_LEVEL,
name="Microphone Level",
icon="mdi:microphone",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.has_mic",
ufp_value="mic_volume",
ufp_set_function="set_mic_volume",
),
ProtectNumberEntityDescription(
key=_KEY_ZOOM_POS,
name="Zoom Position",
icon="mdi:magnify-plus-outline",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field="feature_flags.can_optical_zoom",
ufp_value="isp_settings.zoom_position",
ufp_set_function="set_camera_zoom",
),
ProtectNumberEntityDescription(
key=_KEY_CHIME,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=10000,
ufp_step=100,
ufp_required_field="feature_flags.has_chime",
ufp_value="chime_duration",
ufp_set_function="set_chime_duration",
),
)
LIGHT_NUMBERS: tuple[ProtectNumberEntityDescription, ...] = (
ProtectNumberEntityDescription(
key=_KEY_SENSITIVITY,
name="Motion Sensitivity",
icon="mdi:walk",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=0,
ufp_max=100,
ufp_step=1,
ufp_required_field=None,
ufp_value="light_device_settings.pir_sensitivity",
ufp_set_function="set_sensitivity",
),
ProtectNumberEntityDescription(
key=_KEY_DURATION,
name="Duration",
icon="mdi:camera-timer",
entity_category=ENTITY_CATEGORY_CONFIG,
ufp_min=15,
ufp_max=900,
ufp_step=15,
ufp_required_field=None,
ufp_value="light_device_settings.pir_duration",
ufp_set_function="set_duration",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[Sequence[Entity]], None],
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectNumbers,
camera_descs=CAMERA_NUMBERS,
light_descs=LIGHT_NUMBERS,
)
async_add_entities(entities)
class ProtectNumbers(ProtectDeviceEntity, NumberEntity):
"""A UniFi Protect Number Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light,
description: ProtectNumberEntityDescription,
) -> None:
"""Initialize the Number Entities."""
self.device: Camera | Light = device
self.entity_description: ProtectNumberEntityDescription = description
super().__init__(data)
self._attr_max_value = self.entity_description.ufp_max
self._attr_min_value = self.entity_description.ufp_min
self._attr_step = self.entity_description.ufp_step
@callback
async def async_set_value(self, value: float) -> None:
"""Set new value."""
function = self.entity_description.ufp_set_function
_LOGGER.debug(
"Calling %s to set %s for Camera %s",
function,
value,
self.device.name,
)
set_value: float | timedelta = value
if self.entity_description.key == _KEY_DURATION:
set_value = timedelta(seconds=value)
await getattr(self.device, function)(set_value)
| [
37811,
1212,
7515,
3769,
1271,
12066,
329,
43376,
10547,
21916,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
18931,
... | 2.340017 | 2,294 |
import matplotlib
import numpy as np
import pandas as pd
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import glob
import os
from IPython.display import clear_output
from skimage.io import imread
from skimage.transform import resize
from google.colab import drive
import sys
# run GPU ....
if(torch.cuda.is_available()):
device = torch.device("cuda")
print(device, torch.cuda.get_device_name(0))
else:
device= torch.device("cpu")
print(device)
size = (7, 7)
| [
11748,
2603,
29487,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
3... | 3.019417 | 206 |
"""
Construct all subgroup graphs and their relations between them from a single space group.
"""
from __future__ import absolute_import, division, print_function
from cctbx import sgtbx
from cctbx.sgtbx import show_cosets
from cctbx.sgtbx import pointgroup_tools
from cctbx.development import debug_utils
import sys
if __name__=="__main__":
if len(sys.argv)>1:
run_single( sys.argv[1],True,True )
else:
run_all()
| [
37811,
198,
42316,
477,
850,
8094,
28770,
290,
511,
2316,
1022,
606,
422,
257,
2060,
2272,
1448,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
628,
198,
6738,
269,
310,
65,
87,
13... | 2.825806 | 155 |
import service_factory | [
11748,
2139,
62,
69,
9548
] | 4.4 | 5 |
import json
import os
from os import listdir, path
from os.path import isfile, join
tasks_path = 'tasks/'
if __name__ == "__main__":
update_definitions(tasks_path)
| [
11748,
33918,
198,
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
11,
3108,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
83,
6791,
62,
6978,
796,
705,
83,
6791,
14,
6,
198,
198,
361,
11593,
3672,
834,
6624,
3... | 2.741935 | 62 |
# -*- coding: utf-8 -*-
from sklearn_export.Template import Template
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
1341,
35720,
62,
39344,
13,
30800,
1330,
37350,
628
] | 2.84 | 25 |
#!/usr/bin/env python
"""
Copyright 2014-2015 Taxamo, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Settlement_daily_stats_schema:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
15269,
1946,
12,
4626,
9241,
18811,
11,
12052,
13,
628,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
... | 3.454148 | 229 |
"""
usando Requests
trabalhando com proxies
"""
# importando modulo Requests
import requests
# proxies free
# http://www.ultrapoxies.com/
# https://www.hide-my-ip.com/pt/proxylist.shtml
url = 'https://www.hide-my-ip.com/pt/proxylist.shtml'
#proxies = {'https':'169.57.157.148:8123'}
proxies = {'http':'183.181.164.210:80'}
try:
r = requests.get(url, proxies=proxies)
print(r.status_code)
except requests.exceptions.ConnectionError as e:
print(str(e) | [
37811,
198,
385,
25440,
9394,
3558,
198,
2213,
44349,
4993,
78,
401,
41775,
198,
37811,
198,
2,
1330,
25440,
953,
43348,
9394,
3558,
198,
11748,
7007,
198,
198,
2,
41775,
1479,
198,
2,
2638,
1378,
2503,
13,
586,
2416,
1140,
444,
13,
... | 2.391753 | 194 |
from .api import Api | [
6738,
764,
15042,
1330,
5949,
72
] | 3.333333 | 6 |
from pydantic import BaseModel, Extra
| [
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
17221,
628
] | 3.9 | 10 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 12:52:01 2020
Circle Detection inspiration:
https://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv
@author: modal
"""
#%% INIT
image_file_name = 'a2_a_cropped.jpg'
from well_plate_project.config import data_dir
path = data_dir / 'raw'
image_file = path / image_file_name
assert image_file.is_file()
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy import ndimage
import matplotlib.pyplot as plt
# Load in image, convert to gray scale, and Otsu's threshold
image = cv2.imread(str(image_file))
plt.imshow(image)
plt.show()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove small noise by filtering using contour area
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
if cv2.contourArea(c) < 1000:
cv2.drawContours(thresh,[c], 0, (0,0,0), -1)
#cv2.imshow('thresh', thresh)
plt.imshow(cv2.cvtColor(thresh, cv2.COLOR_BGR2RGB)); plt.show()
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
local_max = peak_local_max(distance_map, indices=False, min_distance=5, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
# Iterate through unique labels
for label in np.unique(labels):
if label == 0:
continue
# Create a mask
mask = np.zeros(gray.shape, dtype="uint8")
mask[labels == label] = 255
# Find contours and determine contour area
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
c = max(cnts, key=cv2.contourArea)
cv2.drawContours(image, [c], -1, (36,255,12), -1)
#cv2.imshow('image', image)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
#cv2.waitKey() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
5267,
220,
362,
1105,
25,
4309,
25,
486,
12131,
198,
31560,
293,
46254,
12141,
25,
... | 2.422198 | 919 |
#!/bin/python
from roomai.games.texasholdem.TexasHoldemUtil import PokerCard
from roomai.games.texasholdem.TexasHoldemUtil import AllCardsPattern
from roomai.games.texasholdem.TexasHoldemUtil import AllPokerCards
from roomai.games.texasholdem.TexasHoldemUtil import Stage
from roomai.games.texasholdem.TexasHoldemActionChance import TexasHoldemActionChance
from roomai.games.texasholdem.TexasHoldemAction import TexasHoldemAction
from roomai.games.texasholdem.TexasHoldemStatePerson import TexasHoldemStatePerson
from roomai.games.texasholdem.TexasHoldemStatePrivate import TexasHoldemStatePrivate
from roomai.games.texasholdem.TexasHoldemStatePublic import TexasHoldemStatePublic
from roomai.games.texasholdem.TexasHoldemEnv import TexasHoldemEnv
| [
2,
48443,
8800,
14,
29412,
198,
6738,
2119,
1872,
13,
19966,
13,
16886,
1077,
727,
368,
13,
21607,
26807,
368,
18274,
346,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
36157,
16962,
198,
6738,
2119,
1872,
13,
19966,
13,
16886,
1077,
... | 2.898182 | 275 |
# An inefficient way I could think of to find prime numbers.
i = int(input("enter max "))
t=1
while t<i:
m=1
c=0
while m<i:
if t%m==0:
c+=1
m+=1
if c<=2:
print(t)
t+=1
| [
2,
1052,
30904,
835,
314,
714,
892,
286,
284,
1064,
6994,
3146,
13,
201,
198,
72,
796,
493,
7,
15414,
7203,
9255,
3509,
366,
4008,
201,
198,
83,
28,
16,
201,
198,
4514,
256,
27,
72,
25,
201,
198,
220,
220,
220,
285,
28,
16,
20... | 1.680851 | 141 |
"""
This file contains the CLI code for the maintenance commands.
References
----------
For the Rich package, colors are defined here:
https://rich.readthedocs.io/en/latest/appendix/colors.html#appendix-colors
"""
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import List, Dict
from operator import attrgetter
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
import click
from rich.console import Console
from rich.table import Table, Text
# from rich.console import TerminalTheme
import maya
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from pyzayo import ZayoClient
from .cli_root import cli
from pyzayo import consts
from pyzayo.mtc_models import CaseRecord, ImpactRecord, NotificationDetailRecord
from pyzayo.consts import CaseStatusOptions
# -----------------------------------------------------------------------------
#
# TABLE CODE BEGINS
#
# -----------------------------------------------------------------------------
def colorize_urgency(urgency: str):
""" set the text style for case.urgency field """
style = {
consts.CaseUrgencyOptions.emergency: "bold red",
consts.CaseUrgencyOptions.demand: "bright_blue",
consts.CaseUrgencyOptions.planned: "bright_yellow",
}.get(
consts.CaseUrgencyOptions(urgency) # noqa
) # noqa
return Text(urgency, style=style)
def colorize_status(status):
""" set the text style for case.status field"""
return Text(
status,
style={CaseStatusOptions.scheduled: "bright_yellow"}.get(
consts.CaseStatusOptions(status) # noqa
),
)
def colorize_impact(impact):
""" set the text style for case.impact field """
style = {
consts.CaseImpactOptions.potential_svc_aff: "",
consts.CaseImpactOptions.svc_aff: "bold red",
}.get(
consts.CaseImpactOptions(impact) # noqa
) # noqa
return Text("\n".join(impact.split()), style=style)
def make_cases_table(recs: List[CaseRecord]) -> Table:
"""
This function creates the Rich.Table that contains the cases information.
Parameters
----------
recs: List[CaseRecord]
The list of case records in model-object form.
Returns
-------
The rendered Table of case information.
"""
n_cases = len(recs)
table = Table(
title=Text(
f"Cases ({n_cases})" if n_cases > 1 else "Case",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Urgency")
table.add_column("Status")
table.add_column("Impact")
table.add_column("Date(s)")
table.add_column("Location", width=12, overflow="fold")
table.add_column("Start Time")
table.add_column("End Time")
table.add_column("Reason")
pdates = attrgetter("primary_date", "primary_date_2", "primary_date_3")
for row_obj in recs:
if row_obj.status != consts.CaseStatusOptions.closed:
row_obj.urgency = colorize_urgency(row_obj.urgency) # noqa
row_obj.impact = colorize_impact(row_obj.impact)
row_obj.status = colorize_status(row_obj.status)
rec_pdates = sorted(pd for pd in pdates(row_obj) if pd)
md = maya.parse(rec_pdates[0])
dstr = "\n".join(map(str, rec_pdates)) + f"\n({md.slang_time()})"
table.add_row(
row_obj.case_num,
row_obj.urgency,
row_obj.status,
row_obj.impact,
dstr,
row_obj.location,
str(row_obj.from_time),
str(row_obj.to_time),
row_obj.reason,
)
return table
def make_impacts_table(impacts: List[dict]) -> Table:
"""
This function creates the Rich.Table that contains the case impact information.
Parameters
----------
impacts: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case impact information.
"""
count = len(impacts)
table = Table(
title=Text(
f"Impacts ({count})" if count > 1 else "Impact",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("Case #")
table.add_column("Circuit Id")
table.add_column("Expected Impact")
table.add_column("CLLI A")
table.add_column("CLLI Z")
for rec in impacts:
row_obj = ImpactRecord.parse_obj(rec)
table.add_row(
row_obj.case_num,
row_obj.circuit_id,
row_obj.impact,
row_obj.clli_a,
row_obj.clli_z,
)
return table
def make_notifs_table(notifs):
"""
This function creates the Rich.Table that contains the case notification information.
Parameters
----------
notifs: List[dict]
The list of case impact records in API dict form.
Returns
-------
The rendered Table of case notifications information.
"""
count = len(notifs)
table = Table(
title=Text(
f"Notifications ({count})" if count > 1 else "Notification",
style="bright_white",
justify="left",
),
show_header=True,
header_style="bold magenta",
show_lines=True,
)
table.add_column("#")
table.add_column("Type")
table.add_column("Email Sent")
table.add_column("Email Subject")
table.add_column("Email To")
for rec in notifs:
row_obj = NotificationDetailRecord.parse_obj(rec)
email_list = sorted(map(str.strip, row_obj.email_list.split(";")))
mt = maya.parse(row_obj.date)
dstring = (
mt.local_datetime().strftime("%Y-%m-%d\n%H:%M:%S")
+ f"\n({mt.slang_time()})"
)
table.add_row(
row_obj.name, row_obj.type, dstring, row_obj.subject, "\n".join(email_list)
)
return table
# HTML_SAVE_THEME = TerminalTheme(
# (0, 0, 0),
# (199, 199, 199),
# [(0, 0, 0),
# (201, 27, 0),
# (0, 194, 0),
# (199, 196, 0),
# (2, 37, 199),
# (202, 48, 199),
# (0, 197, 199),
# (199, 199, 199),
# (104, 104, 104)],
# [(255, 110, 103),
# (95, 250, 104),
# (255, 252, 103),
# (104, 113, 255),
# (255, 119, 255),
# (96, 253, 255),
# (255, 255, 255)]
# )
# -----------------------------------------------------------------------------
#
# CLI CODE BEGINS
#
# -----------------------------------------------------------------------------
@cli.group("cases")
def mtc():
"""
Maintenance commands.
"""
pass
@mtc.command(name="list")
@click.option("--circuit-id", help="filter case by circuit ID")
def mtc_cases(circuit_id):
"""
Show listing of maintenance caess.
"""
zapi = ZayoClient()
recs = [
rec
for rec in map(
CaseRecord.parse_obj,
zapi.get_cases(orderBy=[consts.OrderBy.date_sooner.value]),
)
if rec.status != CaseStatusOptions.closed
]
# if circuit_id was provided by the User then we need to filter the case
# list by only those records that have an associated impact record with the
# same circuit_id value.
if circuit_id:
circuit_id = zapi.format_circuit_id(circuit_id)
impacted_case_nums = [
i_rec["caseNumber"]
for rec in recs
for i_rec in zapi.get_impacts(by_case_num=rec.case_num)
if i_rec["circuitId"] == circuit_id
]
recs = [rec for rec in recs if rec.case_num in impacted_case_nums]
console = Console(record=True)
console.print(make_cases_table(recs))
# console.save_html('cases.html', theme=HTML_SAVE_THEME)
@mtc.command(name="show-details")
@click.argument("case_number")
@click.option("--save-emails", "-E", is_flag=True, help="Save notification emails")
def mtc_case_details(case_number, save_emails):
"""
Show specific case details.
"""
# find the case by number
zapi = ZayoClient()
case, impacts, notifs = zapi.get_case_details(by_case_num=case_number)
console = Console()
if not case:
console.print(f"Case [bold white]{case_number}: [bold red]Not found")
return
console.print(f"\nCase [bold white]{case_number}[/bold white]: [bold green]Found")
console.print("\n", make_cases_table([CaseRecord.parse_obj(case)]), "\n")
console.print(make_impacts_table(impacts), "\n")
console.print(make_notifs_table(notifs), "\n")
if save_emails:
_save_notif_emails(notifs)
# -----------------------------------------------------------------------------
#
# MODULE FUNCTIONS
#
# -----------------------------------------------------------------------------
def _save_notif_emails(notifs: List[Dict]) -> None:
""" save each notification email to a file as <name>.html """
for notif in notifs:
with open(notif["name"] + ".html", "w+") as ofile:
ofile.write(notif["emailBody"])
print(f"Email saved: {ofile.name}")
| [
37811,
198,
1212,
2393,
4909,
262,
43749,
2438,
329,
262,
9262,
9729,
13,
198,
198,
19927,
198,
35937,
198,
220,
220,
220,
1114,
262,
3998,
5301,
11,
7577,
389,
5447,
994,
25,
198,
220,
220,
220,
3740,
1378,
7527,
13,
961,
83,
704,
... | 2.457584 | 3,949 |
# -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
Allow todos to be inserted into your documentation. Inclusion of todos can
be switched of by a configuration variable. The todolist directive collects
all todos of your project and lists them along with a backlink to the
original location.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx.environment import NoUri
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
from sphinx.util.texescape import tex_escape_map
if False:
# For type annotation
from typing import Any, Dict, Iterable, List # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
logger = logging.getLogger(__name__)
class Todo(BaseAdmonition, SphinxDirective):
"""
A todo entry, displayed (if configured) in the form of an admonition.
"""
node_class = todo_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
class TodoList(SphinxDirective):
"""
A list of all todo entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
599,
20079,
87,
13,
2302,
13,
83,
24313,
198,
220,
220,
220,
220,
15116,
8728,
4907,
93,
628,
220,
220,
220,
22507,
284,
37427,
284,
307,
... | 2.859083 | 589 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glob
import platform
import re
import shlex
import yaml
from anvil import colorizer
from anvil import exceptions as excp
from anvil import importer
from anvil import log as logging
from anvil import shell as sh
LOG = logging.getLogger(__name__)
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
220,
220,
220,
15069,
357,
34,
8,
2321,
16551,
0,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
220,
220,
220,
15069,
357,
34,
8,
2321,
... | 3.356209 | 306 |
import queue
import multiprocessing
import pangolin as pango
import OpenGL.GL as gl
import numpy as np
| [
11748,
16834,
198,
11748,
18540,
305,
919,
278,
198,
198,
11748,
279,
648,
24910,
355,
279,
14208,
198,
11748,
30672,
13,
8763,
355,
1278,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.354839 | 31 |
import flask
import uuid
blueprint = flask.Blueprint("guid", __name__)
@blueprint.route("/guid/mint", methods=["GET"])
def mint_guid():
"""
Mint a GUID that is valid for this instance of indexd. The intention
of this endpoint is to allow generating valid GUIDs to be indexed
WITHOUT actually creating a new record yet.
Allows for a `count` query parameter to get bulk GUIDs up to some limit
"""
count = flask.request.args.get("count", 1)
max_count = 10000
try:
count = int(count)
except Exception:
return f"Count {count} is not a valid integer", 400
# error on < 0, > max_count
if count < 0:
return "You cannot provide a count less than 0", 400
elif count > max_count:
return f"You cannot provide a count greater than {max_count}", 400
guids = []
for _ in range(count):
valid_guid = _get_prefix() + str(uuid.uuid4())
guids.append(valid_guid)
return flask.jsonify({"guids": guids}), 200
@blueprint.route("/guid/prefix", methods=["GET"])
def get_prefix():
"""
Get the prefix for this instance of indexd.
"""
return flask.jsonify({"prefix": _get_prefix()}), 200
def _get_prefix():
"""
Return prefix if it's configured to be prepended to all GUIDs and NOT
set as an alias
"""
prefix = ""
if flask.current_app.config["INDEX"]["driver"].config.get(
"PREPEND_PREFIX"
) and not flask.current_app.config["INDEX"]["driver"].config.get(
"ADD_PREFIX_ALIAS"
):
prefix = flask.current_app.config["INDEX"]["driver"].config["DEFAULT_PREFIX"]
return prefix
| [
11748,
42903,
198,
11748,
334,
27112,
198,
198,
17585,
4798,
796,
42903,
13,
14573,
4798,
7203,
5162,
312,
1600,
11593,
3672,
834,
8,
628,
198,
31,
17585,
4798,
13,
38629,
7203,
14,
5162,
312,
14,
34289,
1600,
5050,
28,
14692,
18851,
... | 2.61465 | 628 |
from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Event_user)
#class EventueAdmin(admin.ModelAdmin):
# list_display = ('title', 'description', 'get_date',)
admin.site.register(models.Task_user)
admin.site.register(models.User_Profile)
admin.site.register(models.Chatroom)
admin.site.register(models.Friendship) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
1330,
4981,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
9237,
62,
7220,
8,
198,
2,
4871,
8558,
518,
46787,
7,
28... | 3.114754 | 122 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-09 13:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import projects.models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2864,
12,
3023,
12,
2931,
1511,
25,
3559,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.916667 | 72 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 15:36:42 2020
@author: rakiki
"""
import numpy as np
import rasterio
# dataset construction
def pointCube(dim1_left, dim1_right, dim2_left, dim2_right
              , alt_min, alt_max
              , grid_len, num_layers):
    '''
    Build a regular 3D grid of sample points and return it flattened.

    Args:
        dim1/dim2 limits: bounds of the two horizontal axes -- either
            (lon, lat) or (row, col); the order of the two axes is irrelevant.
        alt_min, alt_max: bounds of the altitude axis.
        grid_len: number of samples along each horizontal axis.
        num_layers: number of samples along the altitude axis.
    Returns:
        Three 1D numpy arrays (dim1 values, dim2 values, altitude values),
        one entry per grid point.
    '''
    axis1 = np.linspace(dim1_left, dim1_right, grid_len)
    axis2 = np.linspace(dim2_left, dim2_right, grid_len)
    axis_alt = np.linspace(alt_min, alt_max, num_layers)
    # meshgrid expands the three axes into a full 3D lattice; ravel
    # flattens each coordinate cube into a flat list of samples.
    grid1, grid2, grid_alt = np.meshgrid(axis1, axis2, axis_alt)
    return grid1.ravel(), grid2.ravel(), grid_alt.ravel()
def read_dem(dem_path):
    '''
    Load the DEM GeoTIFF covering the area of interest from disk.

    Args:
        dem_path: filesystem path of the DEM file.
    Returns:
        A ``(dataset, elevation)`` pair: the rasterio dataset handle
        (already closed on return, since the context manager exits) and
        band 1 of the raster as a 2D numpy array.
    '''
    with rasterio.open(dem_path) as dataset:
        elevation = dataset.read(1)
    return dataset, elevation
def getDataset_projection(demdb, demdata , grid_len, num_layers, projectionFunc
                        , train = True, margin = 0.2, **kwargs):
    '''
    Computes 3D pts + 2D correspondence for train or test set
    Args:
        demdb: rasterio db of the geotiff dem on the area of interest
        demdata: the data of the dem
        grid_len: the len of the grid in the two lon, lat dimensions
        num_layers: the number of alt layers
        projectionFunc: projection function (lon, lat, alt) -> (col, row)
        train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
        margin: the safety margin to apply to the altitude bounds when constructing the grid
        kwargs: dict of params to pass to projection function
    Returns:
        input_locs: [lon, lat, alt] array
        target: [col, row] array
    '''
    # Geographic bounds of the DEM: the affine transform maps raster
    # (col, row) indices to (lon, lat) coordinates.
    lon_left, lat_top = demdb.transform * (0,0)
    lon_right,lat_bottom = demdb.transform * (demdb.width -1 ,demdb.height -1 )
    # Ignore nodata cells when taking altitude extremes.
    mask = (demdata != demdb.nodata)
    alt_min = np.nanmin(demdata[mask])
    alt_max = np.nanmax(demdata[mask])
    # Pad the altitude range by `margin` (fractional) on each side.
    alt_margin = np.round((alt_max - alt_min) * margin)
    alt_min -= alt_margin
    alt_max += alt_margin
    if train:
        lon,lat, alt = pointCube(lon_left, lon_right, lat_top,
                                 lat_bottom, alt_min ,
                                 alt_max, grid_len, num_layers,
                                 )
    else:
        # Test grid: shift every axis by half a grid step so that test
        # points fall between the training points.
        lon_stp = (lon_right - lon_left)/(2 * (grid_len - 1 ) )
        lat_stp = (lat_bottom - lat_top)/(2 * (grid_len - 1 ) )
        alt_stp = np.round((alt_max - alt_min)/(2 * (num_layers - 1 ) ) )
        lon, lat, alt = pointCube(lon_left + lon_stp , lon_right + lon_stp, lat_top + lat_stp,
                                  lat_bottom + lat_stp, alt_min + alt_stp, alt_max + alt_stp
                                  , grid_len, num_layers,
                                  )
    # Ground-truth image coordinates for every 3D sample.
    col, row = projectionFunc( lon = lon , lat = lat
                              , alt = alt , **kwargs)
    input_locs = np.vstack((lon, lat, alt)).T
    target = np.vstack((col,row)).T
    return input_locs, target
def getDataset_localization(demdb, demdata, grid_len, num_layers
                            , im_size , localizationFunc, train = True, margin = 0.2
                            , **kwargs ):
    '''
    Computes 3D pts + 2D correspondence for train or test set
    Args:
        demdb: rasterio db of the geotiff dem on the area of interest
        demdata: the data of the dem
        grid_len: the len of the grid in the two lon, lat dimensions
        num_layers: the number of alt layers
        im_size: tuple(height, width) of the image
        localizationFunc: localization function (col, line, alt) -> (lon, lat)
        train: if True, returns a training grid else returns a test grid (shifted by half a step from the train grid)
        margin: the safety margin to apply to the bounds of the image dimension and the altitude bounds when constructing the grid
        kwargs: localization function additional arguments
    Returns:
        input_locs: [lon, lat, alt] array
        target: [col, row] array
    '''
    # line, col limits
    # Pad both image dimensions by `margin` (fractional) on each side.
    lines = im_size[0]
    l_margin = np.round(margin * lines)
    columns = im_size[1]
    c_margin = np.round(margin * columns)
    # alt limits, use preexisting demdb, demdata
    # Altitude extremes come from the DEM, excluding nodata cells.
    mask = (demdata != demdb.nodata)
    alt_min = np.nanmin(demdata[mask])
    alt_max = np.nanmax(demdata[mask])
    alt_margin = np.round((alt_max - alt_min) * margin)
    if train:
        c,l, alt = pointCube(-c_margin, columns + c_margin, -l_margin,
                             lines + l_margin, alt_min - alt_margin , alt_max + alt_margin,
                             grid_len, num_layers)
    else:
        # Test grid: shift every axis by half a grid step so that test
        # points fall between the training points.
        c_stp = (columns + 2 * c_margin)/(2 * (grid_len - 1 ) )
        l_stp = (lines + 2 * l_margin)/(2 * (grid_len - 1 ) )
        alt_stp = np.round((alt_max - alt_min + 2 * alt_margin)/(2 * (num_layers - 1 ) ) )
        c, l, alt = pointCube(-c_margin + c_stp , columns + c_margin + c_stp, -l_margin + l_stp,
                              lines + l_margin + l_stp, alt_min - alt_margin + alt_stp
                              , alt_max + alt_margin + alt_stp,
                              grid_len, num_layers)
    # Ground-truth geographic coordinates for every image-space sample.
    lon , lat = localizationFunc(col = c, line = l, alt = alt, **kwargs)
    input_locs = np.vstack((lon, lat, alt)).T
    target = np.vstack((c,l)).T
    return input_locs, target
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2556,
678,
1315,
25,
2623,
25,
3682,
12131,
198,
198,
31,
9800,
25,
374,
461,
5580,... | 2.138477 | 2,903 |
from flask import Blueprint
# Blueprint Configuration
# Groups the authentication routes under a single blueprint; template and
# static lookups are resolved relative to this package.
auth_bp = Blueprint(
    'auth_bp', __name__,
    template_folder='templates',
    static_folder='static'
)
# Imported at the bottom (not the top) on purpose: views.py needs auth_bp
# to register its routes, so a top-of-file import would be circular.
from . import views
6738,
42903,
1330,
39932,
628,
198,
2,
39932,
28373,
198,
18439,
62,
46583,
796,
39932,
7,
198,
220,
220,
220,
705,
18439,
62,
46583,
3256,
11593,
3672,
834,
11,
198,
220,
220,
220,
11055,
62,
43551,
11639,
11498,
17041,
3256,
198,
22... | 3.210526 | 57 |
import time, random, configparser
from Classes.Base.Vector import Vector
from Loading.RandomGen import getRandomMagicWeapon, getRandomMagicCast, getRandomMonster
from Classes.Super.Weapon import Weapon
from Classes.Middle.Particle import Particle
from Classes.Super.Monster import Monster
from Loading.Objects import weapon_set, visual_set, spriteDictionary, getUid, playerId
from Loading.Objects import monster_set, player_list
from Classes.Functions.Collisions.Collisions import doCirclesIntersect, isPointInRect, isCircleInRect
# Parse the INI-style game configuration once at import time; the map
# dimensions below are read by the rest of the module.
config = configparser.ConfigParser()
# BUG FIX: the original passed a bare open('Classes/config') result to
# read_file(), leaking the file handle; the with-block guarantees closure.
with open('Classes/config') as _config_file:
    config.read_file(_config_file)
MAP_WIDTH = int(config['MAP']['WIDTH'])
MAP_HEIGHT = int(config['MAP']['HEIGHT'])
| [
11748,
640,
11,
4738,
11,
4566,
48610,
198,
6738,
38884,
13,
14881,
13,
38469,
1330,
20650,
198,
6738,
12320,
13,
29531,
13746,
1330,
651,
29531,
22975,
27632,
11,
651,
29531,
22975,
19248,
11,
651,
29531,
40872,
198,
6738,
38884,
13,
1... | 3.5 | 198 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
from trac.util.text import unicode_quote, unicode_urlencode
slashes_re = re.compile(r'/{2,}')
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which then are used to assemble the URL.
    Positional arguments are appended as individual segments to
    the path of the URL:
    >>> href = Href('/trac')
    >>> href('ticket', 540)
    '/trac/ticket/540'
    >>> href('ticket', 540, 'attachment', 'bugfix.patch')
    '/trac/ticket/540/attachment/bugfix.patch'
    >>> href('ticket', '540/attachment/bugfix.patch')
    '/trac/ticket/540/attachment/bugfix.patch'
    If a positional parameter evaluates to None, it will be skipped:
    >>> href('ticket', 540, 'attachment', None)
    '/trac/ticket/540/attachment'
    The first path segment can also be specified by calling an attribute
    of the instance, as follows:
    >>> href.ticket(540)
    '/trac/ticket/540'
    >>> href.changeset(42, format='diff')
    '/trac/changeset/42?format=diff'
    Simply calling the Href object with no arguments will return the base URL:
    >>> href()
    '/trac'
    Keyword arguments are added to the query string, unless the value is None:
    >>> href = Href('/trac')
    >>> href('timeline', format='rss')
    '/trac/timeline?format=rss'
    >>> href('timeline', format=None)
    '/trac/timeline'
    >>> href('search', q='foo bar')
    '/trac/search?q=foo+bar'
    Multiple values for one parameter are specified using a sequence (a list or
    tuple) for the parameter:
    >>> href('timeline', show=['ticket', 'wiki', 'changeset'])
    '/trac/timeline?show=ticket&show=wiki&show=changeset'
    Alternatively, query string parameters can be added by passing a dict or
    list as last positional argument:
    >>> href('timeline', {'from': '02/24/05', 'daysback': 30})
    '/trac/timeline?daysback=30&from=02%2F24%2F05'
    >>> href('timeline', {})
    '/trac/timeline'
    >>> href('timeline', [('from', '02/24/05')])
    '/trac/timeline?from=02%2F24%2F05'
    >>> href('timeline', ()) == href('timeline', []) == href('timeline', {})
    True
    The usual way of quoting arguments that would otherwise be interpreted
    as Python keywords is supported too:
    >>> href('timeline', from_='02/24/05', daysback=30)
    '/trac/timeline?from=02%2F24%2F05&daysback=30'
    If the order of query string parameters should be preserved, you may also
    pass a sequence of (name, value) tuples as last positional argument:
    >>> href('query', (('group', 'component'), ('groupdesc', 1)))
    '/trac/query?group=component&groupdesc=1'
    >>> params = []
    >>> params.append(('group', 'component'))
    >>> params.append(('groupdesc', 1))
    >>> href('query', params)
    '/trac/query?group=component&groupdesc=1'
    By specifying an absolute base, the function returned will also generate
    absolute URLs:
    >>> href = Href('http://trac.edgewall.org')
    >>> href('ticket', 540)
    'http://trac.edgewall.org/ticket/540'
    >>> href = Href('https://trac.edgewall.org')
    >>> href('ticket', 540)
    'https://trac.edgewall.org/ticket/540'
    In common usage, it may improve readability to use the function-calling
    ability for the first component of the URL as mentioned earlier:
    >>> href = Href('/trac')
    >>> href.ticket(540)
    '/trac/ticket/540'
    >>> href.browser('/trunk/README.txt', format='txt')
    '/trac/browser/trunk/README.txt?format=txt'
    The ``path_safe`` argument specifies the characters that don't
    need to be quoted in the path arguments. Likewise, the
    ``query_safe`` argument specifies the characters that don't need
    to be quoted in the query string:
    >>> href = Href('')
    >>> href.milestone('<look,here>', param='<here,too>')
    '/milestone/%3Clook%2Chere%3E?param=%3Chere%2Ctoo%3E'
    >>> href = Href('', path_safe='/<,', query_safe=',>')
    >>> href.milestone('<look,here>', param='<here,too>')
    '/milestone/<look,here%3E?param=%3Chere,too>'
    """
    # All printable ASCII characters, 0x21 '!' through 0x7e '~' -- presumably
    # the default set of characters exempt from percent-quoting; confirm
    # against the (not shown here) __call__ implementation.
    # NOTE(review): `xrange` makes this module Python-2-only, consistent with
    # the file's other Python 2 idioms; left unchanged.
    _printable_safe = ''.join(map(chr, xrange(0x21, 0x7f)))
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings when the file
    # is executed directly.
    import doctest, sys
    doctest.testmod(sys.modules[__name__])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
5816,
12,
10531,
1717,
39909,
439,
10442,
198,
2,
15069,
357,
34,
8,
5816,
12,
15724,
40458,
29004,
2536,
9101,
76,
1279,
46286,
292,
3... | 2.809659 | 1,760 |
import serial
import time
# Open the TFMini rangefinder's UART; the 1 s timeout keeps read() from
# blocking forever if the sensor goes quiet.
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)

# Configuration frame 42 57 02 00 00 00 01 06: switches the sensor into its
# standard 9-byte measurement output mode.
# BUG FIX: pyserial's write() requires a bytes-like object; the original
# ser.write(0x42) calls passed bare ints, which raises TypeError on Python 3.
ser.write(bytes([0x42, 0x57, 0x02, 0x00, 0x00, 0x00, 0x01, 0x06]))

while True:
    # A complete measurement frame is 9 bytes:
    # b'Y' b'Y' Dist_L Dist_H strength_L strength_H mode spare checksum.
    while ser.in_waiting >= 9:
        # BUG FIX: read() returns bytes, so the frame header must be compared
        # against b'Y'; the original compared against the str 'Y', which is
        # never equal to a bytes value on Python 3.
        if (b'Y' == ser.read()) and (b'Y' == ser.read()):
            Dist_L = ser.read()
            Dist_H = ser.read()
            # Distance in cm, transmitted low byte first.
            Dist_Total = (ord(Dist_H) * 256) + (ord(Dist_L))
            # Discard the remaining 5 bytes of the frame.
            for i in range(0, 5):
                ser.read()
            #time.sleep(0.0005)
            print(Dist_Total)
| [
11748,
11389,
201,
198,
11748,
640,
201,
198,
201,
198,
2655,
796,
11389,
13,
32634,
10786,
14,
7959,
14,
42852,
27155,
15,
3256,
15363,
2167,
11,
48678,
796,
352,
8,
201,
198,
2655,
13,
13564,
7,
15,
87,
3682,
8,
201,
198,
2655,
... | 1.79403 | 335 |
from datetime import datetime, time
from django import test as unittest
from django.test.client import RequestFactory
from dimagi.utils.dates import DateSpan
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.sql_db.connections import Session
from corehq.util.dates import iso_string_to_date
from corehq.util.test_utils import softer_assert
from .sql_fixture import load_data
from .sql_reports import RegionTestReport, UserTestReport, test_report
DOMAIN = "test"
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
198,
198,
6738,
42625,
14208,
1330,
1332,
355,
555,
715,
395,
198,
6738,
42625,
14208,
13,
9288,
13,
16366,
1330,
19390,
22810,
198,
198,
6738,
5391,
18013,
13,
26791,
13,
19581,
1330,
7536,
... | 3.31875 | 160 |
from munerator.store import handle_event, setup_eve_mongoengine
| [
6738,
29856,
263,
1352,
13,
8095,
1330,
5412,
62,
15596,
11,
9058,
62,
44655,
62,
76,
25162,
18392,
628,
628
] | 3.35 | 20 |
# coding: utf-8
# NOTE(review): exported Jupyter notebook (Python 2 print statements and the
# proprietary GraphLab Create library); cell markers "# In[n]:" kept verbatim.
# #Fire up graphlab create
# In[35]:
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[36]:
sales = graphlab.SFrame('home_data.gl/')
# In[37]:
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
# In[38]:
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
# In[39]:
# 80/20 train/test split, deterministic because of the fixed seed.
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
# In[40]:
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
# #Evaluate the simple model
# In[41]:
print test_data['price'].mean()
# In[42]:
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library that is also useful for plotting. You can install it with:
#
# 'pip install matplotlib'
# In[43]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[44]:
plt.plot(test_data['sqft_living'],test_data['price'],'.',
        test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above: blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
# In[45]:
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
# In[46]:
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
# In[47]:
sales[my_features].show()
# In[48]:
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
# In[49]:
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features)
# In[50]:
print my_features
# ##Comparing the results of the simple model with adding more features
# In[51]:
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
# In[52]:
house1 = sales[sales['id']=='5309101200']
# In[53]:
house1
# <img src="house-5309101200.jpg">
# In[54]:
print house1['price']
# In[55]:
print sqft_model.predict(house1)
# In[56]:
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
# In[57]:
house2 = sales[sales['id']=='1925069082']
# In[58]:
house2
# <img src="house-1925069082.jpg">
# In[59]:
print sqft_model.predict(house2)
# In[60]:
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# In[61]:
# Hand-built single-row frame (each value is a one-element list/column).
bill_gates = {'bedrooms':[8],
              'bathrooms':[25],
              'sqft_living':[50000],
              'sqft_lot':[225000],
              'floors':[4],
              'zipcode':['98039'],
              'condition':[10],
              'grade':[10],
              'waterfront':[1],
              'view':[4],
              'sqft_above':[37500],
              'sqft_basement':[12500],
              'yr_built':[1994],
              'yr_renovated':[2010],
              'lat':[47.627606],
              'long':[-122.242054],
              'sqft_living15':[5000],
              'sqft_lot15':[40000]}
# <img src="house-bill-gates.jpg">
# In[62]:
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
# In[63]:
house_zip_code = sales[sales["zipcode"] == "98039"]
# In[64]:
house_zip_code
# In[65]:
house_zip_code['price'].mean()
# In[66]:
house_zip_code_range = house_zip_code[house_zip_code.apply(lambda x: x['sqft_living'] > 2000.0 and x['sqft_living'] <= 4000.0)]
# In[67]:
house_zip_code_range.head()
# In[68]:
house_zip_code_range.num_rows()
# In[69]:
house_zip_code.num_rows()
# In[70]:
advanced_features = [
'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
'condition', # condition of house
'grade', # measure of quality of construction
'waterfront', # waterfront property
'view', # type of view
'sqft_above', # square feet above ground
'sqft_basement', # square feet in basement
'yr_built', # the year built
'yr_renovated', # the year renovated
'lat', 'long', # the lat-long of the parcel
'sqft_living15', # average sq.ft. of 15 nearest neighbors
'sqft_lot15', # average lot size of 15 nearest neighbors
]
# In[71]:
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features)
# In[72]:
print advanced_features_model.evaluate(test_data)
# In[73]:
advanced_features_model.evaluate(test_data)['rmse'] - my_features_model.evaluate(test_data)['rmse']
# In[ ]:
# In[ ]:
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
13543,
510,
4823,
23912,
2251,
198,
198,
2,
554,
58,
2327,
5974,
198,
198,
11748,
4823,
23912,
628,
198,
2,
1303,
8912,
617,
2156,
4200,
1366,
198,
2,
220,
198,
2,
16092,
2... | 2.66595 | 2,326 |
import inspect
import os.path
import time
from two1.blockchain.twentyone_provider import TwentyOneProvider
from two1.bitcoin.hash import Hash
from two1.wallet.cache_manager import CacheManager
from two1.wallet.wallet_txn import WalletTransaction
# Absolute directory of this source file; used to locate fixtures on disk.
this_file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Module-level singletons shared by the code below: a transaction cache and
# the 21.co blockchain data provider.
cm = CacheManager()
dp = TwentyOneProvider()
| [
11748,
10104,
198,
11748,
28686,
13,
6978,
198,
11748,
640,
198,
198,
6738,
734,
16,
13,
9967,
7983,
13,
4246,
3787,
505,
62,
15234,
1304,
1330,
22381,
3198,
29495,
198,
6738,
734,
16,
13,
35395,
13,
17831,
1330,
21059,
198,
6738,
734... | 3.29661 | 118 |
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract subjects from a DataONE X.509 v3 certificate.
If a certificate was provided, it has been validated by Apache before being
passed to GMN. So it is known to signed by a trusted CA and to be unexpired.
A user can connect without providing a certificate (and so, without providing a
session). This limits the user's access to data that is publicly available.
A user can connect with a certificate that does not contain a list of
equivalent identities and group memberships (no SubjectInfo). This limits the
user's access to data that is publicly available and that is available directly
to that user (as designated in the Subject DN).
"""
import d1_common.cert.subjects
import d1_common.const
import d1_common.types.exceptions
def get_subjects(request):
    """Extract the DataONE subjects authenticated by this connection.

    Returns a ``(primary_str, equivalent_set)`` pair: the primary subject is
    the certificate subject DN serialized to a DataONE compliant subject
    string; the set holds equivalent identities, groups and group
    memberships.  A connection without a client certificate is mapped to the
    public subject with no equivalents.

    Raises ``InvalidToken`` when a certificate is present but cannot be
    parsed.
    """
    if not _is_certificate_provided(request):
        # Anonymous caller: public access only.
        return d1_common.const.SUBJECT_PUBLIC, set()
    try:
        # The PEM header is read inside the try-block so a missing or
        # malformed certificate is reported the same way.
        return get_authenticated_subjects(request.META["SSL_CLIENT_CERT"])
    except Exception as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Error extracting session from certificate. error="{}"'.format(str(e)),
        )
def get_authenticated_subjects(cert_pem):
    """Return the primary subject and the set of equivalent subjects
    authenticated by the given certificate.

    - ``cert_pem`` may be ``str`` or ``bytes``; it is normalized to UTF-8
      bytes before parsing.
    """
    pem_bytes = cert_pem.encode("utf-8") if isinstance(cert_pem, str) else cert_pem
    return d1_common.cert.subjects.extract_subjects(pem_bytes)
| [
2,
770,
670,
373,
2727,
416,
6809,
287,
262,
6060,
11651,
1628,
11,
290,
318,
198,
2,
26913,
33696,
416,
11983,
6712,
287,
6060,
11651,
13,
1114,
198,
2,
517,
1321,
319,
6060,
11651,
11,
766,
674,
3992,
2524,
379,
2638,
1378,
7890,
... | 3.268448 | 786 |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetFwSystems200Ok(object):
    """Model for one entry of the ``GET /fw/systems/`` ESI response.

    NOTE: Originally produced by the swagger code generator; this version
    keeps exactly the same public interface and validation behavior.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'solar_system_id': 'int',
        'owner_faction_id': 'int',
        'occupier_faction_id': 'int',
        'victory_points': 'int',
        'victory_points_threshold': 'int',
        'contested': 'bool'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'solar_system_id': 'solar_system_id',
        'owner_faction_id': 'owner_faction_id',
        'occupier_faction_id': 'occupier_faction_id',
        'victory_points': 'victory_points',
        'victory_points_threshold': 'victory_points_threshold',
        'contested': 'contested'
    }

    def _required_property(name):  # runs at class-creation time, not a method
        """Build a property for *name* whose setter rejects ``None``."""
        private = '_' + name

        def _get(self):
            return getattr(self, private)

        def _set(self, value):
            if value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % name
                )
            setattr(self, private, value)

        return property(
            _get, _set, doc="The %s of this GetFwSystems200Ok." % name
        )

    solar_system_id = _required_property('solar_system_id')
    owner_faction_id = _required_property('owner_faction_id')
    occupier_faction_id = _required_property('occupier_faction_id')
    victory_points = _required_property('victory_points')
    victory_points_threshold = _required_property('victory_points_threshold')
    contested = _required_property('contested')

    del _required_property  # class-creation helper only

    def __init__(self, solar_system_id=None, owner_faction_id=None, occupier_faction_id=None, victory_points=None, victory_points_threshold=None, contested=None):  # noqa: E501
        """Initialize the model; every field is required, so leaving any
        argument as ``None`` raises ``ValueError``."""
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Assign through the property setters so the not-None validation
        # applies to constructor arguments exactly as to later assignments.
        self.solar_system_id = solar_system_id
        self.owner_faction_id = owner_faction_id
        self.occupier_faction_id = occupier_faction_id
        self.victory_points = victory_points
        self.victory_points_threshold = victory_points_threshold
        self.contested = contested

    def to_dict(self):
        """Return the model's properties as a plain dict, recursively
        converting nested models, lists and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal iff they are the same model type with equal
        attribute values."""
        if not isinstance(other, GetFwSystems200Ok):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
32356,
2451,
7928,
26491,
628,
220,
220,
220,
1052,
4946,
17614,
329,
32356,
7467,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
4946,
17614,
1020,
21... | 2.252065 | 3,753 |
import FormaPago
import MenuOpciones
if __name__ == '__main__':
    # Entry point: show the payment-methods menu, then dispatch on the
    # option the user types.
    FormaPago.menuPagos()
    # NOTE(review): the prompt typo "Dijite" (-> "Digite") is user-facing
    # runtime text, so it is left unchanged by this comments-only edit.
    opc = input("Dijite el numero de la opción que desea acceder: ")
    if opc == "1":
        MenuOpciones.OpcionCrear()
    elif opc == "2":
        MenuOpciones.OpcionCambiar()
    # Any other input falls through silently (no else branch).
| [
11748,
5178,
64,
47,
3839,
201,
198,
11748,
21860,
46,
14751,
295,
274,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
5178,
64,
47,
3839,
13,
26272,
47,
48215,
3419,
201,
198,... | 1.972603 | 146 |
from random import randrange
# Split a dataset into a train and test set
# Split a dataset into $k$ folds
# Evaluate an algorithm using a train/test split several times
# Evaluate an algorithm using a cross-validation split
| [
6738,
4738,
1330,
43720,
9521,
201,
198,
201,
198,
2,
27758,
257,
27039,
656,
257,
4512,
290,
1332,
900,
201,
198,
201,
198,
2,
27758,
257,
27039,
656,
720,
74,
3,
38744,
201,
198,
201,
198,
2,
26439,
4985,
281,
11862,
1262,
257,
... | 3.575758 | 66 |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import json
from ..representation import (
DetectionPrediction,
DetectionAnnotation,
CoCoInstanceSegmentationAnnotation,
CoCocInstanceSegmentationPrediction,
PoseEstimationAnnotation,
PoseEstimationPrediction
)
from ..logging import print_info
from ..config import BaseField
from ..utils import get_or_parse_value
from .metric import FullDatasetEvaluationMetric
from .coco_metrics import COCO_THRESHOLDS
# Feature flags for interactive debugging of evaluation results.
SHOULD_SHOW_PREDICTIONS = False
SHOULD_DISPLAY_DEBUG_IMAGES = False
if SHOULD_DISPLAY_DEBUG_IMAGES:
    # OpenCV is only needed -- and therefore only imported -- when debug
    # images are displayed.
    import cv2
# Dispatch table: IoU type -> converter into COCO-compatible form.
# NOTE(review): box_to_coco / segm_to_coco / keypoints_to_coco are not
# visible in this chunk -- presumably defined elsewhere in the module;
# confirm they exist before this dict is evaluated at import time.
iou_specific_processing = {
    'bbox': box_to_coco,
    'segm': segm_to_coco,
    'keypoints': keypoints_to_coco
}
| [
37811,
198,
15269,
357,
66,
8,
13130,
8180,
10501,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.20202 | 396 |
from django.urls import path, include, reverse
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.generic.base import TemplateView
from django.views.i18n import JavaScriptCatalog
from wagtail.core import hooks
from .decorators import turbo_disable
from .views import turbo_init
@hooks.register("register_admin_urls")
@hooks.register("insert_global_admin_js", order=100)
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
11,
9575,
198,
6738,
42625,
14208,
13,
33571,
13,
12501,
273,
2024,
13,
12976,
73,
5430,
1330,
2124,
14535,
62,
25811,
62,
31642,
47103,
198,
6738,
42625,
14208,
13,
33571,
13,
4... | 3.344 | 125 |
import pytest
import boto3
from moto.dynamodb2 import mock_dynamodb2
from otter.router.src.shared.client import DynamoDBClient, get_valid_devices
from otter.router.src.shared.device import Device
DYNAMODB_TABLE = "ottr-example"
@pytest.fixture
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
@mock_dynamodb2
| [
11748,
12972,
9288,
198,
11748,
275,
2069,
18,
198,
6738,
285,
2069,
13,
67,
4989,
375,
65,
17,
1330,
15290,
62,
67,
4989,
375,
65,
17,
198,
198,
6738,
30972,
353,
13,
472,
353,
13,
10677,
13,
28710,
13,
16366,
1330,
41542,
11012,
... | 2.239521 | 167 |
from django.shortcuts import render, redirect, render_to_response
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView, ListView, CreateView
from django.core.files.storage import FileSystemStorage
from django.urls import reverse_lazy
from .forms import BookForm
from .forms import OrderForm
from .forms import TripInOrderForm
from .models import Book
from .models import Order
from .models import TripInOrder
from mysite.choices import *
# class Home(TemplateView):
# count = User.objects.count()
# template_name = 'home.html'
# return render(request, 'home.html', {
# 'count': count
# })
@login_required
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
... | 3.223881 | 268 |
__all__=["model_utility","model","utility","visualization"]
from bldgnorm.model_utility import *
from bldgnorm.model import *
from bldgnorm.utility import *
from bldgnorm.visualization import * | [
834,
439,
834,
28,
14692,
19849,
62,
315,
879,
2430,
19849,
2430,
315,
879,
2430,
41464,
1634,
8973,
198,
198,
6738,
275,
335,
4593,
579,
13,
19849,
62,
315,
879,
1330,
1635,
198,
6738,
275,
335,
4593,
579,
13,
19849,
1330,
1635,
19... | 3.03125 | 64 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.server_utils import ServerUtils
from utils.video_player import MPVVideoPlayer
from api.constants import Kinds
from PyInquirer import prompt
from typing import List
import os
class Main:
""" main app that run servers and get user commands """
@property
@_media_name.setter
@property
@_media_kind.setter
@property
@_media_season.setter
@property
@_media_episode.setter
# MPV Video Player
def _video_player(self, slug: str, verbose: bool = False) -> None:
"""The video player method uses mpv as default. """
chosed_quality_url: str = self._choose_quality(slug)
trans_files: List = self._get_trans_files(slug)
cmd_args = ['mpv', ]
if chosed_quality_url == None:
return False
cmd_args.append(f"{chosed_quality_url}")
if len(trans_files) >= 1:
for t in trans_files:
cmd_args.append(f"--sub-file={t}")
# no terminal output
cmd_args.append("--no-terminal")
if verbose:
print('$ ' + ' '.join(cmd_args))
# Save screenshots to data folder, with seperating medias
# First make sure the data folder exist, if not make one
if not os.path.exists(Defaults.DATA_FOLDER):
os.mkdir(Defaults.DATA_FOLDER)
# check for screenshots folder existance, or make one
if not os.path.exists(Defaults.SCREENSHOTS_FOLDER):
os.mkdir(Defaults.SCREENSHOTS_FOLDER)
media_screenshots_path = os.path.join(
Defaults.SCREENSHOTS_FOLDER,
self._media_name
)
# check if the playing media have already folder, if not make one
if not os.path.exists(media_screenshots_path):
os.mkdir(media_screenshots_path)
# Set directory, and quality for screenshots
cmd_args.extend([
# The path screenshots saved to
f"--screenshot-directory={media_screenshots_path}",
f"--screenshot-jpeg-quality={100}",
])
# change screenshot filename template, and set media title
if self._media_kind == Kinds.MOVIES:
cmd_args.extend([
f"--screenshot-template=%P", # %p: Current playback time
f"--force-media-title={self._media_name}"
])
elif self._media_kind == Kinds.SERIES:
cmd_args.extend([
f"--screenshot-template=s{self._media_season}-e{self._media_episode}-%P",
f"--force-media-title={self._media_name} s{self._media_season} e{self._media_episode}",
])
# start playing the video
video_player = MPVVideoPlayer()
while True:
video_process: bool = video_player.play_video(cmd_args)
if video_process: # if process returned True
break # end the loop
# On error, Ask to retry playing the video
elif self._continue(msg="Error on playing the videos, Retry? "):
continue
else: # Else end the loop and return to the main loop
break
@property
if __name__ == '__main__':
main_app = Main()
main_app.run() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
3384,
4487,
13,
15388,
62,
26791,
1330,
9652,
18274,
4487,
198,
6738,
3384,
4487,
13,
15588,
62,
7829,
1330,... | 2.157861 | 1,552 |
var_from_resource_var_file = 'Some Value'
| [
7785,
62,
6738,
62,
31092,
62,
7785,
62,
7753,
796,
705,
4366,
11052,
6,
201,
198
] | 2.6875 | 16 |
#from bson.objectid import ObjectId
from pymongo import MongoClient
client= MongoClient("localhost", 27017)
db = client.Dados | [
2,
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
628,
198,
198,
16366,
28,
42591,
11792,
7203,
36750,
1600,
2681,
29326,
8,
198,
198,
9945,
796,
5456,
13,
35,
22484
] | 3.307692 | 39 |
#!/usr/bin/env python
class BaseVimeoException(Exception):
"""Base class for Vimeo Exceptions."""
def __init__(self, response, message):
"""Base Exception class init."""
# API error message
self.message = self.__get_message(response)
# HTTP status code
if type(response) is Exception:
self.status_code = 500
elif hasattr(response, 'status_code'):
self.status_code = response.status_code
else:
self.status_code = 500
super(BaseVimeoException, self).__init__(self.message)
class ObjectLoadFailure(Exception):
"""Object Load failure exception."""
def __init__(self, message):
"""Object Load failure exception init."""
super(ObjectLoadFailure, self).__init__(message)
class UploadQuotaExceeded(Exception):
"""Exception for upload quota execeeded."""
def __get_free_space(self, num):
"""Transform bytes in gigabytes."""
return 'Free space quota: %sGb' % (round((num / 1073741824.0), 1))
def __init__(self, free_quota, message):
"""Init method for this subclass of BaseVimeoException."""
message = message + self.__get_free_space(num=free_quota)
super(UploadQuotaExceeded, self).__init__(message)
class UploadAttemptCreationFailure(BaseVimeoException):
"""Exception for upload attempt creation failure."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(UploadAttemptCreationFailure, self).__init__(response, message)
class UploadTicketCreationFailure(BaseVimeoException):
"""Exception for upload ticket creation failure."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(UploadTicketCreationFailure, self).__init__(response, message)
class VideoCreationFailure(BaseVimeoException):
"""Exception for failure on the delete during the upload."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(VideoCreationFailure, self).__init__(response, message)
class VideoUploadFailure(BaseVimeoException):
"""Exception for failures during the actual upload od the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(VideoUploadFailure, self).__init__(response, message)
class PictureCreationFailure(BaseVimeoException):
"""Exception for failure on initial request to upload a picture."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureCreationFailure, self).__init__(response, message)
class PictureUploadFailure(BaseVimeoException):
"""Exception for failure on the actual upload of the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureUploadFailure, self).__init__(response, message)
class PictureActivationFailure(BaseVimeoException):
"""Exception for failure on activating the picture."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(PictureActivationFailure, self).__init__(response, message)
class TexttrackCreationFailure(BaseVimeoException):
"""Exception for failure on the initial request to upload a text track."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(TexttrackCreationFailure, self).__init__(response, message)
class TexttrackUploadFailure(BaseVimeoException):
"""Exception for failure on the actual upload of the file."""
def __init__(self, response, message):
"""Init method for this subclass of BaseVimeoException."""
super(TexttrackUploadFailure, self).__init__(response, message)
class APIRateLimitExceededFailure(BaseVimeoException):
"""Exception used when the user has exceeded the API rate limit."""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
4871,
7308,
53,
47776,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
569,
47776,
1475,
11755,
526,
15931,
628,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,... | 3.077381 | 1,344 |
from calculator import calculator
if __name__=='__main__':
calculator()
| [
6738,
28260,
1330,
28260,
198,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
220,
220,
220,
28260,
3419,
198
] | 3.347826 | 23 |
"""
Parametric design with CPE modeled separately for cue and stimulus periods.
"""
design_name = "cpe_cuestim"
condition_names = ["cue", "stim",
"cpe_cue", "cpe_stim",
"error", "response_time"]
temporal_deriv = True
confound_pca = True
contrasts = [
("cue_neg", ["cue"], [-1]),
("stim_neg", ["stim"], [-1]),
("cpe_cue_neg", ["cpe_cue"], [-1]),
("cpe_stim_neg", ["cpe_stim"], [-1]),
("cue-stim", ["cue", "stim"], [1, -1]),
("stim-cue", ["cue", "stim"], [-1, 1]),
("cpe_cue-stim", ["cpe_cue", "cpe_stim"], [1, -1]),
("cpe_stim-cue", ["cpe_cue", "cpe_stim"], [-1, 1]),
]
sampling_range = (.5, .5, 1)
surf_smooth = 6
| [
37811,
198,
22973,
19482,
1486,
351,
327,
11401,
29563,
13869,
329,
28381,
290,
19819,
9574,
13,
198,
37811,
198,
26124,
62,
3672,
796,
366,
66,
431,
62,
27399,
395,
320,
1,
198,
198,
31448,
62,
14933,
796,
14631,
15509,
1600,
366,
42... | 1.901478 | 406 |
from django.db import models
from django.contrib.auth.models import AbstractUser, AbstractBaseUser
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
11,
27741,
14881,
12982,
198,
198,
2,
13610,
534,
4981,
994,
13,
628,
628
] | 3.714286 | 35 |
from django.core.management.base import BaseCommand
from cleanup_later.models import CleanupFile
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
27425,
62,
36760,
13,
27530,
1330,
5985,
929,
8979,
198
] | 3.92 | 25 |
#! /usr/bin/python3
import requests,argparse,sys,colorama,pyfiglet
from colorama import Fore,Style
if __name__ == "__main__":
Covid().stats | [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
7007,
11,
853,
29572,
11,
17597,
11,
8043,
1689,
11,
9078,
5647,
1616,
198,
6738,
3124,
1689,
1330,
4558,
11,
21466,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
129... | 2.803922 | 51 |
print('olá mundo')
| [
4798,
10786,
349,
6557,
27943,
78,
11537,
198
] | 2.375 | 8 |
import uuid
from django.test import TestCase, override_settings
from gitd.core.constants import GitHubEvents
from gitd.core.exceptions import GitHubException
from gitd.core.handlers import github_handler
from gitd.core.models import Deployment
| [
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
20957,
62,
33692,
198,
198,
6738,
17606,
67,
13,
7295,
13,
9979,
1187,
1330,
21722,
37103,
198,
6738,
17606,
67,
13,
7295,
13,
1069,
11755,
1330,
21722,
... | 3.686567 | 67 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
driver.get("https://orteil.dashnet.org/cookieclicker/")
driver.implicitly_wait(5)
cookie = driver.find_element_by_id("bigCookie")
cookie_count = driver.find_element_by_id("cookies")
items = [[driver.find_element_by_id("productName" + str(i)), driver.find_element_by_id("productPrice" + str(i))] for i in range(3, -1, -1)]
#print(items)
actions = ActionChains(driver)
actions.click(cookie)
for i in range(2000):
actions.perform()
count = int(cookie_count.text.split(" ")[0])
for item_name, item_value in items:
value = int(item_value.text)
if value <= count:
building_actions = ActionChains(driver)
building_actions.move_to_element(item_name)
building_actions.click()
building_actions.perform()
print("Just bought {} for {} cookies...".format(item_name.text, value))
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
2673,
62,
38861,
1330,
7561,
1925,
1299,
198,
198,
34219,
796,
366,
34,
7479,
15167,
13283,
357,
87,
4521,
19415,
28663,
276,
38291... | 2.562963 | 405 |
import os
import re
import logging
from binascii import crc_hqx
from datetime import datetime
from flask import Flask, request, render_template, redirect
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from flask_bootstrap import Bootstrap
from gevent.pywsgi import WSGIServer
from google.cloud import spanner
from google.api_core.exceptions import AlreadyExists
secret_key = os.urandom(32)
app = Flask(__name__)
Bootstrap(app)
app.secret_key = secret_key.hex()
spanner_client = spanner.Client()
app_settings = os.environ.get('APP_SETTINGS')
instance_id = os.environ.get('SPANNER_INSTANCE', 'runfaster-spanner')
database_id = os.environ.get('SPANNER_DATABASE', 'runfaster')
database = spanner_client.instance(instance_id).database(database_id, ddl_statements=["""CREATE TABLE urls (
shorten STRING(MAX) NOT NULL,
created_at TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),
source STRING(MAX) NOT NULL,
) PRIMARY KEY (shorten)
"""])
source_regex = re.compile("https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+")
shorten_regex = re.compile("[a-zA-Z0-9]")
@app.route('/<string:shorten>', methods=['GET'])
@app.route('/', methods=['GET', 'POST'])
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
302,
198,
11748,
18931,
198,
6738,
9874,
292,
979,
72,
1330,
1067,
66,
62,
71,
80,
87,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
11,
18941,
198,
... | 2.73176 | 466 |
# -*- coding: utf-8 -*-
import json
import os
import random
import click
import neptune
import numpy as np
import regex
import torch
from loguru import logger
from neptune.exceptions import NoExperimentContext
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from bert.optimization import BertAdam
from bert.tokenization import BertTokenizer
from eval import evalb
from label_encoder import LabelEncoder
from model import ChartParser
from trees import InternalParseNode, load_trees
try:
from apex import amp
except ImportError:
pass
MODEL_FILENAME = "model.bin"
BERT_TOKEN_MAPPING = {
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
}
@click.command()
@click.option("--train_file", required=True, type=click.Path())
@click.option("--dev_file", required=True, type=click.Path())
@click.option("--test_file", required=True, type=click.Path())
@click.option("--output_dir", required=True, type=click.Path())
@click.option("--bert_model", required=True, type=click.Path())
@click.option("--lstm_layers", default=2, show_default=True, type=click.INT)
@click.option("--lstm_dim", default=250, show_default=True, type=click.INT)
@click.option("--tag_embedding_dim", default=50, show_default=True, type=click.INT)
@click.option("--label_hidden_dim", default=250, show_default=True, type=click.INT)
@click.option("--dropout_prob", default=0.4, show_default=True, type=click.FLOAT)
@click.option("--batch_size", default=32, show_default=True, type=click.INT)
@click.option("--num_epochs", default=20, show_default=True, type=click.INT)
@click.option("--learning_rate", default=5e-5, show_default=True, type=click.FLOAT)
@click.option("--warmup_proportion", default=0.1, show_default=True, type=click.FLOAT)
@click.option(
"--gradient_accumulation_steps", default=1, show_default=True, type=click.INT
)
@click.option("--seed", default=42, show_default=True, type=click.INT)
@click.option("--device", default=0, show_default=True, type=click.INT)
@click.option("--fp16", is_flag=True)
@click.option("--do_eval", is_flag=True)
@click.option("--resume", is_flag=True)
@click.option("--preload", is_flag=True)
@click.option("--freeze_bert", is_flag=True)
if __name__ == "__main__":
neptune.init(project_qualified_name=os.getenv("NEPTUNE_PROJECT_NAME"))
try:
# main(
# [
# "--train_file=corpora/WSJ-PTB/02-21.10way.clean.train",
# "--dev_file=corpora/WSJ-PTB/22.auto.clean.dev",
# "--test_file=corpora/WSJ-PTB/23.auto.clean.test",
# "--output_dir=outputs",
# "--bert_model=models/bert-base-multilingual-cased",
# "--batch_size=32",
# "--num_epochs=20",
# "--learning_rate=3e-5",
# # "--fp16",
# # "--do_eval",
# ]
# )
main()
finally:
try:
neptune.stop()
except NoExperimentContext:
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
4738,
198,
198,
11748,
3904,
198,
11748,
497,
457,
1726,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
40364,
198,
11748,
28... | 2.352134 | 1,312 |
import pathlib
import os
import sys
import time
import shutil
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
BASE_URL = 'https://servicos-portal.mpro.mp.br/web/mp-transparente/contracheque'
BASE_URL_MEMBROS_ATIVOS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FremuneracaoMembrosAtivos.rptdesign&anomes='
BASE_URL_VERBAS_INDENIZATORIAS = 'https://servicos-portal.mpro.mp.br/plcVis/frameset?__report=..%2FROOT%2Frel%2Fcontracheque%2Fmembros%2FverbasIndenizatoriasMembrosAtivos.rptdesign&anomes='
FLAG = ['remuneracao','verbas-indenizatorias']
REMUNERACAO = 'remuneracao'
VERBAS_INDENIZATORIAS = 'verbas-indenizatorias'
| [
11748,
3108,
8019,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4423,
346,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
... | 2.371069 | 318 |
from riscv_isac.log import logger
import itertools
import struct
import random
import sys
import math
from decimal import *
fzero = ['0x00000000', '0x80000000']
fminsubnorm = ['0x00000001', '0x80000001']
fsubnorm = ['0x00000002', '0x80000002', '0x007FFFFE', '0x807FFFFE', '0x00555555', '0x80555555']
fmaxsubnorm = ['0x007FFFFF', '0x807FFFFF']
fminnorm = ['0x00800000', '0x80800000']
fnorm = ['0x00800001', '0x80800001', '0x00855555', '0x80855555', '0x008AAAAA', '0x808AAAAA', '0x55000000', '0xD5000000', '0x2A000000', '0xAA000000']
fmaxnorm = ['0x7F7FFFFF', '0xFF7FFFFF']
finfinity = ['0x7F800000', '0xFF800000']
fdefaultnan = ['0x7FC00000', '0xFFC00000']
fqnan = ['0x7FC00001', '0xFFC00001', '0x7FC55555', '0xFFC55555']
fsnan = ['0x7F800001', '0xFF800001', '0x7FAAAAAA', '0xFFAAAAAA']
fone = ['0x3F800000', '0xBF800000']
dzero = ['0x0000000000000000', '0x8000000000000000']
dminsubnorm = ['0x0000000000000001', '0x8000000000000001']
dsubnorm = ['0x0000000000000002', '0x8000000000000002','0x0008000000000000', '0x0008000000000002', '0x0001000000000000', '0x8001000000000000','0x8001000000000003','0x8001000000000007']
dmaxsubnorm = ['0x000FFFFFFFFFFFFF', '0x800FFFFFFFFFFFFF']
dminnorm = ['0x0010000000000000', '0x8010000000000000']
dnorm = ['0x0010000000000002', '0x8010000000000002', '0x0011000000000000', '0x8011000000000000', '0x0018000000000000', '0x8018000000000000','0x8018000000000005','0x8018000000000007']
dmaxnorm = ['0x7FEFFFFFFFFFFFFF', '0xFFEFFFFFFFFFFFFF']
dinfinity = ['0x7FF0000000000000', '0xFFF0000000000000']
ddefaultnan = ['0x7FF8000000000000', '0xFFF8000000000000']
dqnan = ['0x7FF8000000000001', '0xFFF8000000000001', '0x7FFC000000000001', '0xFFFC000000000001']
dsnan = ['0x7FF0000000000001', '0xFFF0000000000001', '0x7FF4AAAAAAAAAAAA', '0xFFF4AAAAAAAAAAAA']
done = ['0x3FF0000000000000', '0xBF80000000000000']
rounding_modes = ['0','1','2','3','4']
def ibm_b1(flen, opcode, ops):
'''
IBM Model B1 Definition:
Test all combinations of floating-point basic types, positive and negative, for
each of the inputs. The basic types are Zero, One, MinSubNorm, SubNorm,
MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, and
SNaN.
:param flen: Size of the floating point registers
:param opcode: Opcode for which the coverpoints are to be generated
:param ops: No. of Operands taken by the opcode
:type flen: int
:type opcode: str
:type ops: int
Abstract Dataset Description:
Operands =>
[Zero, One, MinSubNorm, SubNorm, MaxSubNorm, MinNorm, Norm, MaxNorm, Infinity, DefaultNaN, QNaN, SNaN]
Implementation:
- Dependent on the value of flen, a predefined dataset of floating point values are added.
- Using the itertools package, an iterative multiplication is performed with two lists to create an exhaustive combination of all the operand values.
- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
- Coverpoints are then appended with the respective rounding mode for that particular opcode.
'''
if flen == 32:
basic_types = fzero + fminsubnorm + [fsubnorm[0], fsubnorm[3]] +\
fmaxsubnorm + fminnorm + [fnorm[0], fnorm[3]] + fmaxnorm + \
finfinity + fdefaultnan + [fqnan[0], fqnan[3]] + \
[fsnan[0], fsnan[3]] + fone
elif flen == 64:
basic_types = dzero + dminsubnorm + [dsubnorm[0], dsubnorm[1]] +\
dmaxsubnorm + dminnorm + [dnorm[0], dnorm[1]] + dmaxnorm + \
dinfinity + ddefaultnan + [dqnan[0], dqnan[1]] + \
[dsnan[0], dsnan[1]] + done
else:
logger.error('Invalid flen value!')
sys.exit(1)
# the following creates a cross product for ops number of variables
b1_comb = list(itertools.product(*ops*[basic_types]))
coverpoints = []
for c in b1_comb:
cvpt = ""
for x in range(1, ops+1):
# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
cvpt += (extract_fields(flen,c[x-1],str(x)))
cvpt += " and "
if opcode.split('.')[0] in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
cvpt += 'rm_val == 0'
elif opcode.split('.')[0] in ["fclass","flt","fmax","fsgnjn"]:
cvpt += 'rm_val == 1'
elif opcode.split('.')[0] in ["feq","flw","fsw","fsgnjx"]:
cvpt += 'rm_val == 2'
cvpt += ' # '
for y in range(1, ops+1):
cvpt += 'rs'+str(y)+'_val=='
cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
if(y != ops):
cvpt += " and "
coverpoints.append(cvpt)
mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B1 for '+opcode+' !'
logger.info(mess)
coverpoints = comments_parser(coverpoints)
return coverpoints
def ibm_b2(flen, opcode, ops, int_val = 100, seed = -1):
'''
IBM Model B2 Definition:
This model tests final results that are very close, measured in Hamming
distance, to the specified boundary values. Each boundary value is taken as a
base value, and the model enumerates over small deviations from the base, by
flipping one bit of the significand.
:param flen: Size of the floating point registers
:param opcode: Opcode for which the coverpoints are to be generated
:param ops: No. of Operands taken by the opcode
:param int_val: Number to define the range in which the random value is to be generated. (Predefined to 100)
:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
:type flen: int
:type opcode: str
:type ops: int
:type int_val: int
:param seed: int
Abstract Dataset Description:
Final Results = [Zero, One, MinSubNorm, MaxSubNorm, MinNorm, MaxNorm]
Operand1 {operation} Operand2 = Final Results
Implementation:
- Hamming distance is calculated using an xor operation between a number in the dataset and a number generated using walking ones operation.
- A random operand value for one of the operands is assigned and based on the result and operation under consideration, the next operand is calculated.
- These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
- Coverpoints are then appended with the respective rounding mode for that particular opcode.
'''
if flen == 32:
flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
b = '0x00000010'
e_sz=8
m_sz = 23
elif flen == 64:
flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
b = '0x0000000000000010'
e_sz=11
m_sz = 52
result = []
b2_comb = []
opcode = opcode.split('.')[0]
if seed == -1:
if opcode in 'fadd':
random.seed(0)
elif opcode in 'fsub':
random.seed(1)
elif opcode in 'fmul':
random.seed(2)
elif opcode in 'fdiv':
random.seed(3)
elif opcode in 'fsqrt':
random.seed(4)
elif opcode in 'fmadd':
random.seed(5)
elif opcode in 'fnmadd':
random.seed(6)
elif opcode in 'fmsub':
random.seed(7)
elif opcode in 'fnmsub':
random.seed(8)
else:
random.seed(seed)
for i in range(len(flip_types)):
k=1
for j in range (1,24):
#print('{:010b}'.format(k))
result.append(['0x'+hex(eval(bin(int('1'+flip_types[i][2:], 16))) ^ eval('0b'+'{:023b}'.format(k)))[3:],' | Result = '+num_explain(flen, '0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:]))+'(0x'+str(hex(eval(bin(int('1'+flip_types[i][2:], 16))))[3:])+')^'+str('0x'+hex(eval('0b'+'1'+'{:024b}'.format(k)))[3:])])
k=k*2
for i in range(len(result)):
bin_val = bin(int('1'+result[i][0][2:],16))[3:]
rsgn = bin_val[0]
rexp = bin_val[1:e_sz+1]
rman = bin_val[e_sz+1:]
rs1_exp = rs3_exp = rexp
rs1_bin = bin(random.randrange(1,int_val))
rs3_bin = bin(random.randrange(1,int_val))
rs1_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs1_bin)-2)))+rs1_bin[2:])
rs3_bin = ('0b0'+rexp+('0'*(m_sz-(len(rs3_bin)-2)))+rs3_bin[2:])
rs1 = fields_dec_converter(flen,'0x'+hex(int('1'+rs1_bin[2:],2))[3:])
rs3 = fields_dec_converter(flen,'0x'+hex(int('1'+rs3_bin[2:],2))[3:])
if opcode in 'fadd':
rs2 = fields_dec_converter(flen,result[i][0]) - rs1
elif opcode in 'fsub':
rs2 = rs1 - fields_dec_converter(flen,result[i][0])
elif opcode in 'fmul':
rs2 = fields_dec_converter(flen,result[i][0])/rs1
elif opcode in 'fdiv':
if fields_dec_converter(flen,result[i][0]) != 0:
rs2 = rs1/fields_dec_converter(flen,result[i][0])
elif opcode in 'fsqrt':
rs2 = fields_dec_converter(flen,result[i][0])*fields_dec_converter(flen,result[i][0])
elif opcode in 'fmadd':
rs2 = (fields_dec_converter(flen,result[i][0]) - rs3)/rs1
elif opcode in 'fnmadd':
rs2 = (rs3 - fields_dec_converter(flen,result[i][0]))/rs1
elif opcode in 'fmsub':
rs2 = (fields_dec_converter(flen,result[i][0]) + rs3)/rs1
elif opcode in 'fnmsub':
rs2 = -1*(rs3 + fields_dec_converter(flen,result[i][0]))/rs1
if(flen==32):
m = struct.unpack('f', struct.pack('f', rs2))[0]
elif(flen==64):
m = rs2
if opcode in ['fadd','fsub','fmul','fdiv']:
b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m)))
elif opcode in 'fsqrt':
b2_comb.append((floatingPoint_tohex(flen,m),))
elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
b2_comb.append((floatingPoint_tohex(flen,rs1),floatingPoint_tohex(flen,m),floatingPoint_tohex(flen,rs3)))
#print("b2_comb",b2_comb)
coverpoints = []
k=0
for c in b2_comb:
cvpt = ""
for x in range(1, ops+1):
# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
cvpt += (extract_fields(flen,c[x-1],str(x)))
cvpt += " and "
cvpt += 'rm_val == 0'
cvpt += ' # '
for y in range(1, ops+1):
cvpt += 'rs'+str(y)+'_val=='
cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
if(y != ops):
cvpt += " and "
cvpt += result[k][1]
coverpoints.append(cvpt)
k=k+1
mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B2 for '+opcode+' !'
logger.info(mess)
coverpoints = comments_parser(coverpoints)
return coverpoints
def ibm_b3(flen: int, opcode: str, ops: int, seed: int = -1) -> list:
    '''
    IBM Model B3 Definition:
    This model tests all combinations of the sign, significand’s LSB, guard bit & sticky bit of the intermediate result.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Result is chosen at random
    Intermediate Result = [All possible combinations of Sign, LSB, Guard and Sticky are taken]
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The Sticky bit is 1 if there were non-zero digits to the right of the guard digit, hence the lsb list is subjected to that condition.
    - Float_val [ a list of numbers ] extracted from the fields_dec_converter is checked for the LSB. If it is a negative number, then the list ieee754_num is appended with splitting the p character and first 10 characters in the 0th split + ‘p’ + other part of the split. “p” specifies the maximum available number in python and used in 64 bit architecture. If we require a digit more than the number, then we represent it using a string.
    - Now the ir_dataset is initialized and since the ieee754_num list has the same element twice [ first is just the number and second is with sign ], we loop that array, considering only multiples of 2 elements from it. If the sign is ‘-’, then the index is updated with 1 else if it is ‘+’, then it is updated with 0 complying with the IEEE standards.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]  # strip the precision suffix: 'fadd.s'/'fadd.d' -> 'fadd'
    getcontext().prec = 40  # widen Decimal precision for the flen==64 back-solving below
    # Fixed per-opcode seeds keep the generated coverpoints reproducible across runs.
    # NOTE(review): 'opcode in <str>' is a substring test, not equality; it only behaves
    # like '==' because opcode is always the full mnemonic here — consider '=='.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        lsb = []
        # Walk the 32-bit subnormal+normal datasets; record each value (and its
        # sign-flipped twin) together with the LSB of its significand.  The last
        # hex digit's parity gives the significand LSB.
        for i in fsubnorm+fnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
                ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
            else:
                ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
                ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # Append an extra hex digit i to each significand: its binary form supplies
        # the guard/round/sticky pattern of the intermediate result.
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)  # grs[0]=guard, grs[1]=round, grs[2]=sticky (bit 3 always 0: i is even)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        lsb = []
        for i in dsubnorm+dnorm:
            if int(i[-1],16)%2 == 1:
                lsb.append('1')
                lsb.append('1')
            else:
                lsb.append('0')
                lsb.append('0')
            float_val = str(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val)
                ieee754_num.append('-'+float_val)
            else:
                ieee754_num.append(float_val)
                ieee754_num.append(float_val[1:])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            for i in range(2,16,2):
                grs = '{:04b}'.format(i)
                if ieee754_num[k][0] == '-': sign = '1'
                else: sign = '0'
                # Perturb the decimal significand by i*16**-14 to plant the GRS bits.
                ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k]])
    # NOTE(review): the name says 'b4' but this is model B3 (copy-paste); purely local.
    b4_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        # Back-solve rs2 so that rs1 <op> rs2 (and rs3 for the fused ops) lands on the
        # chosen intermediate result; Decimal keeps the flen==64 arithmetic exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1..x3 look unused, but struct.pack('f', ...) also range-checks
        # its argument (it can raise OverflowError for values beyond float32 range) —
        # confirm that side effect is not relied upon before removing these lines.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # One coverpoint per operand combination per rounding mode (rm 0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B3 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b4(flen: int, opcode: str, ops: int, seed: int = -1) -> list:
    '''
    IBM Model B4 Definition:
    This model creates a test-case for each of the following constraints on the
    intermediate results:

    1. All the numbers in the range [+MaxNorm – 3 ulp, +MaxNorm + 3 ulp]
    2. All the numbers in the range [-MaxNorm - 3 ulp, -MaxNorm + 3 ulp]
    3. A random number that is larger than +MaxNorm + 3 ulp
    4. A random number that is smaller than -MaxNorm – 3 ulp
    5. One number for every exponent in the range [MaxNorm.exp - 3, MaxNorm.exp + 3] for positive and negative numbers

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Results = [[MaxNorm-3 ulp, MaxNorm+3 ulp], [-MaxNorm-3 ulp, -MaxNorm+3 ulp], Random Num > MaxNorm+3 ulp, Random Num < -MaxNorm-3 ulp, [MaxNorm.exp-3, MaxNorm.exp+3]]
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
    - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. In order to represent numbers that went out of range of the maximum representable number in python, the “Decimal” module was utilized.
    - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
    '''
    opcode = opcode.split('.')[0]  # strip the precision suffix: 'fadd.s'/'fadd.d' -> 'fadd'
    getcontext().prec = 40  # widen Decimal precision for the flen==64 back-solving below
    # Fixed per-opcode seeds keep the generated coverpoints reproducible.
    # NOTE(review): 'opcode in <str>' is a substring test, not equality; it only works
    # because opcode is always the full mnemonic here.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    if flen == 32:
        ieee754_maxnorm_p = '0x1.7fffffp+127'
        ieee754_maxnorm_n = '0x1.7ffffep+127'
        maxnum = float.fromhex(ieee754_maxnorm_p)
        ir_dataset = []
        # Extra hex digit i supplies the guard/round/sticky pattern; int(grs[0:3],2)
        # is the corresponding ulp offset around MaxNorm.
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        # Exponent sweep: one positive and one negative number for each exponent in
        # [MaxNorm.exp - 3, MaxNorm.exp + 3].
        for i in range(-3,4):
            ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = +ve'])
            ir_dataset.append(['-'+ieee754_maxnorm_n.split('p')[0]+'p'+str(127+i),' | Exponent = '+str(127+i)+' Number = -ve'])
        for i in range(len(ir_dataset)):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        maxdec_p = str(maxnum)
        maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp'])
            ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp'])
        for i in range(-3,4):
            ir_dataset.append([str(random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = +ve'])
            ir_dataset.append([str(-1*random.uniform(1,maxnum)).split('e')[0]+'e'+str(int(math.log(pow(2,1023+i),10))),' | Exponent = '+str(1023+i)+' Number = -ve'])
    b4_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        # Back-solve rs2 so that rs1 <op> rs2 (and rs3 for the fused ops) lands on the
        # chosen intermediate result; Decimal keeps the flen==64 arithmetic exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1..x3 look unused, but struct.pack('f', ...) range-checks its
        # argument (OverflowError for values beyond float32 range) — confirm before removing.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b4_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b4_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # One coverpoint per operand combination per rounding mode (rm 0..4).
    for c in b4_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B4 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b5(flen: int, opcode: str, ops: int, seed: int = -1) -> list:
    '''
    IBM Model B5 Definition:
    This model creates a test-case for each of the following constraints on the intermediate results:

    1. All the numbers in the range [+MinSubNorm – 3 ulp, +MinSubNorm + 3 ulp]
    2. All the numbers in the range [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp]
    3. All the numbers in the range [MinNorm – 3 ulp, MinNorm + 3 ulp]
    4. All the numbers in the range [-MinNorm - 3 ulp, -MinNorm + 3 ulp]
    5. A random number in the range (0, MinSubNorm)
    6. A random number in the range (-MinSubNorm, -0)
    7. One number for every exponent in the range [MinNorm.exp, MinNorm.exp + 5]

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Results = [+MinSubNorm – 3 ulp, +MinSubNorm + 3 ulp], [-MinSubNorm - 3 ulp, -MinSubNorm + 3 ulp] , [MinNorm – 3 ulp, MinNorm + 3 ulp] , [-MinNorm - 3 ulp, -MinNorm + 3 ulp] , Random Num in (0, MinSubNorm), Random Num in (-MinSubNorm, -0), One Num for every exp in [MinNorm.exp, MinNorm.exp + 5]]
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
    - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. In order to represent numbers that went out of range of the maximum representable number in python, the “Decimal” module was utilized.
    - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]  # strip the precision suffix: 'fadd.s'/'fadd.d' -> 'fadd'
    getcontext().prec = 40  # widen Decimal precision for the flen==64 back-solving below
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_minsubnorm = '0x0.000001p-126'
        ir_dataset = []
        # Extra hex digit i supplies the guard/round/sticky pattern around MinSubNorm.
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        ieee754_minnorm = '0x1.000000p-126'
        for i in range(0,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        # One value per exponent in [MinNorm.exp, MinNorm.exp + 5].
        minnorm_Exp = ['0x1.000000p-126','0x1.000000p-125','0x1.000000p-124','0x1.000000p-123','0x1.000000p-122','0x1.000000p-121']
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(126+int(i.split('p')[1]))])
        n = len(ir_dataset)
        # Convert each entry to a float and append its negated twin.
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
            ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        minsubdec = '5e-324'
        ir_dataset = []
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnormdec = '2.2250738585072014e-308'
        ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
        ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp'])
        minnorm_Exp = ['4.450147717014403e-308','8.900295434028806e-308','1.780059086805761e-307','3.560118173611522e-307','7.120236347223044e-307']
        k = 1
        for i in minnorm_Exp:
            ir_dataset.append([i,' | Exponent = MinNorm.exp + '+str(k)])
            k += 1
        n = len(ir_dataset)
        # Append the sign-flipped twin of every entry (kept as decimal strings here).
        for i in range(n):
            ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
    # Fixed per-opcode seeds keep the generated coverpoints reproducible.
    # NOTE(review): 'opcode in <str>' is a substring test, not equality; it only works
    # because opcode is always the full mnemonic here.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b5_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        # Back-solve rs2 so that rs1 <op> rs2 (and rs3 for the fused ops) lands on the
        # chosen intermediate result; Decimal keeps the flen==64 arithmetic exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1..x3 look unused, but struct.pack('f', ...) range-checks its
        # argument (OverflowError for values beyond float32 range) — confirm before removing.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b5_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b5_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # One coverpoint per operand combination per rounding mode (rm 0..4).
    for c in b5_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B5 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b6(flen: int, opcode: str, ops: int, seed: int = -1) -> list:
    '''
    IBM Model B6 Definition:
    This model tests intermediate results in the space between –MinSubNorm and
    +MinSubNorm. For each of the following ranges, we select 8 random test cases,
    one for every combination of the LSB, guard bit, and sticky bit.

    1. -MinSubNorm < intermediate < -MinSubNorm / 2
    2. -MinSubNorm / 2 <= intermediate < 0
    3. 0 < intermediate <= +MinSubNorm / 2
    4. +MinSubNorm / 2 < intermediate < +MinSubNorm

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Results = [Random number ∈ (-MinSubNorm, -MinSubNorm/2), Random number ∈ (-MinSubNorm/2, 0), Random number ∈ (0, +MinSubNorm/2), Random number ∈ (+MinSubNorm/2, +MinSubNorm)]
    {All 8 combinations of guard, round and sticky bit are tested for every number}
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The intermediate results dataset is populated in accordance with the abstract dataset defined above.
    - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. In order to represent numbers that went out of range of the maximum representable number in python, the “Decimal” module was utilized.
    - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the “floatingPoint_tohex” function.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]  # strip the precision suffix: 'fmul.s'/'fmul.d' -> 'fmul'
    getcontext().prec = 40  # widen Decimal precision for the back-solving below
    # Fixed per-opcode seeds keep the generated coverpoints reproducible.  Only the
    # multiply/divide/fused opcodes are seeded: add/sub cannot produce results in
    # this subnormal band from representable operands.
    # NOTE(review): 'opcode in <str>' is a substring test, not equality; it only works
    # because opcode is always the full mnemonic here.
    if seed == -1:
        if opcode in 'fmul':
            random.seed(0)
        elif opcode in 'fdiv':
            random.seed(1)
        elif opcode in 'fmadd':
            random.seed(2)
        elif opcode in 'fnmadd':
            random.seed(3)
        elif opcode in 'fmsub':
            random.seed(4)
        elif opcode in 'fnmsub':
            random.seed(5)
    else:
        random.seed(seed)
    if flen == 32:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.000001p-127'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # One random base value per quarter-band; each is perturbed by i*16**-7 to
        # plant the guard/round/sticky bits.
        r=str(random.uniform(minnum,minnum/2))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str(random.uniform(minnum/2,0))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str(random.uniform(0,abs(minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str(random.uniform(abs(minnum/2),abs(minnum)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-7)))+'e'+r.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    elif flen == 64:
        ir_dataset = []
        ieee754_minsubnorm_n = '-0x0.0000000000001p-1022'
        minnum = float.fromhex(ieee754_minsubnorm_n)
        # NOTE(review): unlike the flen==32 branch, the exponent part r.split('e')[1]
        # is never re-attached here, so the stored values lose their e-324 scale —
        # looks like a bug; confirm against the model intent before changing.
        r=str("{:.2e}".format(random.uniform(minnum,minnum/2)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm, -MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(minnum/2,0)))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (-MinSubNorm / 2, 0)'])
        r=str("{:.2e}".format(random.uniform(0,abs(minnum/2))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (0, +MinSubNorm / 2)'])
        r=str("{:.2e}".format(random.uniform(abs(minnum/2),abs(minnum))))
        for i in range(2,16,2):
            grs = '{:04b}'.format(i)
            ir_dataset.append([str(Decimal(r.split('e')[0])+Decimal(pow(i*16,-14))),' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> IR ∈ (+MinSubNorm / 2, +MinSubNorm)'])
    b6_comb = []
    for i in range(len(ir_dataset)):
        # Tiny operands from (0, 1e-30); rs2 is back-solved (all-Decimal here) so that
        # rs1 <op> rs2 (and rs3 for the fused ops) lands on the chosen intermediate result.
        rs1 = random.uniform(0,1e-30)
        rs3 = random.uniform(0,1e-30)
        if opcode in 'fmul':
            rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1..x3 look unused, but struct.pack('f', ...) range-checks its
        # argument (OverflowError for values beyond float32 range) — confirm before removing.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fmul','fdiv']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b6_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    #print(*b6_comb,sep='\n')
    coverpoints = []
    k=0
    # One coverpoint per operand combination per rounding mode (rm 0..4).
    for c in b6_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B6 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b7(flen: int, opcode: str, ops: int, seed: int = -1) -> list:
    '''
    IBM Model B7 Definition:
    This model checks that the sticky bit is calculated correctly in each of the following cases (for every possible combination in the table). The Guard bit should always be 0, and the sign positive, so that miscalculation of the sticky bit will alter the final result.

    Mask in Extra bits

    .. code-block::

        1000...000
        0100...000
        …
        0000...010
        0000...001
        0000000000

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Results = [ieee754_maxnorm, maxnum, maxdec, maxnum]
    {It assures the calculation of sticky bit for every possible combination in the table}
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The Sticky bit is calculated in each case. The guard bit here is always assumed to be zero and the sign is positive, so that miscalculation of the sticky bit will alter the final result.
    - In the intermediate result dataset, the elements are appended as elements before the character ‘p’ and then the binary equivalent of ‘010’ + pow(2,i).
    - Finally on the extra bits, it is masked with the comment created in the previous point. The first character of each element is converted to its floating point equivalent in a loop.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]  # strip the precision suffix: 'fadd.s'/'fadd.d' -> 'fadd'
    getcontext().prec = 60  # wider Decimal precision than the other models: the sticky-bit masks reach far below the significand
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        # Only positive values from the 32-bit datasets (the model requires a positive sign).
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        ir_dataset = []
        for k in range(len(ieee754_num)):
            for i in range(0,20):
                # One-hot mask on the extra bits; '010' prefix keeps the guard bit 0
                # while guaranteeing a non-zero sticky contribution.
                comment = (20-i)*'0' + '1' + i*'0'
                ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+'{:021b}'.format(pow(2,i)),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Mask on extra bits ---> ' + comment])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        for i in dsubnorm+dnorm:
            float_val = fields_dec_converter(64,i)
            if float_val > 0:
                ieee754_num.append(str(float_val))
        ir_dataset = []
        for l in range(len(ieee754_num)):
            for k in range(1,13):
                for i in range(4):
                    # Walk the single set bit across the 52 extra-bit positions in
                    # steps encoded by (k, i); the comment shows the resulting mask.
                    comment = (k*(i+1))*'0' + '1' + (51-(k*(i+1)))*'0'
                    ir_dataset.append([str(Decimal(ieee754_num[l].split('e')[0])+Decimal(pow(16,-14))+Decimal(pow(pow(2,3-i)*16,-14-k)))+'e'+ieee754_num[l].split('e')[1],' | Mask on extra bits ---> ' + comment])
    # Fixed per-opcode seeds keep the generated coverpoints reproducible.
    # NOTE(review): 'opcode in <str>' is a substring test, not equality; it only works
    # because opcode is always the full mnemonic here.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b7_comb = []
    for i in range(len(ir_dataset)):
        rs1 = random.uniform(1,maxnum)
        rs3 = random.uniform(1,maxnum)
        # Back-solve rs2 so that rs1 <op> rs2 (and rs3 for the fused ops) lands on the
        # chosen intermediate result; Decimal keeps the flen==64 arithmetic exact.
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # NOTE(review): x1..x3 look unused, but struct.pack('f', ...) range-checks its
        # argument (OverflowError for values beyond float32 range) — confirm before removing.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b7_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b7_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k = 0
    # Unlike the other models, only one rounding mode is emitted per combination:
    # rm_val == 3 is RUP (round towards +infinity) in the RISC-V rm encoding, where a
    # sticky-bit miscalculation directly changes the rounded result.
    for c in b7_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 3'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += ir_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B7 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b8(flen, opcode, ops, seed=-1):
    '''
    IBM Model B8 Definition:
        This model targets numbers that are on the edge of a rounding boundary. These boundaries may vary depending on the rounding mode. These numbers include floating-point numbers and midpoints between floating-point numbers. In order to target the vicinity of these numbers, we test the following constraints on the extra bits of the intermediate result:
        1. All values of extra-bits in the range [000...00001, 000...00011]
        2. All values of extra-bits in the range [111...11100, 111...11111]
        For each value selected above, test all the combinations on the LSB of the significand, the guard bit, and the sticky bit (if the number of extra bits is not finite).
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
        Intermediate Results = [For every Subnormal and Normal number, 8 combinations of guard, round and sticky bit are appended, along with 6 combinations(3 positive, 3 negative) of the mask on extra bits]
        Operand1 {operation} Operand2 = Intermediate Results
    Implementation:
        - The intermediate results dataset is populated in accordance with the abstract dataset defined above. The coverpoints can be increased by increasing the dataset of normal and subnormal numbers.
        - Intermediate results can be out of the range of what is representable in the specified format; they should only be viewed numerically. Inorder to represent numbers that went out of range of the maximum representable number in python, the "Decimal" module was utilized.
        - These operand values are treated as decimal numbers until their derivation after which they are converted into their respective IEEE754 hexadecimal floating point formats using the "floatingPoint_tohex" function.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    # Only the base mnemonic matters here (e.g. 'fadd.s' -> 'fadd').
    opcode = opcode.split('.')[0]
    # High Decimal precision so 64-bit intermediate results are derived without
    # losing the low-order bits that this model deliberately targets.
    getcontext().prec = 60
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        ieee754_num = []
        # Keep only non-negative subnormal/normal values, trimmed to a
        # single-precision significand ('0x1.xxxxxx' is 10 chars).
        for i in fsubnorm+fnorm:
            float_val = float.hex(fields_dec_converter(32,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # print(*ieee754_num, sep = '\n')
        # For each base number, splice guard/round/sticky bits (j) and a
        # 2-bit extra-bit mask (i) below the representable significand, once
        # with the in-between extra bits all-0 and once all-1.
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('{:03b}'.format(j)+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        ieee754_num = []
        # Double-precision significand prefix is 17 chars ('0x1.' + 13 digits).
        for i in dsubnorm+dnorm:
            float_val = float.hex(fields_dec_converter(64,i))
            if float_val[0] != '-':
                ieee754_num.append(float_val.split('p')[0][0:17]+'p'+float_val.split('p')[1])
        ir_dataset = []
        # NOTE(review): unlike the 32-bit branch, the spliced bits here are the
        # fixed string '010' rather than '{:03b}'.format(j), so the guard/round/
        # sticky bits reported in the comment (grs) do not match the bits
        # actually inserted — confirm whether this is intentional.
        for k in range(len(ieee754_num)):
            for i in range(1,4):
                for j in range(1,8):
                    grs = '{:03b}'.format(j)
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+19*'0'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'0'+'{:02b}'.format(i)])
                    ir_dataset.append([ieee754_num[k].split('p')[0]+hex(int('010'+19*'1'+'{:02b}'.format(i),2))[2:]+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Mask On Extra Bits: '+19*'1'+'{:02b}'.format(i)])
        n = len(ir_dataset)
        for i in range(n):
            ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
    # Deterministic per-opcode seeds keep generated coverpoints reproducible
    # while still differing between opcodes.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
        elif opcode in 'fmul':
            random.seed(2)
        elif opcode in 'fdiv':
            random.seed(3)
        elif opcode in 'fsqrt':
            random.seed(4)
        elif opcode in 'fmadd':
            random.seed(5)
        elif opcode in 'fnmadd':
            random.seed(6)
        elif opcode in 'fmsub':
            random.seed(7)
        elif opcode in 'fnmsub':
            random.seed(8)
    else:
        random.seed(seed)
    b8_comb = []
    for i in range(len(ir_dataset)):
        # Pick random rs1/rs3, then solve op(rs1, rs2[, rs3]) == target
        # intermediate result for rs2 (Decimal arithmetic for 64-bit to keep
        # the low-order bits exact).
        rs1 = random.uniform(1,ir_dataset[i][0])
        rs3 = random.uniform(1,ir_dataset[i][0])
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir_dataset[i][0] - rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0]) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir_dataset[i][0])
        elif opcode in 'fmul':
            if flen == 32:
                rs2 = ir_dataset[i][0]/rs1
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])/Decimal(rs1)
        elif opcode in 'fdiv':
            if flen == 32:
                rs2 = rs1/ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
        elif opcode in 'fsqrt':
            if flen == 32:
                rs2 = ir_dataset[i][0]*ir_dataset[i][0]
            elif flen == 64:
                rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
        elif opcode in 'fmadd':
            if flen == 32:
                rs2 = (ir_dataset[i][0] - rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) - Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs2 = (rs3 - ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = (Decimal(rs3) - Decimal(ir_dataset[i][0]))/Decimal(rs1)
        elif opcode in 'fmsub':
            if flen == 32:
                rs2 = (ir_dataset[i][0] + rs3)/rs1
            elif flen == 64:
                rs2 = (Decimal(ir_dataset[i][0]) + Decimal(rs3))/Decimal(rs1)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs2 = -1*(rs3 + ir_dataset[i][0])/rs1
            elif flen == 64:
                rs2 = -1*(Decimal(rs3) + Decimal(ir_dataset[i][0]))/Decimal(rs1)
        # Round the operands through single precision via a pack/unpack trip.
        # NOTE(review): x1/x2/x3 are never read after this point — confirm
        # whether they are leftovers from debugging before removing.
        if(flen==32):
            x1 = struct.unpack('f', struct.pack('f', rs1))[0]
            x2 = struct.unpack('f', struct.pack('f', rs2))[0]
            x3 = struct.unpack('f', struct.pack('f', rs3))[0]
        elif(flen==64):
            x1 = rs1
            x2 = rs2
            x3 = rs3
        # Operand count per opcode family: 2 for arithmetic, 1 for fsqrt,
        # 3 for fused multiply-add variants.
        if opcode in ['fadd','fsub','fmul','fdiv']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
        elif opcode in 'fsqrt':
            b8_comb.append((floatingPoint_tohex(flen,float(rs2)),))
        elif opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
            b8_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    k=0
    # One coverpoint per operand tuple and rounding mode (rm 0..4); the
    # trailing '# ...' part is a human-readable explanation consumed by
    # comments_parser below.
    for c in b8_comb:
        for rm in range(5):
            cvpt = ""
            for x in range(1, ops+1):
                # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '+str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += ir_dataset[k][1]
            coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B8 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b9(flen, opcode, ops):
    '''
    IBM Model B9 Definition:
        This model tests special patterns in the significands of the input operands. Each
        of the input operands should contain one of the following patterns (each
        sequence can be of length 0 up to the number of bits in the significand – the
        more interesting cases will be chosen).
        1. A sequence of leading zeroes
        2. A sequence of leading ones
        3. A sequence of trailing zeroes
        4. A sequence of trailing ones
        5. A small number of 1s as compared to 0s
        6. A small number of 0s as compared to 1s
        7. A "checkerboard" pattern (for example 00110011... or 011011011...)
        8. Long sequences of 1s
        9. Long sequences of 0s
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int
    Abstract Dataset Description:
        Operand1, Operand2 ∈ [A sequence of leading zeroes, A sequence of leading ones, A sequence of trailing zeroes, A sequence of trailing ones, A small number of 1s as compared to 0s, A small number of 0s as compared to 1s, A "checkerboard" pattern (for example 00110011... or 011011011...), Long sequences of 1s, Long sequences of 0s]
    Implementation:
        - The rs1 array is appended with the elements of flip types and then for each iteration, the respective sign, mantissa and exponent is computed.
        - A nested loop is initialized, assuming the rs1 mantissa as the base number and rs2 sign and rs2 exponent is obtained directly from the rs1 sign and rs1 exponent. Rs2 mantissa is calculated by adding the iteration number in the beginning of rs1 mantissa. This is done respectively for each repeating pattern.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # Seed dataset: boundary values (zero, one, min/max subnormal, min/max
    # normal) for the chosen width; e_sz is the exponent field width.
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
    rs1 = []
    b9_comb = []
    comment = []
    if ops == 2:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Prepend '1' so leading zeros of the hex value survive the int()
            # round-trip, then drop it with [3:] ('0b1' prefix); the result is
            # the full fixed-width bit pattern sign|exponent|mantissa.
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # NOTE(review): fields_dec_converter is called with a hard-coded
            # width of 32 throughout this model even when flen == 64 (ibm_b11
            # passes flen here) — confirm whether this should be flen.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Leading zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Leading ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Trailing ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Trailing ones ---> rs1_man = '+rs2_man)
            # Long runs: only the last ~10% of split positions, i.e. runs
            # covering at least 90% of the mantissa.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            # Checkerboard: repeat each short pattern to fill the mantissa,
            # then truncate to the mantissa width.
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b9_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    else:
        # Single-operand opcodes (e.g. fsqrt): generate only one operand and
        # skip negative bases (square root of a negative is not a useful
        # pattern test here).
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            if rs1_sgn != '1':
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                    rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Leading zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                    rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Leading ones ---> rs1_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                    rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Trailing zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                    rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b9_comb.append((floatingPoint_tohex(flen,rs2),))
                    comment.append(' | Trailing ones ---> rs1_man = '+rs2_man)
            # Force the sign positive for the remaining patterns.
            rs1_sgn = '0'
            for j in range(flen-e_sz-1-math.ceil(0.1*(flen-e_sz-1)), flen-e_sz-1):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(32,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b9_comb.append((floatingPoint_tohex(flen,rs2),))
                comment.append(' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    coverpoints = []
    k = 0
    # Emit one coverpoint per operand tuple, fixed rounding mode 0; the text
    # after '#' documents the pattern and is processed by comments_parser.
    for c in b9_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B9 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b10(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B10 Definition:
        This model tests every possible value for a shift between the input operands.
        1. A value smaller than -(p + 4)
        2. All the values in the range [-(p + 4) , (p + 4)]
        3. A value larger than (p + 4)
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :param seed: int
    Abstract Dataset Description:
        Operand1 = [Random Number]
        Operand2 = [A value smaller than -(op1.exp+4), All values in the range [-(op1.exp+4), (op1.exp+4)], A value larger than +(op1.exp+4)]
    Implementation:
        - Operand1 is drawn at random; its decimal exponent fixes the shift bound (p + 4) in binary.
        - Operand2 reuses a random significand and is rebuilt with each targeted binary exponent, then both operands are converted to their IEEE754 hex representation.
        - The operand values are passed through extract_fields to obtain the individual sign/exponent/mantissa fields.
        - Coverpoints are emitted with rounding mode 0 only.
    '''
    opcode = opcode.split('.')[0]
    # Format-dependent constants: largest normal value and exponent ceiling.
    if flen == 32:
        maxnum = float.fromhex('0x1.7fffffp+127')
        exp_max = 255
    elif flen == 64:
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1023
    if N == -1:
        N = 2
    # Deterministic per-opcode seeding keeps the generated set reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    pairs = []
    notes = []
    for _ in range(1, N):
        op1 = random.uniform(1, maxnum / 1000)
        op2 = random.uniform(1, maxnum / 1000)
        # (p + 4) bound in binary, derived from op1's decimal exponent.
        dec_exp = int(str(op1).split('e')[1])
        bound = int(math.log(pow(10, dec_exp), 2)) + 4
        op1_hex = floatingPoint_tohex(flen, float(op1))
        mant = str(op2).split('e')[0]

        def shifted(e):
            # Rebuild op2's significand with binary exponent e (expressed back
            # in decimal) and convert it to IEEE754 hex.
            return floatingPoint_tohex(flen, float(mant + 'e' + str(int(math.log(pow(2, e), 10)))))

        # Case 1: a shift below -(p + 4).
        e = -1 * random.randrange(bound, exp_max)
        pairs.append((op1_hex, shifted(e)))
        notes.append(' | Exponent = ' + str(e) + ' --> A value smaller than -(p + 4)')
        # Case 2: every shift within [-(p + 4), (p + 4)).
        for e in range(-bound, bound):
            pairs.append((op1_hex, shifted(e)))
            notes.append(' | Exponent = ' + str(e) + ' --> Values in the range [-(p + 4) , (p + 4)]')
        # Case 3: a shift above (p + 4).
        e = random.randrange(bound, exp_max)
        pairs.append((op1_hex, shifted(e)))
        notes.append(' | Exponent = ' + str(e) + ' --> A value larger than (p + 4)')
    coverpoints = []
    # One coverpoint per operand pair, rounding mode 0; text after '#' is the
    # human-readable explanation handled by comments_parser.
    for idx, pair in enumerate(pairs):
        fields = ''.join(extract_fields(flen, pair[x], str(x + 1)) + ' and ' for x in range(ops))
        vals = ' and '.join('rs' + str(x + 1) + '_val==' + num_explain(flen, pair[x]) + '(' + str(pair[x]) + ')' for x in range(ops))
        coverpoints.append(fields + 'rm_val == 0' + ' # ' + vals + notes[idx])
    count = str(len(coverpoints))
    bits = str(32) if flen == 32 else str(64)
    mess = 'Generated' + ' ' * (5 - len(count)) + count + ' ' + bits + '-bit coverpoints using Model B10 for ' + opcode + ' !'
    logger.info(mess)
    return comments_parser(coverpoints)
def ibm_b11(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B11 Definition:
        In this model we test the combination of different shift values between the
        inputs, with special patterns in the significands of the inputs.
        Significands of Input1 and Input2: as in model (B9) "Special Significands on
        Inputs"
        Shift: as in model (B10) "Shift - Add"
        We test both effective operations: addition and subtraction.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
        Operand1, Operand2 ∈ Abstract Dataset in B9 + Abstract Dataset in B10
    Implementation:
        - A culmination of the techniques used in the implementations of Model B9 and Model B10 are used to form the dataset.
        - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
        - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    # Boundary seed values and exponent field width per format.
    # NOTE(review): exp_max is assigned but not referenced below; the exponent
    # ranges further down hard-code single-precision constants (127/131/123/
    # 250) even for flen == 64 — confirm intent before relying on 64-bit output.
    if flen == 32:
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8
        exp_max = 255
    elif flen == 64:
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11
        exp_max = 1023
    # Deterministic per-opcode seeding keeps generated coverpoints reproducible.
    if seed == -1:
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    rs1 = []
    b11_comb = []
    comment = []
    # Only two-operand opcodes are supported; anything else produces an empty
    # coverpoint list.
    if ops == 2:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Prepend '1' so leading zeros survive the int() round-trip, then
            # strip it; yields the fixed-width sign|exponent|mantissa pattern.
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # Section 1: rs2 exponent shifted well BELOW rs1's (B10 shift),
            # combined with every B9 significand pattern.
            if int(rs1_exp,2) < 4: rs2_exp = -127
            else : rs2_exp = random.randrange(-127,int(rs1_exp,2)-131)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value smaller than (p - 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            # NOTE(review): in the long-run and checkerboard loops below,
            # rs2_exp is reset to rs1_exp, so the shift advertised by
            # comment_str is NOT applied to those patterns — confirm intent.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # Section 2: rs2 exponent shifted well ABOVE rs1's.
            if int(rs1_exp,2) >= 250: rs2_exp = 127
            else : rs2_exp = random.randrange(int(rs1_exp,2)-123,127)
            comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> A value greater than (p + 4)'
            rs2_exp += 127
            if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
            # Same NOTE(review) as above: rs2_exp reverts to rs1_exp here.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
            # Section 3: every exponent shift in the window around rs1's
            # exponent (the (p - 4)..(p + 4) band), clamped to format limits.
            ul = int(rs1_exp,2)-123
            ll = int(rs1_exp,2)-131
            if int(rs1_exp,2) >= 250: ul = 127
            if int(rs1_exp,2) < 4: ll = -127
            for expval in range (ll, ul):
                rs2_exp = expval
                comment_str = ' | Exponent = '+ str(rs2_exp) + ' --> Values in the range (p - 4) to (p + 4)'
                rs2_exp += 127
                if flen == 32: rs2_exp = '{:08b}'.format(rs2_exp)
                elif flen == 64: rs2_exp = '{:011b}'.format(rs2_exp)
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_man = '0'*j + rs1_man[j:]  # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Leading ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Leading ones ---> rs1_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)  # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Trailing ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Trailing ones ---> rs1_man = '+rs2_man)
                for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)  # Long sequence of 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs1_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)  # Long sequence of 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs1_man = '+rs2_man)
                chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
                for j in chkrbrd:
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = j
                    for k in range(math.ceil(len(rs1_man)/len(j))):
                        rs2_man += j
                    rs2_man = rs2_man[0:flen-e_sz-1]
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b11_comb.append((rs1[i],floatingPoint_tohex(flen,rs2)))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs2_man = '+rs2_man)
                    b11_comb.append((floatingPoint_tohex(flen,rs2),rs1[i]))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs1_man = '+rs2_man)
    coverpoints = []
    k = 0
    # One coverpoint per operand pair, rounding mode 0; the '#' suffix is the
    # per-pair explanation consumed by comments_parser.
    for c in b11_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B11 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b12(flen, opcode, ops, seed=-1):
    '''
    IBM Model B12 Definition:
    This model tests every possible value for cancellation.
    For the difference between the exponent of the intermediate result and the
    maximum between the exponents of the inputs, test all values in the range:
    [-p, +1].

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Result - Operand.Exp ∈ [-p, +1]
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - One operand and the desired (cancelling) intermediate result are drawn at random; the other operand is back-computed so that the add/sub cancels down to that intermediate result.
    - The operands are converted into their corresponding IEEE754 floating point formats.
    - These operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode ‘0’ for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40                  # high-precision Decimal arithmetic for the flen==64 back-computation
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest single-precision normal
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest single-precision subnormal
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest single-precision subnormal
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest double-precision normal
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest double-precision subnormal
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest double-precision subnormal
    if seed == -1:
        # Per-opcode default seeds keep the generated coverpoints reproducible.
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b12_comb = []
    for i in range(50):
        # Choose rs1 and the intermediate result ir, then back-compute rs2 so the
        # operation cancels to ir (rs1 is negated for fadd so that rs1 + rs2 = ir).
        if opcode in 'fadd': rs1 = -1*random.uniform(minsubnorm,maxnum)
        elif opcode in 'fsub': rs1 = random.uniform(minsubnorm,maxnum)
        ir = random.uniform(1,maxnum)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir - rs1
            elif flen == 64:
                # Decimal avoids losing low-order bits in the 64-bit back-computation.
                rs2 = Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir)
        # (Removed an unused x1/x2 struct round-trip here: the values were never
        # read, and struct.pack('f', rs2) could raise OverflowError for flen==32
        # when |rs2| exceeds the single-precision range.)
        if opcode in ['fadd','fsub']:
            b12_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
    coverpoints = []
    comment = ' | Add: Cancellation'
    for c in b12_comb:
        cvpt = ""
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        # range(1, ops+1) (was a hardcoded range(1, 3)) keeps the human-readable
        # part consistent with the fields loop above and with the sibling models.
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B12 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b13(flen, opcode, ops, seed=-1):
    '''
    IBM Model B13 Definition:
    This model tests all combinations of cancellation values as in model (B12), with
    all possible unbiased exponent values of subnormal results.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Result - Operand.Exp ∈ [-p, +1] (The exponent for the intermediate result is chosen such that it is a subnormal number)
    Operand1 {operation} Operand2 = Intermediate Results

    Implementation:
    - The implementation procedure for Model B12 is repeated, except that the random intermediate result is constrained to the subnormal range.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode ‘0’ for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40                  # high-precision Decimal arithmetic for the flen==64 back-computation
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest single-precision normal
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest single-precision subnormal
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest single-precision subnormal
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest double-precision normal
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest double-precision subnormal
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest double-precision subnormal
    if seed == -1:
        # Per-opcode default seeds keep the generated coverpoints reproducible.
        if opcode in 'fadd':
            random.seed(0)
        elif opcode in 'fsub':
            random.seed(1)
    else:
        random.seed(seed)
    b13_comb = []
    for i in range(200):
        # rs2 is back-computed so that the operation cancels down to a subnormal
        # intermediate result ir.
        rs1 = random.uniform(minsubnorm,maxnum)
        ir = random.uniform(minsubnorm,maxsubnorm)
        if opcode in 'fadd':
            if flen == 32:
                rs2 = ir - rs1
            elif flen == 64:
                # Decimal avoids losing low-order bits in the 64-bit back-computation.
                rs2 = Decimal(ir) - Decimal(rs1)
        elif opcode in 'fsub':
            if flen == 32:
                rs2 = rs1 - ir
            elif flen == 64:
                rs2 = Decimal(rs1) - Decimal(ir)
        # (Removed an unused x1/x2 struct round-trip here: the values were never read.)
        if opcode in ['fadd','fsub']:
            b13_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
    coverpoints = []
    comment = ' | Add: Cancellation ---> Subnormal result'
    for c in b13_comb:
        cvpt = ""
        # range(1, ops+1) (was a hardcoded range(1, 3)) keeps the fields loop
        # consistent with the human-readable loop below and the sibling models.
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B13 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b14(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B14 Definition:
    This model tests every possible value for a shift between the addends of the multiply-add operation.
    For the difference between the unbiased exponent of the addend and the
    unbiased exponent of the result of the multiplication, test the following values:
    1. A value smaller than -(2* p + 1)
    2. All the values in the range [-(2*p +1), (p +1) ]
    3. A value larger than (p + 1)
    We test both effective operations: addition and subtraction. The end values tested are selected to be greater by one than the largest possible shift in which
    the smaller addend may affect the result.
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param N: No. of sets of coverpoints to be generated. (Predefined to -1. Set to 2)
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type N: int
    :param seed: int
    Abstract Dataset Description:
    Shift between the addends of the multiply-add operation = [ A value smaller than -(2* p + 1), All the values in the range [-(2*p +1), (p +1), A value larger than (p + 1) ] → Condition 1
    Operand 1, 2 = Random
    Operand 3 = Condition 1
    Implementation:
    - The shift between the two addends are constrained by the conditions mentioned in the dataset above.
    - Operands 1 and 2 are randomly obtained. But Operand 3 is obtained by ensuring the shift conditions.
    - Once the dataset is formed, these operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode ‘0’ for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)     # largest single-precision normal
        exp_max = 127                               # unbiased exponent bound
        mant_bits = 23                              # mantissa width p (single precision)
        limnum = maxnum
    elif flen == 64:
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1022
        # limnum caps the random operand magnitude at ~2**508 — presumably so
        # that rs1*rs2 below stays finite in double precision; TODO confirm.
        ieee754_limnum = '0x1.fffffffffffffp+507'
        mant_bits = 52                              # mantissa width p (double precision)
        limnum = float.fromhex(ieee754_limnum)
    if N == -1:
        N = 2
    if seed == -1:
        # Per-opcode default seeds keep the generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    b14_comb = []       # (rs1, rs2, rs3) operand tuples as hex strings
    comment = []        # one human-readable annotation per tuple
    for i in range(1,N):
        rs1 = random.uniform(1,limnum)
        rs2 = random.uniform(1,limnum)
        rs3 = random.uniform(1,limnum)
        # Decimal exponent of the product, read out of its repr. Assumes the
        # repr contains 'e' (true for the very large products drawn here, but an
        # IndexError for small ones) — NOTE(review): confirm operand ranges.
        mul_exp = int(str(rs1*rs2).split('e')[1])
        # NOTE(review): this maps the decimal exponent d to int(log10(2**d)) ==
        # int(d*log10(2)); converting a decimal exponent to a binary one would
        # normally multiply by log2(10) instead — confirm intent.
        mul_exp = int(math.log(pow(2,int(mul_exp)),10))
        # Case 1: addend exponent strictly below mul_exp-(2p+1), when such an
        # exponent exists above the minimum.
        if mul_exp-((2*mant_bits)+1) > -1*exp_max:
            rs3_exp = random.randrange(-1*exp_max,mul_exp-((2*mant_bits)+1))
            # Rebuild rs3 keeping its significand but shifting its binary exponent by rs3_exp.
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> Difference smaller than -(2*p + 1)')
        # Case 2: sweep every addend exponent in [mul_exp-(2p+1), mul_exp+p+1),
        # clamped to the representable exponent range.
        if mul_exp-((2*mant_bits)+1) < -1*exp_max: exp1 = -1*exp_max
        else: exp1 = mul_exp-((2*mant_bits)+1)
        if mul_exp+mant_bits+1 > exp_max: exp2 = exp_max
        else: exp2 = mul_exp+mant_bits+1
        for j in range(exp1, exp2):
            rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j)
            rs3_num = float.fromhex(rs3_num)
            b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
            comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+j) + ' --> Values in the range [-(2*p + 1) , (p + 1)]')
        # Case 3: addend exponent above mul_exp+p+1. NOTE(review): if exp2 was
        # clamped to exp_max above, randrange(exp2, exp_max) is empty and raises
        # ValueError — confirm mul_exp+p+1 < exp_max for the seeds in use.
        rs3_exp = random.randrange(exp2, exp_max)
        rs3_num = float.hex(float(str(rs3).split('e')[0])).split('p')[0]+'p'+str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp)
        rs3_num = float.fromhex(rs3_num)
        b14_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3_num))))
        comment.append(' | Multiplicand Exponent = '+str(mul_exp)+', Addend exponent = '+ str(int(float.hex(float(str(rs3).split('e')[0])).split('p')[1])+rs3_exp) + ' --> A value larger than (p + 1)')
    coverpoints = []
    k = 0
    for c in b14_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, 4):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B14 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b15(flen, opcode, ops, N=-1, seed=-1):
    '''
    IBM Model B15 Definition:
    In this model we test the combination of different shift values between the
    addends, with special patterns in the significands of the addends.
    For the significand of the addend and for the multiplication result we take the
    cases defined in model (B9) "Special Significands on Inputs"
    For the shift we take the cases defined in model (B14) "Shift – multiply-add".
    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :param seed: int
    Abstract Dataset Description:
    Operand 1, 2 = Random
    Operand 3 ∈ Abstract Dataset in B9 + Abstract Dataset in B14
    Implementation:
    - Here the condition is imposed that if the value of the ops variable is 3, then each of the elements in the flip types is iterated and split into their respective sign, mantissa and exponent part.
    - A mul variable is initialized and parsed to the field_dec_converter for each rs1 value in the list. Next the loop is run for the mantissa parts generated for rs1 values, where it is checked for certain patterns like the leading 0’s, leading 1’s, trailing 0’s and trailing 1’s.
    - The checkerboard list is declared with the probable sequences for rs2. Here the sign and exponent are extracted from the rs1 values. Mantissa part is derived from the checkerboard list. Consecutively, if the flen value differs, then the range available varies.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode “0” for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    if flen == 32:
        # Special-value operands (B9) in single precision.
        flip_types = fzero + fone + fminsubnorm + fmaxsubnorm + fminnorm + fmaxnorm
        e_sz=8                                  # exponent field width
        exp_max = 255                           # NOTE(review): immediately overwritten by 127 below — confirm which bound is intended
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)
        exp_max = 127
        mant_bits = 23
        limnum = maxnum
    elif flen == 64:
        # Special-value operands (B9) in double precision.
        flip_types = dzero + done + dminsubnorm + dmaxsubnorm + dminnorm + dmaxnorm
        e_sz=11                                 # exponent field width
        exp_max = 1023                          # NOTE(review): immediately overwritten by 1022 below — confirm which bound is intended
        maxdec = '1.7976931348623157e+308'
        maxnum = float.fromhex('0x1.fffffffffffffp+1023')
        exp_max = 1022
        ieee754_limnum = '0x1.fffffffffffffp+507'
        mant_bits = 52
        limnum = float.fromhex(ieee754_limnum)
    if seed == -1:
        # Per-opcode default seeds keep the generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fnmadd':
            random.seed(1)
        elif opcode in 'fmsub':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    rs1 = []            # special values acting as the target multiplication result
    b15_comb = []       # (rs1, rs2, rs3) operand tuples as hex strings
    comment = []        # one human-readable annotation per tuple
    # Only defined for the 3-operand fused multiply-add opcodes.
    if ops == 3:
        for i in range(len(flip_types)):
            rs1.append(flip_types[i])
        for i in range(len(rs1)):
            # Split the special value into sign / exponent / mantissa bit strings.
            bin_val = bin(int('1'+rs1[i][2:],16))[3:]
            rs1_sgn = bin_val[0]
            rs1_exp = bin_val[1:e_sz+1]
            rs1_man = bin_val[e_sz+1:]
            # NOTE: the local rs2/rs2_man variables below actually build operand 3
            # (the addend) — they are appended as the third tuple element, hence
            # the 'rs3_man' wording in the generated comments.
            # --- Phase 1: addend exponent far below the product exponent
            #     (shift smaller than -(2p + 1)).
            if flen == 32:
                if int(rs1_exp,2) < 65: rs2_exp = 0
                else : rs2_exp = random.randrange(0,int(rs1_exp,2)-65)
                comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference smaller than -(2p + 1)'
                rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64:
                if int(rs1_exp,2) < 129: rs2_exp = 0
                else : rs2_exp = random.randrange(0,int(rs1_exp,2)-129)
                comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference smaller than -(2p + 1)'
                rs2_exp = '{:011b}'.format(rs2_exp)
            # Factor the special value into two random multiplicands rs1_act * rs2_act.
            mul = fields_dec_converter(flen,rs1[i])
            rs1_act = random.uniform(1,limnum)
            rs2_act = mul/rs1_act
            # Special significand patterns (B9) for the addend.
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
            # Long runs of ones/zeroes in the top ~10% of the mantissa.
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
            # Checkerboard significand patterns, repeated to fill the mantissa width.
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
            # --- Phase 2: addend exponent far above the product exponent
            #     (shift greater than (p + 1)).
            if flen == 32:
                if int(rs1_exp,2) > 222: rs2_exp = 255
                else : rs2_exp = random.randrange(int(rs1_exp,2)+33, 255)
                comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference greater than (p + 1)'
                rs2_exp = '{:08b}'.format(rs2_exp)
            elif flen == 64:
                if int(rs1_exp,2) > 958: rs2_exp = 1023
                else : rs2_exp = random.randrange(int(rs1_exp,2)+65, 1023)
                comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference greater than (p + 1)'
                rs2_exp = '{:011b}'.format(rs2_exp)
            mul = fields_dec_converter(flen,rs1[i])
            rs1_act = random.uniform(1,limnum)
            rs2_act = mul/rs1_act
            for j in range(len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
            for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
            chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
            for j in chkrbrd:
                rs2_sgn = rs1_sgn
                rs2_exp = rs1_exp
                rs2_man = j
                for k in range(math.ceil(len(rs1_man)/len(j))):
                    rs2_man += j
                rs2_man = rs2_man[0:flen-e_sz-1]
                rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
            # --- Phase 3: sweep every addend exponent in the in-between range
            #     [-(2p + 1), (p + 1)], clamped to the representable exponents.
            if flen == 32:
                ul = int(rs1_exp,2)+33
                ll = int(rs1_exp,2)-65
                if int(rs1_exp,2) >= 222: ul = 255
                if int(rs1_exp,2) < 65: ll = 0
            elif flen == 64:
                ul = int(rs1_exp,2)+65
                ll = int(rs1_exp,2)-129
                if int(rs1_exp,2) >= 958: ul = 1023
                if int(rs1_exp,2) < 129: ll = 0
            for expval in range (ll, ul):
                rs2_exp = expval
                if flen == 32:
                    comment_str = ' | Exponent = '+ str(rs2_exp-127) + ' --> Difference between -(2p+1) and (p+1)'
                    rs2_exp = '{:08b}'.format(rs2_exp)
                elif flen == 64:
                    comment_str = ' | Exponent = '+ str(rs2_exp-1023) + ' --> Difference between -(2p+1) and (p+1)'
                    rs2_exp = '{:011b}'.format(rs2_exp)
                mul = fields_dec_converter(flen,rs1[i])
                rs1_act = random.uniform(1,limnum)
                rs2_act = mul/rs1_act
                for j in range(len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_man = '0'*j + rs1_man[j:]                   # Leading 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Leading zeroes ---> rs3_man = '+rs2_man)
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Leading 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Leading ones ---> rs3_man = '+rs2_man)
                    rs2_man = rs1_man[0:j] + '0'*(len(rs1_man)-j)   # Trailing 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Trailing zeroes ---> rs3_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Trailing 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Trailing ones ---> rs3_man = '+rs2_man)
                for j in range(len(rs1_man)-math.ceil(0.1*len(rs1_man)),len(rs1_man)):
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = '1'*j + '0'*(len(rs1_man)-j)          # Long sequence of 1s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Long sequence of ones ---> rs3_man = '+rs2_man)
                    rs2_man = '0'*j + '1'*(len(rs1_man)-j)          # Long sequence of 0s
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Long sequence of zeroes ---> rs3_man = '+rs2_man)
                chkrbrd = ['011','110','0011','1100','0111','1000','010','101','0110','1001']
                for j in chkrbrd:
                    rs2_sgn = rs1_sgn
                    rs2_exp = rs1_exp
                    rs2_man = j
                    for k in range(math.ceil(len(rs1_man)/len(j))):
                        rs2_man += j
                    rs2_man = rs2_man[0:flen-e_sz-1]
                    rs2 = fields_dec_converter(flen,'0x'+hex(int('1'+rs2_sgn+rs2_exp+rs2_man,2))[3:])
                    b15_comb.append((floatingPoint_tohex(flen,float(rs1_act)),floatingPoint_tohex(flen,float(rs2_act)),floatingPoint_tohex(flen,float(rs2))))
                    comment.append(comment_str + ' | Checkerboard pattern ---> rs3_man = '+rs2_man)
    coverpoints = []
    k = 0
    for c in b15_comb:
        cvpt = ""
        for x in range(1, 4):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment[k]
        coverpoints.append(cvpt)
        k += 1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B15 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b16(flen, opcode, ops, seed=-1):
    '''
    IBM Model B16 Definition:
    This model tests every possible value for cancellation.
    For the difference between the exponent of the intermediate result and the
    maximum between the exponents of the addend and the multiplication result,
    test all values in the range:
    [-(2 * p + 1), 1].

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Abstract Dataset Description:
    Intermediate Result.exp - max(addend.exp, multiplication result.exp) ∈ [-(2 * p + 1), 1] → Condition 1
    Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

    Implementation:
    - Random values of operands 1 and 2 are obtained from the random library.
    - Since the objective of the test is to cancel the operands among each other, the addend (operand 3) is back-computed so that the effective multiply-add cancels down to a randomly chosen intermediate result.
    - The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with rounding mode “0” for that particular opcode.
    '''
    opcode = opcode.split('.')[0]
    getcontext().prec = 40                  # high-precision Decimal arithmetic for the flen==64 back-computation
    if flen == 32:
        ieee754_maxnorm = '0x1.7fffffp+127'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest single-precision normal
        ieee754_minsubnorm = '0x0.000001p-126'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest single-precision subnormal
        ieee754_maxsubnorm = '0x0.7fffffp-126'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest single-precision subnormal
        limnum = maxnum
    elif flen == 64:
        ieee754_maxnorm = '0x1.fffffffffffffp+1023'
        maxnum = float.fromhex(ieee754_maxnorm)          # largest double-precision normal
        ieee754_minsubnorm = '0x0.0000000000001p-1022'
        minsubnorm = float.fromhex(ieee754_minsubnorm)   # smallest double-precision subnormal
        ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
        maxsubnorm = float.fromhex(ieee754_maxsubnorm)   # largest double-precision subnormal
        # Operand magnitudes are capped at ~2**508 so that rs1*rs2 stays finite.
        ieee754_limnum = '0x1.fffffffffffffp+507'
        limnum = float.fromhex(ieee754_limnum)
    if seed == -1:
        # Per-opcode default seeds keep the generated coverpoints reproducible.
        if opcode in 'fmadd':
            random.seed(0)
        elif opcode in 'fmsub':
            random.seed(1)
        elif opcode in 'fnmadd':
            random.seed(2)
        elif opcode in 'fnmsub':
            random.seed(3)
    else:
        random.seed(seed)
    # Renamed from b17_comb: this is Model B16's combination list.
    b16_comb = []
    for i in range(200):
        # rs3 is back-computed so that the effective multiply-add cancels down
        # to the randomly chosen intermediate result ir.
        rs1 = random.uniform(minsubnorm,limnum)
        rs2 = random.uniform(minsubnorm,limnum)
        ir = random.uniform(minsubnorm,rs1*rs2)
        if opcode in 'fmadd':
            if flen == 32:
                rs3 = ir - rs1*rs2
            elif flen == 64:
                # Decimal avoids losing low-order bits in the 64-bit back-computation.
                rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
        elif opcode in 'fnmadd':
            if flen == 32:
                rs3 = -1*rs1*rs2 - ir
            elif flen == 64:
                rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fmsub':
            if flen == 32:
                rs3 = rs1*rs2 - ir
            elif flen == 64:
                rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
        elif opcode in 'fnmsub':
            if flen == 32:
                rs3 = ir + rs1*rs2
            elif flen == 64:
                rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
        # (Removed unused x1/x2 struct round-trip and a per-iteration `result = []`
        # that was never read.)
        if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
            b16_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
    coverpoints = []
    comment = ' | Multiply-Add: Cancellation'
    for c in b16_comb:
        cvpt = ""
        # range(1, ops+1) (was a hardcoded range(1, 4)) keeps the fields loop
        # consistent with the human-readable loop below and the sibling models.
        for x in range(1, ops+1):
            # cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        cvpt += comment
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
        (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B16 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b17(flen, opcode, ops, seed=-1):
	'''
	IBM Model B17 Definition:
		This model tests all combinations of cancellation values as in model (B16), with
		all possible unbiased exponent values of subnormal results.

	:param flen: Size of the floating point registers
	:param opcode: Opcode for which the coverpoints are to be generated
	:param ops: No. of Operands taken by the opcode
	:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

	:type flen: int
	:type opcode: str
	:type ops: int
	:type seed: int

	Abstract Dataset Description:
		Intermediate Result.exp - max(addend.exp, multiplication result.exp) ∈ [-(2 * p + 1), 1] → Condition 1 (Exponents are subnormal)
		Operand 1 {operation 1} Operand 2 {operation 2} Operand 3 = Condition 1

	Implementation:
		- It functions the same as model B16 with calculating the additional unbiased exponent values of subnormal results.
		- Operands 1 and 2 are randomly initialized in the range and the subsequent operand 3 value is found so that the fused operation cancels down to the (subnormal) intermediate result ``ir``.
		- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
		- Coverpoints are then appended with rounding mode "0" for that particular opcode.
	'''
	# Only the base mnemonic matters ('fmadd.s' -> 'fmadd').
	opcode = opcode.split('.')[0]
	# High Decimal precision so the flen==64 operand algebra does not lose bits.
	getcontext().prec = 40
	# Width-specific IEEE-754 extreme values used to bound the random operands.
	if flen == 32:
		ieee754_maxnorm = '0x1.7fffffp+127'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.000001p-126'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.7fffffp-126'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		limnum = maxnum
	elif flen == 64:
		ieee754_maxnorm = '0x1.fffffffffffffp+1023'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.0000000000001p-1022'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		# Cap at 2^508 so products of two operands cannot overflow to inf.
		ieee754_limnum = '0x1.fffffffffffffp+507'
		limnum = float.fromhex(ieee754_limnum)
	# Per-opcode default seeds keep the generated datasets reproducible.
	if seed == -1:
		if opcode in 'fmadd':
			random.seed(0)
		elif opcode in 'fmsub':
			random.seed(1)
		elif opcode in 'fnmadd':
			random.seed(2)
		elif opcode in 'fnmsub':
			random.seed(3)
	else:
		random.seed(seed)
	b17_comb = []
	for i in range(200):
		rs1 = random.uniform(minsubnorm,limnum)
		rs2 = random.uniform(minsubnorm,limnum)
		# Target intermediate result: a subnormal, and never larger than the
		# product (redraw keeps the cancellation well-defined).
		ir = random.uniform(minsubnorm,maxsubnorm)
		if ir > rs1*rs2: ir = random.uniform(minsubnorm,rs1*rs2)
		# Solve opcode(rs1, rs2, rs3) == ir for rs3; Decimal math for flen==64.
		if opcode in 'fmadd':
			if flen == 32:
				rs3 = ir - rs1*rs2
			elif flen == 64:
				rs3 = Decimal(ir) - Decimal(rs1)*Decimal(rs2)
		elif opcode in 'fnmadd':
			if flen == 32:
				rs3 = -1*rs1*rs2 - ir
			elif flen == 64:
				rs3 = -1*Decimal(rs1)*Decimal(rs2) - Decimal(ir)
		elif opcode in 'fmsub':
			if flen == 32:
				rs3 = rs1*rs2 - ir
			elif flen == 64:
				rs3 = Decimal(rs1)*Decimal(rs2) - Decimal(ir)
		elif opcode in 'fnmsub':
			if flen == 32:
				rs3 = ir + rs1*rs2
			elif flen == 64:
				rs3 = Decimal(ir) + Decimal(rs1)*Decimal(rs2)
		if opcode in ['fmadd','fmsub','fnmadd','fnmsub']:
			b17_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
	coverpoints = []
	comment = ' | Multiply-Add: Cancellation ---> Subnormal result '
	for c in b17_comb:
		cvpt = ""
		# Fused ops always carry three source operands, hence the fixed range.
		for x in range(1, 4):
			# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
			cvpt += (extract_fields(flen,c[x-1],str(x)))
			cvpt += " and "
		cvpt += 'rm_val == 0'
		cvpt += ' # '
		for y in range(1, ops+1):
			cvpt += 'rs'+str(y)+'_val=='
			cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
			if(y != ops):
				cvpt += " and "
		cvpt += comment
		coverpoints.append(cvpt)
	mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
		(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B17 for '+opcode+' !'
	logger.info(mess)
	coverpoints = comments_parser(coverpoints)
	return coverpoints
def _ibm_b18_operands(opcode, flen, ir, rs1, res):
	'''
	Given an intermediate product target *ir*, a random first operand *rs1*
	and a target final result *res*, solve for (rs2, rs3) such that
	``opcode(rs1, rs2, rs3)`` produces *res* while the product rs1*rs2 equals
	(+/-)*ir*. float arithmetic is used for flen==32, Decimal for flen==64.
	'''
	if opcode in 'fmadd':
		if flen == 32:
			rs2 = ir/rs1
			rs3 = res - ir
		elif flen == 64:
			rs2 = Decimal(ir)/Decimal(rs1)
			rs3 = Decimal(res) - Decimal(ir)
	elif opcode in 'fnmadd':
		if flen == 32:
			rs2 = -1*ir/rs1
			rs3 = -1*res + ir
		elif flen == 64:
			rs2 = -1*Decimal(ir)/Decimal(rs1)
			# NOTE(review): the sign of the ir term differs from the flen==32
			# path (-res + ir vs -res - ir). Preserved exactly as in the
			# original — confirm which form is intended.
			rs3 = -1*Decimal(res) - Decimal(ir)
	elif opcode in 'fmsub':
		if flen == 32:
			rs2 = ir/rs1
			rs3 = ir - res
		elif flen == 64:
			rs2 = Decimal(ir)/Decimal(rs1)
			rs3 = Decimal(ir) - Decimal(res)
	elif opcode in 'fnmsub':
		if flen == 32:
			rs2 = -1*ir/rs1
			rs3 = res - ir
		elif flen == 64:
			rs2 = -1*Decimal(ir)/Decimal(rs1)
			rs3 = Decimal(res) - Decimal(ir)
	return rs2, rs3

def ibm_b18(flen, opcode, ops, seed=-1):
	'''
	IBM Model B18 Definition:
		This model checks different cases where the multiplication causes some event
		in the product while the addition cancels this event.

		1. Product: Enumerate all options for LSB, Guard and Sticky bit. Intermediate Result: Exact (Guard and Sticky are zero).
		2. Product: Take overflow values from (B4) "Overflow". Intermediate Result: No overflow
		3. Product: Take underflow values from model (B5) "Underflow". Intermediate Result: No underflow

	:param flen: Size of the floating point registers
	:param opcode: Opcode for which the coverpoints are to be generated
	:param ops: No. of Operands taken by the opcode
	:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

	:type flen: int
	:type opcode: str
	:type ops: int
	:type seed: int

	Implementation:
		- Firstly, cancellation using the B3 model as base is performed.
		- Next model is the replica of the B4 model which takes into account the overflow of value for guard, round and sticky bits
		- The final model is obtained from the B5 model and different operations are done for underflow in decimal format.
		- For every intermediate-result entry, rs1 is drawn at random and
		  (rs2, rs3) are derived by _ibm_b18_operands so the fused operation
		  lands on a fixed final result while the product hits the entry.
		- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
		- Coverpoints are then appended with rounding mode "0" for that particular opcode.
	'''
	opcode = opcode.split('.')[0]
	getcontext().prec = 40
	if seed == -1:
		if opcode in 'fmadd':
			random.seed(0)
		elif opcode in 'fnmadd':
			random.seed(1)
		elif opcode in 'fmsub':
			random.seed(2)
		elif opcode in 'fnmsub':
			random.seed(3)
	else:
		random.seed(seed)
	# ---- Part 1: Cancellation of B3 (LSB/Guard/Sticky enumeration) ----
	if flen == 32:
		ieee754_maxnorm = '0x1.7fffffp+127'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_num = []
		lsb = []
		for i in fsubnorm+fnorm:
			# Record the LSB of each base number (twice: once per sign copy).
			if int(i[-1],16)%2 == 1:
				lsb.append('1')
				lsb.append('1')
			else:
				lsb.append('0')
				lsb.append('0')
			float_val = float.hex(fields_dec_converter(32,i))
			# Store both sign variants, truncating the hex significand to the
			# single-precision width.
			if float_val[0] != '-':
				ieee754_num.append(float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
				ieee754_num.append('-'+float_val.split('p')[0][0:10]+'p'+float_val.split('p')[1])
			else:
				ieee754_num.append(float_val.split('p')[0][0:11]+'p'+float_val.split('p')[1])
				ieee754_num.append(float_val.split('p')[0][1:11]+'p'+float_val.split('p')[1])
		ir_dataset = []
		for k in range(len(ieee754_num)):
			# Append an extra hex digit encoding the Guard/Round/Sticky pattern.
			for i in range(2,16,2):
				grs = '{:04b}'.format(i)
				if ieee754_num[k][0] == '-': sign = '1'
				else: sign = '0'
				ir_dataset.append([ieee754_num[k].split('p')[0]+str(i)+'p'+ieee754_num[k].split('p')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
		for i in range(len(ir_dataset)):
			ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
	elif flen == 64:
		maxnum = float.fromhex('0x1.fffffffffffffp+1023')
		ieee754_num = []
		lsb = []
		for i in dsubnorm+dnorm:
			if int(i[-1],16)%2 == 1:
				lsb.append('1')
				lsb.append('1')
			else:
				lsb.append('0')
				lsb.append('0')
			float_val = str(fields_dec_converter(64,i))
			if float_val[0] != '-':
				ieee754_num.append(float_val)
				ieee754_num.append('-'+float_val)
			else:
				ieee754_num.append(float_val)
				ieee754_num.append(float_val[1:])
		ir_dataset = []
		for k in range(len(ieee754_num)):
			for i in range(2,16,2):
				grs = '{:04b}'.format(i)
				if ieee754_num[k][0] == '-': sign = '1'
				else: sign = '0'
				# Decimal string arithmetic: nudge the value by i*16^-14 to
				# plant the GRS pattern beyond the representable significand.
				ir_dataset.append([str(Decimal(ieee754_num[k].split('e')[0])+Decimal(pow(i*16,-14)))+'e'+ieee754_num[k].split('e')[1],' | Guard = '+grs[0]+' Sticky = '+grs[2]+' Sign = '+sign+' LSB = '+lsb[k] + ': Multiply add - Guard & Sticky Cancellation'])
	b18_comb = []
	# Fixed final-result target shared by all three parts.
	res = float.fromhex('0x1.7ffff0p+100')
	for i in range(len(ir_dataset)):
		rs1 = random.uniform(1,maxnum)
		rs2, rs3 = _ibm_b18_operands(opcode, flen, ir_dataset[i][0], rs1, res)
		if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
			b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
	ir_dataset1 = ir_dataset
	# ---- Part 2: Cancellation of B4 (overflow neighbourhood) ----
	if flen == 32:
		ieee754_maxnorm_p = '0x1.7fffffp+127'
		ieee754_maxnorm_n = '0x1.7ffffep+127'
		maxnum = float.fromhex(ieee754_maxnorm_p)
		ir_dataset = []
		for i in range(2,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([ieee754_maxnorm_p.split('p')[0]+str(i)+'p'+ieee754_maxnorm_p.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
			ir_dataset.append([ieee754_maxnorm_n.split('p')[0]+str(i)+'p'+ieee754_maxnorm_n.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
		for i in range(len(ir_dataset)):
			ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
	elif flen == 64:
		maxnum = float.fromhex('0x1.fffffffffffffp+1023')
		maxdec_p = str(maxnum)
		maxdec_n = str(float.fromhex('0x1.ffffffffffffep+1023'))
		ir_dataset = []
		for i in range(2,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([str(Decimal(maxdec_p.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_p.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
			ir_dataset.append([str(Decimal(maxdec_n.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+maxdec_n.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Maxnorm - '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Overflow Cancellation'])
	for i in range(len(ir_dataset)):
		rs1 = random.uniform(1,maxnum)
		rs2, rs3 = _ibm_b18_operands(opcode, flen, ir_dataset[i][0], rs1, res)
		if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
			b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
	ir_dataset2 = ir_dataset
	# ---- Part 3: Cancellation of B5 (underflow neighbourhood) ----
	if flen == 32:
		ieee754_maxnorm = '0x1.7fffffp+127'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.000001p-126'
		ir_dataset = []
		for i in range(0,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([ieee754_minsubnorm.split('p')[0]+str(i)+'p'+ieee754_minsubnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
		ieee754_minnorm = '0x1.000000p-126'
		for i in range(0,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([ieee754_minnorm.split('p')[0]+str(i)+'p'+ieee754_minnorm.split('p')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
		# Mirror every entry with its negation.
		n = len(ir_dataset)
		for i in range(n):
			ir_dataset[i][0] = float.fromhex(ir_dataset[i][0])
			ir_dataset.append([-1*ir_dataset[i][0],ir_dataset[i][1]])
	elif flen == 64:
		maxnum = float.fromhex('0x1.fffffffffffffp+1023')
		minsubdec = '5e-324'
		ir_dataset = []
		for i in range(2,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([str(Decimal(minsubdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minsubdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minsubnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
		minnormdec = '2.2250738585072014e-308'
		ir_dataset.append([minsubdec, ' | Guard = 0 Round = 0 Sticky = 0 --> Minsubnorm + 0 ulp'])
		ir_dataset.append([minnormdec,' | Guard = 0 Round = 0 Sticky = 0 --> Minnorm + 0 ulp'])
		for i in range(2,16,2):
			grs = '{:04b}'.format(i)
			ir_dataset.append([str(Decimal(minnormdec.split('e')[0])+Decimal(pow(i*16,-14)))+'e'+minnormdec.split('e')[1],' | Guard = '+grs[0]+' Round = '+grs[1]+' Sticky = '+grs[2]+' --> Minnorm + '+str(int(grs[0:3],2))+' ulp' + ': Multiply add - Underflow Cancellation'])
		n = len(ir_dataset)
		for i in range(n):
			ir_dataset.append(['-'+ir_dataset[i][0],ir_dataset[i][1]])
	for i in range(len(ir_dataset)):
		rs1 = random.uniform(1,maxnum)
		rs2, rs3 = _ibm_b18_operands(opcode, flen, ir_dataset[i][0], rs1, res)
		if opcode in ['fmadd','fnmadd','fmsub','fnmsub']:
			b18_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2)),floatingPoint_tohex(flen,float(rs3))))
	ir_dataset3 = ir_dataset
	# Concatenate the per-part comments in the same order the coverpoints
	# were generated: positions in ir_dataset match positions in b18_comb.
	ir_dataset = ir_dataset1 + ir_dataset2 + ir_dataset3
	coverpoints = []
	for k, c in enumerate(b18_comb):
		cvpt = ""
		for x in range(1, ops+1):
			# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
			cvpt += (extract_fields(flen,c[x-1],str(x)))
			cvpt += " and "
		cvpt += 'rm_val == 0'
		cvpt += ' # '
		for y in range(1, ops+1):
			cvpt += 'rs'+str(y)+'_val=='
			cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
			if(y != ops):
				cvpt += " and "
		cvpt += ir_dataset[k][1]
		coverpoints.append(cvpt)
	mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
		(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B18 for '+opcode+' !'
	logger.info(mess)
	coverpoints = comments_parser(coverpoints)
	return coverpoints
def ibm_b19(flen, opcode, ops, seed=-1):
	'''
	IBM Model B19 Definition:
		This model checks various possible differences between the two inputs.
		A test-case will be created for each combination of the following table::

			First input	Second input	Difference between exponents	Difference between significands
			+Normal		+Normal		>0	>0
			-Normal		-Normal		=0	=0
			+SubNormal	+SubNormal	<0	<0
			-SubNormal	-SubNormal
			0		0

	:param flen: Size of the floating point registers
	:param opcode: Opcode for which the coverpoints are to be generated
	:param ops: No. of Operands taken by the opcode
	:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

	:type flen: int
	:type opcode: str
	:type ops: int
	:type seed: int

	Abstract Dataset Description:
		Operand1 {operation} Operand2 = Derived from the table above

	Implementation:
		- Normal (positive and negative), subnormal (positive and negative) arrays are randomly initialized within their respectively declared ranges.
		- For every ordered pair (i, j) of base values, six operand pairs are
		  emitted by mixing i's and j's decimal significand and exponent, so
		  every sign combination of exponent/significand differences appears.
		- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
		- Coverpoints are then appended with a rounding mode chosen per opcode.
	'''
	# Only the base mnemonic matters ('fmin.s' -> 'fmin').
	opcode = opcode.split('.')[0]
	getcontext().prec = 40
	# Width-specific IEEE-754 extreme values bounding the random base values.
	if flen == 32:
		ieee754_maxnorm = '0x1.7fffffp+127'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.000001p-126'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.7fffffp-126'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		limnum = maxnum
	elif flen == 64:
		ieee754_maxnorm = '0x1.fffffffffffffp+1023'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.0000000000001p-1022'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		ieee754_limnum = '0x1.fffffffffffffp+507'
		limnum = float.fromhex(ieee754_limnum)
	# Per-opcode default seeds for reproducibility.
	# NOTE(review): 'feq' and 'fle' intentionally(?) share seed 3 — confirm;
	# also `opcode in 'fmin'` is a substring test, not an equality test.
	if seed == -1:
		if opcode in 'fmin':
			random.seed(0)
		elif opcode in 'fmax':
			random.seed(1)
		elif opcode in 'flt':
			random.seed(2)
		elif opcode in 'feq':
			random.seed(3)
		elif opcode in 'fle':
			random.seed(3)
	else:
		random.seed(seed)
	b19_comb = []
	comment = []
	# Five random representatives of each class, plus exact zero.
	normal = []
	normal_neg = []
	sub_normal = []
	sub_normal_neg = []
	zero = [[0e0,'Zero']]
	for i in range(5):
		normal.append([random.uniform(1,maxnum),'Normal'])
		normal_neg.append([random.uniform(-1*maxnum,-1),'-Normal'])
		sub_normal.append([random.uniform(minsubnorm,maxsubnorm),'Subnormal'])
		sub_normal_neg.append([random.uniform(-1*maxsubnorm,-1*minsubnorm),'-Subnormal'])
	all_num = normal + normal_neg + sub_normal + sub_normal_neg + zero
	for i in all_num:
		for j in all_num:
			# Split each value's repr into decimal significand and exponent.
			# NOTE(review): assumes str(value) contains an 'e' (scientific
			# notation); mid-magnitude draws without an 'e' would raise
			# IndexError — presumably the extreme ranges guarantee 'e'. Verify.
			if i[0] != 0:
				i_sig = str(i[0]).split('e')[0]
				i_exp = str(i[0]).split('e')[1]
			else:
				i_sig = '0'
				i_exp = '0'
			if j[0] != 0:
				j_sig = str(j[0]).split('e')[0]
				j_exp = str(j[0]).split('e')[1]
			else:
				j_sig = '0'
				j_exp = '0'
			# Relations used only in the human-readable comments below.
			if float(i_sig) >= float(j_sig): sig_sign = '>='
			else: sig_sign = '<'
			if float(i_exp) >= float(j_exp): exp_sign = '>='
			else: exp_sign = '<'
			# Pair 1: (i, j) as-is.
			rs1 = float(i_sig+'e'+i_exp)
			rs2 = float(j_sig+'e'+j_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
			# Pair 2: exponents swapped between the operands.
			rs1 = float(i_sig+'e'+j_exp)
			rs2 = float(j_sig+'e'+i_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs2_exp ' + exp_sign + ' rs1_exp')
			# Pair 3: mirror of pair 2 (operands exchanged).
			rs1 = float(j_sig+'e'+i_exp)
			rs2 = float(i_sig+'e'+j_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + i[1] + ', rs2_sigificand ' + sig_sign + ' rs1_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
			# Pair 4: equal exponents, differing significands.
			# NOTE(review): rs1 here mixes i's significand with j's exponent,
			# but the emitted comment labels rs1 as j's class — confirm intent.
			rs1 = float(i_sig+'e'+j_exp)
			rs2 = float(j_sig+'e'+j_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + j[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand ' + sig_sign + ' rs2_significand' + ', rs1_exp = rs2_exp')
			# Pair 5: equal significands, differing exponents.
			rs1 = float(i_sig+'e'+i_exp)
			rs2 = float(i_sig+'e'+j_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + j[1] + ', rs1_sigificand = rs2_significand' + ', rs1_exp ' + exp_sign + ' rs2_exp')
			# Pair 6: identical operands.
			rs1 = float(i_sig+'e'+i_exp)
			rs2 = float(i_sig+'e'+i_exp)
			b19_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
			comment.append(' | rs1 --> ' + i[1] + ', rs2 --> ' + i[1] + ', rs1_sigificand = rs2_significand, rs1_exp = rs2_exp')
	coverpoints = []
	k = 0
	for c in b19_comb:
		cvpt = ""
		for x in range(1, 3):
			# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
			cvpt += (extract_fields(flen,c[x-1],str(x)))
			cvpt += " and "
		# Rounding mode depends on the opcode group.
		if opcode in ["fadd","fsub","fmul","fdiv","fsqrt","fmadd","fnmadd","fmsub","fnmsub","fcvt","fmv","fle","fmv","fmin","fsgnj"]:
			cvpt += 'rm_val == 0'
		elif opcode in ["fclass","flt","fmax","fsgnjn"]:
			cvpt += 'rm_val == 1'
		elif opcode in ["feq","flw","fsw","fsgnjx"]:
			cvpt += 'rm_val == 2'
		cvpt += ' # '
		for y in range(1, ops+1):
			cvpt += 'rs'+str(y)+'_val=='
			cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
			if(y != ops):
				cvpt += " and "
		cvpt += comment[k]
		coverpoints.append(cvpt)
		k += 1
	mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
		(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B19 for '+opcode+' !'
	logger.info(mess)
	coverpoints = comments_parser(coverpoints)
	return coverpoints
def _ibm_b20_ir(flen, sig):
	'''
	Attach a random sign bit and a random biased exponent to the significand
	bit-string *sig* and return the resulting flen-wide value in decimal form.

	Consumes exactly two random draws (exponent first, then sign) so the
	overall random stream stays aligned with the original generation order.
	'''
	# 8 exponent bits for single precision, 11 for double.
	exp_bits = 8 if flen == 32 else 11
	exp = '{:0{}b}'.format(random.getrandbits(exp_bits), exp_bits)
	sgn = '{:01b}'.format(random.getrandbits(1))
	ir_bin = '0b' + sgn + exp + sig
	# Prepend a '1' before int()/hex() so leading zero bits are preserved,
	# then strip it ([3:] drops '0x' plus that leading digit).
	return fields_dec_converter(flen, '0x'+hex(int('1'+ir_bin[2:],2))[3:])

def ibm_b20(flen, opcode, ops, seed=-1):
	'''
	IBM Model B20 Definition:
		This model will create test-cases such that the significand of the intermediate results will cover each of the following patterns:
		Mask on the intermediate result significand (excluding the leading "1" )

		.. code-block::

			xxx...xxx10
			xxx...xx100
			xxx...x1000
			…
			xx1...00000
			x10...00000
			100...00000
			000...00000

		The sticky bit of the intermediate result should always be 0. In case of the remainder operation, we will look at the result of the division in order to find the interesting test-cases.

		Operation: Divide, Square-root.

	:param flen: Size of the floating point registers
	:param opcode: Opcode for which the coverpoints are to be generated
	:param ops: No. of Operands taken by the opcode
	:param seed: Initial seed value of the random library. (Predefined to -1. Actual value is set with respect to the opcode calling the function)

	:type flen: int
	:type opcode: str
	:type ops: int
	:type seed: int

	Abstract Dataset Description:
		Intermediate Results = [Random bits are taken initially to form xxx...xxx10. The pattern described above is then formed]
		Operand1 {operation} Operand2 = Intermediate Results

	Implementation:
		- For each pattern, random leading bits are drawn and combined with a
		  random sign/exponent (via _ibm_b20_ir) into an intermediate value.
		- Operand 1 (rs1) is randomly initialized in the range (1, limnum) and the subsequent operand 2 value is found.
		- The operand values are then passed into the extract_fields function to get individual fields in a floating point number (sign, exponent and mantissa).
		- Coverpoints are then appended with rounding mode "0" for that particular opcode.
	'''
	opcode = opcode.split('.')[0]
	getcontext().prec = 60
	if seed == -1:
		if opcode in 'fdiv':
			random.seed(1)
		elif opcode in 'fsqrt':
			random.seed(2)
	else:
		random.seed(seed)
	if flen == 32:
		ieee754_maxnorm = '0x1.7fffffp+127'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.000001p-126'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.7fffffp-126'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		limnum = maxnum
		ir_dataset = []
		# 23-bit significand: i random bits + '1' + trailing zeros; 5 samples each.
		for i in range(1,21,1):
			for k in range(5):
				bits = bin(random.getrandbits(i))[2:]
				bits = '0'*(i-len(bits)) + bits
				trailing_zero = 22-i
				sig = bits+'1'+'0'*trailing_zero
				ir = _ibm_b20_ir(flen, sig)
				ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
		# Boundary patterns 100...0 and 000...0.
		sig = '1'+'0'*22
		ir_dataset.append([_ibm_b20_ir(flen, sig), 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*22])
		sig = '0'*23
		ir_dataset.append([_ibm_b20_ir(flen, sig), 'Intermediate result significand: '+ sig + ' Pattern: ' + '0' + '0'*22])
	elif flen == 64:
		ieee754_maxnorm = '0x1.fffffffffffffp+1023'
		maxnum = float.fromhex(ieee754_maxnorm)
		ieee754_minsubnorm = '0x0.0000000000001p-1022'
		minsubnorm = float.fromhex(ieee754_minsubnorm)
		ieee754_maxsubnorm = '0x0.fffffffffffffp-1022'
		maxsubnorm = float.fromhex(ieee754_maxsubnorm)
		ieee754_limnum = '0x1.fffffffffffffp+507'
		limnum = float.fromhex(ieee754_limnum)
		ir_dataset = []
		# 52-bit significand: i random bits + '1' + trailing zeros; 5 samples each.
		for i in range(1,50,1):
			for k in range(5):
				bits = bin(random.getrandbits(i))[2:]
				bits = '0'*(i-len(bits)) + bits
				trailing_zero = 51-i
				sig = bits+'1'+'0'*trailing_zero
				ir = _ibm_b20_ir(flen, sig)
				ir_dataset.append([ir, ' | Intermediate result significand: ' + sig + ' Pattern: ' + 'X'*i + '1' + '0'*trailing_zero])
		# Fix: these two boundary patterns previously drew only 8 exponent bits
		# (copy-paste from the 32-bit branch), yielding a 61-bit word and a
		# malformed hex string for fields_dec_converter; _ibm_b20_ir now draws
		# the full 11 bits for flen==64.
		sig = '1'+'0'*51
		ir_dataset.append([_ibm_b20_ir(flen, sig), 'Intermediate result significand: '+ sig + ' Pattern: ' + '1' + '0'*51])
		sig = '0'*52
		ir_dataset.append([_ibm_b20_ir(flen, sig), 'Intermediate result significand: ' + sig + ' Pattern: ' + '0' + '0'*52])
	b20_comb = []
	for i in range(len(ir_dataset)):
		rs1 = random.uniform(1, limnum)
		# Solve opcode(operands) == ir for the remaining operand.
		if opcode in 'fdiv':
			if flen == 32:
				rs2 = rs1/ir_dataset[i][0]
			elif flen == 64:
				rs2 = Decimal(rs1)/Decimal(ir_dataset[i][0])
		elif opcode in 'fsqrt':
			if flen == 32:
				rs2 = ir_dataset[i][0]*ir_dataset[i][0]
			elif flen == 64:
				rs2 = Decimal(ir_dataset[i][0])*Decimal(ir_dataset[i][0])
		if opcode in ['fdiv']:
			b20_comb.append((floatingPoint_tohex(flen,float(rs1)),floatingPoint_tohex(flen,float(rs2))))
		elif opcode in 'fsqrt':
			# fsqrt is unary: only the squared value is an operand.
			b20_comb.append((floatingPoint_tohex(flen,float(rs2)),))
	coverpoints = []
	for k, c in enumerate(b20_comb):
		cvpt = ""
		for x in range(1, ops+1):
			# cvpt += 'rs'+str(x)+'_val=='+str(c[x-1]) # uncomment this if you want rs1_val instead of individual fields
			cvpt += (extract_fields(flen,c[x-1],str(x)))
			cvpt += " and "
		cvpt += 'rm_val == 0'
		cvpt += ' # '
		for y in range(1, ops+1):
			cvpt += 'rs'+str(y)+'_val=='
			cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
			if(y != ops):
				cvpt += " and "
		cvpt += ir_dataset[k][1]
		coverpoints.append(cvpt)
	mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
		(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B20 for '+opcode+' !'
	logger.info(mess)
	coverpoints = comments_parser(coverpoints)
	return coverpoints
def ibm_b21(flen, opcode, ops):
	'''
	IBM Model B21 Definition:
		Exercises the Divide-By-Zero exception flag: every ordered combination
		of the basic datatypes (zero, subnormal, normal, infinity, default NaN,
		quiet NaN, signalling NaN) is generated across all operand positions,
		so divide/remainder see 0, non-zero, infinity and NaN in each slot.

	:param flen: Size of the floating point registers
	:param opcode: Opcode for which the coverpoints are to be generated
	:param ops: No. of Operands taken by the opcode

	:type flen: int
	:type opcode: str
	:type ops: int

	Implementation:
		- The per-width basic_types list collects representatives of each class.
		- itertools.product enumerates every ops-tuple over that list.
		- extract_fields turns each operand into per-field coverpoint checks;
		  num_explain annotates the human-readable trailer after '#'.
	'''
	# Representative values for every basic datatype at the requested width.
	if flen == 32:
		basic_types = fzero + fsubnorm + fnorm + finfinity + fdefaultnan + [fqnan[0], fqnan[3]] + \
			[fsnan[0], fsnan[3]]
	elif flen == 64:
		basic_types = dzero + dsubnorm + dnorm +\
			dinfinity + ddefaultnan + [dqnan[0], dqnan[1]] + \
			[dsnan[0], dsnan[1]]
	else:
		logger.error('Invalid flen value!')
		sys.exit(1)
	base_opcode = opcode.split('.')[0]
	coverpoints = []
	# Full cross product over every operand position.
	for operands in itertools.product(basic_types, repeat=ops):
		field_checks = [extract_fields(flen, val, str(pos)) for pos, val in enumerate(operands, 1)]
		cvpt = ''.join(chk + " and " for chk in field_checks)
		if base_opcode in ["fdiv"]:
			cvpt += 'rm_val == 0'
		cvpt += ' # '
		cvpt += " and ".join('rs'+str(pos)+'_val=='+num_explain(flen, val)+'('+str(val)+')' for pos, val in enumerate(operands, 1))
		coverpoints.append(cvpt)
	total = len(coverpoints)
	mess = 'Generated' + (' '*(5-len(str(total)))) + str(total) + ' ' + \
		(str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B21 for ' + opcode + ' !'
	logger.info(mess)
	coverpoints = comments_parser(coverpoints)
	return coverpoints
def ibm_b22(flen, opcode, ops, seed=10):
    '''
    IBM Model B22 Definition:
        This model creates test cases for each of the following exponents (unbiased):

        1. Smaller than -3
        2. All the values in the range [-3, integer width+3]
        3. Larger than integer width + 3

        For each exponent the sign is chosen randomly.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10. A value
        of -1 selects a fixed, opcode-specific seed so each conversion instruction
        gets a distinct but reproducible random stream.)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - For every biased exponent covering the unbiased range [-3, integer width+2], a
      random sign and significand are combined into one operand (case 2).
    - One extra operand is generated with a random exponent below that range (case 1)
      and one above it (case 3).
    - The operand values are passed into the extract_fields function to get individual
      fields of the floating point number (sign, exponent and mantissa).
    - Coverpoints are emitted with rounding mode 0 only.
    '''
    # Normalize e.g. 'fcvt.w.s' -> 'fcvt.w' (keep only the first two dotted parts).
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # NOTE(review): for every 'fcvt.*' opcode, opcode[2] is 'v', so neither branch
    # below can fire and flen keeps its passed-in value — confirm whether this was
    # meant to inspect the source-format suffix ('.s'/'.d') instead.
    if opcode[2] == 's': flen = 32
    elif opcode[2] == 'd': flen = 64
    getcontext().prec = 40
    # Destination integer width: .w/.wu convert to 32-bit, .l/.lu to 64-bit targets.
    xlen = 0
    if opcode in 'fcvt.w':
        xlen = 32
    elif opcode in 'fcvt.l':
        xlen = 64
    elif opcode in 'fcvt.wu':
        xlen = 32
    elif opcode in 'fcvt.lu':
        xlen = 64
    if seed == -1:
        if opcode in 'fcvt.w':
            random.seed(0)
        elif opcode in 'fcvt.l':
            random.seed(1)
        elif opcode in 'fcvt.wu':
            random.seed(2)
        elif opcode in 'fcvt.lu':
            random.seed(3)
    else:
        random.seed(seed)
    b22_comb = []
    if flen == 32:
        op_dataset = []
        # Case 2: every biased exponent in [124, xlen+129], i.e. unbiased [-3, xlen+2].
        for i in range(124,xlen+130,1):
            bits = random.getrandbits(23)
            bits = bin(bits)[2:]
            front_zero = 23-len(bits)
            sig = '0'*front_zero + bits    # zero-pad the significand to 23 bits
            exp = i
            exp = '{:08b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            # The '1'+bits trick preserves leading zeros in the binary->hex conversion.
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 1: one operand with a random biased exponent in [0, 124] (unbiased < -3).
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,124)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 3: one operand with a random biased exponent above xlen+129.
        bits = random.getrandbits(23)
        bits = bin(bits)[2:]
        front_zero = 23-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+130,255)
        exp = '{:08b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-127) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    elif flen == 64:
        op_dataset = []
        # Case 2: every biased exponent in [1020, xlen+1025], i.e. unbiased [-3, xlen+2].
        for i in range(1020,xlen+1026,1):
            bits = random.getrandbits(52)
            bits = bin(bits)[2:]
            front_zero = 52-len(bits)
            sig = '0'*front_zero + bits    # zero-pad the significand to 52 bits
            exp = i
            exp = '{:011b}'.format(exp)
            sgn = random.getrandbits(1)
            sgn = '{:01b}'.format(sgn)
            ir_bin = ('0b'+sgn+exp+sig)
            op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
            op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent in the range [-3, integer width+3]'])
            b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 1: one operand with a random biased exponent in [0, 1020] (unbiased < -3).
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(0,1020)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent less than -3'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
        # Case 3: one operand with a random biased exponent above xlen+1025.
        bits = random.getrandbits(52)
        bits = bin(bits)[2:]
        front_zero = 52-len(bits)
        sig = '0'*front_zero + bits
        exp = random.randint(xlen+1026,2047)
        exp = '{:011b}'.format(exp)
        sgn = random.getrandbits(1)
        sgn = '{:01b}'.format(sgn)
        ir_bin = ('0b'+sgn+exp+sig)
        op = fields_dec_converter(flen,'0x'+hex(int('1'+ir_bin[2:],2))[3:])
        op_dataset.append([op, ' | Exponent: ' + str(int(exp,2)-1023) + ', Exponent greater than (integer width+3)'])
        b22_comb.append((floatingPoint_tohex(flen,float(op)),))
    coverpoints = []
    k=0
    for c in b22_comb:
        cvpt = ""
        # Conversion instructions take a single FP operand, hence range(1, 2).
        for x in range(1, 2):
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        # k tracks the operand's descriptive comment in op_dataset.
        cvpt += op_dataset[k][1]
        coverpoints.append(cvpt)
        k=k+1
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+ \
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B22 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b23(flen, opcode, ops):
    '''
    IBM Model B23 Definition:
        This model creates boundary cases for the rounding to integers that might cause Overflow.

        A test case will be created with inputs equal to the maximum integer number in the
        destination's format (MaxInt), or close to it.

        NOTE(review): the original definition lists +/-MaxInt offset by 0.01/0.1/0.11/1,
        but the implementation instead takes the nine neighbouring *encodings*
        MaxInt-4 .. MaxInt+4 of the IEEE 754 representation of MaxInt.

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int

    Implementation:

    - Starting from the IEEE 754 encoding of MaxInt, the dataset is filled with the
      hexadecimal encodings offset by -4 .. +4.
    - The operand values are then passed into the extract_fields function to get
      individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    dataset = []
    if flen == 32:
        maxnum = 0x4f000000 # MaxInt (2**31-1) in IEEE 754 Floating Point Representation
        for i in range(-4,5):
            dataset.append((hex(int(maxnum)+i),"| MaxInt + ({})".format(str(i))))
    elif flen == 64:
        maxnum = 0x43e0000000000000 # MaxInt (2**63-1) in IEEE 754 Floating Point Representation
        for i in range(-4,5):
            dataset.append((hex(int(maxnum)+i),"| MaxInt + ({})".format(str(i))))
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv / fcvt.d.s ignore the rounding mode, so pin rm to 0 for them.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B23 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b24(flen, opcode, ops):
    '''
    IBM Model B24 Definition:
        This model creates boundary cases for rounding to integer that might cause major
        loss of accuracy. A test-case will be created for each of the following inputs:

        1. ±0
        2. ±0 ± 0.01 (¼)
        3. ±0 ± 0.1 (½)
        4. ±0 ± 0.11 (¾)
        5. ±1
        6. ±1 + 0.01 (¼)
        7. ±1 + 0.1 (½)
        8. ±1 + 0.11 (¾)

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :type flen: int
    :type opcode: str
    :type ops: int

    Implementation:

    - A nested loop over minnums, nums and both sign operators builds every
      combination op1(minnum) op2 num, together with its textual label.
    - Depending on flen, the values are converted to their IEEE 754 hexadecimal form.
    - The operand values are then passed into the extract_fields function to get
      individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    operations = ['+','-']
    nums = [0,0.01,0.1,0.11]
    minnums = [0,1]
    dataset = []
    for minnum in minnums:
        for num in nums:
            for op1 in operations:
                for op2 in operations:
                    # Compute op1(minnum) op2 num with direct arithmetic instead of
                    # eval() on the assembled string; the label keeps the textual form.
                    base = minnum if op1 == '+' else -minnum
                    value = base + num if op2 == '+' else base - num
                    dataset.append((value,op1+str(minnum)+op2+str(num)))
    b24_comb = []
    for data in dataset:
        # Round-trip through '%e' (6 significant digits) before hex encoding.
        t = "{:e}".format(data[0])
        b24_comb.append((floatingPoint_tohex(flen,float(t)),data[1]))
    # NOTE(review): set() deduplicates identical (hex, label) pairs, but its iteration
    # order depends on string hashing and so may vary across interpreter runs.
    b24_comb = set(b24_comb)
    coverpoints = []
    for c in b24_comb:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv / fcvt.d.s ignore the rounding mode, so pin rm to 0 for them.
            if "fmv" in opcode or opcode in "fcvt.d.s":
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B24 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b25(flen, opcode, ops, seed=10):
    '''
    IBM Model B25 Definition:
        This model creates a test-case for each of the following inputs:

        1. ±MaxInt
        2. ±0 (0 appears once; -0 does not exist as an integer)
        3. ±1
        4. ±Random number

    :param flen: Size of the floating point registers (selects the 32/64-bit MaxInt)
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - The dataset is formed as per the dataset description; rand_num is drawn
      uniformly from (1, MaxInt).
    - Since this model is for an integer to floating point conversion instruction,
      the operands are presented in decimal format.
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    getcontext().prec = 40
    dataset = [(0,"0"),(1,"1"),(-1,"-1")]
    if flen == 32:
        maxnum = 2**31-1
    elif flen == 64:
        maxnum = 2**63-1
    dataset.append((maxnum,"MaxInt"))
    dataset.append((-1*maxnum,"-MaxInt"))
    rand_num = int(random.uniform(1,maxnum))
    dataset.append((rand_num,"+ve Random Number"))
    dataset.append((-1*rand_num,"-ve Random Number"))
    b25_comb = []
    for data in dataset:
        b25_comb.append((int(data[0]),data[1]))
    coverpoints = []
    for c in b25_comb:
        for rm in range(0,5):
            cvpt = ""
            # NOTE(review): the constraint is always written against rs1_val, even
            # for x > 1 — fine for the single-operand conversions this is used with.
            for x in range(1, ops+1):
                cvpt += "rs1_val == "+str(c[x-1])
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv / fcvt.d.wu ignore the rounding mode, so pin rm to 0 for them.
            if "fmv" in opcode or opcode in "fcvt.d.wu":
                cvpt += str(0)
            else:
                cvpt += str(rm)
            cvpt += ' # Number = '
            cvpt += c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B25 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return (coverpoints)
def ibm_b26(xlen, opcode, ops, seed=10):
    '''
    IBM Model B26 Definition:
        This model creates a test-case for each possible value of the number of
        significant bits in the input operand (which is an integer). A test is created
        with an example from each of the following ranges:
        [0], [1], [2,3], [4,7], [8,15], ..., [(MaxInt+1)/2, MaxInt]

    :param xlen: Size of the integer registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type xlen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - For each bit-width bucket a random number is chosen in the corresponding range.
    - Since this model is for an integer to floating point conversion instruction,
      the operands are presented in decimal format.
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    dataset = [(0," # Number in [0]"),(1," # Number in [1]")]
    # i walks the upper bound of each bucket: 3, 7, 15, ..., 2**(xlen-1)-1.
    i = 3
    while(i<=2**(xlen-1)-1):
        rand_num = random.randint(int((i+1)/2),i)
        dataset.append((rand_num," # Random number chosen in the range: ["+str(int((i+1)/2))+", "+str(i)+"]"))
        i = i*2+1
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += "rs1_val == "+str(c[x-1])
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv / fcvt.d.wu ignore the rounding mode, so pin rm to 0 for them.
            if "fmv" in opcode or opcode in "fcvt.d.wu":
                cvpt += str(0)
            else:
                cvpt += str(rm)
            cvpt += c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if xlen == 32 else str(64)) + '-bit coverpoints using Model B26 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b27(flen, opcode, ops, seed=10):
    '''
    IBM Model B27 Definition:
        This model tests the conversion of NaNs from a wider format to a narrow one.
        One test case is created per NaN in the predefined signalling/quiet NaN lists
        for the given register width.

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10; unused here)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - The dataset is the concatenation of the predefined signalling and quiet NaN
      encodings for the selected width.
    - Each operand is decomposed via extract_fields (sign, exponent, mantissa) and
      emitted as a coverpoint with rounding mode 0 only.
    '''
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    if flen == 32:
        nan_values = fsnan + fqnan
    elif flen == 64:
        nan_values = dsnan + dqnan
    coverpoints = []
    for nan in nan_values:
        # Constraint side: one extract_fields clause per operand, then the rm pin.
        field_clauses = [extract_fields(flen, nan, str(pos)) for pos in range(1, ops + 1)]
        constraint = "".join(clause + " and " for clause in field_clauses) + 'rm_val == 0'
        # Comment side: human-readable operand descriptions joined with " and ".
        description = " and ".join(
            'rs' + str(pos) + '_val==' + num_explain(flen, nan) + '(' + str(nan) + ')'
            for pos in range(1, ops + 1)
        )
        coverpoints.append(constraint + ' # ' + description)
    count = str(len(coverpoints))
    width = str(32) if flen == 32 else str(64)
    mess = ('Generated' + ' ' * (5 - len(count)) + count + ' ' + width
            + '-bit coverpoints using Model B27 for ' + opcode + ' !')
    logger.info(mess)
    return comments_parser(coverpoints)
def ibm_b28(flen, opcode, ops, seed=10):
    '''
    IBM Model B28 Definition:
        This model tests the conversion of a floating point number to an integral value,
        represented in floating-point format. A test case will be created for each of
        the following inputs:

        1. +0
        2. A random number in the range (+0, +1)
        3. +1
        4. Every value in the range (1.00, 10.11] (1.25 to 2.75 in jumps of 0.25)
        5. A random number in the range (+1, +1.11..11*2^precision)
        6. MaxInt (used here in place of +1.11..11*2^precision)
        7. +Infinity
        8. NaN (one signalling, one quiet)
        9. -0
        10. A random number in the range (-1, -0)
        11. -1
        12. Every value in the range [-10.11, -1.00)
        13. A random number in the range (-1.11..11*2^precision, -1)
        14. -MaxInt
        15. -Infinity

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - All cases above are appended to the dataset for flen=32 or flen=64, together
      with a human-readable description; random values are drawn from the stated ranges.
    - The operand values are then passed into the extract_fields function to get
      individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are emitted with rounding mode 0 only.
    '''
    random.seed(seed)
    # Normalize e.g. 'fcvt.w.s' -> 'fcvt.w' (keep only the first two dotted parts).
    opcode = opcode.split('.')[0] + '.' + opcode.split('.')[1]
    # dataset: list of (hex encoding, description) pairs.
    dataset = []
    if flen == 32:
        dataset.append((fzero[0],"+0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((fone[0],"+1"))
        # i/100 walks 1.25, 1.50, ..., 2.75 (binary 1.01 .. 10.11 in steps of 0.25).
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(1,2**31-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(32,float(2**31-1)),"MaxInt"))
        dataset.append((finfinity[0],"+Infinity"))
        dataset.append((fsnan[0],"Signaling NaN"))
        dataset.append((fqnan[0],"Quiet NaN"))
        dataset.append((fzero[1],"-0"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((fone[1],"-1"))
        # i/100 walks -2.75, -2.50, ..., -1.25.
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(32, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        dataset.append((floatingPoint_tohex(32,float(random.uniform(-2**31-1,-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(32,float(-2**31-1)),"-MaxInt"))
        dataset.append((finfinity[1],"-Infinity"))
    elif flen == 64:
        # Same cases as above, with double-precision constants and a 64-bit MaxInt.
        dataset.append((dzero[0],"+0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(0,1))),"A random number in the range (+0, +1)"))
        dataset.append((done[0],"+1"))
        for i in range(125,300,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ (1,2.75]"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(1,2**63-1))),"A random number in the range (+1, +1.11..11*2^precision)"))
        dataset.append((floatingPoint_tohex(64,float(2**63-1)),"MaxInt"))
        dataset.append((dinfinity[0],"+Infinity"))
        dataset.append((dsnan[0],"Signaling NaN"))
        dataset.append((dqnan[0],"Quiet NaN"))
        dataset.append((dzero[1],"-0"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-1,0))),"A random number in the range (-1, -0)"))
        dataset.append((done[1],"-1"))
        for i in range(-275,-100,25):
            dataset.append((floatingPoint_tohex(64, i/100),"Number = "+str(i/100)+" => Number ∈ [-2.75,-1)"))
        dataset.append((floatingPoint_tohex(64,float(random.uniform(-2**63-1,-1))),"A random number in the range (-1.11..11*2^precision, -1)"))
        dataset.append((floatingPoint_tohex(64,float(-2**63-1)),"-MaxInt"))
        dataset.append((dinfinity[1],"-Infinity"))
    coverpoints = []
    for c in dataset:
        cvpt = ""
        for x in range(1, ops+1):
            cvpt += (extract_fields(flen,c[x-1],str(x)))
            cvpt += " and "
        cvpt += 'rm_val == 0'
        cvpt += ' # '
        for y in range(1, ops+1):
            cvpt += 'rs'+str(y)+'_val=='
            cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
            if(y != ops):
                cvpt += " and "
        # Append the human-readable description of the operand.
        cvpt += " | "+c[1]
        coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B28 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
def ibm_b29(flen, opcode, ops, seed=10):
    '''
    IBM Model B29 Definition:
        This model checks different cases of rounding of the floating point number.
        A test will be created for each possible combination of the Sign, LSB, Guard
        bit and the Sticky bit (16 cases for each operation).

        Rounding Mode: All

    :param flen: Size of the floating point registers
    :param opcode: Opcode for which the coverpoints are to be generated
    :param ops: No. of Operands taken by the opcode
    :param seed: Initial seed value of the random library. (Predefined to 10)
    :type flen: int
    :type opcode: str
    :type ops: int
    :type seed: int

    Implementation:

    - One random upper mantissa is drawn, then iterated over both signs and all 8
      combinations of the bottom three mantissa bits (LSB, Guard, Sticky).
    - The exponent is fixed at -3 so the shift performed during the actual
      conversion exercises those bottom bits.
    - The operand values are then passed into the extract_fields function to get
      individual fields in a floating point number (sign, exponent and mantissa).
    - Coverpoints are then appended with all rounding modes for that particular opcode.
    '''
    random.seed(seed)
    sgns = ["0","1"]
    dataset = []
    if flen == 32:
        # 20 random upper mantissa bits + 3 swept bits = 23-bit significand.
        mant = random.getrandbits(20)
        mant = '{:020b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                # i enumerates all (LSB, Guard, Sticky) combinations as 3 bits.
                LeastGuardSticky = '{:03b}'.format(i)
                # '01111100' is biased exponent 124, i.e. unbiased -3; the '1'+bits
                # trick preserves leading zeros in the binary -> hex conversion.
                hexnum = "0x" + hex(int("1"+sgn + "01111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    elif flen == 64:
        # 49 random upper mantissa bits + 3 swept bits = 52-bit significand.
        mant = random.getrandbits(49)
        mant = '{:049b}'.format(mant)
        for sgn in sgns:
            for i in range(8):
                LeastGuardSticky = '{:03b}'.format(i)
                # '01111111100' is biased exponent 1020, i.e. unbiased -3.
                hexnum = "0x" + hex(int("1"+sgn + "01111111100" + mant + LeastGuardSticky,2))[3:]
                dataset.append((hexnum,"Exp = -3; Sign = {}; LSB = {}; Guard = {}; Sticky = {}"\
                    .format(sgn,LeastGuardSticky[0],LeastGuardSticky[1],LeastGuardSticky[2])))
    coverpoints = []
    for c in dataset:
        for rm in range(0,5):
            cvpt = ""
            for x in range(1, ops+1):
                cvpt += (extract_fields(flen,c[x-1],str(x)))
                cvpt += " and "
            cvpt += 'rm_val == '
            # fmv / fcvt.d.s ignore the rounding mode, so pin rm to 0 for them.
            if "fmv" in opcode or "fcvt.d.s" in opcode:
                cvpt += '0'
            else:
                cvpt += str(rm)
            cvpt += ' # '
            for y in range(1, ops+1):
                cvpt += 'rs'+str(y)+'_val=='
                cvpt += num_explain(flen, c[y-1]) + '(' + str(c[y-1]) + ')'
                if(y != ops):
                    cvpt += " and "
            cvpt += " | "+c[1]
            coverpoints.append(cvpt)
    mess='Generated'+ (' '*(5-len(str(len(coverpoints)))))+ str(len(coverpoints)) +' '+\
    (str(32) if flen == 32 else str(64)) + '-bit coverpoints using Model B29 for '+opcode+' !'
    logger.info(mess)
    coverpoints = comments_parser(coverpoints)
    return coverpoints
| [
6738,
374,
2304,
85,
62,
271,
330,
13,
6404,
1330,
49706,
198,
11748,
340,
861,
10141,
198,
11748,
2878,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
10688,
198,
6738,
32465,
1330,
1635,
198,
198,
69,
22570,
220,
220,
220,
220,
22... | 2.170405 | 82,615 |
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.storage.stores.nosql.mongo.dao.lookup import Lookup
| [
37811,
198,
15269,
357,
66,
8,
12131,
327,
26631,
4339,
22196,
16284,
11,
3457,
13,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
198,
22897,
341,
3696,
... | 3.888889 | 297 |
# -*- coding: utf-8
"""Unit tests for the vndb event plugin."""
# pylint: disable=missing-docstring,too-few-public-methods
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from ...message import collapse
from ...test.helpers import CommandTestMixin
from . import Default
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
37811,
26453,
5254,
329,
262,
410,
358,
65,
1785,
13877,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
11,
18820,
12,
32146,
12,
11377,
12,
24396,
82,
... | 3.389474 | 95 |
# coding=utf-8
from __future__ import absolute_import, print_function
import posixpath
from urllib import urlencode
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import parse_qsl, urlsplit, urlunsplit
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
try:
# noinspection PyUnresolvedReferences
from propane.flask.urls import *
except ImportError:
pass
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
198,
11748,
1426,
844,
6978,
198,
6738,
2956,
297,
571,
1330,
2956,
11925,
8189,
198,
198,
2,
645,
1040,
14978,
9485,
3118,
4... | 2.861314 | 137 |
from rest_framework import serializers
from radar.models import BaseRadares, Contagens, Viagens, Trajetos
from rest_framework_cache.serializers import CachedSerializerMixin
from rest_framework_cache.registry import cache_registry
cache_registry.register(BaseRadaresSerializer)
cache_registry.register(ContagensSerializer)
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
13428,
13,
27530,
1330,
7308,
15546,
3565,
11,
2345,
363,
641,
11,
16049,
363,
641,
11,
4759,
31173,
418,
198,
6738,
1334,
62,
30604,
62,
23870,
13,
46911,
11341,
1330,
327,
2317,
... | 3.593407 | 91 |
""" Balance resource model """
import datetime
from sqlalchemy.sql import func
from underbudget.database import db
from underbudget.models.transaction import (
AccountTransactionModel,
EnvelopeTransactionModel,
TransactionModel,
)
class AccountBalanceModel:
""" Account balance model """
@staticmethod
def get_balance(
account_id: int,
date: datetime.date,
) -> int:
""" Gets the balance of an account as of a particular date. """
result = (
db.session.query(
func.sum(AccountTransactionModel.amount).label("balance"), func.count()
)
.join(TransactionModel)
.filter(AccountTransactionModel.account_id == account_id)
.filter(TransactionModel.recorded_date <= date)
.first()
)
if result:
return {"balance": result[0], "total": result[1]}
return {"balance": 0, "total": 0}
class EnvelopeBalanceModel:
""" Envelope balance model """
@staticmethod
def get_balance(
envelope_id: int,
date: datetime.date,
) -> int:
""" Gets the balance of an envelope as of a particular date. """
result = (
db.session.query(
func.sum(EnvelopeTransactionModel.amount).label("balance"), func.count()
)
.join(TransactionModel)
.filter(EnvelopeTransactionModel.envelope_id == envelope_id)
.filter(TransactionModel.recorded_date <= date)
.first()
)
if result:
return {"balance": result[0], "total": result[1]}
return {"balance": 0, "total": 0}
| [
37811,
22924,
8271,
2746,
37227,
198,
11748,
4818,
8079,
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
25439,
198,
198,
6738,
739,
37315,
13,
48806,
1330,
20613,
198,
6738,
739,
37315,
13,
27530,
13,
7645,
2673,
1330,
357,
198,
220,
22... | 2.366197 | 710 |
import numpy as np
import re
from gensim.models import Word2Vec
max_len = 25
print('-- READING DATA --')
# read in defs
with open("data/definitions.txt") as f:
data = f.readlines()
# remove \n at end
data = [process_def(x.strip()) for x in data]
# reorganize into [[def 1, def 2, 1/0], [def 1, def 2, 1/0], ...]
data = np.reshape(data, (-1, 3)).T
x = data[:2].T
y = [int(x) for x in data[2:][0]]
model = Word2Vec.load("trained/w2v/trained.w2v")
x = [[model.wv[word] for word in a] for a in x]
np.save('data/x.npy', x)
np.save('data/y.npy', y)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
302,
198,
6738,
308,
641,
320,
13,
27530,
1330,
9678,
17,
53,
721,
198,
198,
9806,
62,
11925,
796,
1679,
628,
198,
4798,
10786,
438,
20832,
2751,
42865,
1377,
11537,
198,
198,
2,
1100,
287,
... | 2.292181 | 243 |
from utils import *
import os
import numpy as np
from video_stabilization import video_stabilization
PlotsDirectory = '../plots/Week4/task2-3/'
if not os.path.exists(PlotsDirectory):
os.makedirs(PlotsDirectory)
print("reading video...")
seq_color = video_to_frame('video1.mp4', grayscale=False)
max_size = 100
seq_color = seq_color[0:max_size]
#block_size_x, block_size_y, search_area_x, search_area_y = 20, 20, 20, 20
#print("stabilizing video...")
#print(seq_color.shape)
#est_seq = video_stabilization(seq_color, block_size_x, block_size_y, search_area_x, search_area_y,
# compensation='backward', grayscale=False, resize=(320, 240))
#print("saving video...")
#np.save(PlotsDirectory + 'own_stabilization.npy', est_seq)
write_images2(seq_color, PlotsDirectory, 'seq_')
| [
6738,
3384,
4487,
1330,
1635,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2008,
62,
301,
14991,
1634,
1330,
2008,
62,
301,
14991,
1634,
198,
198,
3646,
1747,
43055,
796,
705,
40720,
489,
1747,
14,
20916,
19,
14,
... | 2.513932 | 323 |
from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from . import models
@admin.register(models.ControllerLink)
@admin.register(models.ControllerCommand)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
1330,
62,
39344,
1330,
4133,
198,
6738,
1330,
62,
39344,
13,
28482,
1330,
17267,
43834,
17633,
46787,
198,
6738,
764,
1330,
4981,
198,
198,
31,
28482,
13,
30238,
7,
27530,
13,
... | 4 | 57 |
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import traceback
import asyncio
import asyncpg
import yaml # removed aiofiles because its not needed
from datetime import datetime
import os
import sys
import logging
import aiohttp
import aioredis
import psutil
import discord
from utils import errorhandler
import logging.handlers
import lavalink
import utils
logger = logging.getLogger("bot")
logger.setLevel(logging.INFO)
handler = logging.handlers.TimedRotatingFileHandler(
filename=f"logs/bot.log",
encoding="utf-8",
when="D",
interval=1,
utc=True,
backupCount=10,
)
handler.setFormatter(
logging.Formatter("[%(asctime)s:%(levelname)s:%(name)s] %(message)s")
)
logger.addHandler(handler)
try:
import uvloop
except ImportError:
if (
sys.platform == "linux"
): # alert the user to install uvloop if they are on a linux system
print("UVLoop not detected")
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
description = """uwu. A RPG bot made by mellowmarshe#0001"""
startup_extensions = [
"jishaku",
"utils.errorhandler",
"modules.create",
"modules.exploring",
"modules.owner",
"modules.uwulonian",
"modules.misc",
"modules.patron",
"modules.DBL",
"modules.uwus",
"modules.events",
"modules.daily",
"modules.pets",
"modules.help",
"modules.votes",
"modules.logging",
"modules.music",
"modules.moderation",
]
prefixes = ["uwu ", "|"]
if __name__ == "__main__":
uwu().run()
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
13,
1073,
15041,
82,
1330,
48353,
6030,
198,
11748,
12854,
1891,
198,
11748,
30351,
952,
198,
11748,
30351,
6024,
198,
11748,
331,
43695,
... | 2.624172 | 604 |
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth import login, logout
from django.conf import settings
from microsoft_authentication.auth.auth_utils import (
get_sign_in_flow,
get_token_from_code,
get_user,
get_django_user,
get_logout_url,
)
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
11,
367,
29281,
31077,
1890,
37978,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
17594,
11,
2604,
448,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6... | 2.758929 | 112 |
#!/usr/bin/python3
"""Script to check this homework."""
import argparse
import logging
from .checker import Checker
from .md_writer import MdWriter
logging.basicConfig()
log = logging.getLogger("GHC")
log.setLevel(logging.INFO)
def main():
"""Run this script."""
parser = argparse.ArgumentParser()
parser.add_argument(
'-v', '--verbose',
help='Make the output verbose.',
action='store_true')
parser.add_argument(
'-i', '--input',
help='An input *.yml file with the job definition.',
required=True)
parser.add_argument(
'-o', '--output',
help='An output *.md file with the results.',
required=True)
args = parser.parse_args()
if args.verbose:
log.setLevel(logging.DEBUG)
log.debug('Enable DEBUG logging.')
# Read the job file.
log.debug('Reading from file "%s"', args.input)
checker = Checker(args.input)
results = checker.check_homework()
md_writer = MdWriter()
md_writer.update(results)
# Write the resulting markdown file.
log.debug('Writing to file "%s"', args.output)
md_writer.write_md_file(args.output)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
7391,
284,
2198,
428,
26131,
526,
15931,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
198,
6738,
764,
9122,
263,
1330,
6822,
263,
198,
6738,
764,
9132,
62,
16002,
1330,
39762,
... | 2.502075 | 482 |
T = int(raw_input())
for t in range(T):
n, m = map(int, raw_input().split())
powers = {}
bullets = {}
origBullet = 0
levelBullet = 0
for ni in range( n):
powers[ni] = map(int, raw_input().split())
for ni in range( n):
bullets[ni] = map(int, raw_input().split())
bullets[-1] = [0 for _ in range(m)]
dpminbul={}
print minbul() | [
51,
796,
493,
7,
1831,
62,
15414,
28955,
198,
1640,
256,
287,
2837,
7,
51,
2599,
198,
220,
220,
220,
299,
11,
285,
796,
3975,
7,
600,
11,
8246,
62,
15414,
22446,
35312,
28955,
198,
220,
220,
220,
5635,
796,
23884,
198,
220,
220,
... | 2.222222 | 171 |
import datetime
import logging
import ee
from dateutil.relativedelta import *
from . import utils
# import openet.core.utils as utils
def daily(target_coll, source_coll, interp_days=32, interp_method='linear',
use_joins=False, compute_product=False):
"""Interpolate non-daily source images to a daily target image collection
Parameters
----------
target_coll : ee.ImageCollection
Source images will be interpolated to each target image time_start.
Target images should have a daily time step. This will typically be
the reference ET (ETr) collection.
source_coll : ee.ImageCollection
Images that will be interpolated to the target image collection.
This will typically be the fraction of reference ET (ETrF) collection.
interp_days : int, optional
Number of days before and after each image date to include in the
interpolation (the default is 32).
interp_method : {'linear'}, optional
Interpolation method (the default is 'linear').
use_joins : bool, optional
If True, the source collection will be joined to the target collection
before mapping/interpolation and the source images will be extracted
from the join properties ('prev' and 'next').
Setting use_joins=True should be more memory efficient.
If False, the source images will be built by filtering the source
collection separately for each image in the target collection
(inside the mapped function).
compute_product : bool, optional
If True, compute the product of the target and all source image bands.
The default is False.
Returns
-------
ee.ImageCollection() of daily interpolated images
Raises
------
ValueError
If `interp_method` is not a supported method.
"""
prev_filter = ee.Filter.And(
ee.Filter.maxDifference(
difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
leftField='system:time_start',
rightField='system:time_start',
),
ee.Filter.greaterThan(
leftField='system:time_start',
rightField='system:time_start',
)
)
next_filter = ee.Filter.And(
ee.Filter.maxDifference(
difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
leftField='system:time_start',
rightField='system:time_start',
),
ee.Filter.lessThanOrEquals(
leftField='system:time_start',
rightField='system:time_start',
)
)
if use_joins:
# Join the neighboring Landsat images in time
target_coll = ee.ImageCollection(
ee.Join.saveAll(
matchesKey='prev',
ordering='system:time_start',
ascending=True,
outer=True,
).apply(
primary=target_coll,
secondary=source_coll,
condition=prev_filter,
)
)
target_coll = ee.ImageCollection(
ee.Join.saveAll(
matchesKey='next',
ordering='system:time_start',
ascending=False,
outer=True,
).apply(
primary=target_coll,
secondary=source_coll,
condition=next_filter,
)
)
# # DEADBEEF - This module is assuming that the time band is already in
# # the source collection.
# # Uncomment the following to add a time band here instead.
# def add_utc0_time_band(image):
# date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start')))
# return image.addBands([
# image.select([0]).double().multiply(0).add(date_0utc.millis())\
# .rename(['time'])])
# source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band))
if interp_method.lower() == 'linear':
def _linear(image):
"""Linearly interpolate source images to target image time_start(s)
Parameters
----------
image : ee.Image.
The first band in the image will be used as the "target" image
and will be returned with the output image.
Returns
-------
ee.Image of interpolated values with band name 'src'
Notes
-----
The source collection images must have a time band.
This function is intended to be mapped over an image collection and
can only take one input parameter.
"""
# target_img = ee.Image(image).select(0).double()
target_date = ee.Date(image.get('system:time_start'))
# All filtering will be done based on 0 UTC dates
utc0_date = utils.date_0utc(target_date)
# utc0_time = target_date.update(hour=0, minute=0, second=0)\
# .millis().divide(1000).floor().multiply(1000)
time_img = ee.Image.constant(utc0_date.millis()).double()
# Build nodata images/masks that can be placed at the front/back of
# of the qm image collections in case the collections are empty.
bands = source_coll.first().bandNames()
prev_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
.double().rename(bands).updateMask(0)\
.set({
'system:time_start': utc0_date.advance(
-interp_days - 1, 'day').millis()})
next_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
.double().rename(bands).updateMask(0)\
.set({
'system:time_start': utc0_date.advance(
interp_days + 2, 'day').millis()})
if use_joins:
# Build separate mosaics for before and after the target date
prev_qm_img = ee.ImageCollection\
.fromImages(ee.List(ee.Image(image).get('prev')))\
.merge(ee.ImageCollection(prev_qm_mask))\
.sort('system:time_start', True)\
.mosaic()
next_qm_img = ee.ImageCollection\
.fromImages(ee.List(ee.Image(image).get('next')))\
.merge(ee.ImageCollection(next_qm_mask))\
.sort('system:time_start', False)\
.mosaic()
else:
# Build separate collections for before and after the target date
prev_qm_coll = source_coll\
.filterDate(utc0_date.advance(-interp_days, 'day'), utc0_date)\
.merge(ee.ImageCollection(prev_qm_mask))
next_qm_coll = source_coll\
.filterDate(utc0_date, utc0_date.advance(interp_days + 1, 'day'))\
.merge(ee.ImageCollection(next_qm_mask))
# Flatten the previous/next collections to single images
# The closest image in time should be on "top"
# CGM - Is the previous collection already sorted?
# prev_qm_img = prev_qm_coll.mosaic()
prev_qm_img = prev_qm_coll.sort('system:time_start', True)\
.mosaic()
next_qm_img = next_qm_coll.sort('system:time_start', False)\
.mosaic()
# DEADBEEF - It might be easier to interpolate all bands instead of
# separating the value and time bands
# prev_value_img = ee.Image(prev_qm_img).double()
# next_value_img = ee.Image(next_qm_img).double()
# Interpolate all bands except the "time" band
prev_bands = prev_qm_img.bandNames()\
.filter(ee.Filter.notEquals('item', 'time'))
next_bands = next_qm_img.bandNames()\
.filter(ee.Filter.notEquals('item', 'time'))
prev_value_img = ee.Image(prev_qm_img.select(prev_bands)).double()
next_value_img = ee.Image(next_qm_img.select(next_bands)).double()
prev_time_img = ee.Image(prev_qm_img.select('time')).double()
next_time_img = ee.Image(next_qm_img.select('time')).double()
# Fill masked values with values from the opposite image
# Something like this is needed to ensure there are always two
# values to interpolate between
# For data gaps, this will cause a flat line instead of a ramp
prev_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
next_time_img, prev_time_img]).mosaic())
next_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
prev_time_img, next_time_img]).mosaic())
prev_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
next_value_img, prev_value_img]).mosaic())
next_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
prev_value_img, next_value_img]).mosaic())
# Calculate time ratio of the current image between other cloud free images
time_ratio_img = time_img.subtract(prev_time_mosaic)\
.divide(next_time_mosaic.subtract(prev_time_mosaic))
# Interpolate values to the current image time
interp_img = next_value_mosaic.subtract(prev_value_mosaic)\
.multiply(time_ratio_img).add(prev_value_mosaic)
# Pass the target image back out as a new band
target_img = image.select([0]).double()
output_img = interp_img.addBands([target_img])\
# TODO: Come up with a dynamic way to name the "product" bands
# The product bands will have a "_1" appended to the name
# i.e. "et_fraction" -> "et_fraction_1"
if compute_product:
output_img = output_img\
.addBands([interp_img.multiply(target_img)])
return output_img.set({
'system:index': image.get('system:index'),
'system:time_start': image.get('system:time_start'),
# 'system:time_start': utc0_time,
})
interp_coll = ee.ImageCollection(target_coll.map(_linear))
# elif interp_method.lower() == 'nearest':
# interp_coll = ee.ImageCollection(target_coll.map(_nearest))
else:
raise ValueError('invalid interpolation method: {}'.format(interp_method))
return interp_coll
# @deprecated
def aggregate_to_daily(image_coll, start_date=None, end_date=None,
agg_type='mean'):
"""Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
if start_date and end_date:
test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
elif start_date:
test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
'system:time_start', ee.Date(start_date).millis()))
elif end_date:
test_coll = image_coll.filter(ee.Filter.lessThan(
'system:time_start', ee.Date(end_date).millis()))
else:
test_coll = image_coll
# Build a sorted list of the unique "dates" in the image_coll
date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
.map(lambda time: ee.Date(ee.Number(time)).format('yyyy-MM-dd'))\
.distinct().sort()
return ee.ImageCollection(date_list.map(aggregate_func))
def from_scene_et_fraction(scene_coll, start_date, end_date, variables,
interp_args, model_args, t_interval='custom',
use_joins=False,
):
"""Interpolate from a precomputed collection of Landsat ET fraction scenes
Parameters
----------
scene_coll : ee.ImageCollection
Non-daily 'et_fraction' images that will be interpolated.
start_date : str
ISO format start date.
end_date : str
ISO format end date (exclusive, passed directly to .filterDate()).
variables : list
List of variables that will be returned in the Image Collection.
interp_args : dict
Parameters from the INTERPOLATE section of the INI file.
# TODO: Look into a better format for showing the options
interp_method : {'linear}, optional
Interpolation method. The default is 'linear'.
interp_days : int, str, optional
Number of extra days before the start date and after the end date
to include in the interpolation calculation. The default is 32.
model_args : dict
Parameters from the MODEL section of the INI file. The reference
source and parameters will need to be set here if computing
reference ET or actual ET.
t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
Time interval over which to interpolate and aggregate values
The default is 'custom' which means the aggregation time period
will be controlled by the start and end date parameters.
use_joins : bool, optional
If True, use joins to link the target and source collections.
If False, the source collection will be filtered for each target image.
This parameter is passed through to interpolate.daily().
Returns
-------
ee.ImageCollection
Raises
------
ValueError
Notes
-----
This function currently assumes that "mask" and "time" bands already exist
in the scene collection.
"""
# Get interp_method
if 'interp_method' in interp_args.keys():
interp_method = interp_args['interp_method']
else:
interp_method = 'linear'
logging.debug('interp_method was not set, default to "linear"')
# Get interp_days
if 'interp_days' in interp_args.keys():
interp_days = interp_args['interp_days']
else:
interp_days = 32
logging.debug('interp_days was not set, default to 32')
# Check that the input parameters are valid
if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
raise ValueError('unsupported t_interval: {}'.format(t_interval))
elif interp_method.lower() not in ['linear']:
raise ValueError('unsupported interp_method: {}'.format(
interp_method))
if ((type(interp_days) is str or type(interp_days) is float) and
utils.is_number(interp_days)):
interp_days = int(interp_days)
elif not type(interp_days) is int:
raise TypeError('interp_days must be an integer')
elif interp_days <= 0:
raise ValueError('interp_days must be a positive integer')
if not variables:
raise ValueError('variables parameter must be set')
# Adjust start/end dates based on t_interval
# Increase the date range to fully include the time interval
start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
if t_interval.lower() == 'annual':
start_dt = datetime.datetime(start_dt.year, 1, 1)
# Covert end date to inclusive, flatten to beginning of year,
# then add a year which will make it exclusive
end_dt -= relativedelta(days=+1)
end_dt = datetime.datetime(end_dt.year, 1, 1)
end_dt += relativedelta(years=+1)
elif t_interval.lower() == 'monthly':
start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
end_dt -= relativedelta(days=+1)
end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
end_dt += relativedelta(months=+1)
start_date = start_dt.strftime('%Y-%m-%d')
end_date = end_dt.strftime('%Y-%m-%d')
# The start/end date for the interpolation include more days
# (+/- interp_days) than are included in the ETr collection
interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
interp_start_date = interp_start_dt.date().isoformat()
interp_end_date = interp_end_dt.date().isoformat()
# Get reference ET source
if 'et_reference_source' in model_args.keys():
et_reference_source = model_args['et_reference_source']
else:
raise ValueError('et_reference_source was not set')
# Get reference ET band name
if 'et_reference_band' in model_args.keys():
et_reference_band = model_args['et_reference_band']
else:
raise ValueError('et_reference_band was not set')
# Get reference ET factor
if 'et_reference_factor' in model_args.keys():
et_reference_factor = model_args['et_reference_factor']
else:
et_reference_factor = 1.0
logging.debug('et_reference_factor was not set, default to 1.0')
# raise ValueError('et_reference_factor was not set')
# CGM - Resampling is not working correctly so commenting out for now
# # Get reference ET resample
# if 'et_reference_resample' in model_args.keys():
# et_reference_resample = model_args['et_reference_resample']
# else:
# et_reference_resample = 'nearest'
# logging.debug(
# 'et_reference_resample was not set, default to nearest')
# # raise ValueError('et_reference_resample was not set')
if type(et_reference_source) is str:
# Assume a string source is an single image collection ID
# not an list of collection IDs or ee.ImageCollection
daily_et_ref_coll = ee.ImageCollection(et_reference_source) \
.filterDate(start_date, end_date) \
.select([et_reference_band], ['et_reference'])
# elif isinstance(et_reference_source, computedobject.ComputedObject):
# # Interpret computed objects as image collections
# daily_et_reference_coll = et_reference_source \
# .filterDate(self.start_date, self.end_date) \
# .select([et_reference_band])
else:
raise ValueError('unsupported et_reference_source: {}'.format(
et_reference_source))
# Scale reference ET images (if necessary)
# CGM - Resampling is not working correctly so not including for now
if (et_reference_factor and et_reference_factor != 1):
daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
# Initialize variable list to only variables that can be interpolated
interp_vars = ['et_fraction', 'ndvi']
interp_vars = list(set(interp_vars) & set(variables))
# To return ET, the ETf must be interpolated
if 'et' in variables and 'et_fraction' not in interp_vars:
interp_vars.append('et_fraction')
# With the current interpolate.daily() function,
# something has to be interpolated in order to return et_reference
if 'et_reference' in variables and 'et_fraction' not in interp_vars:
interp_vars.append('et_fraction')
# The time band is always needed for interpolation
interp_vars.append('time')
# TODO: Look into implementing et_fraction clamping here
# (similar to et_actual below)
# Filter scene collection to the interpolation range
# This probably isn't needed since scene_coll was built to this range
scene_coll = scene_coll.filterDate(interp_start_date, interp_end_date)
# For count, compute the composite/mosaic image for the mask band only
if 'count' in variables:
aggregate_coll = aggregate_to_daily(
image_coll = scene_coll.select(['mask']),
start_date=start_date, end_date=end_date)
# The following is needed because the aggregate collection can be
# empty if there are no scenes in the target date range but there
# are scenes in the interpolation date range.
# Without this the count image will not be built but the other
# bands will be which causes a non-homogeneous image collection.
aggregate_coll = aggregate_coll.merge(
ee.Image.constant(0).rename(['mask'])
.set({'system:time_start': ee.Date(start_date).millis()}))
# Interpolate to a daily time step
daily_coll = daily(
target_coll=daily_et_ref_coll,
source_coll=scene_coll.select(interp_vars),
interp_method=interp_method, interp_days=interp_days,
use_joins=use_joins,
compute_product=False,
)
# The interpolate.daily() function can/will return the product of
# the source and target image named as "{source_band}_1".
# The problem with this approach is that is will drop any other bands
# that are being interpolated (such as the ndvi).
# daily_coll = daily_coll.select(['et_fraction_1'], ['et'])
# Compute ET from ETf and ETr (if necessary)
# This isn't needed if compute_product=True in daily() and band is renamed
# The check for et_fraction is needed since it is back computed from ET and ETr
# if 'et' in variables or 'et_fraction' in variables:
def compute_et(img):
"""This function assumes ETr and ETf are present"""
et_img = img.select(['et_fraction']) \
.multiply(img.select(['et_reference']))
return img.addBands(et_img.double().rename('et'))
daily_coll = daily_coll.map(compute_et)
def aggregate_image(agg_start_date, agg_end_date, date_format):
"""Aggregate the daily images within the target date range
Parameters
----------
agg_start_date: ee.Date, str
Start date (inclusive).
agg_end_date : ee.Date, str
End date (exclusive).
date_format : str
Date format for system:index (uses EE JODA format).
Returns
-------
ee.Image
Notes
-----
Since this function takes multiple inputs it is being called
for each time interval by separate mappable functions
"""
if 'et' in variables or 'et_fraction' in variables:
et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
.select(['et']).sum()
if 'et_reference' in variables or 'et_fraction' in variables:
# et_reference_img = daily_coll \
et_reference_img = daily_et_ref_coll \
.filterDate(agg_start_date, agg_end_date) \
.select(['et_reference']).sum()
image_list = []
if 'et' in variables:
image_list.append(et_img.float())
if 'et_reference' in variables:
image_list.append(et_reference_img.float())
if 'et_fraction' in variables:
# Compute average et fraction over the aggregation period
image_list.append(
et_img.divide(et_reference_img).rename(
['et_fraction']).float())
if 'ndvi' in variables:
# Compute average ndvi over the aggregation period
ndvi_img = daily_coll \
.filterDate(agg_start_date, agg_end_date) \
.mean().select(['ndvi']).float()
image_list.append(ndvi_img)
if 'count' in variables:
count_img = aggregate_coll \
.filterDate(agg_start_date, agg_end_date) \
.select(['mask']).sum().rename('count').uint8()
image_list.append(count_img)
return ee.Image(image_list) \
.set({
'system:index': ee.Date(agg_start_date).format(date_format),
'system:time_start': ee.Date(agg_start_date).millis()})
# .set(interp_properties) \
# Combine input, interpolated, and derived values
if t_interval.lower() == 'daily':
return ee.ImageCollection(daily_coll.map(agg_daily))
elif t_interval.lower() == 'monthly':
month_list = ee.List(list(month_gen(start_dt, end_dt)))
return ee.ImageCollection(month_list.map(agg_monthly))
elif t_interval.lower() == 'annual':
year_list = ee.List(list(year_gen(start_dt, end_dt)))
return ee.ImageCollection(year_list.map(agg_annual))
elif t_interval.lower() == 'custom':
# Returning an ImageCollection to be consistent
return ee.ImageCollection(aggregate_image(
agg_start_date=start_date, agg_end_date=end_date,
date_format='YYYYMMdd'))
def from_scene_et_actual(scene_coll, start_date, end_date, variables,
interp_args, model_args, t_interval='custom',
use_joins=False,
):
"""Interpolate from a precomputed collection of Landsat actual ET scenes
Parameters
----------
scene_coll : ee.ImageCollection
Non-daily 'et' images that will be interpolated.
start_date : str
ISO format start date.
end_date : str
ISO format end date (exclusive, passed directly to .filterDate()).
variables : list
List of variables that will be returned in the Image Collection.
interp_args : dict
Parameters from the INTERPOLATE section of the INI file.
# TODO: Look into a better format for showing the options
interp_source : str
interp_band : str
interp_resample : {'nearest', 'nearest'}
interp_method : {'linear}, optional
Interpolation method. The default is 'linear'.
interp_days : int, str, optional
Number of extra days before the start date and after the end date
to include in the interpolation calculation. The default is 32.
et_fraction_min : float
et_fraction_max : float
model_args : dict
Parameters from the MODEL section of the INI file. The reference
source and other parameters will need to be set here if computing
reference ET or ET fraction.
t_interval : {'daily', 'monthly', 'annual', 'custom'}, optional
Time interval over which to interpolate and aggregate values
The default is 'custom' which means the aggregation time period
will be controlled by the start and end date parameters.
use_joins : bool, optional
If True, use joins to link the target and source collections.
If False, the source collection will be filtered for each target image.
This parameter is passed through to interpolate.daily().
# TODO: Move these into interp_args (and/or model_args)
fraction_min : float, optional
fraction_max : float, optional
Returns
-------
ee.ImageCollection
Raises
------
ValueError
Notes
-----
This function currently assumes that "mask" and "time" bands already exist
in the scene collection.
"""
# Get interp_method
if 'interp_method' in interp_args.keys():
interp_method = interp_args['interp_method']
else:
interp_method = 'linear'
logging.debug('interp_method was not set, default to "linear"')
# Get interp_days
if 'interp_days' in interp_args.keys():
interp_days = interp_args['interp_days']
else:
interp_days = 32
logging.debug('interp_days was not set, default to 32')
# Check that the input parameters are valid
if t_interval.lower() not in ['daily', 'monthly', 'annual', 'custom']:
raise ValueError('unsupported t_interval: {}'.format(t_interval))
elif interp_method.lower() not in ['linear']:
raise ValueError('unsupported interp_method: {}'.format(
interp_method))
if ((type(interp_days) is str or type(interp_days) is float) and
utils.is_number(interp_days)):
interp_days = int(interp_days)
elif not type(interp_days) is int:
raise TypeError('interp_days must be an integer')
elif interp_days <= 0:
raise ValueError('interp_days must be a positive integer')
if not variables:
raise ValueError('variables parameter must be set')
# Adjust start/end dates based on t_interval
# Increase the date range to fully include the time interval
start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
if t_interval.lower() == 'annual':
start_dt = datetime.datetime(start_dt.year, 1, 1)
# Covert end date to inclusive, flatten to beginning of year,
# then add a year which will make it exclusive
end_dt -= relativedelta(days=+1)
end_dt = datetime.datetime(end_dt.year, 1, 1)
end_dt += relativedelta(years=+1)
elif t_interval.lower() == 'monthly':
start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
end_dt -= relativedelta(days=+1)
end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
end_dt += relativedelta(months=+1)
start_date = start_dt.strftime('%Y-%m-%d')
end_date = end_dt.strftime('%Y-%m-%d')
# The start/end date for the interpolation include more days
# (+/- interp_days) than are included in the ETr collection
interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
interp_start_date = interp_start_dt.date().isoformat()
interp_end_date = interp_end_dt.date().isoformat()
# Get reference ET collection
if 'et_reference' in variables or 'et_fraction' in variables:
if 'et_reference_source' not in model_args.keys():
raise ValueError('et_reference_source was not set')
if 'et_reference_band' not in model_args.keys():
raise ValueError('et_reference_band was not set')
# TODO: Check if model_args can be modified instead of making new variables
if 'et_reference_factor' in model_args.keys():
et_reference_factor = model_args['et_reference_factor']
else:
et_reference_factor = 1.0
logging.debug('et_reference_factor was not set, default to 1.0')
# raise ValueError('et_reference_factor was not set')
# CGM - Resampling is not working correctly so commenting out for now
# if 'et_reference_resample' in model_args.keys():
# et_reference_resample = model_args['et_reference_resample']
# else:
# et_reference_resample = 'nearest'
# logging.debug(
# 'et_reference_resample was not set, default to nearest')
# # raise ValueError('et_reference_resample was not set')
# Assume a string source is an single image collection ID
# not an list of collection IDs or ee.ImageCollection
daily_et_ref_coll_id = model_args['et_reference_source']
daily_et_ref_coll = ee.ImageCollection(daily_et_ref_coll_id) \
.filterDate(start_date, end_date) \
.select([model_args['et_reference_band']], ['et_reference'])
# Scale reference ET images (if necessary)
# CGM - Resampling is not working correctly so not including for now
if (et_reference_factor and et_reference_factor != 1):
daily_et_ref_coll = daily_et_ref_coll.map(et_reference_adjust)
# TODO: Add code to fall back on the model_args reference ET parameters
# if the interp source/band/resample parameters are not set.
# Get the interpolation collection
if 'interp_source' not in interp_args.keys():
raise ValueError('interp_source was not set')
if 'interp_band' not in interp_args.keys():
raise ValueError('interp_band was not set')
# CGM - Resampling is not working correctly so commenting out for now
# if 'interp_resample' not in interp_args.keys():
# interp_args['interp_resample'] = 'nearest'
# logging.debug('interp_resample was not set, defaulting to nearest')
# # raise ValueError('interp_resample was not set')
# CGM - Factor is not currently being applied so commenting out for now
# if 'interp_factor' not in interp_args.keys():
# interp_args['interp_factor'] = 1.0
# logging.debug('interp_factor was not set, defaulting to 1.0')
# # raise ValueError('interp_factor was not set')
# Target collection needs to be filtered to the same date range as the
# scene collection in order to normalize the scenes.
# It will be filtered again to the start/end when it is sent into
# interpolate.daily()
daily_target_coll = ee.ImageCollection(interp_args['interp_source']) \
.filterDate(interp_start_date, interp_end_date) \
.select([interp_args['interp_band']])
interp_vars = ['et'] + ['mask', 'time']
# For count, compute the composite/mosaic image for the mask band only
if 'count' in variables:
aggregate_coll = aggregate_to_daily(
image_coll=scene_coll.select(['mask']),
start_date=start_date, end_date=end_date)
# The following is needed because the aggregate collection can be
# empty if there are no scenes in the target date range but there
# are scenes in the interpolation date range.
# Without this the count image will not be built but the other
# bands will be which causes a non-homogeneous image collection.
aggregate_coll = aggregate_coll.merge(
ee.Image.constant(0).rename(['mask'])
.set({'system:time_start': ee.Date(start_date).millis()}))
# It might be more efficient to join the target collection to the scenes
# The time band is always needed for interpolation
scene_coll = scene_coll \
.filterDate(interp_start_date, interp_end_date) \
.select(interp_vars) \
.map(normalize_et)
# # Join the target (normalization) image to the scene images
# if use_joins:
# prev_filter = ee.Filter.And(
# ee.Filter.maxDifference(
# difference=(interp_days + 1) * 24 * 60 * 60 * 1000,
# leftField='system:time_start', rightField='system:time_start'),
# ee.Filter.greaterThan(leftField='system:time_start',
# rightField='system:time_start')
# )
# scene_coll = ee.ImageCollection(
# ee.Join.saveFirst(matchKey='norm_img', ordering='system:time_start',
# ascending=False)
# .apply(primary=scene_coll, secondary=target_coll,
# condition=prev_filter)
# )
# Interpolate to a daily time step
daily_coll = daily(
target_coll=daily_target_coll.filterDate(start_date, end_date),
source_coll=scene_coll.select(['et_norm', 'time']),
interp_method=interp_method, interp_days=interp_days,
use_joins=use_joins,
compute_product=True,
)
# The interpolate.daily() function is currently returning the product of
# the source and target image named as "{source_band}_1".
# This approach will not be valid if other bands are interpolated.
daily_coll = daily_coll.select(['et_norm_1'], ['et'])
# Convert normalized ET back to ET
# This isn't needed if compute_product=True in daily() and band is renamed
# The check for et_fraction is needed since it is back computed from ET and ETr
# # if 'et' in variables or 'et_fraction' in variables:
# def compute_et(img):
# """This function assumes ETr and ETf are present"""
# et_img = img.select(['et_norm']).multiply(
# img.select(['et_reference']))
# return img.addBands(et_img.double().rename('et'))
# daily_coll = daily_coll.map(compute_et)
def aggregate_image(agg_start_date, agg_end_date, date_format):
"""Aggregate the daily images within the target date range
Parameters
----------
agg_start_date: ee.Date, str
Start date (inclusive).
agg_end_date : ee.Date, str
End date (exclusive).
date_format : str
Date format for system:index (uses EE JODA format).
Returns
-------
ee.Image
Notes
-----
Since this function takes multiple inputs it is being called
for each time interval by separate mappable functions
"""
if 'et' in variables or 'et_fraction' in variables:
et_img = daily_coll.filterDate(agg_start_date, agg_end_date) \
.select(['et']).sum()
if 'et_reference' in variables or 'et_fraction' in variables:
# Get the reference ET image from the reference ET collection,
# not the interpolated collection
# et_reference_img = daily_coll.select(['et_reference']) \
et_reference_img = daily_et_ref_coll \
.filterDate(agg_start_date, agg_end_date) \
.sum()
image_list = []
if 'et' in variables:
image_list.append(et_img.float())
if 'et_reference' in variables:
image_list.append(et_reference_img.float())
if 'et_fraction' in variables:
# Compute average et fraction over the aggregation period
image_list.append(
et_img.divide(et_reference_img)
.rename(['et_fraction']).float())
# if 'ndvi' in variables:
# # Compute average ndvi over the aggregation period
# ndvi_img = daily_coll \
# .filterDate(agg_start_date, agg_end_date) \
# .mean().select(['ndvi']).float()
# image_list.append(ndvi_img)
if 'count' in variables:
count_img = aggregate_coll \
.filterDate(agg_start_date, agg_end_date) \
.select(['mask']).sum().rename('count').uint8()
image_list.append(count_img)
return ee.Image(image_list) \
.set({
'system:index': ee.Date(agg_start_date).format(date_format),
'system:time_start': ee.Date(agg_start_date).millis()})
# .set(interp_properties)\
# Combine input, interpolated, and derived values
if t_interval.lower() == 'daily':
return ee.ImageCollection(daily_coll.map(agg_daily))
elif t_interval.lower() == 'monthly':
month_list = ee.List(list(month_gen(start_dt, end_dt)))
return ee.ImageCollection(month_list.map(agg_monthly))
elif t_interval.lower() == 'annual':
year_list = ee.List(list(year_gen(start_dt, end_dt)))
return ee.ImageCollection(year_list.map(agg_annual))
elif t_interval.lower() == 'custom':
# Returning an ImageCollection to be consistent
return ee.ImageCollection(aggregate_image(
agg_start_date=start_date, agg_end_date=end_date,
date_format='YYYYMMdd'))
# @deprecated
# def aggregate_daily_with_joins(image_coll, start_date, end_date,
# agg_type='mean'):
# """Aggregate images by day (using joins)
#
# The primary purpose of this function is to join separate Landsat images
# from the same path into a single daily image.
#
# Parameters
# ----------
# image_coll : ee.ImageCollection
# Input image collection.
# start_date : date, number, string
# Start date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# end_date : date, number, string
# End date.
# Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
# agg_type : {'mean'}, optional
# Aggregation type (the default is 'mean').
# Currently only a 'mean' aggregation type is supported.
#
# Returns
# -------
# ee.ImageCollection()
#
# Notes
# -----
# This function should be used to mosaic Landsat images from same path
# but different rows.
# system:time_start of returned images will be 0 UTC (not the image time).
#
# """
# # Build a collection of time "features" to join to
# # "Flatten" dates to 0 UTC time
# if start_date and end_date:
# date_list = ee.List.sequence(
# ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# 24 * 3600 * 1000)
# # elif start_date:
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # end_date = ee.Date.fromYMD(end_date.get('year'), end_date.get('month'),
# # # end_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), end_date.millis(), 24 * 3600 * 1000)
# # elif end_date:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # # start_date = ee.Date.fromYMD(
# # # start_date.get('year'), start_date.get('month'),
# # # start_date.get('day')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # start_date.millis(), ee.Date(end_date).millis(), 24 * 3600 * 1000)
# # else:
# # start_date = ee.Date(start_date.format('yyyy-MM-dd')).advance(1, 'day')
# # end_date = ee.Date(ee.Image(image_coll.limit(
# # 1, 'system:time_start', False).first()).get('system:time_start')
# # end_date = ee.Date(end_date.format('yyyy-MM-dd')).advance(1, 'day')
# # date_list = ee.List.sequence(
# # ee.Date(start_date).millis(), ee.Date(end_date).millis(),
# # 24 * 3600 * 1000)
#
# def set_date(time):
# return ee.Feature(None, {
# 'system:index': ee.Date(time).format('yyyyMMdd'),
# 'system:time_start': ee.Number(time).int64(),
# 'date': ee.Date(time).format('yyyy-MM-dd')})
#
# # Add a date property to the image collection
# def set_image_date(img):
# return ee.Image(img.set({
# 'date': ee.Date(img.get('system:time_start')).format('yyyy-MM-dd')}))
#
# join_coll = ee.FeatureCollection(
# ee.Join.saveAll('join').apply(
# ee.FeatureCollection(date_list.map(set_date)),
# ee.ImageCollection(image_coll.map(set_image_date)),
# ee.Filter.equals(leftField='date', rightField='date')))
#
# def aggregate_func(ftr):
# # The composite image time will be 0 UTC (not Landsat time)
# agg_coll = ee.ImageCollection.fromImages(ftr.get('join'))
#
# # if agg_type.lower() == 'mean':
# agg_img = agg_coll.mean()
# # elif agg_type.lower() == 'median':
# # agg_img = agg_coll.median()
#
# return agg_img.set({
# 'system:index': ftr.get('system:index'),
# 'system:time_start': ftr.get('system:time_start'),
# 'date': ftr.get('date'),
# })
#
# return ee.ImageCollection(join_coll.map(aggregate_func))
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
198,
11748,
304,
68,
198,
6738,
3128,
22602,
13,
2411,
265,
1572,
12514,
1330,
1635,
198,
198,
6738,
764,
1330,
3384,
4487,
198,
2,
1330,
1280,
316,
13,
7295,
13,
26791,
355,
3384,
4487,
628,... | 2.340851 | 18,668 |
a = 15 * 3
b = 15 / 3
c = 15 // 2
d = 15 ** 2
print(type(a), a)
print(type(b), b)
print(type(c), b)
print(type(d), d)
| [
64,
796,
1315,
1635,
513,
198,
65,
796,
1315,
1220,
513,
198,
66,
796,
1315,
3373,
362,
198,
67,
796,
1315,
12429,
362,
198,
4798,
7,
4906,
7,
64,
828,
257,
8,
198,
4798,
7,
4906,
7,
65,
828,
275,
8,
198,
4798,
7,
4906,
7,
6... | 1.966667 | 60 |
#! /usr/bin/python
# -*- encoding: utf-8 -*-
from flask import Flask, render_template, jsonify
from flask_socketio import SocketIO, emit
app = Flask(__name__, template_folder='templates', static_url_path='/static/', static_folder='static')
app.config['SECRET_KEY'] = 'ines'
socketio = SocketIO(app)
@app.route('/')
@socketio.on('connected')
@socketio.on('client_message')
if __name__ == '__main__':
socketio.run(app, debug=True)
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
33918,
1958,
198,
6738,
42903,
62,
44971,
952,
1330,
47068,
9399,
... | 2.742138 | 159 |
from .constant import TEXT_DELIMITER
from .settings import get_setting
from typing import Any, Dict, Optional
import sublime
| [
6738,
764,
9979,
415,
1330,
40383,
62,
35,
3698,
3955,
2043,
1137,
198,
6738,
764,
33692,
1330,
651,
62,
33990,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
11748,
41674,
628
] | 3.705882 | 34 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
import copy
import os
# By default train on all tasks at once.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,... | 3.508929 | 112 |
import random
#1
for number in gensquares(10):
print(number)
print('\n')
#2
for number in rand_num(1,10,12):
print(number)
print('\n')
#3
s = 'hello'
s_iter = iter(s)
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter))
print(next(s_iter)) | [
11748,
4738,
198,
2,
16,
198,
198,
1640,
1271,
287,
308,
641,
421,
3565,
7,
940,
2599,
198,
220,
220,
220,
3601,
7,
17618,
8,
220,
198,
4798,
10786,
59,
77,
11537,
198,
198,
2,
17,
198,
1640,
1271,
287,
43720,
62,
22510,
7,
16,
... | 2.171875 | 128 |
from django import forms
from django.forms import ModelForm
from employee_information_site.models import CompanyDepartment, EmployeePosition, Employee
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
6538,
62,
17018,
62,
15654,
13,
27530,
1330,
5834,
36261,
11,
36824,
26545,
11,
36824,
628
] | 4.75 | 32 |
"""
Tests if `KeyboardInterrupt` exception are properly handled.
This test currently fails. That is, trying to interrupt an ongoing
operation of the Comsol client crashes out of the Python session
instead of allowing further code execution or a return to the
interactive prompt.
The script does not depend on MPh, but starts the Comsol client
directly via the Java bridge JPype. Paths to the Comsol installation
are hard-coded for a Windows installation of Comsol 5.6. Other versions
or install locations can be tested by editing the assignment to the
`root` variable. On Linux, 'win64' has to be replaced by 'glnxa64',
and on macOS by 'maci64'.
"""
import jpype
import jpype.imports
from time import sleep
from timeit import default_timer as now
from pathlib import Path
print(f'Starting Comsol\'s Java VM via JPype {jpype.__version__}.')
root = Path(r'C:\Program Files\COMSOL\COMSOL56\Multiphysics')
jvm = root/'java'/'win64'/'jre'/'bin'/'server'/'jvm.dll'
jpype.startJVM(str(jvm), classpath=str(root/'plugins'/'*'), interrupt=False)
print('Starting stand-alone Comsol client.')
from com.comsol.model.util import ModelUtil as client
client.initStandalone(False)
client.loadPreferences()
print('Press Ctrl+C within the next 10 seconds.')
t0 = now()
try:
sleep(10)
except KeyboardInterrupt:
pass
finally:
if now() - t0 < 9.9:
print('Test passed.')
else:
print('Sleep timer expired.')
| [
171,
119,
123,
37811,
201,
198,
51,
3558,
611,
4600,
9218,
3526,
9492,
3622,
63,
6631,
389,
6105,
12118,
13,
201,
198,
201,
198,
1212,
1332,
3058,
10143,
13,
1320,
318,
11,
2111,
284,
11313,
281,
7044,
201,
198,
27184,
286,
262,
955... | 2.945783 | 498 |
import shutil
import java.lang.System
print "Setup exec_test using exec_test_setup.py"
#System Properties
user_home = java.lang.System.getProperty("user.home")
base_dir = java.lang.System.getProperty("basedir")
project_version = java.lang.System.getProperty("project.version")
artifactId = java.lang.System.getProperty("artifactId")
print("use_home=" + user_home)
print("base_dir=" + base_dir)
print("project_version=" + project_version)
print("artifactId=" + artifactId)
shutil.rmtree(base_dir + "/target/exec_test", ignore_errors=True)
shutil.copytree(base_dir + "/resources/exec_test", base_dir + "/target/exec_test")
shutil.copytree(base_dir + "/resources/" + artifactId, base_dir + "/target/exec_test/" + artifactId)
boot_jar_src = user_home +"/.m2/repository/org/xito/bootstrap/" + project_version + "/bootstrap-" + project_version + ".jar"
print "boot jar=" + boot_jar_src
shutil.copyfile(boot_jar_src, base_dir + "/target/exec_test/boot.jar")
test_jar = artifactId + "-" + project_version + "-tests.jar"
shutil.copyfile(base_dir + "/target/" + test_jar, base_dir + "/target/exec_test/" + test_jar)
| [
11748,
4423,
346,
198,
11748,
20129,
13,
17204,
13,
11964,
198,
198,
4798,
366,
40786,
2452,
62,
9288,
1262,
2452,
62,
9288,
62,
40406,
13,
9078,
1,
198,
198,
2,
11964,
24946,
198,
7220,
62,
11195,
796,
20129,
13,
17204,
13,
11964,
... | 2.910995 | 382 |
#!/usr/bin/python
in_str = str(input()).lower().split()
for i in range(1, len(in_str)):
if in_str[i-1][-1] != in_str[i][0]:
print("false")
exit(0)
print("true") | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
259,
62,
2536,
796,
965,
7,
15414,
3419,
737,
21037,
22446,
35312,
3419,
198,
198,
1640,
1312,
287,
2837,
7,
16,
11,
18896,
7,
259,
62,
2536,
8,
2599,
198,
220,
220,
220,
611,
287,
... | 1.967742 | 93 |
# -*- coding: utf-8 -*-
import re
import math
import urlparse
from scrapy.spider import BaseSpider
from scrapy.http import Request, FormRequest
from scrapy.selector import HtmlXPathSelector
from scrapy import log
from bs4 import BeautifulSoup
from registry.items import Corporation, Person, CorporationDocument, StatementDocument, RegistryStatement, PersonCorpRelation, RegistryExtract
from registry import pdfparse
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
302,
198,
11748,
10688,
198,
11748,
19016,
29572,
198,
198,
6738,
15881,
88,
13,
2777,
1304,
1330,
7308,
41294,
198,
6738,
15881,
88,
13,
4023,
1330,
19390,
... | 3.809091 | 110 |
'''
Send JPEG image to tensorflow_model_server loaded with GAN model.
Hint: the code has been compiled together with TensorFlow serving
and not locally. The client is called in the TensorFlow Docker container
'''
from __future__ import print_function
# Communication to TensorFlow server via gRPC
from grpc.beta import implementations
import tensorflow as tf
# TensorFlow serving stuff to send messages
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
# Command line arguments
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS
if __name__ == '__main__':
tf.app.run()
| [
7061,
6,
198,
25206,
48561,
2939,
284,
11192,
273,
11125,
62,
19849,
62,
15388,
9639,
351,
402,
1565,
2746,
13,
198,
198,
39,
600,
25,
262,
2438,
468,
587,
14102,
1978,
351,
309,
22854,
37535,
7351,
198,
392,
407,
15726,
13,
383,
54... | 3.11284 | 257 |
# setup_logging.py
import logging
import logging.config
from settings import LOGS_DIR
class CustomFormatter(logging.Formatter):
"""Logging Formatter to ad colors and count warnings / errors"""
pink = "\x1b[35m"
blue = "\033[96m"
yellow = "\033[93m"
red = "\x1b[31;21m"
bold_red = "\033[41m"
reset = "\x1b[0m"
format = "%(asctime)s | %(name)s | %(levelname)s | %(message)s"
FORMATS = {
logging.DEBUG: pink + format + reset,
logging.INFO: blue + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def setup_logger(logger: logging.Logger, log_file_name: str) -> None:
"""
function that setups a standard logger
:rtype: None
:param logger: logger object initiated at the beginning of the file
:param log_file_name: name to save the log as
:return: None only modifications are made to the logger object
"""
logger.setLevel(logging.DEBUG)
# create handlers
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(LOGS_DIR + '/' + log_file_name)
# set levels of the handlers
console_handler.setLevel(level=logging.DEBUG)
file_handler.setLevel(level=logging.INFO)
# create formats and set them to the handlers
file_format = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
console_handler.setFormatter(CustomFormatter())
file_handler.setFormatter(file_format)
# add handlers to the logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
| [
2,
9058,
62,
6404,
2667,
13,
9078,
198,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
6738,
6460,
1330,
41605,
50,
62,
34720,
628,
198,
4871,
8562,
8479,
1436,
7,
6404,
2667,
13,
8479,
1436,
2599,
198,
220,
220,
220,
37227,
... | 2.711256 | 613 |
from flask import Flask
from web.BaseRouter import BaseRouter
app = Flask(__name__)
base_url = '/'
router = BaseRouter(base_url)
router.register_flask_blueprints(app)
if __name__ == '__main__':
app.run()
| [
6738,
42903,
1330,
46947,
198,
6738,
3992,
13,
14881,
49,
39605,
1330,
7308,
49,
39605,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
8692,
62,
6371,
796,
31051,
6,
198,
198,
472,
353,
796,
7308,
49,
39605,
7,
8692,
62,
6... | 2.6375 | 80 |
from setuptools import setup, find_packages
setup(
name='github_secret_finder',
version='2.0.0',
description='Script to monitor commits from Github users and organizations for secrets.',
url='https://github.com/gsoft-inc/github-secret-finder',
author='Mathieu Gascon-Lefebvre',
author_email='mathieuglefebvre@gmail.com',
license='Apache',
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
package_data={'github_secret_finder': ['data/*']},
install_requires=[
'unidiff',
'requests',
'detect_secrets',
'sqlitedict'
],
entry_points={
'console_scripts': ['github-secret-finder = github_secret_finder.main:main'],
},
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
12567,
62,
21078,
62,
22805,
3256,
198,
220,
220,
220,
2196,
11639,
17,
13,
15,
13,
15,
3256,
198,
220,
220,
220,
6764,
... | 2.535593 | 295 |
# Generated by Django 3.1.2 on 2020-11-01 02:59
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
1157,
12,
486,
7816,
25,
3270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import pynput
from pynput.keyboard import Key, Listener
import pyautogui
import yagmail
import os.path
from datetime import datetime
import time
import sys
import os
from sys import platform as _platform
#Defining color values for later
G = '\033[32m' #Green
R = '\033[31m' # Red
C = '\033[36m' # Cyan
W = '\033[0m' # White
#Needs testing but it SHOULD work
if _platform == "linux" or _platform =="linux2" or _platform =="darwin":
os.system('clear')
elif _platform == "win32" or _platform == "win64":
os.system('cls')
count = 0
keys = []
try:
print(G + "I am alive..." + W)
#Special characters are included here.
with Listener(on_press=on_press, on_release=on_release) as listener:
#Call Methods, Repeats every 1 minute
while True:
time.sleep(100)
save_screenshot()
send_emal()
listener.join()
except KeyboardInterrupt:
print('\n' + R + "Program Killed X_X" + W)
| [
11748,
279,
2047,
1996,
201,
198,
6738,
279,
2047,
1996,
13,
2539,
3526,
1330,
7383,
11,
7343,
877,
201,
198,
11748,
12972,
2306,
519,
9019,
201,
198,
11748,
331,
363,
4529,
201,
198,
11748,
28686,
13,
6978,
201,
198,
6738,
4818,
8079... | 2.021277 | 564 |
# coding: utf-8
"""
Connect API
Pelion Device Management Connect API allows web applications to communicate with devices. You can subscribe to device resources and read/write values to them. Device Management Connect allows connectivity to devices by queueing requests and caching resource values.
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Endpoint(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'q': 'bool',
'status': 'str',
'type': 'str'
}
attribute_map = {
'name': 'name',
'q': 'q',
'status': 'status',
'type': 'type'
}
def __init__(self, name=None, q=None, status=None, type=None):
"""
Endpoint - a model defined in Swagger
"""
self._name = name
self._q = q
self._status = status
self._type = type
self.discriminator = None
@property
def name(self):
"""
Gets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:return: The name of this Endpoint.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Endpoint.
Unique Device Management Device ID representing the endpoint.
:param name: The name of this Endpoint.
:type: str
"""
self._name = name
@property
def q(self):
"""
Gets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:return: The q of this Endpoint.
:rtype: bool
"""
return self._q
@q.setter
def q(self, q):
"""
Sets the q of this Endpoint.
Determines whether the device is in queue mode. <br/><br/><b>Queue mode</b><br/> When an endpoint is in queue mode, messages sent to the endpoint do not wake up the physical device. The messages are queued and delivered when the device wakes up and connects to Device Management Connect itself. You can also use the queue mode when the device is behind a NAT and cannot be reached directly by Device Management Connect.
:param q: The q of this Endpoint.
:type: bool
"""
self._q = q
@property
def status(self):
"""
Gets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:return: The status of this Endpoint.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Endpoint.
Deprecated and the value is always ACTIVE. Only used for API backwards compatibility reasons.
:param status: The status of this Endpoint.
:type: str
"""
self._status = status
@property
def type(self):
"""
Gets the type of this Endpoint.
Type of endpoint. (Free text)
:return: The type of this Endpoint.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Endpoint.
Type of endpoint. (Free text)
:param type: The type of this Endpoint.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Endpoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
8113,
7824,
628,
220,
220,
220,
12903,
295,
16232,
8549,
8113,
7824,
3578,
3992,
5479,
284,
10996,
351,
4410,
13,
921,
460,
12383,
284,
3335,
4133,
290,
1100,
14,
... | 2.367178 | 2,413 |
# -*- coding: utf-8 -*-
"""
scraperplot.py
For retreiving and plotting data using the 'scraper.py' module.
Created on Wed Oct 30 15:11:00 2019
@author: Thomas Richards
"""
# import scraper.py
import scraper as scrp
# Run functions in get module
scraper = scrp.Scraper()
choices = input('What do you want to plot? Press enter for all. \n1. Managed '
'space data\n2. Sensor reading data \n>>')
if not choices:
chosen_space_numbers, chosen_space_names = \
scraper._choose_by_number(scraper.managed_space_info)
scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
chosen_location_numbers, chosen_location_names = \
scraper._choose_by_number(scraper.sensor_location_info)
scraper.plot_sensor_reading_after(sensor_numbers=chosen_location_numbers)
elif choices:
choices = eval(choices)
if choices == 1:
chosen_space_numbers, chosen_space_names = \
scraper._choose_by_number(scraper.managed_space_info)
scraper.plot_managed_spaces(managed_spaces=chosen_space_numbers)
elif choices == 2:
chosen_location_numbers, chosen_location_names = \
scraper._choose_by_number(scraper.sensor_location_info)
scraper.plot_sensor_reading_after(sensor_numbers=\
chosen_location_numbers)
else:
print('Unknown input.') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1416,
38545,
29487,
13,
9078,
198,
198,
1890,
1005,
260,
1412,
290,
29353,
1366,
1262,
262,
705,
1416,
38545,
13,
9078,
6,
8265,
13,
198,
198,
41972,
319,
... | 2.428571 | 567 |
from sspipe import p, px
import numpy as np
| [
6738,
264,
2777,
3757,
1330,
279,
11,
279,
87,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
198
] | 2.631579 | 19 |
import os
import sys
import argparse
import shutil
from logging import getLogger
from pathlib import Path
from time import sleep
from typing import List, Union, Optional, Sequence, Text
try:
# Might not be installed.
import ipdb as debug
except ImportError:
import pdb as debug
try:
import argcomplete
except ImportError:
argcomplete = None
from tabulate import tabulate
from remake.setup_logging import setup_stdout_logging
from remake.version import get_version
from remake.loader import load_remake
from remake.remake_exceptions import RemakeError
from remake.bcolors import bcolors
from remake.monitor import remake_curses_monitor
logger = getLogger(__name__)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
4423,
346,
198,
6738,
18931,
1330,
651,
11187,
1362,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
640,
1330,
3993,
198,
6738,
19720,
1330,
7343,
11,
4479,
11,
32233,
... | 3.415459 | 207 |
"""
Django settings for ScienceCruiseDataManagement project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import datetime
import pathlib
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*gb+gevd#dx0euc(#$4ts!37w%9m#kbjlz_4k9@&62ok+=w_*2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# INTERNAL_IPS = ["127.0.0.1",]
INTERNAL_IPS = [] # Used by the Debugger console. The maps/some pages might not work
# offline because the debugger tries to load an external JQurey
# NOTE: by default this is an empty list. Check documentation.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export', # to export as CSV
'debug_toolbar',
'django_extensions',
'selectable', # auto-completion
'smart_selects', # foreign keys depending on other foreign keys
'ship_data',
'data_storage_management',
'main', # ScienceCruiseManagement main app
'metadata',
'ctd',
'underway_sampling',
'data_administration',
'expedition_reporting',
'spi_admin',
'data_management'
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware', TODO: reenable, test data_storage_management/views.py and the script
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ScienceCruiseDataManagement.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'main', 'templates'), os.path.join(BASE_DIR, 'metadata', 'templates'), os.path.join(BASE_DIR, 'expedition_reporting', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ScienceCruiseDataManagement.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# This project could just use sqlite3 for testing purposes. Then
# the DATABASES dictionary would be like:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
#
def secrets_file(file_name):
""" First try $HOME/.file_name, else tries /run/secrets/file_name, else raises an exception """
file_path_in_home_directory = os.path.join(str(pathlib.Path.home()), "." + file_name)
if os.path.exists(file_path_in_home_directory):
return file_path_in_home_directory
file_path_in_run_secrets = os.path.join("/run/secrets", file_name)
if os.path.exists(file_path_in_run_secrets):
return file_path_in_run_secrets
raise "Configuration for {} doesn't exist".format(file_name)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': secrets_file("science_cruise_data_management_mysql.conf"),
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
# So DATETIME_FORMAT is honored
USE_L10N = False
USE_TZ = True
# Datetime in list views in YYYY-MM-DD HH:mm::ss
DATETIME_FORMAT = "Y-m-d H:i:s"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Should be moved out from here, just for development at the moment
BASE_STORAGE_DIRECTORY = '/mnt/ace_data'
# Added for the importer-exporter module
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# Users that can add events should be in this Group (it's created by the command createdjangousers
ADD_EVENTS_GROUP = "Add events"
# Controlled vocabulary sources
VOCAB_SOURCES = (("seadatanet", "Sea Data Net"),
("seavox", "SeaVoX"),
("globalchangemasterdirectory", "Global Change Master Directory"),
("generatedforace", "Generated for ACE"),
("britishoceanographicdatacentre", "British Oceanographic Data Centre (BODC)"))
DEVICE_SOURCE_DEFAULT= "generatedforace"
# Default provenance identifier used for position-uncertainty records.
UNCERTAINTY_DEFAULT = "britishoceanoraphicdatacentre"
# (value, label) choices for flagging records as valid or redundant.
VALIDITY_OPTIONS = (("valid", "valid"), ("redundant", "redundant"))
# JQUERY is loaded when necessary from the static files
USE_DJANGO_JQUERY = False
JQUERY_URL = '/static/js/external/jquery-1.12.0.min.js'
# Branding shown in the Django admin site.
ADMIN_SITE_TITLE = 'ACE Data Admin'
ADMIN_SITE_HEADER = 'ACE Data Administration'
# This can be a symbolic link
DOCUMENTS_DIRECTORY = os.path.join(os.getenv("HOME"), "intranet_documents")
FORECAST_DIRECTORY = os.path.join(os.getenv("HOME"), "ethz_forecast_data")
# Name of the GPS treated as the main position source.
MAIN_GPS = "GLONASS"
# Mount point and address of the NAS that stages the expedition data.
NAS_STAGING_MOUNT_POINT = "/mnt/ace_data"
NAS_IP = "192.168.20.2"
# Station types whose locations get updated, and the uncertainty/source
# names recorded with each updated position.
UPDATE_LOCATION_STATIONS_TYPES = ["marine"]
UPDATE_LOCATION_POSITION_UNCERTAINTY_NAME = "0.0 to 0.01 n.miles"
UPDATE_LOCATION_POSITION_SOURCE_NAME = "Ship's GPS"
# The following Event Action types will not be updated
UPDATE_LOCATION_POSITION_EXCEPTION_EVENT_ACTION_TYPE_ENDS_EXCEPTIONS = ["Sonobuoy"]
# Interval (seconds) between points drawn on the track map.
MAP_RESOLUTION_SECONDS = 1800
# NOTE(review): absolute developer-machine path — presumably overridden by
# local_settings in deployment; verify before relying on it.
TRACK_MAP_FILEPATH = "/home/jen/projects/ace_data_management/data_requests/20171106_walton_distance_travelled/geojson_track/geojson.track"
IMAGE_RELOAD_FILEPATH = "/mnt/data_admin/latest_image/latest_image.jpg"
# For default options
DEFAULT_PLATFORM_NAME = "Akademik Treshnikov"
DEFAULT_MISSION_NAME = "Antarctic Circumnavigation Expedition"
DEFAULT_CTD_OPERATOR_FIRSTNAME = "Marie-Noelle"
DEFAULT_CTD_OPERATOR_LASTNAME = "Houssais"
# Re-export of the sample code defined/imported earlier in this module.
EXPEDITION_SAMPLE_CODE = expedition_sample_code
MAXIMUM_EMAIL_SIZE = 435000 # bytes
# IMAP_SERVER = "192.168.20.40"
IMAP_SERVER = "46.226.111.64"
# DEFAULT VALUES FOR METADATA MODEL
DEFAULT_IN_GCMD = True
DEFAULT_IN_DATACITE = True
DEFAULT_METADATA_NAME = "CEOS IDN DIF"
DEFAULT_METADATA_VERSION = "VERSION 9.9"
DEFAULT_DATA_SET_LANGUAGE = "English"
METADATA_DEFAULT_PLATFORM_SHORT_NAME = ["R/V AT"]
METADATA_DEFAULT_PROJECT_SHORT_NAME = ["SPI-ACE"]
METADATA_DEFAULT_DATA_CENTER = ["SPI"]
METADATA_DEFAULT_IDN_NODE = ["AMD", "SOOS"]
METADATA_DEFAULT_CITATION_PUBLISHER = "SPI"
DATE_TWO_DAYS = datetime.datetime(2017, 2, 5)
# Allow a machine-specific local_settings module to override anything above.
try:
    from local_settings import *
    print('Imported local_settings')
except ImportError:
    # No local overrides present — run with the defaults defined above.
    pass
| [
37811,
198,
35,
73,
14208,
6460,
329,
5800,
27535,
786,
6601,
48032,
1628,
13,
198,
198,
8645,
515,
416,
705,
28241,
14208,
12,
28482,
923,
16302,
6,
1262,
37770,
352,
13,
940,
13,
19,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
... | 2.535403 | 3,446 |
# -*- coding: utf-8 -*-
"""Views.
"""
__author__ = 'Bartosz Kościów'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
29978,
198,
37811,
198,
834,
9800,
834,
796,
705,
33,
433,
418,
89,
17634,
129,
249,
979,
10205,
86,
6,
198
] | 1.891892 | 37 |
"""Train and evaluate a logistic-regression classifier on the iris data set."""
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression

# Load the data into pandas containers.
iris = load_iris()
X = pd.DataFrame(iris.data)   # independent variables (one feature per column)
y = pd.Series(iris.target)    # dependent variable, kept 1-D for the encoder

# Label-encode the target. LabelEncoder requires a 1-D array; the original
# (n, 1) DataFrame raises a ValueError in current scikit-learn versions.
encode = LabelEncoder()
y = encode.fit_transform(y)

# Split into train and test data (80 % train / 20 % test).
trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.2)

# Fit the model and predict on the held-out split.
model = LogisticRegression().fit(trainX, trainy)
predy = model.predict(testX)

# Check accuracy score.
score = accuracy_score(testy, predy)
print(f'Accuracy Score : {score}')
| [
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
29616,
201,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
201,
198,
6738,
1341,
35720,
13,
3866,
3694... | 2.876623 | 308 |
"""
particle_swam_optimization_algorithm.py
Returns the minimizer of the function
func_ps - anonimous function (vectorized for multiple particles)
"""
import numpy as np
import numpy.matlib
np.random.seed();
| [
37811,
198,
3911,
1548,
62,
2032,
321,
62,
40085,
1634,
62,
282,
42289,
13,
9078,
198,
198,
35561,
262,
10356,
7509,
286,
262,
2163,
198,
20786,
62,
862,
532,
281,
261,
320,
516,
2163,
357,
31364,
1143,
329,
3294,
13166,
8,
198,
378... | 3.212121 | 66 |