"""
This script displays the Alignment image and the traces in an RC Ginga window.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from pypeit.scripts import scriptbase
class ChkAlignments(scriptbase.ScriptBase):
@classmethod
def get_parser(cls, width=None):
from pypeit.spectrographs import available_spectrographs
parser = super().get_parser(description='Display MasterAlignment image and the trace data',
width=width)
parser.add_argument('master_file', type=str, default = None,
help='PypeIt Master Alignment file [e.g. MasterAlignment_A_1_01.fits]')
parser.add_argument('--chname', default='Alignments', type=str,
help='Channel name for image in Ginga')
return parser
@staticmethod
def main(pargs):
from pypeit import alignframe
# Load
alignments = alignframe.Alignments.from_file(pargs.master_file)
# Show
alignments.show()
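# A minimal sketch of running this check programmatically (the file name is
# illustrative, taken from the help text above):
#
#   parser = ChkAlignments.get_parser()
#   pargs = parser.parse_args(['MasterAlignment_A_1_01.fits'])
#   ChkAlignments.main(pargs)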
|
# -*- coding: utf-8 -*-
import random
import string
import time
from datetime import datetime
from apps.database.session import db
from config import JsonConfig
def get_model(model):
if JsonConfig.get_data('TESTING'):
return model.test_model
return model
def get_token():
random_string = string.ascii_lowercase + string.digits + string.ascii_uppercase
timestamp = str(time.mktime(datetime.today().timetuple()))
return ''.join(random.choice(random_string) for _ in range(28)) + timestamp
class CinemaMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(30))
image_url = db.Column(db.Text())
address = db.Column(db.String(50))
detail_address = db.Column(db.String(30))
def __init__(self, title=None, image_url=None, address=None, detail_address=None):
self.title = title
self.image_url = image_url
self.address = address
self.detail_address = detail_address
class TestCinemaModel(CinemaMixin, db.Model):
__tablename__ = 'test_cinemas'
__table_args__ = {'extend_existing': True}
class CinemaModel(CinemaMixin, db.Model):
__tablename__ = 'cinemas'
__table_args__ = {'extend_existing': True}
test_model = TestCinemaModel
Cinema = get_model(CinemaModel)
class MovieMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(120))
director = db.Column(db.String(20))
description = db.Column(db.Text)
poster_url = db.Column(db.Text)
running_time = db.Column(db.Integer)
age_rating = db.Column(db.Integer)
def __init__(self, title=None, director=None, description=None, poster_url=None, running_time=None,
age_rating=None):
self.title = title
self.director = director
self.description = description
self.poster_url = poster_url
self.running_time = running_time
self.age_rating = age_rating
class TestMovieModel(MovieMixin, db.Model):
__tablename__ = 'test_movies'
__table_args__ = {'extend_existing': True}
showtimes = db.relationship('TestShowtimeModel')
class MovieModel(MovieMixin, db.Model):
__tablename__ = 'movies'
__table_args__ = {'extend_existing': True}
test_model = TestMovieModel
showtimes = db.relationship('ShowtimeModel')
Movie = get_model(MovieModel)
class ShowtimeMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
cinema_id = db.Column(db.Integer)
start_time = db.Column(db.DateTime)
end_time = db.Column(db.DateTime)
def __init__(self, movie_id=None, cinema_id=None, theater_id=None, start_time=None, end_time=None):
self.movie_id = movie_id
self.cinema_id = cinema_id
self.theater_id = theater_id
self.start_time = start_time
self.end_time = end_time
class TestShowtimeModel(ShowtimeMixin, db.Model):
__tablename__ = 'test_showtimes'
__table_args__ = {'extend_existing': True}
movie_id = db.Column(db.Integer, db.ForeignKey('test_movies.id'))
theater_id = db.Column(db.Integer, db.ForeignKey('test_theaters.id'))
theater = db.relationship('TestTheaterModel')
class ShowtimeModel(ShowtimeMixin, db.Model):
__tablename__ = 'showtimes'
__table_args__ = {'extend_existing': True}
test_model = TestShowtimeModel
movie_id = db.Column(db.Integer, db.ForeignKey('movies.id'))
theater_id = db.Column(db.Integer, db.ForeignKey('theaters.id'))
theater = db.relationship('TheaterModel')
Showtime = get_model(ShowtimeModel)
class Test(db.Model):
__tablename__ = 'tests'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
message = db.Column(db.String(120))
def __init__(self, message=None):
self.message = message
class TheaterTicketMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
showtime_id = db.Column(db.Integer())
x = db.Column(db.Integer())
y = db.Column(db.Integer())
def __init__(self, theater_id=None, showtime_id=None, x=None, y=None):
self.theater_id = theater_id
self.showtime_id = showtime_id
self.x = x
self.y = y
class TestTheaterTicketModel(TheaterTicketMixin, db.Model):
__tablename__ = 'test_theater_tickets'
__table_args__ = {'extend_existing': True}
theater_id = db.Column(db.Integer(), db.ForeignKey('test_theaters.id'))
class TheaterTicketModel(TheaterTicketMixin, db.Model):
__tablename__ = 'theater_tickets'
__table_args__ = {'extend_existing': True}
test_model = TestTheaterTicketModel
theater_id = db.Column(db.Integer(), db.ForeignKey('theaters.id'))
TheaterTicket = get_model(TheaterTicketModel)
class TheaterMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
cinema_id = db.Column(db.Integer())
title = db.Column(db.String(10))
seat = db.Column(db.Integer())
def __init__(self, cinema_id=None, title=None, seat=None):
self.cinema_id = cinema_id
self.title = title
self.seat = seat
class TestTheaterModel(TheaterMixin, db.Model):
__tablename__ = 'test_theaters'
__table_args__ = {'extend_existing': True}
theater_tickets = db.relationship('TestTheaterTicketModel')
class TheaterModel(TheaterMixin, db.Model):
__tablename__ = 'theaters'
__table_args__ = {'extend_existing': True}
test_model = TestTheaterModel
theater_tickets = db.relationship('TheaterTicketModel')
Theater = get_model(TheaterModel)
class UserMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(120), unique=True)
nickname = db.Column(db.String(20), unique=True)
password = db.Column(db.String(255))
phone_number = db.Column(db.String(20))
age = db.Column(db.Integer)
profile_url = db.Column(db.Text)
    token = db.Column(db.String(40), unique=True, default=get_token)  # pass the callable so each row gets a fresh token
def __init__(self, email=None, nickname=None, password=None, phone_number=None, age=None, profile_url=None,
token=None):
self.email = email
self.nickname = nickname
self.password = password
self.phone_number = phone_number
self.age = age
self.profile_url = profile_url
self.token = token
class TestUserModel(UserMixin, db.Model):
__tablename__ = 'test_users'
__table_args__ = {'extend_existing': True}
class UserModel(UserMixin, db.Model):
__tablename__ = 'users'
__table_args__ = {'extend_existing': True}
test_model = TestUserModel
User = get_model(UserModel)
|
import json
import os
import pymzml
import pandas as pd
import numpy as np
from tqdm import tqdm
from matchms.importing import load_from_mgf
from pyteomics import mzxml, mzml
import logging
logger = logging.getLogger('msql_fileloading')
def load_data(input_filename, cache=False):
"""
Loading data generically
Args:
        input_filename (str): path to the spectrum file (.mzML, .mzXML, .json, .mgf, .txt or .dat)
        cache (bool, optional): read and write feather caches next to the input file. Defaults to False.
    Returns:
        (pd.DataFrame, pd.DataFrame): MS1 and MS2 peak tables
"""
if cache:
ms1_filename = input_filename + "_ms1.msql.feather"
ms2_filename = input_filename + "_ms2.msql.feather"
if os.path.exists(ms1_filename) or os.path.exists(ms2_filename):
try:
ms1_df = pd.read_feather(ms1_filename)
except:
ms1_df = pd.DataFrame()
try:
ms2_df = pd.read_feather(ms2_filename)
except:
ms2_df = pd.DataFrame()
return ms1_df, ms2_df
# Actually loading
if input_filename[-5:].lower() == ".mzml":
#ms1_df, ms2_df = _load_data_mzML(input_filename)
#ms1_df, ms2_df = _load_data_mzML2(input_filename) # Faster version using pymzML
        ms1_df, ms2_df = _load_data_mzML_pyteomics(input_filename) # pyteomics version, handles mzML with ion mobility
elif input_filename[-6:].lower() == ".mzxml":
ms1_df, ms2_df = _load_data_mzXML(input_filename)
elif input_filename[-5:] == ".json":
ms1_df, ms2_df = _load_data_gnps_json(input_filename)
elif input_filename[-4:].lower() == ".mgf":
ms1_df, ms2_df = _load_data_mgf(input_filename)
elif input_filename[-4:].lower() == ".txt" or input_filename[-4:].lower() == ".dat":
ms1_df, ms2_df = _load_data_txt(input_filename)
else:
print("Cannot Load File Extension")
raise Exception("File Format Not Supported")
# Saving Cache
if cache:
ms1_filename = input_filename + "_ms1.msql.feather"
ms2_filename = input_filename + "_ms2.msql.feather"
if not (os.path.exists(ms1_filename) or os.path.exists(ms2_filename)):
try:
ms1_df.to_feather(ms1_filename)
except:
pass
try:
ms2_df.to_feather(ms2_filename)
except:
pass
return ms1_df, ms2_df
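# A minimal usage sketch of load_data (the file name below is illustrative):
#
#   ms1_df, ms2_df = load_data("spectra.mzML", cache=True)
#   print(ms2_df[["scan", "precmz", "mz", "i"]].head())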
def _load_data_mgf(input_filename):
file = load_from_mgf(input_filename)
ms2mz_list = []
for i, spectrum in enumerate(file):
if len(spectrum.peaks.mz) == 0:
continue
mz_list = list(spectrum.peaks.mz)
i_list = list(spectrum.peaks.intensities)
i_max = max(i_list)
i_sum = sum(i_list)
for i in range(len(mz_list)):
if i_list[i] == 0:
continue
peak_dict = {}
peak_dict["i"] = i_list[i]
peak_dict["i_norm"] = i_list[i] / i_max
peak_dict["i_tic_norm"] = i_list[i] / i_sum
peak_dict["mz"] = mz_list[i]
# Handling malformed mgf files
try:
peak_dict["scan"] = spectrum.metadata["scans"]
except:
peak_dict["scan"] = i + 1
try:
peak_dict["rt"] = float(spectrum.metadata["rtinseconds"]) / 60
except:
peak_dict["rt"] = 0
try:
peak_dict["precmz"] = float(spectrum.metadata["pepmass"][0])
except:
peak_dict["precmz"] = 0
peak_dict["ms1scan"] = 0
peak_dict["charge"] = 1 # TODO: Add Charge Correctly here
peak_dict["polarity"] = 1 # TODO: Add Polarity Correctly here
ms2mz_list.append(peak_dict)
# Turning into pandas data frames
ms1_df = pd.DataFrame([peak_dict])
ms2_df = pd.DataFrame(ms2mz_list)
return ms1_df, ms2_df
def _load_data_gnps_json(input_filename):
all_spectra = json.loads(open(input_filename).read())
ms1_df_list = []
ms2_df_list = []
for spectrum in tqdm(all_spectra):
# Skipping spectra bigger than 1MB of peaks
if len(spectrum["peaks_json"]) > 1000000:
continue
peaks = json.loads(spectrum["peaks_json"])
peaks = [peak for peak in peaks if peak[1] > 0]
if len(peaks) == 0:
continue
i_max = max([peak[1] for peak in peaks])
i_sum = sum([peak[1] for peak in peaks])
if i_max == 0:
continue
ms2mz_list = []
for peak in peaks:
peak_dict = {}
peak_dict["i"] = peak[1]
peak_dict["i_norm"] = peak[1] / i_max
peak_dict["i_tic_norm"] = peak[1] / i_sum
peak_dict["mz"] = peak[0]
peak_dict["scan"] = spectrum["spectrum_id"]
peak_dict["rt"] = 0
peak_dict["precmz"] = float(spectrum["Precursor_MZ"])
peak_dict["ms1scan"] = 0
peak_dict["charge"] = 1 # TODO: Add Charge Correctly here
peak_dict["polarity"] = 1 # TODO: Add Polarity Correctly here
ms2mz_list.append(peak_dict)
# Turning into pandas data frames
if len(ms2mz_list) > 0:
ms2_df = pd.DataFrame(ms2mz_list)
ms2_df_list.append(ms2_df)
ms1_df = pd.DataFrame([peak_dict])
ms1_df_list.append(ms1_df)
# Merging
ms1_df = pd.concat(ms1_df_list).reset_index()
ms2_df = pd.concat(ms2_df_list).reset_index()
return ms1_df, ms2_df
def _load_data_mzXML(input_filename):
ms1mz_list = []
ms2mz_list = []
previous_ms1_scan = 0
with mzxml.read(input_filename) as reader:
for spectrum in tqdm(reader):
if len(spectrum["intensity array"]) == 0:
continue
if not "m/z array" in spectrum:
# This is not a mass spectrum
continue
mz_list = list(spectrum["m/z array"])
i_list = list(spectrum["intensity array"])
i_max = max(i_list)
i_sum = sum(i_list)
mslevel = spectrum["msLevel"]
if mslevel == 1:
for i in range(len(mz_list)):
peak_dict = {}
peak_dict["i"] = i_list[i]
peak_dict["i_norm"] = i_list[i] / i_max
peak_dict["i_tic_norm"] = i_list[i] / i_sum
peak_dict["mz"] = mz_list[i]
peak_dict["scan"] = spectrum["id"]
peak_dict["rt"] = spectrum["retentionTime"]
peak_dict["polarity"] = _determine_scan_polarity_mzXML(spectrum)
ms1mz_list.append(peak_dict)
previous_ms1_scan = spectrum["id"]
if mslevel == 2:
msn_mz = spectrum["precursorMz"][0]["precursorMz"]
msn_charge = 0
if "precursorCharge" in spectrum["precursorMz"][0]:
msn_charge = spectrum["precursorMz"][0]["precursorCharge"]
for i in range(len(mz_list)):
peak_dict = {}
peak_dict["i"] = i_list[i]
peak_dict["i_norm"] = i_list[i] / i_max
peak_dict["i_tic_norm"] = i_list[i] / i_sum
peak_dict["mz"] = mz_list[i]
peak_dict["scan"] = spectrum["id"]
peak_dict["rt"] = spectrum["retentionTime"]
peak_dict["precmz"] = msn_mz
peak_dict["ms1scan"] = previous_ms1_scan
peak_dict["charge"] = msn_charge
peak_dict["polarity"] = _determine_scan_polarity_mzXML(spectrum)
ms2mz_list.append(peak_dict)
# Turning into pandas data frames
ms1_df = pd.DataFrame(ms1mz_list)
ms2_df = pd.DataFrame(ms2mz_list)
return ms1_df, ms2_df
def _determine_scan_polarity_mzML(spec):
"""
Gets an enum for positive and negative polarity, for pymzml
    Args:
        spec (pymzml.spec.Spectrum): spectrum read by pymzml
    Returns:
        int: 0 = unknown, 1 = positive, 2 = negative
"""
polarity = 0
negative_polarity = spec["negative scan"]
if negative_polarity is True:
polarity = 2
positive_polarity = spec["positive scan"]
if positive_polarity is True:
polarity = 1
return polarity
def _determine_scan_polarity_pyteomics_mzML(spec):
"""
Gets an enum for positive and negative polarity, for pyteomics
    Args:
        spec (dict): spectrum read by pyteomics
    Returns:
        int: 0 = unknown, 1 = positive, 2 = negative
"""
polarity = 0
if "negative scan" in spec:
polarity = 2
if "positive scan" in spec:
polarity = 1
return polarity
def _determine_scan_polarity_mzXML(spec):
polarity = 0
if spec["polarity"] == "+":
polarity = 1
if spec["polarity"] == "-":
polarity = 2
return polarity
def _load_data_mzML_pyteomics(input_filename):
"""
This is a loading operation using pyteomics to help with loading mzML files with ion mobility
Args:
        input_filename (str): path to the mzML file
"""
previous_ms1_scan = 0
# MS1
all_mz = []
all_rt = []
all_polarity = []
all_i = []
all_i_norm = []
all_i_tic_norm = []
all_scan = []
# MS2
all_msn_mz = []
all_msn_rt = []
all_msn_polarity = []
all_msn_i = []
all_msn_i_norm = []
all_msn_i_tic_norm = []
all_msn_scan = []
all_msn_precmz = []
all_msn_ms1scan = []
all_msn_charge = []
all_msn_mobility = []
with mzml.read(input_filename) as reader:
for spectrum in tqdm(reader):
if len(spectrum["intensity array"]) == 0:
continue
# Getting the RT
try:
rt = spectrum["scanList"]["scan"][0]["scan start time"]
except:
rt = 0
# Correcting the unit
try:
if spectrum["scanList"]["scan"][0]["scan start time"].unit_info == "second":
rt = rt / 60
except:
pass
scan = int(spectrum["id"].replace("scanId=", "").split("scan=")[-1])
if not "m/z array" in spectrum:
# This is not a mass spectrum
continue
mz = spectrum["m/z array"]
intensity = spectrum["intensity array"]
i_max = max(intensity)
i_sum = sum(intensity)
            # If there is no ms level, it's likely a UV/VIS spectrum and we can skip it
if not "ms level" in spectrum:
continue
mslevel = spectrum["ms level"]
if mslevel == 1:
all_mz += list(mz)
all_i += list(intensity)
all_i_norm += list(intensity / i_max)
all_i_tic_norm += list(intensity / i_sum)
all_rt += len(mz) * [rt]
all_scan += len(mz) * [scan]
all_polarity += len(mz) * [_determine_scan_polarity_pyteomics_mzML(spectrum)]
previous_ms1_scan = scan
if mslevel == 2:
msn_mz = spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["selected ion m/z"]
msn_charge = 0
if "charge state" in spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]:
msn_charge = int(spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["charge state"])
all_msn_mz += list(mz)
all_msn_i += list(intensity)
all_msn_i_norm += list(intensity / i_max)
all_msn_i_tic_norm += list(intensity / i_sum)
all_msn_rt += len(mz) * [rt]
all_msn_scan += len(mz) * [scan]
all_msn_polarity += len(mz) * [_determine_scan_polarity_pyteomics_mzML(spectrum)]
all_msn_precmz += len(mz) * [msn_mz]
all_msn_ms1scan += len(mz) * [previous_ms1_scan]
all_msn_charge += len(mz) * [msn_charge]
if "product ion mobility" in spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]:
mobility = spectrum["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["product ion mobility"]
all_msn_mobility += len(mz) * [mobility]
ms1_df = pd.DataFrame()
if len(all_mz) > 0:
ms1_df['i'] = all_i
ms1_df['i_norm'] = all_i_norm
ms1_df['i_tic_norm'] = all_i_tic_norm
ms1_df['mz'] = all_mz
ms1_df['scan'] = all_scan
ms1_df['rt'] = all_rt
ms1_df['polarity'] = all_polarity
ms2_df = pd.DataFrame()
if len(all_msn_mz) > 0:
ms2_df['i'] = all_msn_i
ms2_df['i_norm'] = all_msn_i_norm
ms2_df['i_tic_norm'] = all_msn_i_tic_norm
ms2_df['mz'] = all_msn_mz
ms2_df['scan'] = all_msn_scan
ms2_df['rt'] = all_msn_rt
ms2_df["polarity"] = all_msn_polarity
ms2_df["precmz"] = all_msn_precmz
ms2_df["ms1scan"] = all_msn_ms1scan
ms2_df["charge"] = all_msn_charge
if len(all_msn_mobility) == len(all_msn_i):
ms2_df["mobility"] = all_msn_mobility
return ms1_df, ms2_df
def _load_data_mzML2(input_filename):
"""This is a faster loading version, but a bit more memory intensive
    Args:
        input_filename (str): path to the mzML file
    Returns:
        (pd.DataFrame, pd.DataFrame): MS1 and MS2 peak tables
"""
MS_precisions = {
1: 5e-6,
2: 20e-6,
3: 20e-6,
4: 20e-6,
5: 20e-6,
6: 20e-6,
7: 20e-6,
}
run = pymzml.run.Reader(input_filename, MS_precisions=MS_precisions)
previous_ms1_scan = 0
# MS1
all_mz = []
all_rt = []
all_polarity = []
all_i = []
all_i_norm = []
all_i_tic_norm = []
all_scan = []
# MS2
all_msn_mz = []
all_msn_rt = []
all_msn_polarity = []
all_msn_i = []
all_msn_i_norm = []
all_msn_i_tic_norm = []
all_msn_scan = []
all_msn_precmz = []
all_msn_ms1scan = []
all_msn_charge = []
for i, spec in tqdm(enumerate(run)):
# Getting RT
rt = spec.scan_time_in_minutes()
# Getting peaks
peaks = spec.peaks("raw")
# Filtering out zero rows
peaks = peaks[~np.any(peaks < 1.0, axis=1)]
if spec.ms_level == 2:
if len(peaks) > 1000:
# Sorting by intensity
peaks = peaks[peaks[:,1].argsort()]
# Getting top 1000
peaks = peaks[-1000:]
if len(peaks) == 0:
continue
mz, intensity = zip(*peaks)
i_max = max(intensity)
i_sum = sum(intensity)
if spec.ms_level == 1:
all_mz += list(mz)
all_i += list(intensity)
all_i_norm += list(intensity / i_max)
all_i_tic_norm += list(intensity / i_sum)
all_rt += len(mz) * [rt]
all_scan += len(mz) * [spec.ID]
all_polarity += len(mz) * [_determine_scan_polarity_mzML(spec)]
previous_ms1_scan = spec.ID
if spec.ms_level == 2:
msn_mz = spec.selected_precursors[0]["mz"]
charge = 0
if "charge" in spec.selected_precursors[0]:
charge = spec.selected_precursors[0]["charge"]
all_msn_mz += list(mz)
all_msn_i += list(intensity)
all_msn_i_norm += list(intensity / i_max)
all_msn_i_tic_norm += list(intensity / i_sum)
all_msn_rt += len(mz) * [rt]
all_msn_scan += len(mz) * [spec.ID]
all_msn_polarity += len(mz) * [_determine_scan_polarity_mzML(spec)]
all_msn_precmz += len(mz) * [msn_mz]
all_msn_ms1scan += len(mz) * [previous_ms1_scan]
all_msn_charge += len(mz) * [charge]
ms1_df = pd.DataFrame()
if len(all_mz) > 0:
ms1_df['i'] = all_i
ms1_df['i_norm'] = all_i_norm
ms1_df['i_tic_norm'] = all_i_tic_norm
ms1_df['mz'] = all_mz
ms1_df['scan'] = all_scan
ms1_df['rt'] = all_rt
ms1_df['polarity'] = all_polarity
ms2_df = pd.DataFrame()
if len(all_msn_mz) > 0:
ms2_df['i'] = all_msn_i
ms2_df['i_norm'] = all_msn_i_norm
ms2_df['i_tic_norm'] = all_msn_i_tic_norm
ms2_df['mz'] = all_msn_mz
ms2_df['scan'] = all_msn_scan
ms2_df['rt'] = all_msn_rt
ms2_df["polarity"] = all_msn_polarity
ms2_df["precmz"] = all_msn_precmz
ms2_df["ms1scan"] = all_msn_ms1scan
ms2_df["charge"] = all_msn_charge
return ms1_df, ms2_df
def _load_data_mzML(input_filename):
MS_precisions = {
1: 5e-6,
2: 20e-6,
3: 20e-6,
4: 20e-6,
5: 20e-6,
6: 20e-6,
7: 20e-6,
}
run = pymzml.run.Reader(input_filename, MS_precisions=MS_precisions)
ms1_df_list = []
ms2_df_list = []
previous_ms1_scan = 0
for i, spec in tqdm(enumerate(run)):
ms1_df = pd.DataFrame()
ms2_df = pd.DataFrame()
# Getting RT
rt = spec.scan_time_in_minutes()
# Getting peaks
peaks = spec.peaks("raw")
# Filtering out zero rows
peaks = peaks[~np.any(peaks < 1.0, axis=1)]
# Sorting by intensity
peaks = peaks[peaks[:, 1].argsort()]
if spec.ms_level == 2:
# Getting top 1000
peaks = peaks[-1000:]
if len(peaks) == 0:
continue
mz, intensity = zip(*peaks)
i_max = max(intensity)
i_sum = sum(intensity)
if spec.ms_level == 1:
ms1_df['i'] = intensity
ms1_df['i_norm'] = intensity / i_max
ms1_df['i_tic_norm'] = intensity / i_sum
ms1_df['mz'] = mz
ms1_df['scan'] = spec.ID
ms1_df['rt'] = rt
ms1_df['polarity'] = _determine_scan_polarity_mzML(spec)
previous_ms1_scan = spec.ID
if spec.ms_level == 2:
msn_mz = spec.selected_precursors[0]["mz"]
charge = 0
if "charge" in spec.selected_precursors[0]:
charge = spec.selected_precursors[0]["charge"]
ms2_df['i'] = intensity
ms2_df['i_norm'] = intensity / i_max
ms2_df['i_tic_norm'] = intensity / i_sum
ms2_df['mz'] = mz
ms2_df['scan'] = spec.ID
ms2_df['rt'] = rt
ms2_df["polarity"] = _determine_scan_polarity_mzML(spec)
ms2_df["precmz"] = msn_mz
ms2_df["ms1scan"] = previous_ms1_scan
ms2_df["charge"] = charge
# Turning into pandas data frames
if len(ms1_df) > 0:
ms1_df_list.append(ms1_df)
if len(ms2_df) > 0:
ms2_df_list.append(ms2_df)
if len(ms1_df_list) > 0:
ms1_df = pd.concat(ms1_df_list).reset_index()
else:
ms1_df = pd.DataFrame()
if len(ms2_df_list) > 0:
ms2_df = pd.concat(ms2_df_list).reset_index()
else:
ms2_df = pd.DataFrame()
return ms1_df, ms2_df
def _load_data_txt(input_filename):
    # We assume whitespace-separated columns: first is m/z, second is intensity; all peaks are marked as MS1
mz_list = []
i_list = []
for line in open(input_filename):
cleaned_line = line.rstrip()
if len(cleaned_line) == 0:
continue
mz, i = cleaned_line.split()
mz_list.append(float(mz))
i_list.append(float(i))
ms1_df = pd.DataFrame()
ms1_df['mz'] = mz_list
ms1_df['i'] = i_list
ms1_df['i_norm'] = ms1_df['i'] / max(ms1_df['i'])
ms1_df['i_tic_norm'] = ms1_df['i'] / sum(ms1_df['i'])
ms1_df['scan'] = 1
ms1_df['rt'] = 0
ms1_df['polarity'] = "Positive"
print(ms1_df)
return ms1_df, pd.DataFrame()
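# The expected input for _load_data_txt, per the comment above (two
# whitespace-separated columns, m/z then intensity; values are illustrative):
#
#   100.0393  1532.0
#   101.0712   873.5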
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.site_backup_file
import cohesity_management_sdk.models.site_info
class SiteBackupStatus(object):
"""Implementation of the 'SiteBackupStatus' model.
Attributes:
        backup_file_vec (list of SiteBackupFile): List of backed-up files: the
            PnP package and any other files required to recover the site.
option_flags (int): Actual options with which this site was backed up
(BackupSiteArg.BackupSiteOptionFlags).
site_info (SiteInfo): This site info is used during recovery to
recover a full site.
        warning_vec (list of string): Backup succeeded, but there were some
            warnings for the user; for example, the term store could not be
            backed up due to lack of permissions.
"""
# Create a mapping from Model property names to API property names
_names = {
"backup_file_vec":'backupFileVec',
"option_flags":'optionFlags',
"site_info":'siteInfo',
"warning_vec":'ipmiUsername'
}
def __init__(self,
backup_file_vec=None,
option_flags=None,
site_info=None,
warning_vec=None):
"""Constructor for the SiteBackupStatus class"""
# Initialize members of the class
self.backup_file_vec = backup_file_vec
self.option_flags = option_flags
self.site_info = site_info
self.warning_vec = warning_vec
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
backup_file_vec = None
if dictionary.get('backupFileVec') != None:
backup_file_vec = list()
for structure in dictionary.get('backupFileVec'):
backup_file_vec.append(cohesity_management_sdk.models.site_backup_file.SiteBackupFile.from_dictionary(structure))
option_flags = dictionary.get('optionFlags')
site_info = cohesity_management_sdk.models.site_info.SiteInfo.from_dictionary(dictionary.get('siteInfo')) if dictionary.get('siteInfo') else None
        warning_vec = dictionary.get('warningVec')
# Return an object of this model
return cls(backup_file_vec,
option_flags,
site_info,
warning_vec)
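# A minimal usage sketch (keys follow the _names mapping above; values are
# illustrative):
#
#   status = SiteBackupStatus.from_dictionary({'optionFlags': 0})
#   print(status.option_flags)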
|
"""
Language resources are defined separately from :class:`Intent` and
:class:`Entity` classes. They are stored as plain YAML files in a `language/`
folder, alongside the Agent's Python modules; this keeps the relationship with
designers and translators flexible, and allows some degree of automation when
downloading cloud changes back to the local Agent definition (this feature is
currently not implemented).
Your `language/` folder will contain one subfolder per language (e.g.
`language/en/`, `language/it/`, ...); each of these will contain
* one YAML file per Intent (e.g. `my_module.my_intent_class.yaml`). Intent
language resources are specified in :mod:`intents.language.intent_language`
* one YAML file per entity (e.g. `ENTITY_MyEntityClass.yaml`). Entity language
resources are specified in :mod:`intents.language.entity_language`
.. tip::
It may be useful to look at The Example Agent code and language resources
(https://github.com/dariowho/intents/tree/master/example_agent/language)
to get more insight on the format and naming conventions of language files.
"""
from intents.language_codes import LanguageCode, LANGUAGE_CODES, ensure_language_code
from intents.language.agent_language import agent_language_folder, agent_supported_languages, match_agent_language
from intents.language.intent_language import intent_language_data, IntentResponseGroup, IntentResponseDict, IntentResponse, TextIntentResponse, ImageIntentResponse, QuickRepliesIntentResponse, CardIntentResponse, CustomPayloadIntentResponse, IntentLanguageData, ExampleUtterance, UtteranceChunk, TextUtteranceChunk, EntityUtteranceChunk
from intents.language.entity_language import entity_language_data, EntityEntry
|
def intro():
print("*********************************************")
print("* Friends of Seaview Pier *")
print("*********************************************")
print()
def capture_new_member_details(volunteer_locations):
member_details = []
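    # member_details layout by index:
    #   0: first name, 1: last name, 2: is volunteer, 3: volunteer area,
    #   4: joining date, 5: joining fee paid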
firstname = ""
lastname = ""
volunteer = False
volunteer_area = "none"
joining_date = 0
joining_fee_paid = False
member_details.append(input("What is your first name? "))
member_details.append(input("What is your last name? "))
volunteer_choice = input("Do you wish to work as a volunteer (y/n)? ").lower()
if volunteer_choice in ["y", "yes", "yeah", "yep", "indeed"]:
volunteer = True
volunteer_area = capture_volunteer_area(volunteer_locations)
member_details.append(volunteer)
member_details.append(volunteer_area)
member_details.append(capture_joining_date())
joining_fee = input("Have you paid the joining fee (y/n)? ").lower()
if joining_fee in ["y", "yes", "yeah", "yep", "indeed"]:
joining_fee_paid = True
member_details.append(joining_fee_paid)
return member_details
def capture_volunteer_area(volunteer_locations):
print("Here are the volunteering areas:")
for key, value in volunteer_locations.items():
print(key, value)
print()
area_choice = int(input("Please choose an area to volunteer (1, 2 or 3): "))
while area_choice not in [1, 2, 3]:
        area_choice = int(input("That is not a valid choice, please choose 1, 2 or 3: "))
volunteer_area = volunteer_locations[area_choice]
return volunteer_area
def capture_joining_date():
joining_date = input("Please enter the date you joined, using the form dd.mm.yyyy: ")
return joining_date
def print_members(members):
print("Register of members:")
print()
for member in members:
print(f" First name: {member[0]}")
print(f" Last name: {member[1]}")
print(f" Is volunteer: {member[2]}")
if member[2]:
print(f" Volunteer area: {member[3]}")
print(f" Joining date: {member[4]}")
print(f"Joining fee paid: {member[5]}")
print()
def main():
volunteer_locations = {1: 'pier entrance gate',
2: 'gift shop',
3: 'painting and decorating'}
members = []
intro()
while True:
register_member = input("Would you like to register a new member (y/n)? ")
if register_member.lower() not in ["y", "yes", "yep", "indeed"]:
break
print()
members.append(capture_new_member_details(volunteer_locations))
print()
print_members(members)
main()
|
#!/usr/bin/env python
#
# test_fnirt.py -
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import os.path as op
import itertools as it
import numpy as np
import nibabel as nib
import pytest
import fsl.data.image as fslimage
import fsl.utils.tempdir as tempdir
import fsl.data.constants as constants
import fsl.transform.affine as affine
import fsl.transform.nonlinear as nonlinear
import fsl.transform.fnirt as fnirt
from .test_nonlinear import _random_affine_field
datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear')
def test_readFnirt():
src = op.join(datadir, 'src')
ref = op.join(datadir, 'ref')
coef = op.join(datadir, 'coefficientfield')
disp = op.join(datadir, 'displacementfield')
src = fslimage.Image(src)
ref = fslimage.Image(ref)
coef = fnirt.readFnirt(coef, src, ref)
disp = fnirt.readFnirt(disp, src, ref)
with pytest.raises(ValueError):
fnirt.readFnirt(src, src, ref)
assert isinstance(coef, nonlinear.CoefficientField)
assert isinstance(disp, nonlinear.DeformationField)
assert coef.src.sameSpace(src)
assert coef.ref.sameSpace(ref)
assert disp.src.sameSpace(src)
assert disp.ref.sameSpace(ref)
assert coef.srcSpace == 'fsl'
assert coef.refSpace == 'fsl'
assert disp.srcSpace == 'fsl'
assert disp.refSpace == 'fsl'
def test_readFnirt_defType_intent():
src = op.join(datadir, 'src.nii.gz')
ref = op.join(datadir, 'ref.nii.gz')
coef = op.join(datadir, 'coefficientfield.nii.gz')
disp = op.join(datadir, 'displacementfield.nii.gz')
src = fslimage.Image(src)
ref = fslimage.Image(ref)
field = fnirt.readFnirt(disp, src, ref, defType='absolute')
assert field.deformationType == 'absolute'
field = fnirt.readFnirt(disp, src, ref, defType='relative')
assert field.deformationType == 'relative'
img = nib.load(coef)
img.header['intent_code'] = 0
with tempdir.tempdir():
img.to_filename('field.nii.gz')
with pytest.raises(ValueError):
fnirt.readFnirt('field', src, ref)
field = fnirt.readFnirt(
'field', src, ref,
intent=constants.FSL_CUBIC_SPLINE_COEFFICIENTS)
assert isinstance(field, nonlinear.CoefficientField)
field = fnirt.readFnirt(
'field', src, ref,
intent=constants.FSL_FNIRT_DISPLACEMENT_FIELD)
assert isinstance(field, nonlinear.DeformationField)
def test_toFnirt():
def check(got, exp):
tol = dict(atol=1e-5, rtol=1e-5)
assert np.all(np.isclose(got.data, exp.data, **tol))
assert got.src.sameSpace(exp.src)
assert got.ref.sameSpace(exp.ref)
assert got.srcSpace == 'fsl'
assert got.refSpace == 'fsl'
basefield, xform = _random_affine_field()
src = basefield.src
ref = basefield.ref
spaces = it.permutations(('voxel', 'fsl', 'world'), 2)
for from_, to in spaces:
field = nonlinear.convertDeformationSpace(basefield, from_, to)
got = fnirt.toFnirt(field)
check(got, basefield)
src = fslimage.Image(op.join(datadir, 'src'))
ref = fslimage.Image(op.join(datadir, 'ref'))
coef = fnirt.readFnirt(op.join(datadir, 'coefficientfield'), src, ref)
got = fnirt.toFnirt(coef)
check(got, coef)
def test_fromFnirt():
basefield, basexform = _random_affine_field()
src = basefield.src
ref = basefield.ref
spaces = list(it.permutations(('voxel', 'fsl', 'world'), 2))
for from_, to in spaces:
got = fnirt.fromFnirt(basefield, from_, to)
assert got.srcSpace == to
assert got.refSpace == from_
coords = [np.random.randint(0, basefield.shape[0], 5),
np.random.randint(0, basefield.shape[1], 5),
np.random.randint(0, basefield.shape[2], 5)]
coords = np.array(coords).T
coords = affine.transform(coords, ref.getAffine('voxel', from_))
aff = affine.concat(src.getAffine('fsl', to),
basexform,
ref.getAffine(from_, 'fsl'))
got = got.transform(coords)
exp = affine.transform(coords, aff)
enan = np.isnan(exp)
gnan = np.isnan(got)
assert np.all(np.isclose(enan, gnan))
assert np.all(np.isclose(exp[~enan], got[~gnan]))
# Converting from a FNIRT coefficient field
src = fslimage.Image(op.join(datadir, 'src'))
ref = fslimage.Image(op.join(datadir, 'ref'))
coef = fnirt.readFnirt(op.join(datadir, 'coefficientfield'), src, ref)
disp = fnirt.readFnirt(op.join(datadir, 'displacementfield'), src, ref)
for from_, to in spaces:
cnv = fnirt.fromFnirt(coef, from_, to)
exp = nonlinear.convertDeformationSpace(disp, from_, to)
tol = dict(atol=1e-5, rtol=1e-5)
assert np.all(np.isclose(cnv.data, exp.data, **tol))
|
import torch
from torch.nn import init
import torch.nn as nn
# =============================================================================
# =============================================================================
# This is a Pytorch implementation of Star Topology Convolution.
# =============================================================================
# =============================================================================
#
# Copyright (c) 2020, Chong WU & Zhenan FENG All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# Neither the name of City University of Hong Kong nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =============================================================================
# =============================================================================
# Please cite our paper if you use this code in your own work:
# Wu, Chong; Feng, Zhenan; Zheng, Jiangbin; Zhang, Houwang; Cao, Jiawang; YAN, Hong (2020): Star Topology Convolution for Graph Representation Learning. TechRxiv. Preprint. https://doi.org/10.36227/techrxiv.12805799.v4
# =============================================================================
# =============================================================================
class STC_decoder(nn.Module):
"""
The decoder for classification
"""
def __init__(self, num_classes, STC_encoder, device):
super(STC_decoder, self).__init__()
self.STC_encoder = STC_encoder
self.device = device
self.xent_loss = nn.CrossEntropyLoss()
self.weight = nn.Parameter(torch.FloatTensor(STC_encoder.embedding_dim, num_classes))
init.xavier_uniform_(self.weight)
def forward(self, nodes):
embeddings = self.STC_encoder(nodes)
output = embeddings.mm(self.weight)
return output
def loss(self, nodes, labels):
output = self.forward(nodes)
return self.xent_loss(output, labels.squeeze().to(self.device))
class STC_decoderM(nn.Module):
"""
The decoder for multi-label classification
"""
def __init__(self, num_classes, STC_encoder, device):
super(STC_decoderM, self).__init__()
self.STC_encoder = STC_encoder
self.device = device
self.xent_loss = nn.BCEWithLogitsLoss()
self.weight = nn.Parameter(torch.FloatTensor(STC_encoder.embedding_dim, num_classes))
init.xavier_uniform_(self.weight)
def forward(self, nodes):
embeddings = self.STC_encoder(nodes)
output = embeddings.mm(self.weight)
return output
def loss(self, nodes, labels):
output = self.forward(nodes)
return self.xent_loss(output, labels.squeeze().to(self.device))
|
import boto3
import json
import time
import os
cf = boto3.client('cloudformation')
sns = boto3.client('sns')
waiter = cf.get_waiter('stack_create_complete')
def sendnotification(appname, nonprod_acc, message):
env_name = appname.lower()
sns_arn = "arn:aws:sns:us-east-1:" + nonprod_acc + ":ECSNotifications-" + env_name
    print(sns_arn)
sns_message = message
subject = "CloudFormation StackSet Completed"
response = sns.publish(TopicArn=sns_arn, Message=sns_message, Subject=subject)
    print(response)
def lambda_handler(event, context):
# TODO implement
    print(context)
    print(event)
nonprod_acc = os.environ['NONPROD_ACC']
eventName = event['detail']['eventName']
if eventName == 'CreateStackSet':
stackSetName = event['detail']['requestParameters']['stackSetName']
stackSetId = event['detail']['responseElements']['stackSetId']
        print(stackSetId)
        print("StackSet is being Created for " + stackSetName)
if eventName == 'DeleteStackSet':
stackSetName = event['detail']['requestParameters']['stackSetName']
stackSetId = event['detail']['responseElements']['stackSetId']
        print(stackSetId)
        print("StackSet is being Deleted for " + stackSetName)
if eventName == 'CreateStackInstances':
stackSetName = event['detail']['requestParameters']['stackSetName']
operationId = event['detail']['responseElements']['operationId']
response = cf.describe_stack_set_operation(StackSetName=stackSetName, OperationId=operationId)
status = response['StackSetOperation']['Status']
while status == 'RUNNING':
time.sleep(10)
response = cf.describe_stack_set_operation(StackSetName=stackSetName, OperationId=operationId)
status = response['StackSetOperation']['Status']
if status == 'SUCCEEDED':
dev_endpoint = "https://" + stackSetName + ".dkr.dev.dot-nonprod.corppvt.cloud"
qa_endpoint = "https://" + stackSetName + ".dkr.qa.dot-nonprod.corppvt.cloud"
stage_endpoint = "https://" + stackSetName + ".dkr.stage.dot-nonprod.corppvt.cloud"
prodblue_endpoint = "https://" + stackSetName + ".dkr.prod-blue.dot-nonprod.corppvt.cloud"
prod_endpoint = "https://" + stackSetName + ".dkr.prod.dot-nonprod.corppvt.cloud"
message = "StackSet Creation for Application " + stackSetName + " completed successfully \n Application Name: " + stackSetNamae + "\n ECR Name: " + stackSetName.lower() + "\n DEV Endpoint: " + dev_endpoint + "\n QA Endpoint: " + qa_endpoint + "\n STAGE Endpoint: " + stage_endpoint + "\n PROD_BLUE Endpoint: " + prodblue_endpoint + "\n PROD Endpoint: " + prod_endpoint
print message
sendnotification(stackSetName, nonprod_acc, message)
elif status == 'FAILED':
message = "StackSet Creation for Application " + stackSetName + " Failed"
            print(message)
else:
message = "StackSet Creation for Application " + stackSetName + " is been Stopped"
print message
|
import torch
import torch.nn as nn
from models.AttnLayers import Attention
class AttnDecoder(nn.Module):
def __init__(self, hidden_size, output_size=1, use_attention=True, attention='tanh'):
super(AttnDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.linear_1 = nn.Linear(hidden_size*2, output_size)
self.attention = Attention(hidden_size)
self.use_attention = use_attention
def decode(self, predict) :
predict = self.linear_1(predict)
return predict
def forward(self, data, z, last, lengths=None, masks=None):
if self.use_attention:
attn = self.attention(z, masks)
context = (attn.unsqueeze(-1) * z).sum(1)
else:
attn = None
context = last
predict = self.decode(context)
return predict, attn
class FrozenAttnDecoder(AttnDecoder):
def generate_frozen_uniform_attn(self, data, lengths, masks):
attn = torch.zeros((len(data), torch.max(lengths)))
inv_l = 1. / (lengths - 2)
attn += inv_l[:, None]
attn = attn.cuda()
attn.masked_fill_(masks, 0)
return attn
def forward(self, data, z, last, lengths=None, masks=None):
if self.use_attention:
attn = self.generate_frozen_uniform_attn(data, lengths, masks)
context = (attn.unsqueeze(-1) * z).sum(1)
else:
attn = None
context = last
predict = self.decode(context)
return predict, attn
class PretrainedWeightsDecoder(AttnDecoder) :
def forward(self, data) :
if self.use_attention :
output = data.hidden
attn = data.target_attn
context = (attn.unsqueeze(-1) * output).sum(1)
data.attn = attn
else :
context = data.last_hidden
predict = self.decode(context)
data.predict = predict
|
from typing import List
import re
def valid_line(line: str) -> bool:
"""
    Line validation
    Parameters
    ----------
    line: str
        line text
    Returns
    -------
    valid: bool
        True if the line is a valid (non-blank, non-comment) line
"""
lstriped = line.lstrip()
if len(lstriped) == 0:
return False
if lstriped[0] == '#':
return False
if lstriped[0] == '/':
return False
return True
def get_key(line: str) -> str:
"""
    Get the key from a line
    Parameters
    ----------
    line: str
        line text
    Returns
    -------
    key: str
        key (empty string if no key is found)
"""
reg_dq = r'"(.*?)(?<!\\)" *:'
reg_sq = r"'(.*?)(?<!\\)' *:"
reg_nq = r"(.*?) *:"
stripped = line.strip()
matched = re.match(reg_dq, stripped)
if matched:
return matched.group()[:-1].strip()[1:-1].replace('\\"', '"')
matched = re.match(reg_sq, stripped)
if matched:
return matched.group()[:-1].strip()[1:-1].replace("\\'", "'")
matched = re.match(reg_nq, stripped)
if matched:
return matched.group()[:-1].strip()
return ''
def seek_key(lines: List[str], index: int) -> str:
"""
    Seek the key hierarchy
    Parameters
    ----------
    lines: List[str]
        text lines
    index: int
        index of the line to search from
    Returns
    -------
    key: str
        dotted key path for that line
"""
ret: str = '.'
    current_indent: int = 10000  # start with a sufficiently large indent
for line in lines[:index + 1][::-1]:
if not valid_line(line):
continue
indent = len(line) - len(line.lstrip())
if indent >= current_indent:
continue
current_key = get_key(line)
if not current_key:
continue
ret = '.' + current_key + ret
current_indent = indent
if current_indent == 0:
break
    # strip the extra leading and trailing dots
return ret[1:-1]
def dig_key(lines: List[str], key: str) -> [int, int, str]:
"""
    Dig down to a key
    Parameters
    ----------
    lines: List[str]
        text lines
    key: str
        key to search for (dotted path)
    Returns
    -------
    row_index: int
        row of the matched key, or the row of the closest matching level if not found
    column_index: int
        column (indent) where the text starts
    hit_key: str
        matched key path, or an empty string if nothing matched
"""
splited_key: List[str] = key.split('.')
current_key_index: int = 0
row_index: int = -1
current_indent: int = -1
last_hit_row_index: int = -1
for line in lines:
row_index += 1
if not valid_line(line):
continue
indent = len(line) - len(line.lstrip())
if indent <= current_indent:
continue
current_key = get_key(line)
if not current_key:
continue
if current_key == splited_key[current_key_index]:
current_key_index += 1
current_indent = indent
last_hit_row_index = row_index
if current_key_index == len(splited_key):
break
return [last_hit_row_index,
current_indent,
'.'.join(splited_key[:current_key_index])]
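# A minimal usage sketch (the YAML snippet below is illustrative):
#
#   lines = ["root:", "  child:", "    grandchild: 1"]
#   seek_key(lines, 2)            # -> "root.child.grandchild"
#   dig_key(lines, "root.child")  # -> [1, 2, "root.child"]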
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.forms import fields
from horizon import messages
from horizon_bsn.api import neutron
import logging
from openstack_dashboard.api import neutron as osneutron
import ast
import re
LOG = logging.getLogger(__name__)
NEW_LINES = re.compile(r"\r|\n")
EXPECTATION_CHOICES = [('default', _('--- Select Result ---')),
('dropped by route', _('dropped by route')),
('dropped by policy', _('dropped by policy')),
('not permitted by security groups',
_('not permitted by security groups')),
('dropped due to private segment',
_('dropped due to private segment')),
('dropped due to loop', _('dropped due to loop')),
('packet in', _('packet in')),
('forwarded', _('forwarded')),
('dropped', _('dropped')),
('unspecified source', _('unspecified source')),
('unsupported', _('unsupported')),
('invalid input', _('invalid input')),
('inconsistent status', _('inconsistent status')),
('no traffic detected', _('no traffic detected'))]
def extract_src_tenant_and_segment(obj):
"""Extract src_tenant and src_segment from obj
:param obj: an object that contains src_tenant and src_segment string
eg. obj['src_tenant']\
=u"{'tenant_id': u'tenant_id',
'tenant_name': u'tenant_name'}"
obj['src_segment']\
=u"{'segment_id': u'segment_id',
'segment_name': u'segment_name'}"
:return: this operates on the original object
tenant and segment info will be extracted from the obj
src_tenant and src_segment are deleted after the operation
eg.
obj{ ...#other attr
'src_tenant_id'=u'tenant_id',
'src_tenant_name'=u'tenant_name',
'src_segment_id'=u'segment_id',
'src_segment_name'=u'segment_name'
}
"""
if obj.get('src_tenant'):
src_tenant = ast.literal_eval(obj['src_tenant'])
obj['src_tenant_id'] = src_tenant.get('tenant_id')
obj['src_tenant_name'] = src_tenant.get('tenant_name')
del(obj['src_tenant'])
if obj.get('src_segment'):
src_segment = ast.literal_eval(obj['src_segment'])
obj['src_segment_id'] = src_segment.get('segment_id')
obj['src_segment_name'] = src_segment.get('segment_name')
del(obj['src_segment'])
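# A minimal sketch of the transformation above (values are illustrative):
#
#   data = {'src_tenant': "{'tenant_id': u't-1', 'tenant_name': u'demo'}"}
#   extract_src_tenant_and_segment(data)
#   # data == {'src_tenant_id': 't-1', 'src_tenant_name': 'demo'}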
def populate_tenant_choices(request):
"""Returns a list of tenant info tuple for creating select options
This only creates 1 option, which is user's current tenant/project
:param request: object that contents tenant info
- request.user.tenant_name
- request.user.tenant_id
:return: [(tenant_obj, tenant_display_string)]
eg.
[{'tenant_id':u'tenant_id', 'tenant_name': u'tenant_name'},
u'tenant_name (tenant_id)']
"""
# tenant_name (tenant_id)
display = '%s (%s)' % (request.user.tenant_name,
request.user.tenant_id)
value = {'tenant_name': request.user.tenant_name,
'tenant_id': request.user.tenant_id}
return [(value, display)]
def populate_segment_choices(request):
"""Returns a list of segment info tuples for creating select options
This creates the list based on current project
:param request: request info
- request.user.project_id
:return: [(segment_obj, segment_display_string)]
        eg1. Has a segment name
        [{'segment_id':u'segment_id', 'segment_name': u'segment_name'},
        u'segment_name (segment_id)']
        eg2. No segment name
        [{'segment_id':u'segment_id', 'segment_name': u''},
        u'(segment_id)']
"""
networks = osneutron.network_list(request,
tenant_id=request.user.project_id,
shared=False)
segment_list = []
for network in networks:
value = {'segment_id': network.id,
'segment_name': network.name}
if network.name:
# segment_name (segment_id)
display = '%s (%s)' % (network.name, network.id)
else:
# (segment_id)
display = '(%s)' % network.id
segment_list.append((value, display))
if segment_list:
segment_list.insert(0, ("", _("Select a Segment")))
else:
segment_list.insert(0, ("", _("No segments available")))
return segment_list
class CreateReachabilityTest(forms.SelfHandlingForm):
name = forms.CharField(max_length="64",
label=_("Name"),
required=True)
src_tenant = forms.ChoiceField(
label=_("Source Tenant"),
help_text=_("Test reachability for current tenant only."))
src_segment = forms.ChoiceField(
label=_("Source Segment"),
help_text=_("Select a source segment."))
def __init__(self, request, *args, **kwargs):
super(CreateReachabilityTest, self).__init__(request, *args, **kwargs)
self.fields['src_tenant'].choices = populate_tenant_choices(request)
self.fields['src_tenant'].widget.attrs['readonly'] = True
self.fields['src_segment'].choices = populate_segment_choices(request)
src_ip = fields.IPField(
label=_("Source IP Address"),
required=True,
initial="0.0.0.0")
dst_ip = fields.IPField(
label=_("Destination IP Address"),
required=True,
initial="0.0.0.0")
expected_result = forms.ChoiceField(
label=_('Expected Connection Results'),
required=True,
choices=EXPECTATION_CHOICES,
widget=forms.Select(
attrs={'class': 'switchable',
'data-slug': 'expected_result'}))
def clean(self):
cleaned_data = super(CreateReachabilityTest, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
expected_result = cleaned_data.get('expected_result')
if expected_result == 'default':
            msg = _('An expected connection result must be selected.')
raise ValidationError(msg)
return cleaned_data
def handle(self, request, data):
try:
extract_src_tenant_and_segment(data)
reachabilitytest = neutron.reachabilitytest_create(request, **data)
msg = _("Reachability Test %s was successfully created") \
% data['name']
LOG.debug(msg)
messages.success(request, msg)
return reachabilitytest
except Exception as e:
exceptions.handle(request,
_("Failed to create reachability test. Info: "
"%s") % e.message)
class UpdateForm(CreateReachabilityTest):
id = forms.CharField(max_length="36", widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
CreateReachabilityTest.__init__(self, request, *args, **kwargs)
# set src_segment initial
# if segment id/name is missing, this won't select the correct choice
# user needs to reselect the segment
if kwargs.get('initial') and kwargs.get('initial').get('src_segment'):
src_seg = kwargs.get('initial').get('src_segment')
for choice in self.fields['src_segment'].choices:
if src_seg in choice:
self.initial['src_segment'] = str(choice[0])
break
def clean(self):
cleaned_data = super(UpdateForm, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
expected_result = cleaned_data.get('expected_result')
if expected_result == 'default':
            msg = _('An expected connection result must be selected.')
raise ValidationError(msg)
return cleaned_data
def handle(self, request, data):
try:
extract_src_tenant_and_segment(data)
id = data['id']
reachabilitytest = neutron \
.reachabilitytest_update(request, id, **data)
msg = _("Reachability Test %s was successfully updated") \
% data['name']
LOG.debug(msg)
messages.success(request, msg)
return reachabilitytest
except Exception as e:
exceptions.handle(request,
_("Failed to update reachability test. Info: "
"%s") % e.message)
class RunQuickTestForm(forms.SelfHandlingForm):
src_tenant = forms.ChoiceField(
label=_("Source Tenant"),
help_text=_("Test reachability for current tenant only."))
src_segment = forms.ChoiceField(
label=_("Source Segment"),
help_text=_("Select a source segment."))
def __init__(self, request, *args, **kwargs):
super(RunQuickTestForm, self).__init__(request, *args, **kwargs)
self.fields['src_tenant'].choices = populate_tenant_choices(request)
self.fields['src_tenant'].widget.attrs['readonly'] = True
self.fields['src_segment'].choices = populate_segment_choices(request)
src_ip = fields.IPField(
label=_("Source IP Address"),
required=True,
initial="0.0.0.0")
dst_ip = fields.IPField(
label=_("Destination IP Address"),
required=True,
initial="0.0.0.0")
expected_result = forms.ChoiceField(
label=_('Expected Connection Results'),
required=True,
choices=EXPECTATION_CHOICES,
widget=forms.Select(
attrs={'class': 'switchable',
'data-slug': 'expected_connection'}))
def clean(self):
cleaned_data = super(RunQuickTestForm, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
expected_result = cleaned_data.get('expected_result')
if expected_result == 'default':
            msg = _('An expected connection result must be selected.')
raise ValidationError(msg)
return cleaned_data
def handle(self, request, data):
data['name'] = "quicktest_" + str(request.user.project_id)
try:
extract_src_tenant_and_segment(data)
reachabilityquicktest = neutron \
.reachabilityquicktest_get(request, request.user.project_id)
# update with new fields
neutron.reachabilityquicktest_update(
request, request.user.project_id, **data)
except Exception:
# test doesn't exist, create
reachabilityquicktest = neutron.reachabilityquicktest_create(
request, **data)
# clear dict
data = {}
# set run_test to true and update test to get results
data['run_test'] = True
reachabilityquicktest = neutron.reachabilityquicktest_update(
request, reachabilityquicktest.id, **data)
return reachabilityquicktest
class SaveQuickTestForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255",
label=_("Name"),
required=True)
def clean(self):
cleaned_data = super(SaveQuickTestForm, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
return cleaned_data
def handle(self, request, data):
try:
extract_src_tenant_and_segment(data)
data['save_test'] = True
reachabilityquicktest = neutron.reachabilityquicktest_update(
request, request.user.project_id, **data)
messages.success(
request, _('Successfully saved quicktest %s') % data['name'])
return reachabilityquicktest
except Exception as e:
messages.error(
request, _('Failed to save quicktest %(name)s. Info: %(msg)s')
% {'name': data['name'], 'msg': e.message})
|
## ========================================================================= ##
## Copyright (c) 2019 Agustin Durand Diaz. ##
## This code is licensed under the MIT license. ##
## utils.py ##
## ========================================================================= ##
import pygame
import pickle
from pathlib import Path
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
def existsFile(path):
my_file = Path(path)
if my_file.is_file():
# file exists
return True
return False
def existsDir(path):
my_file = Path(path)
if my_file.is_dir():
# dir exists
return True
return False
def getPathWithoutExtension(path):
my_file = Path(path)
res = path
if my_file.is_file():
suffix = my_file.suffix
res = str(path).replace(suffix,'')
return res
def getImageSize(path):
image = pygame.image.load(path)
rect = image.get_rect()
return (rect[2], rect[3])
def loadPickle(defaultPath):
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename(filetypes=[("Pkl files", "*.pkl")]) # show an "Open" dialog box and return the path to the selected file
if len(filename) == 0:
filename = defaultPath
if filename:
return filename, pickle.load(open(filename, 'rb'))
return filename, None
def savePickle(obj, defaultPath):
Tk().withdraw()
filename = asksaveasfilename(filetypes=[("Pkl files", "*.pkl")])
file_path = Path(filename)
file_path = file_path.with_suffix('.pkl')
filename = file_path.as_posix()
if len(filename) == 0:
filename = defaultPath
if filename:
pickle.dump(obj, open(filename, 'wb'))
return filename
import neat
import neat_utils.visualize
# Use this way:
# generatePickleGraph(defaultPath='../pkl_files/winner_neat_dip.pkl', configFile='../config_files/config_neat_dip')
def generatePickleGraph(defaultPath, configFile):
    pickleBundle = loadPickle(defaultPath=defaultPath)
config = neat.Config(neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
                         configFile)
path = getPathWithoutExtension(pickleBundle[0])
node_names = {-1:'a1', -2: 'a1\'',-3:'a2', -4: 'a2\'',-5:'a0', -6: 'a0\'', 0:'u'}
neat_utils.visualize.draw_net(config, pickleBundle[1], False, filename=path, fmt="png", node_names=node_names)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import copy
import warnings
from .utils import *
def abs(input, *, out=None):
return paddle.abs(input)
def add(input, other, *, out=None):
return paddle.add(input, other)
def arange(start,
end,
step=1,
*,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if requires_grad:
return paddle.arange(start, end, step, dtype).requires_grad_(True)
else:
return paddle.arange(start, end, step, dtype)
def clip(input, min, max, *, out=None):
return paddle.clip(input, min, max)
def concat(tensors, dim=0):
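    # Promote every input tensor to the widest dtype seen (per TYPE_ORDER from
    # .utils) before concatenating, so mixed-dtype inputs do not error out.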
x = tensors
last_index = -1
for ele in x:
t = str(ele.dtype).lower().strip().split(".")[-1]
if t in TYPE_MAPPER:
t = TYPE_MAPPER[t]
index = TYPE_ORDER.index(t)
if last_index < index:
last_index = index
real_type = TYPE_ORDER[last_index]
x = list(x)
for i in range(len(x)):
x[i] = x[i].cast(real_type)
return paddle.concat(x, dim)
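# Note (added clarification): concat() above promotes every input to the widest dtype
# found (per TYPE_ORDER from .utils) before calling paddle.concat, mirroring the
# implicit type promotion a torch-style caller would expect.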
def create_tensor(*size):
if len(size) > 1:
return paddle.zeros(size, dtype="float32")
else:
return paddle.to_tensor(size[0])
def create_float32_tensor(*size):
if len(size) > 1:
return paddle.zeros(size, dtype="float32")
else:
out = paddle.to_tensor(size[0])
out = paddle.cast(out, "float32")
return out
def create_uint8_tensor(*size):
if len(size) > 1:
return paddle.zeros(size, dtype="uint8")
else:
out = paddle.to_tensor(size[0])
out = paddle.cast(out, "uint8")
return out
def exp(input, *, out=None):
return paddle.exp(input)
def full(size,
fill_value,
*,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if requires_grad:
return paddle.full(size, fill_value, dtype).requires_grad_(True)
else:
return paddle.full(size, fill_value, dtype)
def full_like(input,
fill_value,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None):
if requires_grad:
return paddle.full_like(input, fill_value, dtype).requires_grad_(True)
else:
return paddle.full_like(input, fill_value, dtype)
def linspace(start,
end,
steps,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
    if requires_grad:
        return paddle.linspace(start, end, steps, dtype).requires_grad_(True)
    else:
        return paddle.linspace(start, end, steps, dtype)
def load(f, map_location=None, pickle_module=None, **pickle_load_args):
return paddle.load(f)
def load_state_dict_from_url(url,
model_dir=None,
map_location=None,
progress=True,
check_hash=False,
file_name=None):
return paddle.load(url)
def load_url(url,
model_dir=None,
map_location=None,
progress=True,
check_hash=False,
file_name=None):
return paddle.load(url)
def log(input, *, out=None):
return paddle.log(input)
def logical_and(input, other, *, out=None):
return paddle.logical_and(input, other, out)
def logical_not(input, *, out=None):
return paddle.logical_not(input, out)
def logical_or(input, other, *, out=None):
return paddle.logical_or(input, other, out)
def logical_xor(input, other, *, out=None):
return paddle.logical_xor(input, other, out)
def matmul(input, other, *, out=None):
return paddle.matmul(input, other)
def mul(input, other, *, out=None):
return paddle.multiply(input, other)
def max(input, dim_other=None, keepdim=False, *, out=None):
if dim_other is None:
return paddle.max(input)
elif isinstance(dim_other, paddle.Tensor):
return paddle.maximum(input, dim_other)
else:
return paddle.max(input, axis=dim_other, keepdim=keepdim)
def mean(input, dim=None, keepdim=False, *, out=None):
if dim is None:
warnings.warn('The output of paddle.mean is not scalar!')
return paddle.mean(input)
else:
return paddle.mean(input, axis=dim, keepdim=keepdim)
def min(input, dim_other=None, keepdim=False, *, out=None):
if dim_other is None:
return paddle.min(input)
elif isinstance(dim_other, paddle.Tensor):
return paddle.minimum(input, dim_other)
else:
return paddle.min(input, axis=dim_other, keepdim=keepdim)
def ones(*size,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if len(size) == 1 and isinstance(size[0], (tuple, list)):
shape = size[0]
else:
shape = size
if requires_grad:
return paddle.ones(shape, dtype).requires_grad_(True)
else:
return paddle.ones(shape, dtype)
def ones_like(input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None):
if requires_grad:
return paddle.ones_like(input, dtype).requires_grad_(True)
else:
return paddle.ones_like(input, dtype)
def set_cuda_device(device):
if isinstance(device, int):
return paddle.set_device("gpu:{}".format(device))
else:
return paddle.set_device("gpu")
def rand(*size,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if len(size) == 1 and isinstance(size[0], (tuple, list)):
shape = size[0]
else:
shape = size
if requires_grad:
return paddle.rand(shape, dtype).requires_grad_(True)
else:
return paddle.rand(shape, dtype)
def randn(*size,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if len(size) == 1 and isinstance(size[0], (tuple, list)):
shape = size[0]
else:
shape = size
if requires_grad:
return paddle.randn(shape, dtype).requires_grad_(True)
else:
return paddle.randn(shape, dtype)
def randn_like(input,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None):
shape = input.shape
if requires_grad:
return paddle.randn(shape, dtype).requires_grad_(True)
else:
return paddle.randn(shape, dtype)
def randperm(n,
*,
generator=None,
out=None,
dtype="int64",
layout=None,
device=None,
requires_grad=False,
pin_memory=False):
if requires_grad:
return paddle.randperm(n, dtype).requires_grad_(True)
else:
return paddle.randperm(n, dtype)
def save(obj, f, pickle_module=None, pickle_protocol=2):
return paddle.save(obj, f, pickle_protocol=pickle_protocol)
def split(tensor, split_size_or_sections, dim=0):
return paddle.split(tensor, split_size_or_sections, dim)
def sqrt(input, *, out=None):
return paddle.sqrt(input)
def stack(tensors, dim=0, *, out=None):
return paddle.stack(tensors, dim)
def sum(input, dim=None, keepdim=False, *, out=None):
if dim is None:
warnings.warn('The output of paddle.sum is not scalar!')
return paddle.sum(input)
else:
return paddle.sum(input, axis=dim, keepdim=keepdim)
def unsqueeze(input, dim):
    return paddle.unsqueeze(input, dim)
def zeros(*size,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False):
if len(size) == 1 and isinstance(size[0], (tuple, list)):
shape = size[0]
else:
shape = size
if requires_grad:
return paddle.zeros(shape, dtype).requires_grad_(True)
else:
return paddle.zeros(shape, dtype)
def zeros_like(input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None):
if requires_grad:
return paddle.zeros_like(input, dtype).requires_grad_(True)
else:
return paddle.zeros_like(input, dtype)
class DataParallel(paddle.DataParallel):
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super().__init__(module)
def invalid(*args, **kwargs):
return None
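def _example_usage():
    """A minimal sketch (not part of the original mapping) showing torch-style calls
    being routed to paddle; assumes paddle is installed and this module is imported
    as part of its package."""
    x = ones(2, 3, dtype="float32")        # -> paddle.ones((2, 3), "float32")
    y = full_like(x, 2.0)                  # -> paddle.full_like(x, 2.0, None)
    return mean(add(x, y), dim=0, keepdim=True)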
|
numbers = [int(x) for x in input().split()]
print(sorted(numbers))
|
import os, sys
import cv2
import numpy as np
import pandas as pd
import string
import mediapipe as mp
import pickle
from zipfile import ZipFile
from utils import StaticSignProcessor, mp_process_image, generate_dataframe, annotate_image, pred_class_to_letter
# load the model
with open('saved_model.pkl', 'rb') as f:
model = pickle.load(f)
# TODO change to generic video src handler
class VideoHandler():
def __init__(self, vid_src=0):
self.cap = cv2.VideoCapture(vid_src)
self.processor = StaticSignProcessor((126,))
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
self.prediction = 'Neutral'
self.score = ''
self.pred_thresh = 0.7
# self.colormap = {'No hands detected': (0,0,255), 'Neutral': (255,0,0)}
def load_source(self, vid_src):
self.cap.release()
self.cap = cv2.VideoCapture(vid_src)
def generate_buffer(self, frame, buffer_size=10, sliding_window=1, callback=None):
'''
Generates a buffer of fixed length from a live video stream
to be processed and passed into the recognition model.
Returns:
A dict containing timestamps, hand_results, and pose_results
if the buffer condition is met
'''
assert buffer_size > 0, 'Buffer size must be a positive number'
assert sliding_window > 0, 'Sliding window size must be a positive number'
assert buffer_size > sliding_window, 'Sliding window must be smaller than buffer'
hand_result, pose_result = mp_process_image(frame)
if not hand_result.multi_handedness:
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
return
# time is a construct
self.timestamps.append(0.0)
self.hand_results.append(hand_result)
self.pose_results.append(pose_result)
self.framecount += 1
if (self.framecount % buffer_size == 0) or \
(self.framecount % sliding_window == 0 and self.framecount > buffer_size):
buf = {'timestamps': self.timestamps,
'hand_results': self.hand_results,
'pose_results': self.pose_results}
self.timestamps = self.timestamps[sliding_window:]
self.hand_results = self.hand_results[sliding_window:]
self.pose_results = self.pose_results[sliding_window:]
if callback:
callback(buf)
return buf
def get_next_frame(self):
'''
Reads the next frame from the webcam and makes a prediction when applicable.
Returns:
- None if webcam feed is closed or can't read feed
- annotated image if feed is open
- annotated image, prediction, score if feed is open and buffer condition is met
'''
if not self.cap.isOpened():
return
success, image = self.cap.read()
if not success:
return
buf = self.generate_buffer(image, buffer_size=10, sliding_window=1, callback=self.predict)
# if blur:
# image = cv2.blur(image, (25,25))
if self.hand_results:
image = annotate_image(image, self.hand_results[-1], self.pose_results[-1])
else:
self.prediction = 'No hands detected'
self.score = ''
image = cv2.flip(image, 1)
if self.prediction:
if self.prediction == 'No hands detected':
color = (0,0,255)
elif self.prediction == 'Neutral':
color = (255,0,0)
else:
color = (0,150,0)
cv2.putText(image, self.prediction + ' ' + self.score, (50,80), cv2.FONT_HERSHEY_SIMPLEX, 2, color, 4)
return image, self.prediction, self.score
def predict(self, buf):
# Make a prediction on the generated buffer
df = generate_dataframe(buf)
data = self.processor.process(df)
pred_prob = model.predict_proba([data])[0]
pred_class = list(pred_prob).index(max(pred_prob))
if max(pred_prob) < self.pred_thresh:
self.prediction = 'Neutral'
self.score = ''
else:
self.prediction = pred_class_to_letter(pred_class)[0]
self.score = str(round(max(pred_prob),2))
def get_frame(self):
if self.cap.isOpened():
success, frame = self.cap.read()
return frame
def stream_webcam(self):
'''
A helper function to demonstrate the VideoHandler's functionality.
Note that this is a blocking function: it will keep running until the webcam feed is closed.
'''
        while self.cap.isOpened():
            out = self.get_next_frame()
            if out is None:
                # feed closed or frame could not be read
                break
            image, _, _ = out
            cv2.imshow('webcam', image)
            if cv2.waitKey(5) & 0xFF == 27:
                print('esc')
                break
# out = self.get_next_frame()
# while out:
# image,_,_ = out
# cv2.imshow('webcam', image)
# out = self.get_next_frame()
# if cv2.waitKey(5) & 0xFF == 27:
# print('esc')
# break
def evaluate_model(self, show=False):
'''
A helper function for evaluating the recognition model's performance.
It uses pre-recorded videos in test_webcam_data to test each letter.
The videos in the test data were not used to train the model.
'''
if not os.path.isdir('test_webcam_data'):
print('Unzipping test data...')
with ZipFile('test_webcam_data.zip','r') as zipobj:
zipobj.extractall()
accuracy = 0
for i in string.ascii_uppercase:
print('input:', i)
tmp = []
vid_src = f"test_webcam_data/{i}.mp4"
self.cap = cv2.VideoCapture(vid_src)
while self.cap.isOpened():
try:
image, pred, score = self.get_next_frame()
if pred not in ('Neutral','No hands detected'):
tmp.append(pred.replace('LETTER-',''))
if show:
cv2.imshow('webcam', image)
if cv2.waitKey(5) & 0xFF == 27:
print('esc')
break
                except Exception:
break
final_pred = max(set(tmp), key = tmp.count)
print('prediction:', final_pred)
if i == final_pred:
print('CORRECT')
accuracy += 1
else:
print('INCORRECT')
        print('\n\nFinal Accuracy: {}/26 ({}%)'.format(accuracy, round(accuracy / 26 * 100, 2)))
if __name__ == "__main__":
webcam = VideoHandler()
# webcam.stream_webcam()
webcam.evaluate_model(show=(len(sys.argv) > 1 and sys.argv[1] == '--show'))
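# A minimal interactive sketch (a usage note, not original code; assumes a webcam at
# index 0 and that stream_webcam blocks until the feed closes or Esc is pressed):
#   handler = VideoHandler(vid_src=0)
#   handler.stream_webcam()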
|
"""
Asyncio pydispatch (Signal Manager)
This is based on [pyDispatcher](http://pydispatcher.sourceforge.net/) reference
[Django Signals](https://docs.djangoproject.com/en/4.0/topics/signals/) and reference
[scrapy SignalManager](https://docs.scrapy.org/en/latest/topics/signals.html) implementation on
[Asyncio](https://docs.python.org/3/library/asyncio.html)
"""
import asyncio
import functools
import logging
import threading
import weakref
from collections.abc import Awaitable
from typing import Any, Callable, TypeVar, Union
from aio_pydispatch.utils import func_accepts_kwargs, id_maker, safe_ref
T = TypeVar('T') # pylint: disable=invalid-name
Receiver = Callable[..., Union[T, Awaitable]]
logger = logging.getLogger(__name__)
class _IgnoredException(Exception):
"""
Ignore exception.
"""
class Signal:
"""
Signal, or event.
example:
import asyncio
from aio_pydispatch import Signal
server_start = Signal()
server_stop = Signal()
def ppp(value: str, **kwargs) -> None:
print(value, kwargs)
async def main():
server_start.connect(ppp, sender='loading config')
server_stop.connect(ppp)
await server_start.send(sender='loading config', value='foo')
await asyncio.sleep(1)
await server_stop.send(value='foo')
if __name__ == '__main__':
asyncio.run(main())
"""
def __init__(self):
"""Signal, or event."""
self.__lock = threading.Lock()
self.__clean_receiver = False
self._all_receivers: dict[int, dict[int, Any]] = {}
@property
def receivers(self):
"""Receivers."""
return self._all_receivers
def connect(
self,
receiver: Receiver,
sender: Any = None,
weak=True,
):
"""
Connect a receiver on this signal.
:param receiver:
:param sender:
:param weak:
:return:
"""
assert callable(receiver), "Signal receivers must be callable."
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
sender_key = id_maker(sender)
receiver_key = id_maker(receiver)
if weak:
receiver = safe_ref(receiver, self._enable_clean_receiver)
with self.__lock:
self._clean_dead_receivers()
receivers = self._all_receivers.get(sender_key, {})
receivers.setdefault(receiver_key, receiver)
self._all_receivers.update({sender_key: receivers})
async def send(self, *, sender: Any = None, **kwargs) -> list[tuple[Any, Any]]:
"""Send signal, touch off all registered function."""
_dont_log = kwargs.pop('_ignored_exception', _IgnoredException)
responses = []
loop = asyncio.get_running_loop()
for receiver in self.live_receivers(sender):
func = functools.partial(
receiver,
**kwargs
)
try:
if asyncio.iscoroutinefunction(receiver):
response = await func()
else:
response = await loop.run_in_executor(None, func)
except _dont_log as ex:
response = ex
except Exception as ex: # pylint: disable=broad-except
response = ex
logger.error('Caught an error on %s', receiver, exc_info=True)
responses.append((receiver, response))
return responses
    def sync_send(self, *, sender: Any = None, **kwargs) -> list[tuple[Any, Any]]:
"""
Can only trigger sync func. If func is coroutine function,
it will return awaitable object
:param sender:
:param kwargs:
:return:
"""
_dont_log = kwargs.pop('_ignored_exception', _IgnoredException)
responses = []
for receiver in self.live_receivers(sender):
try:
if asyncio.iscoroutinefunction(receiver):
logger.warning('%s is coroutine, but it not awaited', receiver)
response = receiver(**kwargs)
except _dont_log as ex:
response = ex
except Exception as ex: # pylint: disable=broad-except
response = ex
logger.error('Caught an error on %s', receiver, exc_info=True)
responses.append((receiver, response))
return responses
    def live_receivers(self, sender: Any = None) -> list[Receiver]:
"""Get all live receiver."""
with self.__lock:
self._clean_dead_receivers()
receivers: dict[int, Any] = self._all_receivers.get(id_maker(sender), {})
real_receivers = []
has_dead = False
for receiver_key, receiver in receivers.copy().items():
if isinstance(receiver, weakref.ReferenceType):
real_receiver: Callable = receiver()
if real_receiver:
real_receivers.append(real_receiver)
else:
# receiver is dead
has_dead = True
receivers.pop(receiver_key)
else:
# not use weak for receiver
real_receivers.append(receiver)
# update cleaned sender of receiver to all receivers
if has_dead:
self._all_receivers.update({id_maker(sender): receivers})
return real_receivers
def _enable_clean_receiver(self) -> None:
"""Register to the receiver weakerf finalize callback."""
self.__clean_receiver = True
def _clean_dead_receivers(self) -> None:
if self.__clean_receiver:
self.__clean_receiver = False
for receivers in self._all_receivers.values():
for receiver_key, receiver in receivers.copy().items():
if isinstance(receiver, weakref.ReferenceType) and receiver() is None:
receivers.pop(receiver_key)
    def disconnect(self, receiver: Receiver, sender: Any = None) -> None:
        """Disconnect a receiver that was registered for the given sender."""
        lookup_key = id_maker(sender)
        receiver_key = id_maker(receiver)
        with self.__lock:
            receivers: dict[int, Any] = self._all_receivers.get(lookup_key, {})
            receiver_ref = receivers.pop(receiver_key, None)
            if receivers and receiver_ref:
                self._all_receivers.update({lookup_key: receivers})
def disconnect_all(self) -> None:
"""Clean all receiver."""
self._all_receivers.clear()
def connect(signal: Signal, sender: Any = None, weak=True):
"""
Connect decorator.
:param signal:
:param sender:
:param weak:
:return:
"""
def _decorator(func):
if isinstance(signal, Signal):
signal.connect(receiver=func, sender=sender, weak=weak)
return func
return _decorator
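# A minimal sketch of the decorator form, mirroring the Signal docstring example above:
#
#   server_start = Signal()
#
#   @connect(server_start, sender='loading config')
#   def on_start(value: str, **kwargs) -> None:
#       print(value, kwargs)
#
#   # later, inside a coroutine:
#   # await server_start.send(sender='loading config', value='foo')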
|
# By manish.17, contest: ITMO Academy. Двоичный поиск - 2, problem: (D) Children Holiday
# https://codeforces.com/profile/manish.17
m, n = map(int, input().split())
if m == 0:
print(0)
print(*[0]*n)
quit()
a = []
for i in range(n):
t, z, y = map(int, input().split())
a += [[t, z, y]]
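# Binary search on the total time `mid`: a worker described by (t, z, y) finishes full
# cycles of z toys in t*z + y minutes (z toys, then a rest of y), plus up to
# min(z, remainder // t) extra toys in the leftover time; the smallest mid whose total
# reaches m is the answer.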
alpha, omega = 1, 10**18
while alpha < omega:
mid = (alpha + omega) // 2
ans = 0
    for t, z, y in a:
        ans += z * (mid // (t * z + y)) + min(z, (mid % (t * z + y)) // t)
if ans >= m:
omega = mid
else:
alpha = mid + 1
print(omega)
b = [z*(omega//(t*z + y)) + min(z,(omega % (t*z + y))//t) for t, z, y in a]
total = 0
for i in range(n):
b[i] = min(b[i], m - total)
total += b[i]
print(*b)
|
# coding: utf-8
# # created on April Fool's Day 2018
# In[1]:
import os
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen import Structure
from Utilities import get_time_str, get_current_firework_from_cal_loc
# In[2]:
def Write_Vasp_POTCAR(cal_loc, structure_filename, workflow):
"""
Write POTCAR in folder cal_loc as follows:
If POTCAR is missing, write POTCAR using pymatgen.io.vasp.sets.MPRelaxSet
Input arguments:
cal_loc (str): the absolute path
structure_filename (str): the file from which the structure is read using pymatgen.Structure.from_file
workflow
"""
firework = get_current_firework_from_cal_loc(cal_loc, workflow)
log_txt_loc, firework_name = os.path.split(cal_loc)
log_txt = os.path.join(log_txt_loc, "log.txt")
if not os.path.isfile(os.path.join(cal_loc, "POTCAR")):
structure = Structure.from_file(os.path.join(cal_loc, structure_filename))
vis = MPRelaxSet(structure=structure)
vis.potcar.write_file(filename=os.path.join(cal_loc, "POTCAR"))
write_INCAR = True
with open(log_txt, "a") as f:
f.write("{} INFO: no POTCAR in {}\n".format(get_time_str(), firework_name))
f.write("\t\t\tuse pymatgen.io.vasp.sets.MPRelaxSet to write POTCAR\n")
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class Organization(models.Model):
name = models.CharField(max_length=255)
class CustomUserWithM2MManager(BaseUserManager):
def create_superuser(self, username, orgs, password):
user = self.model(username=username)
user.set_password(password)
user.save(using=self._db)
user.orgs.add(*orgs)
return user
class CustomUserWithM2M(AbstractBaseUser):
username = models.CharField(max_length=30, unique=True)
orgs = models.ManyToManyField(Organization)
custom_objects = CustomUserWithM2MManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["orgs"]
class CustomUserWithM2MThrough(AbstractBaseUser):
username = models.CharField(max_length=30, unique=True)
orgs = models.ManyToManyField(Organization, through="Membership")
custom_objects = CustomUserWithM2MManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["orgs"]
class Membership(models.Model):
user = models.ForeignKey(CustomUserWithM2MThrough, on_delete=models.CASCADE)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
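# A minimal sketch of creating a superuser through the custom manager (a usage note,
# not original code; assumes migrations are applied and the organization name is
# illustrative):
#   orgs = [Organization.objects.create(name="Org A")]
#   CustomUserWithM2M.custom_objects.create_superuser("admin", orgs, "password")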
|
import os
from setuptools import setup
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name="unitable",
version="0.0.2",
author="Mark Howison",
author_email="mark@howison.org",
url="https://github.com/mhowison/unitable",
keywords=["data", "data analysis", "data frame", "data science"],
description="A data analysis environment that unites the best features of pandas, R, Stata, and others.",
long_description=read("README.md"),
long_description_content_type="text/markdown",
license="BSD",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering"
],
provides=["unitable"],
packages=["unitable"],
install_requires=["pandas"]
)
|
from ferrisnose import AppEngineWebTest
from ferris.core.controller import Controller
from ferris.core.messages import Messaging
from ferris.core.ndb import Model, ndb
import json
class Person(Model):
title = ndb.StringProperty(required=True)
content = ndb.TextProperty()
class People(Controller):
class Meta:
Model = Person
components = (Messaging,)
prefixes = ('api',)
def api_list(self):
self.context['data'] = Person.query()
def api_view(self, key):
self.context['data'] = self.util.decode_key(key).get()
def api_add(self):
result = self.parse_request()
if result.validate():
item = result.update(Person)
item.put()
self.context['data'] = item
else:
self.context['errors'] = result.errors
return 400
def api_edit(self, key):
item = self.util.decode_key(key).get()
result = self.parse_request()
if item and result.validate():
item = result.update(item)
item.put()
self.context['data'] = item
else:
self.context['errors'] = result.errors
return 400
class MessagingTest(AppEngineWebTest):
def setUp(self):
super(MessagingTest, self).setUp()
People._build_routes(self.testapp.app.router)
self._createData()
def _createData(self):
Person(title='The Doctor', content='Time Lord').put()
Person(title='Rose Tyler', content='Companion').put()
Person(title='Clara Oswald', content='Mystery').put()
def testList(self):
response = self.testapp.get('/api/people')
assert response.content_type == 'application/json'
data = json.loads(response.body)
assert 'items' in data
assert len(data['items']) == Person.query().count()
assert 'title' in data['items'][0]
assert 'content' in data['items'][0]
assert 'key' in data['items'][0]
def testView(self):
item = Person.query().get()
response = self.testapp.get('/api/people/:%s' % item.key.urlsafe())
assert response.content_type == 'application/json'
data = json.loads(response.body)
assert data['title'] == item.title
assert data['content'] == item.content
assert data['key']['urlsafe'] == item.key.urlsafe()
def testAdd(self):
data = json.dumps({'title': 'Dalek', 'content': 'Exterminate!'})
response = self.testapp.post('/api/people', data, content_type='application/json')
assert response.content_type == 'application/json'
data = json.loads(response.body)
assert data['title'] == 'Dalek'
item = ndb.Key(urlsafe=data['key']['urlsafe']).get()
assert item
assert item.title == data['title']
assert item.content == data['content']
def testEdit(self):
item = Person.query().get()
data = json.dumps({'title': 'Captain Jack'})
response = self.testapp.put('/api/people/:%s' % item.key.urlsafe(), data, content_type='application/json')
assert response.content_type == 'application/json'
data = json.loads(response.body)
new_item = ndb.Key(urlsafe=data['key']['urlsafe']).get()
assert new_item
assert new_item.key == item.key
assert new_item.content == item.content
assert new_item.title == 'Captain Jack'
def testErrors(self):
data = json.dumps({'title': 'Dalek', 'content': 12346})
r = self.testapp.post('/api/people', data, status=400, content_type='application/json')
assert len(r.json['errors']) == 1
|
import soba.visualization.ramen.mapGenerator as ramen
import soba.run
from collections import OrderedDict
from model import SEBAModel
from time import time
import os
import signal
from unittest import TestCase
import json, requests
from jsonschema import validate
import socket
import unittest
import listener
import sys
from PyUnitReport import HTMLTestRunner
## Rest Service variables ##
ipServer = socket.gethostbyname(socket.gethostname())
port = "10000"
URLBASE = "http://127.0.0.1:" + port
URISOBA = "/api/v1/occupants"
URISEBA = "/api/v1/occupants"
URIFIRE = "/api/v1/fire"
stringTemplate = {"type": "string"}
numberTemplate = {"type": "number"}
global modelSto
modelSto = None
## Test Class ##
class test(TestCase):
## Test methods ##
model = False
occupantTest0 = True
occupantTest1 = True
occupantTest2 = True
def setUp(self):
global modelSto
self.model = modelSto
self.N = 1
self.model.updateOccupancyInfo()
self.occupantTest0 = self.model.getOccupantId(0)
self.occupantTest1 = self.model.getOccupantId(1)
self.occupantTest2 = self.model.getOccupantId(2)
def test01_ListOccupants(self):
print(str('Testing {}').format('GET /api/v1/occupants'))
occupantsId = [0, 1, 2]
occupantsIdSim = []
for o in self.model.occupants:
occupantsIdSim.append(o.unique_id)
self.model.updateOccupancyInfo()
url = URLBASE + URISOBA
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson['occupants']
self.assertCountEqual(occupantsIdSim, occupantsId)
self.assertCountEqual(APIResponse, occupantsIdSim)
def test02_PositionsOccupants(self):
print(str('Testing {}').format('GET /api/v1/occupants/positions'))
self.occupantTest0 = self.model.getOccupantId(0)
self.occupantTest1 = self.model.getOccupantId(1)
self.occupantTest2 = self.model.getOccupantId(2)
pos0 = (6, 7)
pos1 = (3, 8)
pos2 = (5, 4)
self.model.grid.move_agent(self.occupantTest0, pos0)
self.model.grid.move_agent(self.occupantTest1, pos1)
self.model.grid.move_agent(self.occupantTest2, pos2)
self.model.updateOccupancyInfo()
occupantsPos = {
'0': {'y': 7, 'x': 6},
'1': {'y': 8, 'x': 3},
'2': {'y': 4, 'x': 5},
}
pos0Sim = self.occupantTest0.pos
pos1Sim = self.occupantTest1.pos
pos2Sim = self.occupantTest2.pos
x0, y0 = pos0Sim
x1, y1 = pos1Sim
x2, y2 = pos2Sim
occupantsPosSim = {
'0': {'y': y0, 'x': x0},
'1': {'y': y1, 'x': x1},
'2': {'y': y2, 'x': x2},
}
url = URLBASE + URISOBA + "/positions"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantsPosSim, occupantsPos)
self.assertDictContainsSubset(APIResponse, occupantsPosSim)
def test03_StatesOccupants(self):
print(str('Testing {}').format('GET /api/v1/occupants/states'))
state1 = 'testState1'
state2 = 'testState2'
occupantsSta1 = {
'0': 'testState1',
'1': 'testState1',
'2': 'testState1',
}
occupantsStaSim1 = {
'0': self.occupantTest0.state,
'1': self.occupantTest1.state,
'2': self.occupantTest2.state,
}
url = URLBASE + URISOBA + "/states"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantsStaSim1, occupantsSta1)
self.assertDictContainsSubset(APIResponse, occupantsStaSim1)
self.occupantTest0.state = state2
self.occupantTest1.state = state2
self.occupantTest2.state = state2
self.model.updateOccupancyInfo()
occupantsSta2 = {
'0': 'testState2',
'1': 'testState2',
'2': 'testState2',
}
occupantsStaSim2 = {
'0': self.occupantTest0.state,
'1': self.occupantTest1.state,
'2': self.occupantTest2.state,
}
url = URLBASE + URISOBA + "/states"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantsStaSim2, occupantsSta2)
self.assertDictContainsSubset(APIResponse, occupantsStaSim2)
def test04_MovementsOccupants(self):
print(str('Testing {}').format('GET /api/v1/occupants/movements'))
occupantsMov = {
'0': {'orientation': 'out', 'speed': 0.71428},
'1': {'orientation': 'out', 'speed': 0.71428},
'2': {'orientation': 'out', 'speed': 0.71428},
}
speed0Sim = self.occupantTest0.movement['speed']
speed1Sim = self.occupantTest1.movement['speed']
speed2Sim = self.occupantTest2.movement['speed']
orientation0Sim = self.occupantTest0.movement['orientation']
orientation1Sim = self.occupantTest1.movement['orientation']
orientation2Sim = self.occupantTest2.movement['orientation']
occupantsMovSim = {
'0': {'orientation': orientation0Sim, 'speed': speed0Sim},
'1': {'orientation': orientation1Sim, 'speed': speed1Sim},
'2': {'orientation': orientation2Sim, 'speed': speed2Sim},
}
url = URLBASE + URISOBA + "/movements"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantsMovSim, occupantsMov)
self.assertDictContainsSubset(APIResponse, occupantsMovSim)
self.occupantTest0.movement = {'orientation': 'E', 'speed': 1}
self.occupantTest1.movement = {'orientation': 'S', 'speed': 1}
self.occupantTest2.movement = {'orientation': 'N', 'speed': 1}
self.model.updateOccupancyInfo()
occupantsMov = {
'0': {'orientation': 'E', 'speed': 1},
'1': {'orientation': 'S', 'speed': 1},
'2': {'orientation': 'N', 'speed': 1},
}
speed0Sim = self.occupantTest0.movement['speed']
speed1Sim = self.occupantTest1.movement['speed']
speed2Sim = self.occupantTest2.movement['speed']
orientation0Sim = self.occupantTest0.movement['orientation']
orientation1Sim = self.occupantTest1.movement['orientation']
orientation2Sim = self.occupantTest2.movement['orientation']
occupantsMovSim = {
'0': {'orientation': orientation0Sim, 'speed': speed0Sim},
'1': {'orientation': orientation1Sim, 'speed': speed1Sim},
'2': {'orientation': orientation2Sim, 'speed': speed2Sim},
}
url = URLBASE + URISOBA + "/movements"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantsMovSim, occupantsMov)
self.assertDictContainsSubset(APIResponse, occupantsMovSim)
def test05_InformationOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}'))
occupantInfo = {
"occupant": {
"movement": {"orientation": "E", "speed": 1},
"unique_id": "0",
"position":{"x": 6,"y": 7},
"fov":[],
"state": "testState2"
}
}
occupantInfoSim = {
"occupant": {
"movement": self.occupantTest0.movement,
"unique_id": str(self.occupantTest0.unique_id),
"position":{"x": self.occupantTest0.pos[0],"y": self.occupantTest0.pos[1]},
"fov": self.occupantTest0.fov,
"state": self.occupantTest0.state
}
}
self.occupantTest0.movement = {'orientation': 'E', 'speed': 1}
idOc0 = self.occupantTest0.unique_id
url = URLBASE + URISOBA + "/" + str(idOc0)
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantInfoSim, occupantInfo)
self.assertDictContainsSubset(APIResponse, occupantInfoSim)
def test06_MovementOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/movement'))
occupantMov = { "movement": {'orientation': "E", 'speed': 1} }
speed0Sim = self.occupantTest0.movement['speed']
orientation0Sim = self.occupantTest0.movement['orientation']
occupantMovSim = { "movement": {'orientation': orientation0Sim, 'speed': speed0Sim} }
idOc0 = self.occupantTest0.unique_id
url = URLBASE + URISOBA + "/" + str(idOc0) + "/movement"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantMovSim, occupantMov)
self.assertDictContainsSubset(APIResponse, occupantMovSim)
def test07_PositionOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/position'))
occupantPos = { 'position': {'y': 7, 'x': 6} }
occupantPosSim = { 'position': {'y': self.occupantTest0.pos[1], 'x': self.occupantTest0.pos[0]} }
idOc0 = self.occupantTest0.unique_id
url = URLBASE + URISOBA + "/" + str(idOc0) + "/position"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantPosSim, occupantPos)
self.assertDictContainsSubset(APIResponse, occupantPosSim)
def test08_StateOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/state'))
idOc0 = self.occupantTest0.unique_id
occupantState2 = { 'state': 'testState2' }
occupantStateSim2 = { 'state': self.occupantTest0.state }
url = URLBASE + URISOBA + "/" + str(idOc0) + "/state"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantStateSim2, occupantState2)
self.assertDictContainsSubset(APIResponse, occupantStateSim2)
def test09_FovOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/fov'))
idOc0 = self.occupantTest0.unique_id
occupantFov = {"fov": []}
occupantFovSim = {"fov": self.occupantTest0.fov}
url = URLBASE + URISOBA + "/" + str(idOc0) + "/fov"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantFovSim, occupantFov)
self.assertDictContainsSubset(APIResponse, occupantFovSim)
self.occupantTest0.getFOV()
self.model.updateOccupancyInfo()
fovKnonwn = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2), (8, 2), (9, 2), (0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3), (8, 3), (9, 3), (0, 4), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4), (7, 4), (8, 4), (9, 4), (0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5), (8, 5), (9, 5), (0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6), (8, 6), (9, 6), (0, 7), (1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (7, 7), (8, 7), (9, 7), (0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (6, 8), (7, 8), (8, 8), (9, 8), (0, 9), (1, 9), (2, 9), (3, 9), (4, 9), (5, 9), (6, 9), (7, 9), (8, 9), (9, 9), (0, 10), (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10), (10, 10), (8, 11), (9, 11), (10, 11), (11, 11), (9, 12), (10, 12), (11, 12), (12, 12), (9, 13), (10, 13), (11, 13), (12, 13), (13, 13), (10, 14), (11, 14), (12, 14), (13, 14), (14, 14), (10, 15), (11, 15), (12, 15), (13, 15), (14, 15), (15, 15), (11, 16), (12, 16), (13, 16), (14, 16), (15, 16), (16, 16), (12, 17), (13, 17), (14, 17), (15, 17), (16, 17), (17, 17), (12, 18), (13, 18), (14, 18), (15, 18), (16, 18), (17, 18), (18, 18)]
fovDicts = []
for pos in fovKnonwn:
fovDicts.append({"x": pos[0], "y": pos[1]})
fovDictsSim = []
for pos in self.occupantTest0.fov:
fovDictsSim.append({"x": pos[0], "y": pos[1]})
occupantFov = { "fov": fovDicts }
occupantFovSim = { "fov": fovDictsSim }
url = URLBASE + URISOBA + "/" + str(idOc0) + "/fov"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantFovSim, occupantFov)
self.assertDictContainsSubset(APIResponse, occupantFovSim)
def test10_CreateSobaAvatar(self):
print(str('Testing {}').format('PUT /api/v1/occupants/{id}'))
idAvCreation = 1
avatarXPos = 3
avatarYPos = 6
idAvCreationResponse = idAvCreation + 100000
avatarCreation = { 'avatar': { 'position': {'y': avatarYPos, 'x': avatarXPos}, 'id': idAvCreationResponse}}
dataBody = {"x": avatarXPos, "y": avatarYPos}
url = URLBASE + URISOBA + "/" + str(idAvCreation)
data = requests.put(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(APIResponse, avatarCreation)
self.model.updateOccupancyInfo()
avatarTestSOBA = self.model.getOccupantId(idAvCreationResponse)
avatarPos = {'position': {'y': 6, 'x': 3}}
avatarPosSim = {'position': {'y': avatarTestSOBA.pos[1], 'x': avatarTestSOBA.pos[0]}}
url = URLBASE + URISOBA + "/" + str(idAvCreationResponse) + "/position"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(avatarPosSim, avatarPos)
self.assertDictContainsSubset(APIResponse, avatarPosSim)
def test11_MoveAvatar(self):
print(str('Testing {}').format('POST /api/v1/occupants/{id}/position'))
idAvCreation = 1
idAvCreationResponse = idAvCreation + 100000
avatarXPos = 5
avatarYPos = 8
avatarTestSOBA = self.model.getOccupantId(idAvCreationResponse)
avatarMove = { 'avatar': { 'position': {'y': avatarYPos, 'x': avatarXPos}, 'id': idAvCreationResponse}}
dataBody = {"x": avatarXPos, "y": avatarYPos}
url = URLBASE + URISOBA + "/" + str(idAvCreationResponse) + "/position"
data = requests.post(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(APIResponse, avatarMove)
self.model.updateOccupancyInfo()
avatarTestSOBA = self.model.getOccupantId(idAvCreationResponse)
avatarPos = { 'position': {'y': avatarYPos, 'x': avatarXPos}}
avatarPosSim = {'position': {'y': avatarTestSOBA.pos[1], 'x': avatarTestSOBA.pos[0]}}
url = URLBASE + URISOBA + "/" + str(idAvCreationResponse) + "/position"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(avatarPosSim, avatarPos)
self.assertDictContainsSubset(APIResponse, avatarPosSim)
def test12_RouteOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/route/{route_id}'))
occupantRoute = { "positions": [{'x': 4, 'y': 8}, {'x': 3, 'y': 8}, {'x': 2, 'y': 7}, {'x': 1, 'y': 6}, {'x': 0, 'y': 6}]}
idAvCreation = 1
idAvCreationResponse = idAvCreation + 100000
avatarTestSOBA = self.model.getOccupantId(idAvCreationResponse)
url = URLBASE + URISEBA + "/" + str(idAvCreationResponse) + "/route/1"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(APIResponse, occupantRoute)
self.model.updateOccupancyInfo()
lastPosRouteDict = occupantRoute["positions"][-1]
lastPosRouteX = lastPosRouteDict["x"]
lastPosRouteY = lastPosRouteDict["y"]
lastPosRoute = (lastPosRouteX, lastPosRouteY)
lastPosRouteSim = avatarTestSOBA.pos_to_go
print(str('Testing {}').format('GET /api/v1/occupants/{id}/route/{route_id}'))
print("Pos_to_go", lastPosRoute)
self.assertCountEqual(lastPosRoute, lastPosRouteSim)
def test13_CreateSebaAvatar(self):
print(str('Testing {}').format('PUT /api/v1/occupants/{id}'))
idAvCreation = 2
avatarXPos = 3
avatarYPos = 6
idAvCreationResponse = idAvCreation + 100000
avatarCreation = { 'avatar': { 'position': {'y': avatarYPos, 'x': avatarXPos}, 'id': idAvCreationResponse}}
dataBody = {"x": avatarXPos, "y": avatarYPos}
url = URLBASE + URISEBA + "/" + str(idAvCreation)
data = requests.put(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(APIResponse, avatarCreation)
self.model.updateOccupancyInfo()
avatarTestSEBA = self.model.getOccupantId(idAvCreationResponse)
avatarPos = {'position': {'y': 6, 'x': 3}}
avatarPosSim = {'position': {'y': avatarTestSEBA.pos[1], 'x': avatarTestSEBA.pos[0]}}
url = URLBASE + URISOBA + "/" + str(idAvCreationResponse) + "/position"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(avatarPosSim, avatarPos)
self.assertDictContainsSubset(APIResponse, avatarPosSim)
def test14_FireInFovOccupant(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/fire'))
idAvCreation = 2
idAvCreationResponse = idAvCreation + 100000
posFire1 = (5, 2)
posFire2 = (7, 4)
self.model.createFire(posFire1)
self.model.FireControl.createFirePos(posFire2)
idAvSEBA = idAvCreationResponse
avatarTestSEBA = self.model.getOccupantId(idAvCreationResponse)
occupantFireSimAux = avatarTestSEBA.getPosFireFOV()
occupantFireSimPos = []
for pos in occupantFireSimAux:
occupantFireSimPos.append({'x': pos[0],'y': pos[1]})
occupantFire = {"positions": [{'y': posFire1[1], 'x': posFire1[0]}, {'y': posFire2[1], 'x': posFire2[0]}]}
occupantFireSim = {"positions": occupantFireSimPos}
url = URLBASE + URISEBA + "/" + str(idAvSEBA) + "/fire"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(occupantFireSim, occupantFire)
self.assertDictContainsSubset(APIResponse, occupantFireSim)
def test15_FirePositions(self):
print(str('Testing {}').format('GET /api/v1/fire'))
posFire1 = (5, 2)
posFire2 = (7, 4)
FirePosAux = []
for pos in self.model.FireControl.fireMovements:
FirePosAux.append({'x': pos[0],'y': pos[1]})
firePos = {"positions": [{'y': posFire1[1], 'x': posFire1[0]}, {'y': posFire2[1], 'x': posFire2[0]}]}
firePosSim = {"positions": FirePosAux}
url = URLBASE + URIFIRE
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
APIResponse = datajson
self.assertDictContainsSubset(firePosSim, firePos)
self.assertDictContainsSubset(APIResponse, firePosSim)
## Running the test to evaluate the values of the model##
def test16_ListOccupantsSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants'))
template = {
"type": "object",
"properties": {
"occupants": {
"type": "array"
}
},
"required": ["occupants"]
}
for i in range(self.N):
url = URLBASE + URISOBA
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
for o in datajson["occupants"]:
validate(o, numberTemplate)
def test17_PositionsOccupantsSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/movements'))
template = {
"type": "object",
"properties": {
"orientation": {
"type": "string"
},
"speed": {
"type": "number"
}
},
"required": ["orientation", "speed"]
}
template2 = {
"type": "object"
}
for i in range(self.N):
url = URLBASE + URISOBA + "/movements"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template2)
for k, v in datajson.items():
validate(k, stringTemplate)
validate(int(k), numberTemplate)
validate(v, template)
def test18_StatesOccupantsSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/positions'))
template = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/positions"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
for k, v in datajson.items():
validate(k, stringTemplate)
validate(int(k), numberTemplate)
validate(v, template)
def test19_MovementsOccupantsSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/states'))
for i in range(self.N):
url = URLBASE + URISOBA + "/states"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
for k,v in datajson.items():
validate(v, stringTemplate)
validate(k, stringTemplate)
validate(int(k), numberTemplate)
def test20_InformationOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}'))
template = {
"type": "object",
"properties": {
"occupant":{
"type": "object",
"properties": {
"state":{
"type": "string"
},
"fov": {
"type": "array"
},
"unique_id":{
"type": "string"
},
"movement": {
"type": "object",
"properties": {
"orientation":{
"type": "string"
},
"speed":{
"type": "number"
},
},
"required": ["orientation", "speed"]
},
"position": {
"type": "object",
"properties": {
"x":{
"type": "number"
},
"y":{
"type": "number"
}
},
"required": ["x", "y"]
}
},
"required": ["state", "fov", "unique_id", "movement", "position"]
}
},
"required": ["occupant"]
}
template2 = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0)
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
validate(int(datajson['occupant']['unique_id']), numberTemplate)
print(template)
for p in datajson['occupant']['fov']:
validate(p, template2)
def test21_MovementOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/movement'))
template = {
"type": "object",
"properties": {
"movement":{
"type": "object",
"properties": {
"orientation": {
"type": "string"
},
"speed": {
"type": "number"
}
},
"required": ["orientation", "speed"]
}
},
"required": ["movement"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0) + "/movement"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test22_PositionOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/position'))
template = {
"type": "object",
"properties": {
"position":{
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
},
"required": ["position"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0) + "/position"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test23_StateOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/state'))
template = {
"type": "object",
"properties":{
"state": {
"type": "string"
}
},
"required": ["state"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0) + "/state"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test24_FovOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/fov'))
template = {
"type": "object",
"properties": {
"fov": {
"type": "array"
}
},
"required": ["fov"]
}
template2 = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0) + "/fov"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
for p in datajson['fov']:
validate(p, template2)
def test25_CreateSobaAvatarSchema(self):
print(str('Testing {}').format('PUT /api/v1/occupants/{id}'))
template = {
"type": "object",
"properties": {
"avatar":{
"type": "object",
"properties": {
"position":{
"type": "object",
"properties": {
"x": {
"type": "number",
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
},
"id":{
"type": "number"
}
},
"required": ["position", "id"]
}
},
"required": ["avatar"]
}
dataBody = {"x": 10, "y": 10}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(0)
data = requests.put(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test26_MoveAvatarSchema(self):
print(str('Testing {}').format('POST /api/v1/occupants/{id}/position'))
template = {
"type": "object",
"properties": {
"avatar":{
"type": "object",
"properties": {
"position":{
"type": "object",
"properties": {
"x": {
"type": "number",
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
},
"id":{
"type": "number"
}
},
"required": ["position", "id"]
}
},
"required": ["avatar"]
}
dataBody = {"x": 11, "y": 11}
for i in range(self.N):
url = URLBASE + URISOBA + "/" + str(100000) + "/position"
data = requests.post(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test27_RouteOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/route/{route_id}'))
template = {
"type": "object",
"properties": {
"positions": {
"type": "array"
}
}
}
template2 = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URISEBA + "/" + str(100000) + "/route/1"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
for m in datajson["positions"]:
validate(m, template2)
def test28_CreateSebaAvatarSchema(self):
print(str('Testing {}').format('PUT /api/v1/occupants/{id}'))
template = {
"type": "object",
"properties": {
"avatar": {
"type": "object",
"properties":{
"position":{
"type": "object",
"properties":{
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
},
"id": {
"type": "number"
}
},
"required": ["position", "id"]
}
},
"required": ["avatar"]
}
dataBody = {"x": 13, "y": 13}
for i in range(self.N):
url = URLBASE + URISEBA + "/" + str(1)
data = requests.put(url, json=dataBody, headers={'Content-Type': "application/json", 'Accept': "application/json"})
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
def test29_FireInFovOccupantSchema(self):
print(str('Testing {}').format('GET /api/v1/occupants/{id}/fire'))
template = {
"type": "object",
"properties": {
"positions": {
"type": "array"
}
},
"required": ["positions"]
}
template2 = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URISEBA + "/" + str(100000) + "/fire"
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
for m in datajson["positions"]:
validate(m, template2)
def test30_FirePositionsSchema(self):
print(str('Testing {}').format('GET /api/v1/fire'))
template = {
"type": "object",
"properties": {
"positions": {
"type": "array"
}
},
"required": ["positions"]
}
template2 = {
"type": "object",
"properties": {
"x": {
"type": "number"
},
"y": {
"type": "number"
}
},
"required": ["x", "y"]
}
for i in range(self.N):
url = URLBASE + URIFIRE
data = requests.get(url)
datajson = data.json()
print("Response: ", datajson)
validate(datajson, template)
for m in datajson["positions"]:
validate(m, template2)
def setDown(self):
print("Testing finished.")
os.system("kill -9 %d"%(os.getpid()))
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
## Defining the Test Model and running the test ##
class SebaApiTest(SEBAModel):
def __init__(self, width, height, jsonMap, jsonsOccupants, sebaConfiguration, seed):
super().__init__(width, height, jsonMap, jsonsOccupants, sebaConfiguration, seed)
sys.argv = [sys.argv[0]]
global modelSto
modelSto = self
unittest.TestLoader.sortTestMethodsUsing = None
unittest.main(testRunner=HTMLTestRunner(output='APIRest_test'), failfast=True)
sys.argv.append('-b')
sys.argv.append('-s')
strategy = 'nearest'
N = 3
states = OrderedDict([('testState1','out')])
json = [{'type': 'regular' , 'N': N, 'states': states , 'schedule': {}, 'variation': {}, 'markovActivity': {}, 'timeActivity': {}, 'timeActivityVariation': {}, 'strategy': strategy, 'speedEmergency': 1}]
conf = {'families': [], 'hazard': "10:00:00"}
with open('auxiliarFiles/labgsi.blueprint3d') as data_file:
jsonMap = ramen.returnMap(data_file, offsety = 9, offsetx = 0)
fixed_params = {"width": 20, "height": 20, "jsonMap": jsonMap, "jsonsOccupants": json, 'sebaConfiguration': conf}
variable_params = {"seed": range(10, 500, 10)}
soba.run.run(SebaApiTest, fixed_params, variable_params)
|
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.response import Response
from newsfeed.models import Entry
from .renderers import MongoDBJSONRenderer
from .mixins import MongoDBPaginationMixin
class NewsfeedViewset(MongoDBPaginationMixin, viewsets.ViewSet):
renderer_classes = (MongoDBJSONRenderer,)
def public_newsfeed(self, request):
data = Entry.objects.get_public_newsfeed(
**self.get_pagination_context())
return Response(self.get_paginated_response(data))
def private_newsfeed(self, request):
data = Entry.objects.get_private_newsfeed(
user=self.request.user, **self.get_pagination_context())
return Response(self.get_paginated_response(data))
public_newsfeed = NewsfeedViewset.as_view(
{'get': 'public_newsfeed'}
)
private_newsfeed = NewsfeedViewset.as_view(
{'get': 'private_newsfeed'},
permission_classes=(permissions.IsAuthenticated,)
)
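# A minimal urls.py sketch (a usage note, not part of this module; paths are assumptions):
#   from django.urls import path
#   urlpatterns = [
#       path("newsfeed/public/", public_newsfeed),
#       path("newsfeed/private/", private_newsfeed),
#   ]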
|
"""Tic Tac Toe game using Min-Max algorithm"""
from copy import deepcopy
from tkinter import Button, Tk
from tkinter.font import Font
class Board:
"""Tic Tac Toe Game Board"""
def __init__(self, other=None):
self.player = "X"
self.opponent = "O"
self.empty = "-"
self.size = 3
self.fields = {}
for y in range(self.size):
for x in range(self.size):
self.fields[x, y] = self.empty
# copy constructor
if other:
self.__dict__ = deepcopy(other.__dict__)
def move(self, x, y):
"""Move one step"""
board = Board(self)
board.fields[x, y] = board.player
(board.player, board.opponent) = (board.opponent, board.player)
return board
def __minimax(self, player):
"""Min Max algorithm"""
if self.won():
if player:
return (-1, None)
else:
return (+1, None)
elif self.tied():
return (0, None)
elif player:
best = (-2, None)
for x, y in self.fields:
if self.fields[x, y] == self.empty:
value = self.move(x, y).__minimax(not player)[0]
if value > best[0]:
best = (value, (x, y))
return best
else:
best = (+2, None)
for x, y in self.fields:
if self.fields[x, y] == self.empty:
value = self.move(x, y).__minimax(not player)[0]
if value < best[0]:
best = (value, (x, y))
return best
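    # Scoring convention (a clarifying note, not original code): won() reports a line
    # for self.opponent, i.e. the side that made the previous move, so a finished board
    # scores -1 when it is the maximizing side's turn and +1 otherwise; a full board
    # scores 0. The maximizer keeps the highest value, the minimizer the lowest.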
def best(self):
"""Return best move available"""
return self.__minimax(True)[1]
def tied(self):
"""Game Tie State"""
for (x, y) in self.fields:
if self.fields[x, y] == self.empty:
return False
return True
def won(self):
"""Game Win State"""
# horizontal
for y in range(self.size):
winning = []
for x in range(self.size):
if self.fields[x, y] == self.opponent:
winning.append((x, y))
if len(winning) == self.size:
return winning
# vertical
for x in range(self.size):
winning = []
for y in range(self.size):
if self.fields[x, y] == self.opponent:
winning.append((x, y))
if len(winning) == self.size:
return winning
# diagonal
winning = []
for y in range(self.size):
x = y
if self.fields[x, y] == self.opponent:
winning.append((x, y))
if len(winning) == self.size:
return winning
# other diagonal
winning = []
for y in range(self.size):
x = self.size - 1 - y
if self.fields[x, y] == self.opponent:
winning.append((x, y))
if len(winning) == self.size:
return winning
# default
return None
def __str__(self):
string = ""
for y in range(self.size):
for x in range(self.size):
string += self.fields[x, y]
string += "\n"
return string
class GUI:
"""TkInter GUI App"""
def __init__(self):
self.app = Tk()
self.app.title("Tic-Tac-Toe")
self.app.resizable(width=True, height=True)
self.board = Board()
self.font = Font(family="Ubuntu Mono", size=32)
self.buttons = {}
self.reset_btn = None
for x, y in self.board.fields:
handler = lambda x=x, y=y: self.move(x, y) # noqa
button = Button(
self.app, command=handler, font=self.font, width=2, height=1
)
button.grid(row=y, column=x)
self.buttons[x, y] = button
handler = lambda: self.reset() # noqa
button = Button(self.app, text="Reset", command=handler)
button.grid(
row=self.board.size + 1,
column=0,
columnspan=self.board.size,
sticky="WE",
)
self.reset_btn = button
self.update()
def reset(self):
self.board = Board()
self.update()
def move(self, x, y):
self.app.config(cursor="watch")
self.app.update()
self.board = self.board.move(x, y)
self.update()
move = self.board.best()
if move:
self.board = self.board.move(*move)
self.update()
self.app.config(cursor="")
def update(self):
for (x, y) in self.board.fields:
text = self.board.fields[x, y]
self.buttons[x, y]["text"] = text
self.buttons[x, y]["disabledforeground"] = "black"
if text == self.board.empty:
self.buttons[x, y]["state"] = "normal"
else:
self.buttons[x, y]["state"] = "disabled"
self.reset_btn["text"] = "Reset"
winning = self.board.won()
if winning:
winner = ""
for x, y in winning:
self.buttons[x, y]["disabledforeground"] = "red"
winner = self.buttons[x, y]["text"]
for x, y in self.buttons:
self.buttons[x, y]["state"] = "disabled"
self.reset_btn["text"] = f"'{winner}' won! Click to reset."
for (x, y) in self.board.fields:
self.buttons[x, y].update()
def mainloop(self):
self.app.mainloop()
if __name__ == "__main__":
GUI().mainloop()
|
# coding: utf-8
from common.np import * # import numpy as np
from common.layers import Softmax
class WeightSum:
    '''
    Layer that takes a weighted sum of the encoder's hidden states for all time
    steps hs (N, T, H) with the per-step alignment weights a (N, T), and outputs
    the context vector c (N, H) containing the information needed to translate
    the current time step.
    '''
def __init__(self):
self.params, self.grads = [], []
self.cache = None
def forward(self, hs, a):
        '''
        Expand the weights a to (N, T, H) (conceptually np.repeat), take the
        elementwise product with hs, and sum over the time axis to obtain the
        context vector c (N, H).
        Parameters
        ----------
        hs : np.ndarray(N, T, H)
            Hidden states of the encoder for all time steps
        a : np.ndarray(N, T)
            Alignment giving the weight of each time step
        Returns
        -------
        np.ndarray(N, H)
            Context vector
        '''
N, T, H = hs.shape
ar = a.reshape(N, T, 1)#.repeat(H, axis=2)
t = hs * ar
c = t.sum(axis=1) # (N, H)
self.cache = (hs, ar)
return c
def backward(self, dc):
        '''
        The backward pass of sum is repeat, and the backward pass of repeat is sum.
        Parameters
        ----------
        dc : np.ndarray(N, H)
            Gradient of the context vector
        Returns
        -------
        dhs, da : np.ndarray(N, T, H), np.ndarray(N, T)
            Gradients of the encoder hidden states hs and of the weights a
        '''
hs, ar = self.cache
N, T, H = hs.shape
        # backward pass of sum
dt = dc.reshape(N, 1, H).repeat(T, axis=1) # (N, T, H)
dhs = dt * ar
dar = dt * hs
        # backward pass of repeat
da = dar.sum(axis=2) # (N, T)
return dhs, da
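# --- Hedged addition (not in the original) ---
# A quick shape check for WeightSum; it only assumes that `np` resolves to
# numpy (or cupy) via the `from common.np import *` at the top of this file.
def _check_weight_sum_shapes():
    N, T, H = 2, 5, 4
    hs = np.random.randn(N, T, H)
    a = np.random.randn(N, T)
    layer = WeightSum()
    c = layer.forward(hs, a)                   # (N, H)
    dhs, da = layer.backward(np.ones((N, H)))  # (N, T, H), (N, T)
    assert c.shape == (N, H)
    assert dhs.shape == (N, T, H) and da.shape == (N, T)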
class AttentionWeight:
    '''
    Layer that takes the dot product between the encoder hidden states for
    all time steps hs(N, T, H) and the decoder hidden state for the current
    time step h(N, H), and applies a softmax to output the per-step attention
    weights (alignment) a(N, T).
    '''
def __init__(self):
self.params, self.grads = [], []
self.softmax = Softmax()
self.cache = None
def forward(self, hs, h):
        '''
        Broadcast the decoder hidden state h(N, H) to shape (N, T, H)
        (conceptually np.repeat), take the element-wise product with hs,
        sum over H and normalize with softmax to obtain the alignment a(N, T).
        Parameters
        ----------
        hs : np.ndarray(N, T, H)
            Hidden states of the encoder for all time steps
        h : np.ndarray(N, H)
            Hidden state of the decoder for the current time step
        Returns
        -------
        np.ndarray(N, T)
            Attention weights over hs, one per time step
        '''
N, T, H = hs.shape
hr = h.reshape(N, 1, H)#.repeat(T, axis=1)
t = hs * hr # (N, T, H)
s = t.sum(axis=2)
a = self.softmax.forward(s) # (N, T)
self.cache = (hs, hr)
return a
def backward(self, da):
        '''
        The backward pass of sum is repeat, and the backward pass of repeat
        is sum.
        Parameters
        ----------
        da : np.ndarray(N, T)
            Gradient of the attention weights
        Returns
        -------
        dhs, dh : np.ndarray(N, T, H), np.ndarray(N, H)
            Gradients of the hidden states hs and of the decoder hidden state h
        '''
hs, hr = self.cache
N, T, H = hs.shape
ds = self.softmax.backward(da)
dt = ds.reshape(N, T, 1).repeat(H, axis=2)
dhs = dt * hr # (N, T, H)
dhr = dt * hs # (N, T, H)
dh = dhr.sum(axis=1) # (N, H)
return dhs, dh
class Attention:
    '''
    Attention layer
    in - AttentionWeight - WeightSum - out
    Input
    -------
    hs, h : np.ndarray(N, T, H), np.ndarray(N, H)
        Encoder hidden states for all time steps, decoder hidden state for
        the current time step
    Output
    -------
    c : np.ndarray(N, H)
        Context vector carrying the information needed to transform the
        current time step
    '''
def __init__(self):
self.params, self.grads = [], []
self.attention_weight_layer = AttentionWeight()
self.weight_sum_layer = WeightSum()
        # expose the per-step attention weights for inspection from outside
self.__attention_weight = None
@property
def attention_weight(self):
return self.__attention_weight
def forward(self, hs, h):
a = self.attention_weight_layer.forward(hs, h)
c = self.weight_sum_layer.forward(hs, a)
self.__attention_weight = a
return c
def backward(self, dc):
dhs0, da = self.weight_sum_layer.backward(dc)
dhs1, dh = self.attention_weight_layer.backward(da)
dhs = dhs0 + dhs1
return dhs, dh
class TimeAttention:
    '''
    Time-series version of the Attention layer (applied at every time step).
    Input
    -------
    hs_enc, hs_dec : np.ndarray(N, T, H), np.ndarray(N, T, H)
        Encoder hidden states for all time steps, decoder hidden states for
        all time steps
    Output
    -------
    cs : np.ndarray(N, T, H)
        Context vectors for all time steps
    '''
def __init__(self):
self.params, self.grads = [], []
self.layers = None
self.attention_weights = None
def forward(self, hs_enc, hs_dec):
N, T, H = hs_dec.shape
out = np.empty_like(hs_dec)
self.layers = []
self.attention_weights = []
for t in range(T):
layer = Attention()
out[:, t, :] = layer.forward(hs_enc, hs_dec[:, t, :])
self.layers.append(layer)
self.attention_weights.append(layer.attention_weight)
return out
def backward(self, dout):
dhs_enc = 0
dhs_dec = np.empty_like(dout)
for t, layer in enumerate(self.layers):
dhs, dh = layer.backward(dout[:, t, :])
dhs_enc += dhs
dhs_dec[:, t, :] = dh
return dhs_enc, dhs_dec
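# --- Hedged addition (not in the original) ---
# End-to-end shape check of the full TimeAttention layer, again assuming `np`
# is numpy/cupy from `from common.np import *`. Encoder and decoder sequences
# may have different lengths; only the batch and hidden sizes must match.
def _check_time_attention_shapes():
    N, T_enc, T_dec, H = 2, 7, 5, 8
    hs_enc = np.random.randn(N, T_enc, H)
    hs_dec = np.random.randn(N, T_dec, H)
    layer = TimeAttention()
    cs = layer.forward(hs_enc, hs_dec)  # (N, T_dec, H) context vectors
    dhs_enc, dhs_dec = layer.backward(np.ones_like(cs))
    assert cs.shape == (N, T_dec, H)
    assert dhs_enc.shape == hs_enc.shape and dhs_dec.shape == hs_dec.shape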
|
import argparse
from itertools import chain
import logging
import pickle
from multiprocessing import Pool
from collections import defaultdict
from gilda.grounder import load_gilda_models
from indra_db_lite import get_plaintexts_for_text_ref_ids
from indra_db_lite import get_text_ref_ids_for_agent_text
from .cases import get_training_cases_for_grounding
logger = logging.getLogger(__file__)
def get_test_cases_for_model(model):
assert len(model.shortforms) == 1
agent_text = model.shortforms[0]
test_trids = get_text_ref_ids_for_agent_text(agent_text)
test_texts = get_plaintexts_for_text_ref_ids(test_trids)
if not test_texts:
return []
preds = model.predict(test_texts)
test_data_dict = defaultdict(dict)
for trid, pred in zip(test_trids, preds):
test_data_dict[pred].update({trid: None})
result = []
for curie in model.estimator.classes_:
namespace, identifier = curie.split(":", maxsplit=1)
train_info = get_training_cases_for_grounding(namespace, identifier)
if train_info is None:
continue
result.append(
(
agent_text,
[agent_text],
curie,
train_info["mesh_terms"],
train_info["num_entrez"],
train_info["num_mesh"],
train_info["train_trids"],
test_data_dict[curie],
)
)
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('outpath')
parser.add_argument('--n_jobs', type=int, default=1)
args = parser.parse_args()
outpath = args.outpath
models = load_gilda_models()
n_jobs = args.n_jobs
with Pool(n_jobs) as pool:
result = pool.map(get_test_cases_for_model, models.values())
result = list(chain(*result))
with open(outpath, 'wb') as f:
pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)
|
import cv2 as cv
import numpy as np
from time import gmtime, strftime, sleep, time
import logging
import math
# The NVIDIA Jetson uses a GStreamer pipeline to get its camera data
def gstreamer_pipeline (capture_width=800, capture_height=640, display_width=800, display_height=640, framerate=60, flip_method=0) :
return ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)%d, height=(int)%d, '
'format=(string)NV12, framerate=(fraction)%d/1 ! '
'nvvidconv flip-method=%d ! '
'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
'videoconvert ! '
'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))
def draw_lines(img, lines, color=[255, 0, 0], thickness=3):
# If there are no lines to draw, exit.
if lines is None:
return
# Make a copy of the original image.
img = np.copy(img)
# Create a blank image that matches the original in size.
line_img = np.zeros(
(
img.shape[0],
img.shape[1],
3
),
dtype=np.uint8,
)
# Loop over all lines and draw them on the blank image.
for line in lines:
for x1, y1, x2, y2 in line:
cv.line(line_img, (x1, y1), (x2, y2), color, thickness)
# Merge the image with the lines onto the original.
img = cv.addWeighted(img, 0.8, line_img, 1.0, 0.0)
# Return the modified image.
return img
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
    `img` is the output of hough_lines(): a blank (all-black) image with the
    detected lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv.addWeighted(initial_img, α, img, β, λ)
def detect_line_segments(cropped_edges):
    # tuning min_threshold, minLineLength and maxLineGap is a manual trial-and-error process
rho = 1 # distance precision in pixel, i.e. 1 pixel
angle = np.pi / 180 # angular precision in radian, i.e. 1 degree
min_threshold = 10 # minimal of votes
line_segments = cv.HoughLinesP(cropped_edges, rho, angle, min_threshold,
np.array([]), minLineLength=10, maxLineGap=15)
return line_segments
def make_points(frame, line):
height, width, _ = frame.shape
slope, intercept = line
y1 = height # bottom of the frame
y2 = int(y1 * 1 / 2) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))
x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))
return [[x1, y1, x2, y2]]
def average_slope_intercept(frame, line_segments):
"""
This function combines line segments into one or two lane lines
If all line slopes are < 0: then we only have detected left lane
If all line slopes are > 0: then we only have detected right lane
"""
lane_lines = []
if line_segments is None:
logging.info('No line_segment segments detected')
return lane_lines
height, width, _ = frame.shape
left_fit = []
right_fit = []
boundary = 1/3
    left_region_boundary = width * (1 - boundary)  # left lane line segments should be on the left 2/3 of the screen
    right_region_boundary = width * boundary  # right lane line segments should be on the right 2/3 of the screen
for line_segment in line_segments:
for x1, y1, x2, y2 in line_segment:
if x1 == x2:
logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)
continue
fit = np.polyfit((x1, x2), (y1, y2), 1)
slope = fit[0]
intercept = fit[1]
if slope < 0:
if x1 < left_region_boundary and x2 < left_region_boundary:
left_fit.append((slope, intercept))
else:
if x1 > right_region_boundary and x2 > right_region_boundary:
right_fit.append((slope, intercept))
left_fit_average = np.average(left_fit, axis=0)
if len(left_fit) > 0:
lane_lines.append(make_points(frame, left_fit_average))
right_fit_average = np.average(right_fit, axis=0)
if len(right_fit) > 0:
lane_lines.append(make_points(frame, right_fit_average))
logging.debug('lane lines: %s' % lane_lines) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]
return lane_lines
def display_lines(frame, lines, line_color=(0, 255, 255), line_width=2):
line_image = np.zeros_like(frame)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
cv.line(line_image, (x1, y1), (x2, y2), line_color, line_width)
line_image = cv.addWeighted(frame, 0.8, line_image, 1, 1)
return line_image
def region_of_interest(edges):
height, width = edges.shape
mask = np.zeros_like(edges)
    # only keep the lower portion of the frame (below height / 5)
polygon = np.array([[
(0, height * 1 / 5),
(width, height * 1 / 5),
(width, height),
(0, height),
]], np.int32)
cv.fillPoly(mask, polygon, 255)
cropped_edges = cv.bitwise_and(edges, mask)
return cropped_edges
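# --- Hedged addition (not in the original) ---
# A single-frame pipeline built only from the helpers above, so the lane
# detection can be tried on a still image or an ordinary webcam without the
# Jetson/GStreamer capture used in __main__ below. The Canny thresholds mirror
# the values used in the main loop.
def process_single_frame(frame):
    gauss = cv.GaussianBlur(frame, (3, 3), 0)
    edges = cv.Canny(gauss, 200, 400)
    roi = region_of_interest(edges)
    segments = detect_line_segments(roi)
    lanes = average_slope_intercept(frame, segments)
    return display_lines(frame, lanes)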
# define a range of black color in HSV
lower_black = np.array([0, 0, 0])
upper_black = np.array([227, 100, 70])
# Rectangular Kernel
rectKernel = cv.getStructuringElement(cv.MORPH_RECT,(7,7))
if __name__=='__main__':
# capture the video stream data
capture = cv.VideoCapture(gstreamer_pipeline(flip_method=0, framerate=60), cv.CAP_GSTREAMER)
    if not capture.isOpened():
print('Unable to open: Camera interface')
exit(0)
print('Started')
print ("Beginning Transmitting to channel: Happy_Robots")
now = time()
    # main processing loop
while True:
try:
# fetching each frame
ret, frame = capture.read()
if frame is None:
break
# apply some gaussian blur to the image
                kernel_size = (3, 3)
                gauss_image = cv.GaussianBlur(frame, kernel_size, 0)
#gauss_image = cv.bilateralFilter(frame,9,75,75)
# here we convert to the HSV colorspace
hsv_image = cv.cvtColor(gauss_image, cv.COLOR_BGR2HSV)
# apply color threshold to the HSV image to get only black colors
thres_1 = cv.inRange(hsv_image, lower_black, upper_black)
                # dilate the threshold image
thresh = cv.dilate(thres_1, rectKernel, iterations=1)
# apply canny edge detection
low_threshold = 200
high_threshold = 400
canny_edges = cv.Canny(gauss_image, low_threshold, high_threshold)
roi_image = region_of_interest(canny_edges)
# here we use hough transform to detect line segments
# works but not averaging the lines
lines = detect_line_segments(roi_image)
#line_img = np.zeros((roi_image.shape[0], roi_image.shape[1], 3), dtype=np.uint8)
#draw_lines(line_img, lines)
#result = weighted_img(line_img, frame)
#line_segments = detect_line_segments(roi_image)
#lane_lines = average_slope_intercept(frame, line_segments)
#line_image = display_lines(frame, lane_lines)
                left_line_x = []
                left_line_y = []
                right_line_x = []
                right_line_y = []
                if lines is None:
                    # no line segments found in this frame, skip it
                    continue
                for line in lines:
for x1, y1, x2, y2 in line:
slope = (y2 - y1) / (x2 - x1) # <-- Calculating the slope.
#if math.fabs(slope) < 0.5: # <-- Only consider extreme slope
# continue
if slope <= 0: # <-- If the slope is negative, left group.
left_line_x.extend([x1, x2])
left_line_y.extend([y1, y2])
else: # <-- Otherwise, right group.
right_line_x.extend([x1, x2])
right_line_y.extend([y1, y2])
min_y = int(frame.shape[0] * (3 / 5)) # <-- Just below the horizon
max_y = frame.shape[0] # <-- The bottom of the image
poly_left = np.poly1d(np.polyfit(
left_line_y,
left_line_x,
deg=1
))
left_x_start = int(poly_left(max_y))
left_x_end = int(poly_left(min_y))
poly_right = np.poly1d(np.polyfit(
right_line_y,
right_line_x,
deg=1
))
right_x_start = int(poly_right(max_y))
right_x_end = int(poly_right(min_y))
line_image = draw_lines(
frame,
[[
[left_x_start, max_y, left_x_end, min_y],
[right_x_start, max_y, right_x_end, min_y],
]],
thickness=5,
)
                # display the raw frame, the ROI edges and the detected lane lines
cv.imshow('Frame', frame)
cv.imshow('New Image', roi_image)
cv.imshow('Line Image', line_image)
keyboard = cv.waitKey(30)
                if keyboard == ord('q') or keyboard == 27:
break
except KeyboardInterrupt:
break
# cleanup
capture.release()
cv.destroyAllWindows()
del capture
print('Stopped')
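# --- Hedged note (not in the original) ---
# On a machine without the Jetson CSI camera or a GStreamer-enabled OpenCV
# build, the capture line above can be swapped for a plain webcam, e.g.
#     capture = cv.VideoCapture(0)
# and the rest of the loop stays unchanged. process_single_frame() above can
# also be applied to a single image read with cv.imread().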
|
#!/usr/bin/python3
import pytest
from brownie.convert import to_bytes
def test_type_bounds():
with pytest.raises(ValueError):
to_bytes("0x00", "bytes0")
with pytest.raises(ValueError):
to_bytes("0x00", "bytes33")
def test_length_bounds():
for i in range(1, 33):
type_ = "bytes" + str(i)
to_bytes("0x" + "ff" * i, type_)
with pytest.raises(OverflowError):
to_bytes("0x" + "ff" * (i + 1), type_)
def test_string_raises():
with pytest.raises(ValueError):
to_bytes("abcdefg")
def test_hexstring():
assert to_bytes("0xffff", "bytes") == b"\xff\xff"
assert to_bytes("0xffff", "bytes2") == b"\xff\xff"
assert to_bytes("0xffff", "bytes4") == b"\x00\x00\xff\xff"
assert to_bytes("abcdef")
def test_left_pad():
for i in range(1, 33):
type_ = "bytes" + str(i)
assert to_bytes("0xff", type_).hex() == (i - 1) * "00" + "ff"
def test_int_bounds():
for i in range(1, 33):
type_ = "bytes" + str(i)
assert to_bytes(2 ** (i * 8) - 1, type_).hex() == "ff" * i
with pytest.raises(OverflowError):
to_bytes(2 ** (i * 8), type_)
def test_byte_is_bytes1():
assert to_bytes(42, "byte") == to_bytes(42, "bytes1")
def test_zero_value():
assert to_bytes("", "bytes1").hex() == "00"
assert to_bytes("0x", "bytes1").hex() == "00"
assert to_bytes(0, "bytes1").hex() == "00"
def test_invalid_type():
with pytest.raises(TypeError):
to_bytes(None)
with pytest.raises(TypeError):
to_bytes(3.1337)
with pytest.raises(TypeError):
to_bytes(True)
|
# -*- coding: utf-8 -*-
import os
import sys
import threading
import time
sys.path.append(os.path.join(os.getcwd(), "Lib"))
from Lib.OCC import BRepPrimAPI
from Lib.aocxchange import step_ocaf
from Lib.OCC.gp import gp_Pnt
from agent import Agent
def __createBlock__(color, step_exporter, position):
agent_box_shape = BRepPrimAPI.BRepPrimAPI_MakeBox(
gp_Pnt(position["x"], position["y"], position["z"]), 1, 1, 1).Shape()
if color == "red":
step_exporter.set_color(r=1, g=0, b=0) # red
step_exporter.set_layer('red')
else:
step_exporter.set_color(r=0, g=0, b=0) # black
step_exporter.set_layer('black')
step_exporter.add_shape(agent_box_shape)
class multiAgent(object):
def __init__(self):
self.agents = []
for i in range(50):
self.agents.append(Agent())
exportThreads = []
for j in range(5):
for x in range(50):
threads = []
for agent in self.agents:
t = threading.Thread(target=agent.doStep)
threads += [t]
t.start()
for t in threads:
t.join()
                print("loop: " + str(x))
self.agents[0].removeVaporatedPheromones()
                exportThread = threading.Thread(target=self.__exportToStepFile__)
                exportThread.start()
                exportThreads += [exportThread]
for thread in exportThreads:
thread.join()
def __exportToStepFile__(self):
# Create empty export file
destinationFolder = os.path.join(os.curdir, "STEPExport")
destinationFileName = os.path.join(destinationFolder, time.strftime("%Y.%m.%d %H-%M-%S.stp", time.localtime()))
if not os.path.exists(destinationFolder):
os.mkdir(destinationFolder)
        if os.path.exists(destinationFileName):
            return
step_exporter = step_ocaf.StepOcafExporter(destinationFileName)
for agent in self.agents:
__createBlock__("red", step_exporter, agent.position)
for block in self.agents[0].blocks:
__createBlock__("black", step_exporter, block.position)
# Draw green initial pheromone
sphere_shape = BRepPrimAPI.BRepPrimAPI_MakeSphere(10).Shape()
step_exporter.set_color(r=0, g=1, b=0) # green
step_exporter.set_layer('green')
step_exporter.add_shape(sphere_shape)
step_exporter.write_file()
def main():
multiAgent()
if __name__ == "__main__":
main()
|
# Copyright (2017-2021)
# The Wormnet project
# Mathias Lechner (mlechner@ist.ac.at)
import numpy as np
import torch.nn as nn
import kerasncp as kncp
from kerasncp.torch import LTCCell
import pytorch_lightning as pl
import torch
import torch.utils.data as data
# nn.Module that unfolds an RNN cell over a sequence
class RNNSequence(nn.Module):
def __init__(
self,
rnn_cell,
):
super(RNNSequence, self).__init__()
self.rnn_cell = rnn_cell
def forward(self, x):
device = x.device
batch_size = x.size(0)
seq_len = x.size(1)
hidden_state = torch.zeros(
(batch_size, self.rnn_cell.state_size), device=device
)
outputs = []
for t in range(seq_len):
inputs = x[:, t]
new_output, hidden_state = self.rnn_cell.forward(inputs, hidden_state)
outputs.append(new_output)
outputs = torch.stack(outputs, dim=1) # return entire sequence
return outputs
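# --- Hedged addition (not in the original) ---
# RNNSequence only assumes the wrapped cell exposes `state_size` and that its
# forward(inputs, hidden) returns (output, new_hidden); this dummy cell makes
# that contract explicit and checks the unfolded output shape.
class _EchoCell(nn.Module):
    state_size = 4
    def forward(self, inputs, hidden):
        return inputs, hidden  # output = input, hidden state left unchanged
def _check_rnn_sequence_shapes():
    seq = RNNSequence(_EchoCell())
    x = torch.randn(3, 7, 4)  # (batch, time, features)
    assert seq(x).shape == (3, 7, 4)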
# LightningModule for training an RNNSequence module
class SequenceLearner(pl.LightningModule):
def __init__(self, model, lr=0.005):
super().__init__()
self.model = model
self.lr = lr
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model.forward(x)
y_hat = y_hat.view_as(y)
loss = nn.MSELoss()(y_hat, y)
self.log("train_loss", loss, prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model.forward(x)
y_hat = y_hat.view_as(y)
loss = nn.MSELoss()(y_hat, y)
self.log("val_loss", loss, prog_bar=True)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
return torch.optim.Adam(self.model.parameters(), lr=self.lr)
def optimizer_step(
self,
current_epoch,
batch_nb,
optimizer,
optimizer_idx,
closure,
on_tpu=False,
using_native_amp=False,
using_lbfgs=False,
):
optimizer.optimizer.step(closure=closure)
# Apply weight constraints
self.model.rnn_cell.apply_weight_constraints()
in_features = 2
out_features = 1
N = 48 # Length of the time-series
# Input feature is a sine and a cosine wave
data_x = np.stack(
[np.sin(np.linspace(0, 3 * np.pi, N)), np.cos(np.linspace(0, 3 * np.pi, N))], axis=1
)
data_x = np.expand_dims(data_x, axis=0).astype(np.float32) # Add batch dimension
# Target output is a sine with double the frequency of the input signal
data_y = np.sin(np.linspace(0, 6 * np.pi, N)).reshape([1, N, 1]).astype(np.float32)
data_x = torch.Tensor(data_x)
data_y = torch.Tensor(data_y)
print("data_y.shape: ", str(data_y.shape))
wiring = kncp.wirings.FullyConnected(8, out_features)  # 8 units, out_features=1 motor neuron
ltc_cell = LTCCell(wiring, in_features)
dataloader = data.DataLoader(
data.TensorDataset(data_x, data_y), batch_size=1, shuffle=True, num_workers=4
)
ltc_sequence = RNNSequence(
ltc_cell,
)
learn = SequenceLearner(ltc_sequence, lr=0.01)
trainer = pl.Trainer(
logger=pl.loggers.CSVLogger("log"),
max_epochs=400,
progress_bar_refresh_rate=1,
gradient_clip_val=1, # Clip gradient to stabilize training
gpus=0,
)
trainer.fit(learn, dataloader)
results = trainer.test(learn, dataloader)
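# --- Hedged addition (not in the original) ---
# Optional follow-up: run the trained sequence model once more and print a few
# predictions next to the targets. Purely illustrative; everything it uses is
# defined above and runs on CPU, matching gpus=0.
with torch.no_grad():
    prediction = ltc_sequence(data_x).view(-1)
print("first predictions vs targets:",
      list(zip(prediction[:3].tolist(), data_y.view(-1)[:3].tolist())))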
|
from __future__ import absolute_import
from .DatabaseConnection import DatabaseConnection
|
import numpy
from matplotlib import pyplot
from matplotlib import colors
from analytic_covariance import calculate_sky_power_spectrum
from analytic_covariance import calculate_beam_power_spectrum_averaged
from analytic_covariance import calculate_beam_power_spectrum
from analytic_covariance import calculate_total_power_spectrum
from Plot_Calibrated_Error_Comparison import plot_power_spectrum
from analytic_covariance import plot_PS
def main(labelfontsize = 12, ticksize= 11):
k_perp_range = numpy.array([1e-4, 1.1e-1])
u_range = numpy.logspace(-1, numpy.log10(200), 100)
frequency_range = numpy.linspace(135, 165, 251) * 1e6
eta, sky_power_spectrum = calculate_sky_power_spectrum(u_range, frequency_range)
#eta, beam_power_spectrum_averaged = calculate_beam_power_spectrum_averaged(u_range, frequency_range)
eta, beam_power_spectrum_1direction = calculate_beam_power_spectrum(u_range, frequency_range)
eta, total_power_spectrum_1 = calculate_total_power_spectrum(u_range, frequency_range)
#eta, total_power_spectrum_total_2 = calculate_total_power_spectrum(u_range, frequency_range)
figure, axes = pyplot.subplots(1, 3, figsize = (15, 5))
ps_norm = colors.LogNorm(vmin=1e3, vmax = 1e15)
plot_power_spectrum(u_range, eta, frequency_range, sky_power_spectrum, title="Sky Noise", axes=axes[0],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=True,
xlabel_show=True, norm = ps_norm, x_range=k_perp_range)
beamnorm = colors.SymLogNorm(linthresh=1e7, linscale = 1, vmin = -1e14, vmax = 1e14)
plot_power_spectrum(u_range, eta, frequency_range, total_power_spectrum_1 - sky_power_spectrum, title="Beam Noise", axes=axes[1],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=True,
xlabel_show=True, norm=beamnorm, diff=True, colormap='coolwarm', x_range=k_perp_range)
# plot_PS(u_range, eta, frequency_range, total_power_spectrum_1, cosmological=True)
plot_power_spectrum(u_range, eta, frequency_range, total_power_spectrum_1 , title="Total Noise", axes=axes[2],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=True,
xlabel_show=True, norm = ps_norm, x_range=k_perp_range)
figure.tight_layout()
pyplot.show()
return
if __name__ == "__main__":
main()
|
from __future__ import division, print_function, absolute_import
import numpy
from rep.estimators import XGBoostClassifier, XGBoostRegressor
from rep.test.test_estimators import check_classifier, check_regression, generate_classification_data
__author__ = 'Alex Rogozhnikov'
def very_basic_xgboost_test():
X, y, w = generate_classification_data(n_classes=2)
clf = XGBoostClassifier(n_estimators=10).fit(X, y)
clf.predict(X)
clf.predict_proba(X)
# testing that returned features in importances are correct and in the same order
assert numpy.all(clf.features == clf.get_feature_importances().index)
def test_xgboost():
check_classifier(XGBoostClassifier(n_estimators=20), n_classes=2)
check_classifier(XGBoostClassifier(n_estimators=20), n_classes=4)
check_regression(XGBoostRegressor(n_estimators=20))
|
"""
Catch Pigs environment with two agents.
Forked from:
https://github.com/Bigpig4396/Multi-Agent-Reinforcement-Learning-Environment/
tree/master/env_CatchPigs
Thanks to the original authors.
"""
# pylint: disable-all
# flake8: noqa
# noqa
import random
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
import cv2
class EnvCatchPigs(object):
def __init__(self, size, if_PO):
assert self.check_size(size)
self.if_PO = if_PO
self.map_size = size
self.occupancy = np.zeros((self.map_size, self.map_size))
for i in range(self.map_size):
self.occupancy[0][i] = 1
self.occupancy[1][i] = 1
self.occupancy[self.map_size - 2][i] = 1
self.occupancy[self.map_size - 1][i] = 1
self.occupancy[i][0] = 1
self.occupancy[i][1] = 1
self.occupancy[i][self.map_size - 2] = 1
self.occupancy[i][self.map_size - 1] = 1
for i in range(3, self.map_size - 3, 2):
for j in range(3, self.map_size - 3, 2):
self.occupancy[i][j] = 1
self.raw_occupancy = np.zeros((self.map_size, self.map_size))
for i in range(self.map_size):
self.raw_occupancy[0][i] = 1
self.raw_occupancy[1][i] = 1
self.raw_occupancy[self.map_size - 2][i] = 1
self.raw_occupancy[self.map_size - 1][i] = 1
self.raw_occupancy[i][0] = 1
self.raw_occupancy[i][1] = 1
self.raw_occupancy[i][self.map_size - 2] = 1
self.raw_occupancy[i][self.map_size - 1] = 1
for i in range(3, self.map_size - 3, 2):
for j in range(3, self.map_size - 3, 2):
self.raw_occupancy[i][j] = 1
# initialize agent 1
self.agt1_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] == 1:
self.agt1_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
self.agt1_ori = random.randint(0, 3)
# initialize agent 2
self.agt2_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] == 1:
self.agt2_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
self.agt2_ori = random.randint(0, 3)
# initialize pig
self.pig_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.pig_pos[0]][self.pig_pos[1]] == 1:
self.pig_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.pig_ori = random.randint(0, 3)
self.if_agt1_catches = False
self.if_agt2_catches = False
    def check_size(self, size):
        if (size % 2) == 1 and size >= 7:
            return True
        print("size of map should be an odd integer no smaller than 7")
        return False
def reset(self):
self.occupancy = np.zeros((self.map_size, self.map_size))
for i in range(self.map_size):
self.occupancy[0][i] = 1
self.occupancy[1][i] = 1
self.occupancy[self.map_size - 2][i] = 1
self.occupancy[self.map_size - 1][i] = 1
self.occupancy[i][0] = 1
self.occupancy[i][1] = 1
self.occupancy[i][self.map_size - 2] = 1
self.occupancy[i][self.map_size - 1] = 1
for i in range(3, self.map_size - 3, 2):
for j in range(3, self.map_size - 3, 2):
self.occupancy[i][j] = 1
# initialize agent 1
self.agt1_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] == 1:
self.agt1_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
self.agt1_ori = random.randint(0, 3)
# initialize agent 2
self.agt2_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] == 1:
self.agt2_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
self.agt2_ori = random.randint(0, 3)
# initialize pig
self.pig_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
while self.occupancy[self.pig_pos[0]][self.pig_pos[1]] == 1:
self.pig_pos = [
random.randint(2, self.map_size - 3),
random.randint(2, self.map_size - 3),
]
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.pig_ori = random.randint(0, 3)
self.if_agt1_catches = False
self.if_agt2_catches = False
def list_add(self, a, b):
c = [a[i] + b[i] for i in range(min(len(a), len(b)))]
return c
def paint_block(self, obs, i, j):
new_obs = obs
for row in range(3):
for col in range(3):
new_obs[i * 3 + row, j * 3 + col, 0] = 0.0
new_obs[i * 3 + row, j * 3 + col, 1] = 0.0
new_obs[i * 3 + row, j * 3 + col, 2] = 0.0
return new_obs
    # Pixel offsets (relative to the 3x3 cell) of the 5-pixel oriented sprite
    # shared by both agents and the pig; only the colour differs.
    SPRITE_OFFSETS = {
        0: [(0, 0), (1, 0), (1, 1), (1, 2), (2, 0)],
        1: [(0, 0), (0, 1), (0, 2), (1, 1), (2, 1)],
        2: [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)],
        3: [(0, 1), (1, 1), (2, 0), (2, 1), (2, 2)],
    }
    def _paint_sprite(self, obs, i, j, ori, color):
        # paint the oriented sprite into cell (i, j) with the given RGB colour
        new_obs = obs
        for dr, dc in self.SPRITE_OFFSETS.get(ori, []):
            new_obs[i * 3 + dr, j * 3 + dc, 0] = color[0]
            new_obs[i * 3 + dr, j * 3 + dc, 1] = color[1]
            new_obs[i * 3 + dr, j * 3 + dc, 2] = color[2]
        return new_obs
    def paint_agt1(self, obs, i, j, ori):
        return self._paint_sprite(obs, i, j, ori, (1.0, 0.0, 0.0))  # red
    def paint_agt2(self, obs, i, j, ori):
        return self._paint_sprite(obs, i, j, ori, (0.0, 0.0, 1.0))  # blue
    def paint_pig(self, obs, i, j, ori):
        return self._paint_sprite(obs, i, j, ori, (0.0, 1.0, 0.0))  # green
def get_agt1_obs(self):
obs = []
if not self.if_PO:
obs = self.get_global_obs()
else:
obs = np.zeros((5 * 3, 5 * 3, 3))
for i in range(5 * 3):
for j in range(5 * 3):
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
for k in range(5):
if (self.raw_occupancy[self.agt1_pos[0] - 2][self.agt1_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 0)
if (self.raw_occupancy[self.agt1_pos[0] - 1][self.agt1_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 1)
if self.raw_occupancy[self.agt1_pos[0]][self.agt1_pos[1] + k - 2] == 1:
self.paint_block(obs, 4 - k, 2)
if (self.raw_occupancy[self.agt1_pos[0] + 1][self.agt1_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 3)
if (self.raw_occupancy[self.agt1_pos[0] + 2][self.agt1_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 4)
# detect self
self.paint_agt1(obs, 2, 2, self.agt1_ori)
# detect agent2
for k in range(5):
if self.agt2_pos == self.list_add(self.agt1_pos, [-2 + k, 2]):
self.paint_agt2(obs, 0, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.agt1_pos, [-2 + k, 1]):
self.paint_agt2(obs, 1, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.agt1_pos, [-2 + k, 0]):
self.paint_agt2(obs, 2, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.agt1_pos, [-2 + k, -1]):
self.paint_agt2(obs, 3, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.agt1_pos, [-2 + k, -2]):
self.paint_agt2(obs, 4, k, self.agt2_ori)
# detect pig
for k in range(5):
if self.pig_pos == self.list_add(self.agt1_pos, [-2 + k, 2]):
self.paint_pig(obs, 0, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt1_pos, [-2 + k, 1]):
self.paint_pig(obs, 1, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt1_pos, [-2 + k, 0]):
self.paint_pig(obs, 2, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt1_pos, [-2 + k, -1]):
self.paint_pig(obs, 3, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt1_pos, [-2 + k, -2]):
self.paint_pig(obs, 4, k, self.pig_ori)
# add fog
if self.agt1_ori == 0:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, 14 - j, 0] = 0.5
obs[i, 14 - j, 1] = 0.5
obs[i, 14 - j, 2] = 0.5
if self.agt1_ori == 1:
for i in range(5 * 3):
for j in range(2 * 3):
obs[14 - j, i, 0] = 0.5
obs[14 - j, i, 1] = 0.5
obs[14 - j, i, 2] = 0.5
if self.agt1_ori == 2:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
if self.agt1_ori == 3:
for i in range(5 * 3):
for j in range(2 * 3):
obs[j, i, 0] = 0.5
obs[j, i, 1] = 0.5
obs[j, i, 2] = 0.5
return obs
def get_agt2_obs(self):
obs = []
if not self.if_PO:
obs = self.get_global_obs()
else:
obs = np.zeros((5 * 3, 5 * 3, 3))
for i in range(5 * 3):
for j in range(5 * 3):
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
for k in range(5):
if (self.raw_occupancy[self.agt2_pos[0] - 2][self.agt2_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 0)
if (self.raw_occupancy[self.agt2_pos[0] - 1][self.agt2_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 1)
if self.raw_occupancy[self.agt2_pos[0]][self.agt2_pos[1] + k - 2] == 1:
self.paint_block(obs, 4 - k, 2)
if (self.raw_occupancy[self.agt2_pos[0] + 1][self.agt2_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 3)
if (self.raw_occupancy[self.agt2_pos[0] + 2][self.agt2_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 4)
# detect self
self.paint_agt2(obs, 2, 2, self.agt2_ori)
# detect agent1
for k in range(5):
if self.agt1_pos == self.list_add(self.agt2_pos, [-2 + k, 2]):
self.paint_agt1(obs, 0, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.agt2_pos, [-2 + k, 1]):
self.paint_agt1(obs, 1, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.agt2_pos, [-2 + k, 0]):
self.paint_agt1(obs, 2, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.agt2_pos, [-2 + k, -1]):
self.paint_agt1(obs, 3, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.agt2_pos, [-2 + k, -2]):
self.paint_agt1(obs, 4, k, self.agt1_ori)
# detect pig
for k in range(5):
if self.pig_pos == self.list_add(self.agt2_pos, [-2 + k, 2]):
self.paint_pig(obs, 0, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt2_pos, [-2 + k, 1]):
self.paint_pig(obs, 1, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt2_pos, [-2 + k, 0]):
self.paint_pig(obs, 2, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt2_pos, [-2 + k, -1]):
self.paint_pig(obs, 3, k, self.pig_ori)
if self.pig_pos == self.list_add(self.agt2_pos, [-2 + k, -2]):
self.paint_pig(obs, 4, k, self.pig_ori)
# add fog
if self.agt2_ori == 0:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, 14 - j, 0] = 0.5
obs[i, 14 - j, 1] = 0.5
obs[i, 14 - j, 2] = 0.5
if self.agt2_ori == 1:
for i in range(5 * 3):
for j in range(2 * 3):
obs[14 - j, i, 0] = 0.5
obs[14 - j, i, 1] = 0.5
obs[14 - j, i, 2] = 0.5
if self.agt2_ori == 2:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
if self.agt2_ori == 3:
for i in range(5 * 3):
for j in range(2 * 3):
obs[j, i, 0] = 0.5
obs[j, i, 1] = 0.5
obs[j, i, 2] = 0.5
return obs
def get_pig_obs(self):
obs = []
if not self.if_PO:
obs = self.get_global_obs()
else:
obs = np.zeros((5 * 3, 5 * 3, 3))
for i in range(5 * 3):
for j in range(5 * 3):
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
for k in range(5):
if (self.raw_occupancy[self.pig_pos[0] - 2][self.pig_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 0)
if (self.raw_occupancy[self.pig_pos[0] - 1][self.pig_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 1)
if self.raw_occupancy[self.pig_pos[0]][self.pig_pos[1] + k - 2] == 1:
self.paint_block(obs, 4 - k, 2)
if (self.raw_occupancy[self.pig_pos[0] + 1][self.pig_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 3)
if (self.raw_occupancy[self.pig_pos[0] + 2][self.pig_pos[1] + k - 2] == 1):
self.paint_block(obs, 4 - k, 4)
# detect self
self.paint_pig(obs, 2, 2, self.pig_ori)
# detect agent1
for k in range(5):
if self.agt1_pos == self.list_add(self.pig_pos, [-2 + k, 2]):
self.paint_agt1(obs, 0, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.pig_pos, [-2 + k, 1]):
self.paint_agt1(obs, 1, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.pig_pos, [-2 + k, 0]):
self.paint_agt1(obs, 2, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.pig_pos, [-2 + k, -1]):
self.paint_agt1(obs, 3, k, self.agt1_ori)
if self.agt1_pos == self.list_add(self.pig_pos, [-2 + k, -2]):
self.paint_agt1(obs, 4, k, self.agt1_ori)
# detect agent2
for k in range(5):
if self.agt2_pos == self.list_add(self.pig_pos, [-2 + k, 2]):
self.paint_agt2(obs, 0, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.pig_pos, [-2 + k, 1]):
self.paint_agt2(obs, 1, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.pig_pos, [-2 + k, 0]):
self.paint_agt2(obs, 2, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.pig_pos, [-2 + k, -1]):
self.paint_agt2(obs, 3, k, self.agt2_ori)
if self.agt2_pos == self.list_add(self.pig_pos, [-2 + k, -2]):
self.paint_agt2(obs, 4, k, self.agt2_ori)
# add fog
if self.pig_ori == 0:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, 14 - j, 0] = 0.5
obs[i, 14 - j, 1] = 0.5
obs[i, 14 - j, 2] = 0.5
if self.pig_ori == 1:
for i in range(5 * 3):
for j in range(2 * 3):
obs[14 - j, i, 0] = 0.5
obs[14 - j, i, 1] = 0.5
obs[14 - j, i, 2] = 0.5
if self.pig_ori == 2:
for i in range(5 * 3):
for j in range(2 * 3):
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
if self.pig_ori == 3:
for i in range(5 * 3):
for j in range(2 * 3):
obs[j, i, 0] = 0.5
obs[j, i, 1] = 0.5
obs[j, i, 2] = 0.5
return obs
def get_obs(self):
return [self.get_agt1_obs(), self.get_agt2_obs()]
def get_full_obs(self):
obs = np.zeros((self.map_size * 3, self.map_size * 3, 3))
for i in range(self.map_size * 3):
for j in range(self.map_size * 3):
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
for i in range(self.map_size):
for j in range(self.map_size):
if self.raw_occupancy[i][j] == 1:
self.paint_block(obs, i, j)
self.paint_agt1(obs, self.map_size -
self.agt1_pos[1] - 1, self.agt1_pos[0], self.agt1_ori)
self.paint_agt2(obs, self.map_size -
self.agt2_pos[1] - 1, self.agt2_pos[0], self.agt2_ori)
self.paint_pig(obs, self.map_size -
self.pig_pos[1] - 1, self.pig_pos[0], self.pig_ori)
return obs
def step(self, action_list):
reward_1 = 0
reward_2 = 0
reward_pig = 0
# agent1 move
if action_list[0] == 0: # turn left
reward_1 = reward_1 - 1
if self.agt1_ori == 0:
self.agt1_ori = 3
elif self.agt1_ori == 1:
self.agt1_ori = 0
elif self.agt1_ori == 2:
self.agt1_ori = 1
elif self.agt1_ori == 3:
self.agt1_ori = 2
elif action_list[0] == 1: # turn right
reward_1 = reward_1 - 1
if self.agt1_ori == 0:
self.agt1_ori = 1
elif self.agt1_ori == 1:
self.agt1_ori = 2
elif self.agt1_ori == 2:
self.agt1_ori = 3
elif self.agt1_ori == 3:
self.agt1_ori = 0
elif action_list[0] == 2: # move
reward_1 = reward_1 - 1
if self.agt1_ori == 0:
if (self.occupancy[self.agt1_pos[0] - 1][self.agt1_pos[1]] != 1): # if can move
self.agt1_pos[0] = self.agt1_pos[0] - 1
self.occupancy[self.agt1_pos[0] + 1][self.agt1_pos[1]] = 0
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
else:
reward_1 = reward_1 - 20
elif self.agt1_ori == 1:
if (self.occupancy[self.agt1_pos[0]][self.agt1_pos[1] + 1] != 1): # if can move
self.agt1_pos[1] = self.agt1_pos[1] + 1
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1] - 1] = 0
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
else:
reward_1 = reward_1 - 20
elif self.agt1_ori == 2:
if (self.occupancy[self.agt1_pos[0] + 1][self.agt1_pos[1]] != 1): # if can move
self.agt1_pos[0] = self.agt1_pos[0] + 1
self.occupancy[self.agt1_pos[0] - 1][self.agt1_pos[1]] = 0
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
else:
reward_1 = reward_1 - 20
elif self.agt1_ori == 3:
if (self.occupancy[self.agt1_pos[0]][self.agt1_pos[1] - 1] != 1): # if can move
self.agt1_pos[1] = self.agt1_pos[1] - 1
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1] + 1] = 0
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
else:
reward_1 = reward_1 - 20
elif action_list[0] == 3: # catch
reward_1 = reward_1 - 1
if self.agt1_ori == 0:
if self.pig_pos[0] == self.agt1_pos[0] - 1:
if self.pig_pos[1] == self.agt1_pos[1]:
self.if_agt1_catches = True
elif self.agt1_ori == 1:
if self.pig_pos[1] == self.agt1_pos[1] + 1:
if self.pig_pos[0] == self.agt1_pos[0]:
self.if_agt1_catches = True
elif self.agt1_ori == 2:
if self.pig_pos[0] == self.agt1_pos[0] + 1:
if self.pig_pos[1] == self.agt1_pos[1]:
self.if_agt1_catches = True
elif self.agt1_ori == 3:
if self.pig_pos[1] == self.agt1_pos[1] - 1:
if self.pig_pos[0] == self.agt1_pos[0]:
self.if_agt1_catches = True
# agent2 move
if action_list[1] == 0: # turn left
reward_2 = reward_2 - 1
if self.agt2_ori == 0:
self.agt2_ori = 3
elif self.agt2_ori == 1:
self.agt2_ori = 0
elif self.agt2_ori == 2:
self.agt2_ori = 1
elif self.agt2_ori == 3:
self.agt2_ori = 2
elif action_list[1] == 1: # turn right
reward_2 = reward_2 - 1
if self.agt2_ori == 0:
self.agt2_ori = 1
elif self.agt2_ori == 1:
self.agt2_ori = 2
elif self.agt2_ori == 2:
self.agt2_ori = 3
elif self.agt2_ori == 3:
self.agt2_ori = 0
elif action_list[1] == 2: # move
reward_2 = reward_2 - 1
if self.agt2_ori == 0:
if (self.occupancy[self.agt2_pos[0] - 1][self.agt2_pos[1]] != 1): # if can move
self.agt2_pos[0] = self.agt2_pos[0] - 1
self.occupancy[self.agt2_pos[0] + 1][self.agt2_pos[1]] = 0
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
else:
reward_2 = reward_2 - 20
elif self.agt2_ori == 1:
if (self.occupancy[self.agt2_pos[0]][self.agt2_pos[1] + 1] != 1): # if can move
self.agt2_pos[1] = self.agt2_pos[1] + 1
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1] - 1] = 0
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
else:
reward_2 = reward_2 - 20
elif self.agt2_ori == 2:
if (self.occupancy[self.agt2_pos[0] + 1][self.agt2_pos[1]] != 1): # if can move
self.agt2_pos[0] = self.agt2_pos[0] + 1
self.occupancy[self.agt2_pos[0] - 1][self.agt2_pos[1]] = 0
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
else:
reward_2 = reward_2 - 20
elif self.agt2_ori == 3:
if (self.occupancy[self.agt2_pos[0]][self.agt2_pos[1] - 1] != 1): # if can move
self.agt2_pos[1] = self.agt2_pos[1] - 1
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1] + 1] = 0
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
else:
reward_2 = reward_2 - 20
elif action_list[1] == 3: # catch
reward_2 = reward_2 - 1
if self.agt2_ori == 0:
if self.pig_pos[0] == self.agt2_pos[0] - 1:
if self.pig_pos[1] == self.agt2_pos[1]:
self.if_agt2_catches = True
elif self.agt2_ori == 1:
if self.pig_pos[1] == self.agt2_pos[1] + 1:
if self.pig_pos[0] == self.agt2_pos[0]:
self.if_agt2_catches = True
elif self.agt2_ori == 2:
if self.pig_pos[0] == self.agt2_pos[0] + 1:
if self.pig_pos[1] == self.agt2_pos[1]:
self.if_agt2_catches = True
elif self.agt2_ori == 3:
if self.pig_pos[1] == self.agt2_pos[1] - 1:
if self.pig_pos[0] == self.agt2_pos[0]:
self.if_agt2_catches = True
# pig move
action_pig = random.randint(0, 3)
if action_pig == 0: # turn left
reward_pig = reward_pig - 1
if self.pig_ori == 0:
self.pig_ori = 3
elif self.pig_ori == 1:
self.pig_ori = 0
elif self.pig_ori == 2:
self.pig_ori = 1
elif self.pig_ori == 3:
self.pig_ori = 2
elif action_pig == 1: # turn right
reward_pig = reward_pig - 1
if self.pig_ori == 0:
self.pig_ori = 1
elif self.pig_ori == 1:
self.pig_ori = 2
elif self.pig_ori == 2:
self.pig_ori = 3
elif self.pig_ori == 3:
self.pig_ori = 0
elif action_pig == 2: # move
reward_pig = reward_pig - 1
if self.pig_ori == 0:
if (self.occupancy[self.pig_pos[0] - 1][self.pig_pos[1]] != 1): # if can move
self.pig_pos[0] = self.pig_pos[0] - 1
self.occupancy[self.pig_pos[0] + 1][self.pig_pos[1]] = 0
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.if_agt1_catches = False
self.if_agt2_catches = False
else:
reward_pig = reward_pig - 20
elif self.pig_ori == 1:
if (self.occupancy[self.pig_pos[0]][self.pig_pos[1] + 1] != 1): # if can move
self.pig_pos[1] = self.pig_pos[1] + 1
self.occupancy[self.pig_pos[0]][self.pig_pos[1] - 1] = 0
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.if_agt1_catches = False
self.if_agt2_catches = False
else:
reward_pig = reward_pig - 20
elif self.pig_ori == 2:
if (self.occupancy[self.pig_pos[0] + 1][self.pig_pos[1]] != 1): # if can move
self.pig_pos[0] = self.pig_pos[0] + 1
self.occupancy[self.pig_pos[0] - 1][self.pig_pos[1]] = 0
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.if_agt1_catches = False
self.if_agt2_catches = False
else:
reward_pig = reward_pig - 20
elif self.pig_ori == 3:
if (self.occupancy[self.pig_pos[0]][self.pig_pos[1] - 1] != 1): # if can move
self.pig_pos[1] = self.pig_pos[1] - 1
self.occupancy[self.pig_pos[0]][self.pig_pos[1] + 1] = 0
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.if_agt1_catches = False
self.if_agt2_catches = False
else:
reward_pig = reward_pig - 20
# check if caught
if (self.if_agt1_catches) and (self.if_agt2_catches):
reward_1 = reward_1 + 500
reward_2 = reward_2 + 500
reward_pig = reward_pig - 500
self.reset()
else:
if action_list[1] == 3:
reward_2 = reward_2 - 50
if action_list[0] == 3:
reward_1 = reward_1 - 50
self.if_agt1_catches = False
self.if_agt2_catches = False
done = False
if reward_1 > 0:
done = True
# reward shape
if done:
reward_1 = 1.0
reward_2 = 1.0
else:
reward_1 = -0.001
reward_2 = -0.001
return [reward_1, reward_2], done
def plot_scene(self):
fig = plt.figure(figsize=(5, 5))
gs = GridSpec(3, 3, figure=fig)
ax1 = fig.add_subplot(gs[0:2, 0:3])
plt.xticks([])
plt.yticks([])
ax2 = fig.add_subplot(gs[2, 0:1])
plt.xticks([])
plt.yticks([])
ax3 = fig.add_subplot(gs[2, 1:2])
plt.xticks([])
plt.yticks([])
ax4 = fig.add_subplot(gs[2, 2:3])
plt.xticks([])
plt.yticks([])
ax1.imshow(self.get_full_obs())
ax2.imshow(self.get_agt1_obs())
ax3.imshow(self.get_agt2_obs())
ax4.imshow(self.get_pig_obs())
plt.show()
def set_agt1_at(self, tgt_pos, tgt_ori):
if self.occupancy[tgt_pos[0]][tgt_pos[1]] == 0: # free space
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 0
self.agt1_pos = tgt_pos
self.occupancy[self.agt1_pos[0]][self.agt1_pos[1]] = 1
self.agt1_ori = tgt_ori
def set_agt2_at(self, tgt_pos, tgt_ori):
if self.occupancy[tgt_pos[0]][tgt_pos[1]] == 0: # free space
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 0
self.agt2_pos = tgt_pos
self.occupancy[self.agt2_pos[0]][self.agt2_pos[1]] = 1
self.agt2_ori = tgt_ori
def set_pig_at(self, tgt_pos, tgt_ori):
if self.occupancy[tgt_pos[0]][tgt_pos[1]] == 0: # free space
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 0
self.pig_pos = tgt_pos
self.occupancy[self.pig_pos[0]][self.pig_pos[1]] = 1
self.pig_ori = tgt_ori
    def _render_sprite(self, obs, pos, ori, color):
        # draw the oriented two-rectangle marker inside the 21x21 cell of `pos`
        temp_x = pos[0]
        temp_y = self.map_size - pos[1] - 1
        if ori == 0:
            rects = [((temp_x * 21, temp_y * 21), (temp_x * 21 + 7, temp_y * 21 + 21)),
                     ((temp_x * 21 + 7, temp_y * 21 + 7), (temp_x * 21 + 21, temp_y * 21 + 14))]
        elif ori == 1:
            rects = [((temp_x * 21, temp_y * 21), (temp_x * 21 + 21, temp_y * 21 + 7)),
                     ((temp_x * 21 + 7, temp_y * 21 + 7), (temp_x * 21 + 14, temp_y * 21 + 21))]
        elif ori == 2:
            rects = [((temp_x * 21, temp_y * 21 + 7), (temp_x * 21 + 14, temp_y * 21 + 14)),
                     ((temp_x * 21 + 14, temp_y * 21), (temp_x * 21 + 21, temp_y * 21 + 21))]
        else:
            rects = [((temp_x * 21, temp_y * 21 + 14), (temp_x * 21 + 21, temp_y * 21 + 21)),
                     ((temp_x * 21 + 7, temp_y * 21), (temp_x * 21 + 14, temp_y * 21 + 14))]
        for pt1, pt2 in rects:
            cv2.rectangle(obs, pt1, pt2, color, -1)
    def render(self):
        obs = np.ones((self.map_size * 21, self.map_size * 21, 3))
        for i in range(self.map_size):
            for j in range(self.map_size):
                if self.raw_occupancy[i, j] == 1:
                    cv2.rectangle(obs, (i * 21, j * 21),
                                  (i * 21 + 21, j * 21 + 21), (0, 0, 0), -1)
        # colours are BGR for cv2: agent1 red, agent2 blue, pig green
        self._render_sprite(obs, self.agt1_pos, self.agt1_ori, (0, 0, 255))
        self._render_sprite(obs, self.agt2_pos, self.agt2_ori, (255, 0, 0))
        self._render_sprite(obs, self.pig_pos, self.pig_ori, (0, 255, 0))
        cv2.imshow("image", obs)
        cv2.waitKey(10)
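# --- Hedged addition (not in the original) ---
# A short random rollout illustrating the environment API above. Map size 7 is
# the smallest allowed by check_size(); render()/plot_scene() need a display,
# so the call is left commented out.
def _random_rollout(steps=20):
    env = EnvCatchPigs(7, True)
    rewards, done = [0, 0], False
    for _ in range(steps):
        actions = [random.randint(0, 3), random.randint(0, 3)]
        rewards, done = env.step(actions)
        # env.render()
        if done:
            break
    return rewards, done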
|
#
# ISC License
#
# Copyright (C) 2021 DS-Homebrew
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
from discord.ext import commands
from settings import auto_role
class Newcomers(commands.Cog):
"""
Automatic roles!
"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
role = discord.utils.get(member.guild.roles, name=auto_role)
await member.add_roles(role)
def setup(bot):
bot.add_cog(Newcomers(bot))
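# --- Hedged note (not in the original) ---
# The synchronous setup() above matches discord.py 1.x. On discord.py 2.x the
# entry point must be awaited, and the members intent has to be enabled on the
# bot for on_member_join to fire, roughly:
#     intents = discord.Intents.default()
#     intents.members = True
#     async def setup(bot):
#         await bot.add_cog(Newcomers(bot))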
|
"""Test for my functions."""
from functions import *
def test_string_concatenator():
assert string_concatenator('abc', 'def') == 'abcdef'
def test_DNA_to_mRNA_List():
assert DNA_to_mRNA_List('CTGATCG') == ['G', 'A', 'C', 'U', 'A', 'G', 'C']
def test_DNA_to_mRNA():
assert DNA_to_mRNA('CTGATCG', '-') == 'GAC-UAG-C'
def test_DNA_to_tRNA_List():
assert DNA_to_tRNA_List('CTGATCG') == ['C', 'U', 'G', 'A', 'U', 'C', 'G']
def test_DNA_to_tRNA():
assert DNA_to_tRNA('CTGATCG', '-') == 'CUG-AUC-G'
|
from __future__ import absolute_import
from django.conf import settings
MEDIA_URL = getattr(settings, "MEDIA_URL", "/media/")
ADMIN_URL = getattr(settings, "ADMIN_URL", "/admin/")
STATIC_URL = getattr(settings, "STATIC_URL", "/static/")
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.conch.error import ConchError
from twisted.conch.ssh import channel, connection
from twisted.internet import defer, protocol, reactor
from twisted.python import log
from twisted.spread import banana
import os, stat, pickle
import types # this is for evil
class SSHUnixClientFactory(protocol.ClientFactory):
# noisy = 1
def __init__(self, d, options, userAuthObject):
self.d = d
self.options = options
self.userAuthObject = userAuthObject
def clientConnectionLost(self, connector, reason):
if self.options['reconnect']:
connector.connect()
#log.err(reason)
if not self.d: return
d = self.d
self.d = None
d.errback(reason)
def clientConnectionFailed(self, connector, reason):
#try:
# os.unlink(connector.transport.addr)
#except:
# pass
#log.err(reason)
if not self.d: return
d = self.d
self.d = None
d.errback(reason)
#reactor.connectTCP(options['host'], options['port'], SSHClientFactory())
def startedConnecting(self, connector):
fd = connector.transport.fileno()
stats = os.fstat(fd)
try:
filestats = os.stat(connector.transport.addr)
except:
connector.stopConnecting()
return
        if stat.S_IMODE(filestats[0]) != 0o600:
            log.msg("socket mode is not 0600: %s" % oct(stat.S_IMODE(filestats[0])))
        elif filestats[4] != os.getuid():
            log.msg("socket not owned by us: %s" % filestats[4])
        elif filestats[5] != os.getgid():
            log.msg("socket not owned by our group: %s" % filestats[5])
# XXX reenable this when i can fix it for cygwin
#elif filestats[-3:] != stats[-3:]:
# log.msg("socket doesn't have same create times")
else:
            log.msg('connecting OK')
return
connector.stopConnecting()
def buildProtocol(self, addr):
# here comes the EVIL
obj = self.userAuthObject.instance
bases = []
for base in obj.__class__.__bases__:
if base == connection.SSHConnection:
bases.append(SSHUnixClientProtocol)
else:
bases.append(base)
newClass = types.ClassType(obj.__class__.__name__, tuple(bases), obj.__class__.__dict__)
obj.__class__ = newClass
SSHUnixClientProtocol.__init__(obj)
log.msg('returning %s' % obj)
if self.d:
d = self.d
self.d = None
d.callback(None)
return obj
class SSHUnixServerFactory(protocol.Factory):
def __init__(self, conn):
self.conn = conn
def buildProtocol(self, addr):
return SSHUnixServerProtocol(self.conn)
class SSHUnixProtocol(banana.Banana):
knownDialects = ['none']
def __init__(self):
banana.Banana.__init__(self)
self.deferredQueue = []
self.deferreds = {}
self.deferredID = 0
def connectionMade(self):
log.msg('connection made %s' % self)
banana.Banana.connectionMade(self)
def expressionReceived(self, lst):
vocabName = lst[0]
fn = "msg_%s" % vocabName
func = getattr(self, fn)
func(lst[1:])
def sendMessage(self, vocabName, *tup):
self.sendEncoded([vocabName] + list(tup))
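    # Deferred proxying: the requesting side queues a local Deferred (returnDeferredLocal),
    # the peer assigns it an id via the 'returnDeferred' message (returnDeferredWire), and
    # results travel back as pickled 'callbackDeferred'/'errbackDeferred' messages keyed by that id.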
def returnDeferredLocal(self):
d = defer.Deferred()
self.deferredQueue.append(d)
return d
def returnDeferredWire(self, d):
di = self.deferredID
self.deferredID += 1
self.sendMessage('returnDeferred', di)
d.addCallback(self._cbDeferred, di)
d.addErrback(self._ebDeferred, di)
def _cbDeferred(self, result, di):
self.sendMessage('callbackDeferred', di, pickle.dumps(result))
def _ebDeferred(self, reason, di):
self.sendMessage('errbackDeferred', di, pickle.dumps(reason))
def msg_returnDeferred(self, lst):
deferredID = lst[0]
self.deferreds[deferredID] = self.deferredQueue.pop(0)
def msg_callbackDeferred(self, lst):
deferredID, result = lst
d = self.deferreds[deferredID]
del self.deferreds[deferredID]
d.callback(pickle.loads(result))
def msg_errbackDeferred(self, lst):
deferredID, result = lst
d = self.deferreds[deferredID]
del self.deferreds[deferredID]
d.errback(pickle.loads(result))
class SSHUnixClientProtocol(SSHUnixProtocol):
def __init__(self):
SSHUnixProtocol.__init__(self)
self.isClient = 1
self.channelQueue = []
self.channels = {}
def logPrefix(self):
return "SSHUnixClientProtocol (%i) on %s" % (id(self), self.transport.logPrefix())
def connectionReady(self):
log.msg('connection ready')
self.serviceStarted()
def connectionLost(self, reason):
self.serviceStopped()
def requestRemoteForwarding(self, remotePort, hostport):
self.sendMessage('requestRemoteForwarding', remotePort, hostport)
def cancelRemoteForwarding(self, remotePort):
self.sendMessage('cancelRemoteForwarding', remotePort)
def sendGlobalRequest(self, request, data, wantReply = 0):
self.sendMessage('sendGlobalRequest', request, data, wantReply)
if wantReply:
return self.returnDeferredLocal()
def openChannel(self, channel, extra = ''):
self.channelQueue.append(channel)
channel.conn = self
self.sendMessage('openChannel', channel.name,
channel.localWindowSize,
channel.localMaxPacket, extra)
def sendRequest(self, channel, requestType, data, wantReply = 0):
self.sendMessage('sendRequest', channel.id, requestType, data, wantReply)
if wantReply:
return self.returnDeferredLocal()
def adjustWindow(self, channel, bytesToAdd):
self.sendMessage('adjustWindow', channel.id, bytesToAdd)
def sendData(self, channel, data):
self.sendMessage('sendData', channel.id, data)
def sendExtendedData(self, channel, dataType, data):
        self.sendMessage('sendExtended', channel.id, dataType, data)
def sendEOF(self, channel):
self.sendMessage('sendEOF', channel.id)
def sendClose(self, channel):
self.sendMessage('sendClose', channel.id)
def msg_channelID(self, lst):
channelID = lst[0]
self.channels[channelID] = self.channelQueue.pop(0)
self.channels[channelID].id = channelID
def msg_channelOpen(self, lst):
channelID, remoteWindow, remoteMax, specificData = lst
channel = self.channels[channelID]
channel.remoteWindowLeft = remoteWindow
channel.remoteMaxPacket = remoteMax
channel.channelOpen(specificData)
def msg_openFailed(self, lst):
channelID, reason = lst
self.channels[channelID].openFailed(pickle.loads(reason))
del self.channels[channelID]
def msg_addWindowBytes(self, lst):
channelID, bytes = lst
self.channels[channelID].addWindowBytes(bytes)
def msg_requestReceived(self, lst):
channelID, requestType, data = lst
d = defer.maybeDeferred(self.channels[channelID].requestReceived, requestType, data)
self.returnDeferredWire(d)
def msg_dataReceived(self, lst):
channelID, data = lst
self.channels[channelID].dataReceived(data)
def msg_extReceived(self, lst):
channelID, dataType, data = lst
self.channels[channelID].extReceived(dataType, data)
def msg_eofReceived(self, lst):
channelID = lst[0]
self.channels[channelID].eofReceived()
def msg_closeReceived(self, lst):
channelID = lst[0]
channel = self.channels[channelID]
channel.remoteClosed = 1
channel.closeReceived()
def msg_closed(self, lst):
channelID = lst[0]
channel = self.channels[channelID]
self.channelClosed(channel)
def channelClosed(self, channel):
channel.localClosed = channel.remoteClosed = 1
del self.channels[channel.id]
log.callWithLogger(channel, channel.closed)
# just in case the user doesn't override
def serviceStarted(self):
pass
def serviceStopped(self):
pass
class SSHUnixServerProtocol(SSHUnixProtocol):
def __init__(self, conn):
SSHUnixProtocol.__init__(self)
self.isClient = 0
self.conn = conn
def connectionLost(self, reason):
for channel in self.conn.channels.values():
if isinstance(channel, SSHUnixChannel) and channel.unix == self:
log.msg('forcibly closing %s' % channel)
try:
self.conn.sendClose(channel)
except:
pass
def haveChannel(self, channelID):
return self.conn.channels.has_key(channelID)
def getChannel(self, channelID):
channel = self.conn.channels[channelID]
if not isinstance(channel, SSHUnixChannel):
raise ConchError('nice try bub')
return channel
def msg_requestRemoteForwarding(self, lst):
remotePort, hostport = lst
hostport = tuple(hostport)
self.conn.requestRemoteForwarding(remotePort, hostport)
def msg_cancelRemoteForwarding(self, lst):
[remotePort] = lst
self.conn.cancelRemoteForwarding(remotePort)
def msg_sendGlobalRequest(self, lst):
requestName, data, wantReply = lst
d = self.conn.sendGlobalRequest(requestName, data, wantReply)
if wantReply:
self.returnDeferredWire(d)
def msg_openChannel(self, lst):
name, windowSize, maxPacket, extra = lst
channel = SSHUnixChannel(self, name, windowSize, maxPacket)
self.conn.openChannel(channel, extra)
self.sendMessage('channelID', channel.id)
def msg_sendRequest(self, lst):
cn, requestType, data, wantReply = lst
if not self.haveChannel(cn):
if wantReply:
self.returnDeferredWire(defer.fail(ConchError("no channel")))
channel = self.getChannel(cn)
d = self.conn.sendRequest(channel, requestType, data, wantReply)
if wantReply:
self.returnDeferredWire(d)
def msg_adjustWindow(self, lst):
cn, bytesToAdd = lst
if not self.haveChannel(cn): return
channel = self.getChannel(cn)
self.conn.adjustWindow(channel, bytesToAdd)
def msg_sendData(self, lst):
cn, data = lst
if not self.haveChannel(cn): return
channel = self.getChannel(cn)
self.conn.sendData(channel, data)
def msg_sendExtended(self, lst):
cn, dataType, data = lst
if not self.haveChannel(cn): return
channel = self.getChannel(cn)
self.conn.sendExtendedData(channel, dataType, data)
def msg_sendEOF(self, lst):
(cn, ) = lst
if not self.haveChannel(cn): return
channel = self.getChannel(cn)
self.conn.sendEOF(channel)
def msg_sendClose(self, lst):
(cn, ) = lst
if not self.haveChannel(cn): return
channel = self.getChannel(cn)
self.conn.sendClose(channel)
class SSHUnixChannel(channel.SSHChannel):
def __init__(self, unix, name, windowSize, maxPacket):
channel.SSHChannel.__init__(self, windowSize, maxPacket, conn = unix.conn)
self.unix = unix
self.name = name
def channelOpen(self, specificData):
self.unix.sendMessage('channelOpen', self.id, self.remoteWindowLeft,
self.remoteMaxPacket, specificData)
def openFailed(self, reason):
self.unix.sendMessage('openFailed', self.id, pickle.dumps(reason))
def addWindowBytes(self, bytes):
self.unix.sendMessage('addWindowBytes', self.id, bytes)
def dataReceived(self, data):
self.unix.sendMessage('dataReceived', self.id, data)
def requestReceived(self, reqType, data):
self.unix.sendMessage('requestReceived', self.id, reqType, data)
return self.unix.returnDeferredLocal()
def extReceived(self, dataType, data):
self.unix.sendMessage('extReceived', self.id, dataType, data)
def eofReceived(self):
self.unix.sendMessage('eofReceived', self.id)
def closeReceived(self):
self.unix.sendMessage('closeReceived', self.id)
def closed(self):
self.unix.sendMessage('closed', self.id)
def connect(host, port, options, verifyHostKey, userAuthObject):
if options['nocache']:
return defer.fail(ConchError('not using connection caching'))
d = defer.Deferred()
filename = os.path.expanduser("~/.conch-%s-%s-%i" % (userAuthObject.user, host, port))
factory = SSHUnixClientFactory(d, options, userAuthObject)
reactor.connectUNIX(filename, factory, timeout=2, checkPID=1)
return d
|
import re
'''
Step 1: Clean the triple file. In the DBpedia case, we only need the part of the resource URI that indicates the entity/type/predicate name.
'''
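# Illustrative (assumed) example of the transformation performed below: a triple such as
#   <http://dbpedia.org/resource/Berlin> <http://dbpedia.org/ontology/country> <http://dbpedia.org/resource/Germany> .
# is reduced to the tab-separated line
#   <Berlin>\t<country>\t<Germany>
# and the predicate's stripped prefix (here http://dbpedia.org/ontology) is recorded in the prefix file.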
fileName = []#List of triple files to be processed
notRdf = open('./notRdf.txt','w')#Record the lines that refer to a type predicate other than rdf:type
for index2,fname in enumerate(fileName):
f = open('./'+fname)
triple = open('output triple files here','w')
    prefix_f = open('output prefix files here','w')# save the prefixes to a file in case they are useful in the future.
i = 0
count = 0
prefix_set = set()
for line in f:
if line[0] != '<':
print(i)
i = i + 1
count += 1
continue
line = line[:-3].replace('> <','>$-$-$<').replace('> "','>$-$-$"')
line = line.split('$-$-$')
if i==0:
i += 1
continue
new_line=[]
if "type>" in line[1]:
if "rdf" not in line[1]:
notRdf.write(str(line)+'\n')
continue
for index,item in enumerate(line):
if not item:
count +=1
break
if item[0]=='<':
pos = item.rfind('/')
word = item[pos+1:-1].split("#")
if len(word)<2:
new_line.append('<'+word[0]+'>')
else:
new_line.append('<'+word[1]+'>')
if index == 1:
tmp = new_line[1][1:len(new_line[1])-1]
pos2 = line[1].rfind(tmp)
prefix = line[1][1:pos2-1]
prefix_set.add(tmp + '^^^'+prefix+'\n')
continue
elif item.count('"') >=2:
item = item.split('^^')[0].split('@')[0]
pattern = re.compile('"(.*)"')
word = '"'+''.join(pattern.findall(item))+'"'
new_line.append(word)
continue
else:
print(i)
i += 1
#print('\t'.join(new_line))
if i%1000000==0:
            print("%d:%d"%(index2,i))  # progress: file index and line count
triple.write('\t'.join(new_line)+'\n')
for item in prefix_set:
prefix_f.write(item)
f.close()
triple.close()
prefix_f.close()
|
import netlink
import pprint
import cStringIO
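# get_family: sends a CTRL_CMD_GETFAMILY request over a generic netlink socket, then keeps
# reading multipart responses, parsing each nlmsg header, genlmsg payload and attribute block,
# until a DONE message arrives (an ERROR message raises ValueError).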
def get_family():
con = netlink.new_generic()
hdr = netlink.new_genlmsg(
{
"cmd": netlink.CTRL_CMD_GETFAMILY,
"version": 0,
"reserved": 0
}
)
payload = netlink.generic_id_ctrl(hdr,0x12345)
con.send(payload)
msgs = []
goout = False
while True:
d = con.recv(65533)
b = cStringIO.StringIO(d)
while True:
if b.tell() >= len(d):
break
msg = netlink.parse_nlmsg(b)
if msg["type"] == netlink.DONE:
goout = True
break
elif msg["type"] == netlink.ERROR:
raise ValueError(msg)
mlen = b.tell() - 16 + msg["len"]
payload = netlink.parse_genlmsg(b)
attrs = netlink.parse_attrs(b, mlen)
msgs.append({
"msg": msg,
"payload": payload,
"attrs": attrs
})
if goout:
break
b.close()
return msgs
pprint.pprint(get_family())
|
## Created by C. Cesarotti (ccesarotti@g.harvard.edu) 04/2019
## Last updated: 04/24/20
##
## Calculates the event isotropy for ring configurations with
## random orientations.
##
#############################
#
import sys
import time
import warnings
import numpy as np
import matplotlib.pylab as plt
import random
from eventIsotropy.cylGen import ringGen, ringGenShift
from eventIsotropy.emdVar import _cdist_phicos, emd_Calc
from matplotlib import rc
from mpl_toolkits.mplot3d import Axes3D
rc('text', usetex=True)
from prettytable import PrettyTable
##############################
# First, generate rings and pT lists
nList=[4,8,16,32,64]
ringSample = np.array([ringGen(nList[i]) for i in range(5)]) # THESE ARE THE PHI VALUES OF THE PARTICLES
ringPtSample=np.array([np.full(len(ringSample[i]), 1.) for i in range(5)]) # THE UNORMALIZED WEIGHT: ALL OF EQUAL PT. NORMALIZATION IN EMD CALC
for i in range(5):
ringPoints1 = ringSample[i]
ringPT1 = ringPtSample[i]
for j in range(5):
emdSpec=[]
# SET THE SECOND EVENT WITH j
ringPT2 = ringPtSample[j]
for num in range(1000):
ringPoints2 = ringGenShift(nList[j]) # The shift just randomly orients the ring, doesn't change particle spacing
M = _cdist_phicos(ringPoints1,ringPoints2) # M is distance in phi according to 1 - cos phi metric
emdval = emd_Calc(ringPT1, ringPT2, M)
emdSpec.append(emdval)
f= open("emdRingtoRing"+str(i)+"_"+str(j)+".dat","w+")
for emdVal in emdSpec:
f.write(str(emdVal)+ ' ')
f.close()
|
from pymongo import MongoClient
import json
client = MongoClient('localhost', 27017)
db = client['pymongo_test']
cursor = db.pymongo_test
posts = db.posts
# post_data = {
# 'Name': 'Milk',
# 'Quantity': '1',
# 'Expiration Date': '4/03/20'
# }
# result = posts.insert_one(post_data)
# print('One post: {0}'.format(result.inserted_id))
#
# post_1 = {
# 'title': 'PyMongo!!!',
# 'content': 'PyMongo is fun, you guys',
# 'author': 'Scott'
# }
# post_2 = {
# 'title': 'Virtual Environments',
# 'content': 'Use virtual environments, you guys',
# 'author': 'Scott'
# }
# post_3 = {
# 'title': 'Learning Python',
# 'content': 'Learn Python, it is easy',
# 'author': 'Bill'
# }
# new_result = posts.insert_many([post_1, post_2, post_3])
# print('Multiple posts: {0}'.format(new_result.inserted_ids))
#d = dict((db, [collection for collection in client[db].list_collection_names()]) for db in client.list_database_names())
#print(json.dumps(d))
for document in posts.find():
print(document)
|
import os.path
import logging
import shutil
import pandas as pd
import numpy as np
import ada.utils.experimentation as xp
class XpResults:
@staticmethod
def from_file(metrics, filepath):
if os.path.exists(filepath):
df = pd.read_csv(filepath, index_col=0)
time_str = xp.create_timestamp_string()
backup_file = f"{filepath}.{time_str}.bak"
logging.info(f"Copying {filepath} to {backup_file}")
shutil.copyfile(filepath, backup_file)
return XpResults(metrics, df)
else:
return XpResults(metrics)
def __init__(self, metrics, df=None):
"""
Args:
metrics (list of string): Which metrics to record.
df (pandas.DataFrame, optional): columns are: metrics + [seed, method, split].
Defaults to None.
"""
self._metrics = metrics[:]
if df is None:
self._df = pd.DataFrame(columns=metrics + ["seed", "method", "split"])
else:
self._df = df.copy()
def __len__(self):
return len(self._df)
def already_computed(self, method_name, seed):
if len(self._df) == 0:
return False
ms_df = self._df.query(f"method == '{method_name}' and seed == '{seed}'")
if len(ms_df) == 0:
return False
for m in self._metrics:
if m not in ms_df.columns:
return False
return True
def remove(self, method_names):
print(method_names)
self._df = self._df[~self._df["method"].isin(method_names)]
def update(self, is_validation, method_name, seed, metric_values):
split, prefix = ("Validation", "V") if is_validation else ("Test", "Te")
results = pd.DataFrame(
{
k: metric_values.get(f"{prefix}_{k.replace(' ', '_')}", None)
for k in self._metrics
},
index=[0],
)
results["seed"] = seed
results["method"] = method_name
results["split"] = split
        self._df = pd.concat([self._df, results], ignore_index=True)
def get_data(self):
return self._df
def get_best_archi_seed(self):
return (
self._df.sort_values(by=self._metrics, ascending=False)
.head(1)
.seed.values[0]
)
def get_last_seed(self):
return self._df.tail(1).seed.values[0]
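    # get_mean_seed: returns the seed whose per-method scores are closest (in mean squared
    # distance) to each method's mean score, i.e. the most "typical" run across methods.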
def get_mean_seed(self, mean_metric):
if mean_metric not in self._metrics:
raise ValueError(f"Unknown metric: {mean_metric}")
all_res_valid = self._df.query("split=='Validation'").dropna()
if all_res_valid.empty:
all_res_valid = self._df.query("split=='Test'").dropna()
        all_res_valid[mean_metric] = all_res_valid[mean_metric].astype(float)
tres_means = all_res_valid.groupby("method").mean()[mean_metric]
all_seed_res = all_res_valid.pivot(
index="seed", columns="method", values=mean_metric
)
def dist_to_mean(row):
return np.mean((row - tres_means) ** 2)
all_seed_res["dist"] = all_seed_res.apply(dist_to_mean, axis=1)
return all_seed_res.sort_values(by="dist", ascending=True).head(1).index[0]
def to_csv(self, filepath):
self._df.to_csv(filepath)
def print_scores(
self,
method_name,
split="Validation",
stdout=True,
fdout=None,
print_func=print,
file_format="markdown",
):
mres = self._df.query(f"method == '{method_name}' and split == '{split}'")
nsamples = len(mres)
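        # per-metric (mean, standard error of the mean) over the selected runs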
mmres = [
(mres[m].mean(), mres[m].std() / np.sqrt(nsamples)) for m in self._metrics
]
if stdout:
print_func(split, end=" ")
print_func(method_name, end="")
print_func(" " * (10 - len(method_name)), end="")
print_func(
"\t\t".join(
(
f"{m * 100:.1f}% +- {1.96 * s * 100:.2f} ({nsamples} runs)"
for m, s in mmres
)
)
)
if fdout is not None:
if file_format == "markdown":
fdout.write(f"|{method_name}|")
fdout.write(
"|".join(
(f"{m * 100:.1f}% +- {1.96 * s * 100:.2f}" for m, s in mmres)
)
)
fdout.write("|\n")
else:
fdout.write(method_name)
fdout.write(" " * (10 - len(method_name)))
fdout.write(
"\t\t".join(
(f"{m * 100:.1f}% +- {1.96 * s * 100:.2f}" for m, s in mmres)
)
)
fdout.write(f" ({split})")
fdout.write("\n")
def append_to_txt(self, filepath, test_params, nseeds, splits=None):
if splits is None:
splits = ["Validation", "Test"]
with open(filepath, "a") as fd:
fd.write(xp.param_to_str(test_params))
fd.write("\n")
print(" " * 10, "\t\t".join(self._metrics))
fd.write("nseeds = ")
fd.write(str(nseeds))
fd.write("\n")
fd.write("method\t")
fd.write("\t\t".join(self._metrics))
fd.write("\n")
for name in self._df.method.unique():
for split in splits:
self.print_scores(
method_name=name,
split=split,
stdout=True,
fdout=fd,
file_format="text",
)
fd.write("\n")
def append_to_markdown(self, filepath, test_params, nseeds, splits=None):
if splits is None:
splits = ["Validation", "Test"]
with open(filepath, "a") as fd:
fd.write(xp.param_to_str(test_params))
fd.write("\n")
print(" " * 10, "\t\t".join(self._metrics))
fd.write("nseeds = ")
fd.write(str(nseeds))
fd.write("\n")
fd.write(f"|Method|{'|'.join(self._metrics)}|\n")
fd.write("|:----|")
for i in range(len(self._metrics)):
fd.write(":---:|")
fd.write("\n")
for name in self._df.method.unique():
for split in splits:
self.print_scores(
method_name=name, split=split, stdout=True, fdout=fd
)
fd.write("\n")
|
import torch
import torch.nn as nn
from models.vae import VAE
# lambda_d, lambda_od = 100, 10
def matrix_diag(diagonal):
N = diagonal.shape[-1]
shape = diagonal.shape[:-1] + (N, N)
device, dtype = diagonal.device, diagonal.dtype
result = torch.zeros(shape, dtype=dtype, device=device)
indices = torch.arange(result.numel(), device=device).reshape(shape)
indices = indices.diagonal(dim1=-2, dim2=-1)
result.view(-1)[indices] = diagonal
return result
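# DIP-VAE-I regulariser: penalises the covariance of the encoder means,
# Cov[mu] = E[mu mu^T] - E[mu] E[mu]^T, pushing its off-diagonal entries towards 0
# (weight lambda_od) and its diagonal entries towards 1 (weight lambda_d).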
def dip_vae_i_loss(mu, lambda_d, lambda_od):
exp_mu = mu.mean(0)
exp_mu_mu_t = (mu.unsqueeze(1) * mu.unsqueeze(2)).mean(0)
cov = exp_mu_mu_t - exp_mu.unsqueeze(0) * exp_mu.unsqueeze(1)
diag = torch.diagonal(cov, dim1=-2, dim2=-1)
off_diag = cov - matrix_diag(diag)
regulariser_od = lambda_od * (off_diag**2).sum()
regulariser_d = lambda_d * ((diag-1)**2).sum()
return regulariser_od + regulariser_d
def dip_vae_ii_loss(mu, lv, lambda_d, lambda_od):
sigma = matrix_diag(lv.exp())
exp_cov = sigma.mean(0)
exp_mu = mu.mean(0)
exp_mu_mu_t = (mu.unsqueeze(1) * mu.unsqueeze(2)).mean(0)
cov_exp = exp_mu_mu_t - exp_mu.unsqueeze(0) * exp_mu.unsqueeze(1)
cov_z = cov_exp + exp_cov
diag = torch.diagonal(cov_z, dim1=-2, dim2=-1)
off_diag = cov_z - matrix_diag(diag)
regulariser_od = lambda_od * off_diag.pow(2).sum()
regulariser_d = lambda_d * (diag - 1).pow(2).sum()
return regulariser_d + regulariser_od
class DipVAE(VAE):
def __init__(self, encoder, decoder, beta, lambda_d, lambda_od, dip_type='ii', max_capacity=None, capacity_leadin=None, xav_init=False):
super().__init__(encoder, decoder, beta, max_capacity, capacity_leadin)
self.type = dip_type
if self.type == 'dip_vae_i':
self.dip_loss = lambda mu, lv: dip_vae_i_loss(mu, lambda_d, lambda_od)
else:
self.dip_loss = lambda mu, lv: dip_vae_ii_loss(mu, lv, lambda_d, lambda_od)
if xav_init:
for p in self.encoder.modules():
if isinstance(p, nn.Conv2d) or isinstance(p, nn.Linear) or \
isinstance(p, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(p.weight)
for p in self.decoder.modules():
if isinstance(p, nn.Conv2d) or isinstance(p, nn.Linear) or \
isinstance(p, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(p.weight)
def main_step(self, batch, batch_nb, loss_fn):
out = super().main_step(batch, batch_nb, loss_fn)
state = out['state']
x, y, mu, lv, z, x_hat = state['x'], state['y'], state['mu'], state['lv'], state['z'], state['x_hat']
dip_loss = self.dip_loss(mu, lv)
vae_loss = out['loss']
self.global_step += 1
tensorboard_logs = out['out']
tensorboard_logs['metric/dip_loss'] = dip_loss.detach()
return {'loss': vae_loss + dip_loss, 'out': tensorboard_logs,
'state': state}
def dip_vae(args):
if args.dataset == 'forward':
from models.forward_vae import ForwardDecoder, ForwardEncoder
encoder, decoder = ForwardEncoder(args.latents), ForwardDecoder(args.latents)
else:
from models.beta import beta_shape_encoder, beta_shapes_decoder
encoder, decoder = beta_shape_encoder(args), beta_shapes_decoder(args)
dip_type = args.base_model if args.model in ['rl_group_vae'] else args.model
return DipVAE(encoder, decoder, args.beta, args.lambda_d, args.lambda_od, dip_type, args.capacity, args.capacity_leadin, args.xav_init)
def dip_conv_vae(args):
from models.beta import beta_celeb_encoder, beta_celeb_decoder
encoder, decoder = beta_celeb_encoder(args), beta_celeb_decoder(args)
dip_type = 'dip_vae_i' if args.model == 'dip_conv_vae_i' else 'dip_vae_ii'
return DipVAE(encoder, decoder, args.beta, args.lambda_d, args.lambda_od, dip_type, args.capacity, args.capacity_leadin, args.xav_init)
|
import numpy as np
import re, collections
def get_vocab(filename):
vocab = collections.defaultdict(int)
try:
with open(filename, 'r', encoding='utf-8') as fhand:
for line in fhand:
words = line.strip().split()
for word in words:
vocab[' '.join(list(word)) + ' </w>'] += 1
except OSError:
print('error')
words = filename.strip().split()
for word in words:
vocab[' '.join(list(word)) + ' </w>'] += 1
return vocab
def merge_vocab(pair, v_in):
v_out = {}
bigram = re.escape(' '.join(pair))
p = re.compile(r'(?<!\S)' + bigram + r'(?!\S)')
for word in v_in:
w_out = p.sub(''.join(pair), word)
v_out[w_out] = v_in[word]
return v_out
def get_stats(vocab):
pairs = collections.defaultdict(int)
for word, freq in vocab.items():
symbols = word.split()
for i in range(len(symbols)-1):
pairs[symbols[i],symbols[i+1]] += freq
return pairs
def get_tokens_from_vocab(vocab):
tokens_frequencies = collections.defaultdict(int)
vocab_tokenization = {}
for word, freq in vocab.items():
word_tokens = word.split()
for token in word_tokens:
tokens_frequencies[token] += freq
vocab_tokenization[''.join(word_tokens)] = word_tokens
return tokens_frequencies, vocab_tokenization
def measure_token_length(token):
if token[-4:] == '</w>':
return len(token[:-4]) + 1
else:
return len(token)
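# Greedy recursive tokenisation: try tokens in the order given by sorted_tokens (expected to be
# sorted from longest/most specific to shortest); each match splits the string, and the unmatched
# substrings are tokenised recursively with only the remaining lower-priority tokens.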
def tokenize_word(string, sorted_tokens, unknown_token='</u>'):
if string == '':
return []
if sorted_tokens == []:
return [unknown_token]
string_tokens = []
for i in range(len(sorted_tokens)):
token = sorted_tokens[i]
token_reg = re.escape(token.replace('.', '[.]'))
matched_positions = [(m.start(0), m.end(0)) for m in re.finditer(token_reg, string)]
if len(matched_positions) == 0:
continue
substring_end_positions = [matched_position[0] for matched_position in matched_positions]
substring_start_position = 0
for substring_end_position in substring_end_positions:
substring = string[substring_start_position:substring_end_position]
string_tokens += tokenize_word(string=substring, sorted_tokens=sorted_tokens[i+1:], unknown_token=unknown_token)
string_tokens += [token]
substring_start_position = substring_end_position + len(token)
remaining_substring = string[substring_start_position:]
string_tokens += tokenize_word(string=remaining_substring, sorted_tokens=sorted_tokens[i+1:], unknown_token=unknown_token)
break
return string_tokens
vocab = get_vocab('oscar/oscar.test.raw')
print('==========')
print('Tokens Before BPE')
tokens_frequencies, vocab_tokenization = get_tokens_from_vocab(vocab)
print('Number of tokens: {}'.format(len(tokens_frequencies.keys())))
print('==========')
import json
num_merges = 55000
for i in range(num_merges):
pairs = get_stats(vocab)
if not pairs:
break
best = max(pairs, key=pairs.get)
vocab = merge_vocab(best, vocab)
print('Iter: {}'.format(i))
print('Best pair: {}'.format(best))
tokens_frequencies, vocab_tokenization = get_tokens_from_vocab(vocab)
print('Number of tokens: {}'.format(len(tokens_frequencies.keys())))
print('==========')
with open("vocab_tweets.txt","w") as F:
json.dump(tokens_frequencies,F)
|
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ._base import FeatureExtractor, BM25Stat, LmDirStat, DfrGl2Stat, DfrInExpB2Stat, DphStat, Proximity, TpScore, TpDist,\
DocSize, MatchingTermCount, QueryLength, SCS, SumMatchingTF, UniqueTermCount, QueryCoverageRatio, \
UnorderedSequentialPairs, OrderedSequentialPairs, UnorderedQueryPairs, OrderedQueryPairs, \
AvgPooler, SumPooler, MedianPooler, MinPooler, MaxPooler, VarPooler, TfStat, TfIdfStat, NormalizedTfStat, \
IdfStat, IcTfStat, ConfidencePooler, MaxMinRatioPooler, \
NormalizedTfIdf, ProbalitySum, RunList, IbmModel1, SpacyTextParser
from ._search_msmarco import MsmarcoLtrSearcher
__all__ = ['FeatureExtractor', 'BM25Stat', 'LmDirStat', 'DfrGl2Stat', 'DfrInExpB2Stat', 'DphStat', 'Proximity', 'TpScore', 'TpDist',
'DocSize', 'MatchingTermCount', 'QueryLength', 'SCS', 'SumMatchingTF', 'UniqueTermCount', 'QueryCoverageRatio',
'UnorderedSequentialPairs', 'OrderedSequentialPairs', 'UnorderedQueryPairs', 'OrderedQueryPairs',
'AvgPooler', 'SumPooler', 'MedianPooler', 'MinPooler', 'MaxPooler', 'VarPooler', 'TfStat', 'TfIdfStat',
'NormalizedTfStat','IdfStat', 'IcTfStat', 'ConfidencePooler', 'MaxMinRatioPooler','NormalizedTfIdf',
'ProbalitySum', 'RunList', 'IbmModel1', 'MsmarcoLtrSearcher','SpacyTextParser']
|
import xml.sax
from hashlib import md5
def func1():
test_digest = md5("test string").digest()
return test_digest
def func2(): # double vulnerability
test_digesta, test_digestb = md5("test string").digest(), md5("test string2").digest()
return test_digesta, test_digestb
def func3(val):
assert 0 != val
return val / 3
|
# !/usr/bin/python
# -*- coding: UTF-8 -*-
from svg.path.path import *
def get_quadratic_coordinate_with_time(t:float, /, *, p0:complex, p1:complex, p2:complex):
if t < 0 or t > 1:
return
points = [p0, p1, p2]
    polynomials = [(1-t)**2, 2*t*(1-t), t**2]
return sum([prod * points[index] for index, prod in enumerate(polynomials)])
def get_quadratic_arc_length(n:int = 40, /, *, P0:complex, P1:complex, P2:complex):
"""
    Get the arc length of a quadratic Bezier curve via a polyline approximation
    -------------
    Formula: sum over i = 1..n of ||f(i/n) - f((i-1)/n)||
"""
if n < 1:
return
return sum([abs(get_quadratic_coordinate_with_time((side_count + 1) / n, p0=P0, p1=P1, p2=P2) - get_quadratic_coordinate_with_time(side_count / n, p0=P0, p1=P1, p2=P2)) for side_count in range(n)])
def get_cubic_coordinate_with_time(t:float, /, *, p0:complex, p1:complex, p2:complex, p3:complex):
if t < 0 or t > 1:
return
points = [p0, p1, p2, p3]
    polynomials = [-t**3 + 3*t**2 - 3*t + 1, 3*t**3 - 6*t**2 + 3*t, -3*t**3 + 3*t**2, t**3]
return sum([prod * points[index] for index, prod in enumerate(polynomials)])
def get_cubic_arc_length(n:int = 40, /, *, P0:complex, P1:complex, P2:complex, P3:complex):
"""
    Get the arc length of a cubic Bezier curve via a polyline approximation
    -------------
    Formula: sum over i = 1..n of ||f(i/n) - f((i-1)/n)||
"""
if n < 1:
return
return sum([abs(get_cubic_coordinate_with_time((side_count + 1) / n, p0=P0, p1=P1, p2=P2, p3=P3) - get_cubic_coordinate_with_time(side_count / n, p0=P0, p1=P1, p2=P2, p3=P3)) for side_count in range(n)])
# "linear" beziers
def get_linear_coordinate_with_time(t:float, /, *, start:complex, end:complex):
if t < 0 or t > 1:
return
return (1-t)*start + t*end
def get_linear_length(*, start:complex, end:complex):
return abs(end - start)
def point_from_proportion(t:float, elements:dict):
if t < 0 or t > 1:
return
for index, key in enumerate(elements):
if key >= t:
#The point is on this element
element = elements[key]
element_end = list(elements)[index]
if index:
element_start = list(elements)[index-1]
else:
element_start = 0
break
#find proportion of the point on element
element_end -= element_start
t -= element_start
alpha = t / element_end
#different kind of elements
if isinstance(element, Line):
return get_linear_coordinate_with_time(alpha, start=element.start, end=element.end)
elif isinstance(element, QuadraticBezier):
return get_quadratic_coordinate_with_time(alpha, p0=element.start, p1=element.control, p2=element.end)
elif isinstance(element, CubicBezier):
return get_cubic_coordinate_with_time(alpha, p0=element.start, p1=element.control1, p2=element.control2, p3=element.end)
elif isinstance(element, Arc):
return element.point(alpha)
else:
raise NotImplementedError(f"Element type \"{type(element)}\" is not implemented yet")
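# Minimal usage sketch (assumed example values, not part of the original module): approximate
# the arc length of a quadratic Bezier from 0 to 1+1j with control point 1+0j and compare it
# with the straight chord between its endpoints.
if __name__ == "__main__":
    approx_len = get_quadratic_arc_length(P0=0 + 0j, P1=1 + 0j, P2=1 + 1j)
    chord_len = get_linear_length(start=0 + 0j, end=1 + 1j)
    print(f"quadratic arc length ~ {approx_len:.4f} (chord {chord_len:.4f})")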
|
import numpy as np
import os
import sys
import tensorflow as tf
from model import CVAE
import matplotlib.pyplot as plt
import streamlit as st
@st.cache(allow_output_mutation=True)
def load_model(PATH):
model = CVAE(2, 128)
# Restore the weights
model.load_weights(PATH)
return model
def main():
st.title('Generate Synthetic Handwriting Digits')
st.sidebar.title('MNIST')
digit = st.sidebar.selectbox('Pick a digit from 0~9', range(0,10))
num_examples_to_generate = st.sidebar.selectbox('Pick the number of generated images', (4, 8, 16))
model = load_model('saved_model/my_checkpoint')
random_vector_for_generation = tf.random.normal(shape=[num_examples_to_generate, model.latent_dim])
y = np.zeros(shape=(10))
np.put(y, digit, 1)
y = np.array(num_examples_to_generate*[y])
pred = model.sample(random_vector_for_generation, tf.convert_to_tensor(y))
all_y = [pred[i, :, :, 0].numpy() for i in range(num_examples_to_generate)]
for i in range(int(num_examples_to_generate / 4)):
st.image(image=all_y[i*4 : (i+1)*4], width=64)
st.markdown('[Project Page](https://github.com/HongleiXie/demo-CVAE)')
if __name__ == '__main__':
main()
|
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
router = APIRouter()
load_dotenv()  # load variables from a .env file (if present) before reading them with os.getenv
headers = {'x-rapidapi-key': os.getenv('api_key'),
           'x-rapidapi-host': os.getenv('host')}
class RentalList(BaseModel):
api_key: SecretStr = config.settings.api_key
city: str = "New York"
state: str = "NY"
prop_type: str = "condo"
limit: int = 5
@router.get('/for_rent_list_base')
async def for_rent_list_base(rentallist: RentalList):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": rentallist.city,
"state_code": rentallist.state,
"limit": rentallist.limit,
"offset": "0",
"sort":"relevance",
"prop_type": rentallist.prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()
|
import random
import time
import my_set
import sys
class Element:
def __init__(self, val: int):
self.val = val
def __hash__(self) -> int:
return self.val
def time_fct(l1, l2, fct):
start = time.time()
output = fct(l1, l2)
# output = timeout(fct, args=(l1, l2), timeout_duration=10)
diff = time.time() - start
if output is None:
return float('inf')
return diff
def run(size, fct):
l1 = [random.randint(0, size) for i in range(size)]
l2 = [random.randint(0, size) for i in range(size)]
return time_fct(l1, l2, fct)
# from https://stackoverflow.com/a/13821695/10203321
def timeout(func, args=(), kwargs={}, timeout_duration=1, default=None):
import signal
class TimeoutError(Exception):
pass
def handler(signum, frame):
raise TimeoutError()
# set the timeout handler
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout_duration)
try:
result = func(*args, **kwargs)
except TimeoutError as exc:
result = default
finally:
signal.alarm(0)
return result
print(run(10000, my_set.intersection))
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import requests
import getpass
from .utils import MyAdapter
from .utils import make_response
try:
r_input = raw_input
except NameError:
r_input = input
DEFAULT_BASE_URL = "https://127.0.0.1:5000/api/v1.0"
FUNC_KEYS = ('name', 'invoked', 'timestamp', 'author',
'udef', 'description', 'args')
class Client(object):
"""Function evaluation API client for Unicorn service.
"""
__jsonheader = {'content-type': 'application/json'}
def __init__(self, base_url=None):
self._url_config = [DEFAULT_BASE_URL, '/functions']
self.url = base_url
self._session = requests.Session()
self._session.mount(self.url, MyAdapter())
@property
def url(self):
"""Base URL.
"""
return ''.join(self._url_config)
@url.setter
def url(self, u):
if u is None:
self._url_config[0] = DEFAULT_BASE_URL
else:
self._url_config[0] = u
def __repr__(self):
return "[Function API] Unicorn Service on: '{url}'.".format(url=self.url)
def get(self, name, **kws):
"""Get evaluation result from function defined by *name*, key-value
pairs as function's input parameters.
"""
url = '/'.join([self.url, name])
r = self._session.get(url, params=kws, verify=False)
return make_response(r)
if __name__ == "__main__":
client = Client()
print(client.get('f1', x=1, y=2))
|
from .db import schemas
from typing import List, Optional
from pydantic import BaseModel
class EnrichedUrl(BaseModel):
url: Optional[schemas.Url] = None
stats: List
class RequestMetadata:
def __init__(
self, referer: str = "", os: str = "", browser: str = "", device: str = ""
):
self.referer = referer
self.os = os
self.browser = browser
self.device = device
|
from util import *
import yaml
import sys
from connect import create_service
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.compose']
def main():
    if len(sys.argv) < 2:
print('To create and send email, include details in YAML file and specify it as an argument')
exit(0)
with open(sys.argv[1], 'r') as f:
email = yaml.load(f, Loader=yaml.FullLoader)
service = create_gmail_service()
message = create_message(email["sender"], email["to"], email["subject"], email["message_text"])
send_message(service, 'me', message)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'glucose sample data requires Pandas (http://pandas.pydata.org) to be installed')
from os.path import join
from . import _data_dir
data_dir = _data_dir()
data = pd.read_csv(
join(data_dir, 'CGM.csv'),
sep=',',
parse_dates=[1],
index_col=1
)
|
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
from BoostBuild import Tester, List
t = Tester()
# Attempt to declare a generator for creating OBJ from RC files.
# That generator should be considered together with standard
# CPP->OBJ generators and successfully create the target.
# Since we don't have RC compiler everywhere, we fake the action.
# The resulting OBJ will be unusable, but it must be created.
t.write("project-root.jam", """
import rcc ;
""")
t.write("rcc.jam", """
import type ;
import generators ;
import print ;
# Use 'RCC' to avoid conflicts with definitions in
# the standard rc.jam and msvc.jam
type.register RCC : rcc ;
rule resource-compile ( targets * : sources * : properties * )
{
print.output $(targets[1]) ;
print.text "rc-object" ;
}
generators.register-standard rcc.resource-compile : RCC : OBJ ;
""")
t.write("Jamfile", """
obj r : r.rcc ;
""")
t.write("r.rcc", """
""")
t.run_build_system()
t.expect_content("bin/$toolset/debug/r.obj", "rc-object\n")
t.cleanup()
|
def max_word(filename):
"""Return the most frequent word in the file filename."""
freq = {}
    for piece in open(filename, encoding = 'UTF-8').read().lower().split(): # "piece" is a whitespace-separated token, not a line
# only consider alphabetic characters within this piece
word = ''.join(c for c in piece if c.isalpha())
if word:
freq[word] = 1 + freq.get(word, 0) # word goes in as the key,
# value is freq which is incremented
# if word was already in dict.
# Else 0 is the default freq
max_word = ''
max_count = 0
for (w, c) in freq.items(): # (key, value) tuples represent (word, count)
if c > max_count:
max_word = w
max_count = c
print(f"The most frequent word is '{max_word}'")
print(f"{max_word} occurs '{max_count}' times")
return (max_word, max_count)
filename = 'sherlock_holmes.txt'
max_word(filename)
|
import argparse
import subprocess
import sys
from typing import Optional
from .containerization import CAN_RUN_NATIVELY, DockerContainer, DockerRun
from .plugins import Command
class PolyBuild(Command):
name = "build"
help = "runs `polybuild`: clang with PolyTracker instrumentation enabled"
_container: Optional[DockerContainer] = None
def __init_arguments__(self, parser: argparse.ArgumentParser):
parser.add_argument("--c++", action="store_true", help="run polybuild++ in C++ mode")
parser.add_argument("args", nargs=argparse.REMAINDER)
def run(self, args: argparse.Namespace):
if getattr(args, "c++", False):
cmd = "polybuild_script++"
else:
cmd = "polybuild_script"
# Are we trying to compile C++ code without using `polybuild++`?
if sys.stderr.isatty() and sys.stdin.isatty() and any(
arg.strip()[-4:].lower() in (".cpp", ".cxx", ".c++") for arg in args.args
):
# one of the arguments ends in .cpp, .cxx, or .c++
sys.stderr.write("It looks like you are trying to compile C++ code.\n"
"This requires `polybuild++`, not `polybuild`!\n")
while True:
sys.stderr.write(f"Would you like to run with `polybuild++` instead? [Yn] ")
try:
choice = input().lower()
except KeyboardInterrupt:
exit(1)
if choice == "n":
break
elif choice == "y" or choice == "":
cmd = "polybuild_script++"
break
args = [cmd] + args.args
if CAN_RUN_NATIVELY:
return subprocess.call(args) # type: ignore
else:
if self._container is None:
self._container = DockerContainer()
return DockerRun.run_on(self._container, args, interactive=False)
class PolyInst(Command):
name = "lower"
help = "runs `polybuild` --lower-bitcode"
_container: Optional[DockerContainer] = None
def __init_arguments__(self, parser: argparse.ArgumentParser):
parser.add_argument("--input-file", type=str, help="input bitcode file")
parser.add_argument("--output-file", type=str, help="output bitcode file")
def run(self, args: argparse.Namespace):
cmd = "polybuild_script"
items = [cmd] + ["--lower-bitcode", "-i", args.input_file, "-o", args.output_file]
if CAN_RUN_NATIVELY:
return subprocess.call(items)
else:
if self._container is None:
self._container = DockerContainer()
return DockerRun.run_on(self._container, items, interactive=False)
def main():
PolyBuild(argparse.ArgumentParser(add_help=False)).run(argparse.Namespace(args=sys.argv[1:], **{"c++": False}))
def main_plus_plus():
PolyBuild(argparse.ArgumentParser(add_help=False)).run(argparse.Namespace(args=sys.argv[1:], **{"c++": True}))
|
#!/usr/bin/python
# The usual preamble
import numpy as np
import pandas as pd
import time
# Get data (US cities/states/counties dataset) and start cleanup
data = pd.read_csv('data/us_cities_states_counties.csv', delimiter='|')
data.dropna(inplace=True)
print "Done reading input file..."
start = time.time()
# Get all city information with total population greater than 500,000
data_big_cities = data[data["Total population"] > 500000]
# Compute "crime index" proportional to
# (Total population + 2*(Total adult population) - 2000*(Number of robberies)) / 100000
data_big_cities_stats = data_big_cities[
["Total population", "Total adult population", "Number of robberies"]].values
predictions = np.dot(data_big_cities_stats, np.array(
[1.0, 2.0, -2000.0])) / 100000.0
data_big_cities["Crime index"] = predictions
# Aggregate "crime index" scores by state
data_big_cities["Crime index"][data_big_cities["Crime index"] >= 0.02] = 0.032
data_big_cities["Crime index"][data_big_cities["Crime index"] < 0.01] = 0.005
print data_big_cities["Crime index"].sum()
end = time.time()
print "Total end-to-end time: %.2f" % (end - start)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 12, 2011
@author:Isabel Restrepo
A script to run (fast) k-means on the set of means found for each category
"""
import os;
import dbrec3d_batch
import multiprocessing
import Queue
import time
import random
import optparse
import sys
from math import log, ceil
from xml.etree.ElementTree import ElementTree
import glob
#time.sleep(30);
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
class bof_job():
def __init__(self, cm_i_file, CM_set, max_it, fm_i_file):
self.cm_i_file = cm_i_file;
self.CM_set = CM_set;
self.max_it = max_it;
self.fm_i_file = fm_i_file;
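# Spin up num_procs worker processes that pull bof_job items from a shared queue and run the
# dbrec3d "bofKMeansOnVectorProcess" on each one (see bof_worker.run below).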
def execute_bof_jobs(jobs, num_procs=4):
work_queue=multiprocessing.Queue();
result_queue=multiprocessing.Queue();
for job in jobs:
work_queue.put(job)
for i in range(num_procs):
worker= bof_worker(work_queue,result_queue)
worker.start();
print("worker with name ",worker.name," started!")
class bof_worker(multiprocessing.Process):
def __init__(self,work_queue,result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
job = self.work_queue.get_nowait()
except Queue.Empty:
break
start_time = time.time();
dbrec3d_batch.set_stdout('logs/log_' + str(os.getpid())+ ".txt");
dbrec3d_batch.init_process("bofKMeansOnVectorProcess");
dbrec3d_batch.set_input_string(0, job.cm_i_file);
dbrec3d_batch.set_input_from_db(1, job.CM_set);
dbrec3d_batch.set_input_unsigned(2, job.max_it);
dbrec3d_batch.set_input_string(3, job.fm_i_file);
dbrec3d_batch.run_process();
dbrec3d_batch.reset_stdout();
dbrec3d_batch.clear();
# print ("Runing time for worker:", self.name)
# print(time.time() - start_time);
#*******************The Main Algorithm ************************#
if __name__=="__main__":
dbrec3d_batch.register_processes();
dbrec3d_batch.register_datatypes();
#Parse inputs
parser = optparse.OptionParser(description='bof Statistics Pass 0');
parser.add_option('--k_means_dir', action="store", dest="k_means_dir");
parser.add_option('--num_cores', action="store", dest="num_cores", type="int", default=4);
parser.add_option('--max_it', action="store", dest="max_it", type="int", default=100);
parser.add_option('--nclasses', action="store", dest="nclasses", type="int", default=5);
options, args = parser.parse_args()
    k_means_dir = options.k_means_dir; #path where all CM_i means are saved and where the output FM_i will be written to
num_cores = options.num_cores;
max_it = options.max_it;
nclasses = options.nclasses;
if not os.path.isdir(k_means_dir +"/"):
print "Invalid k_means Dir"
sys.exit(-1);
FM_path = k_means_dir + "/FM";
if not os.path.isdir(FM_path +"/"):
os.mkdir(FM_path +"/");
start_time = time.time();
#Combine all CM_i means into one set CM to be passed for k-means
mean_file_sfx = k_means_dir + "/class" ;
dbrec3d_batch.init_process("bof_combine_category_means_process");
dbrec3d_batch.set_input_string(0, mean_file_sfx);
dbrec3d_batch.set_input_unsigned(1, nclasses);
dbrec3d_batch.run_process();
(id, type) = dbrec3d_batch.commit_output(0);
CM_set= dbvalue(id, type);
saveout = sys.stdout # save initial state of stdout
#Begin multiprocessing
job_list=[];
#Enqueue jobs
for class_id in range (0, nclasses):
cm_file = mean_file_sfx + str(class_id) + "/lowest_sse_means.txt";
fm_file = FM_path + "/FM_means_class" + str(class_id) + ".txt";
current_job = bof_job(cm_file, CM_set, max_it, fm_file);
job_list.append(current_job);
execute_bof_jobs(job_list, num_cores);
sys.stdout = saveout
print ("Pass 0 done")
print ("Total running time: ");
print(time.time() - start_time);
|
import tweepy #library for building apps for twitter
import time #library for controling the sleep of the message
CONSUMER_KEY ="Enter your api key"
CONSUMER_SECRET = "Enter your api secret key"
ACCESS_KEY ="Enter your access key"
ACCESS_SECRET="Enter your access secret key"
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
file = "last_tweet_id.txt" #linking the file which stores the last tweet id
def read_last_id(file):
file_read = open(file,'r')
last_id = file_read.read()
file_read.close()
return int(last_id)
def write_last_id(file,last_tweet_id):
file_write=open(file,'w')
file_write.write(last_tweet_id)
file_write.close()
return
def reply_to_tweets():
    print("retrieving and replying to tweets......",flush=True)
last_tweet_id=read_last_id(file)
    mentions = api.mentions_timeline(last_tweet_id, tweet_mode="extended")
for mention in reversed(mentions):
print(str(mention.id)+" "+mention.text,flush=True)
last_tweet_id=str(mention.id)
write_last_id(file,last_tweet_id)
if "#helloworld" in mention.text.lower():
            print("found #helloworld!\nWaving back at the user......",flush=True)
            api.update_status("@"+mention.user.screen_name+" "+"Hello world, waving back at you!",mention.id)
if "#hello" in mention.text.lower():
            print("found #hello\nWaving back at the user......",flush=True)
            api.update_status("@"+mention.user.screen_name+" "+"Hello buddy, waving back at you!",mention.id)
if "#test" in mention.text.lower():
            print("matching content found!\nWaving back at the user......",flush=True)
            api.update_status("@"+mention.user.screen_name+" "+"this bot has replied to the tweet successfully",mention.id)
while True: #infinite loop
reply_to_tweets()
time.sleep(6)
|
def get_maintenance_cost(
mean,
remainder,
factor=40,
decimal_places=3
):
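    # Assuming the input mean is the rate of a Poisson-distributed variable X (hypothetical
    # interpretation), E[X^2] = Var(X) + E[X]^2 = mean + mean^2, which the next line computes.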
expected_value_variable_squared = mean + (mean ** 2)
return round(
remainder + (expected_value_variable_squared * factor),
decimal_places
)
a_mean, b_mean = map(float, input().split())
print(
get_maintenance_cost(a_mean, 160, 40)
)
print(
get_maintenance_cost(b_mean, 128, 40)
)
|
#!/usr/bin/env python3
import sys
import os
import time
import serial
# This is a quick and dirty control program for the Kenwood TM-271A and TM-281A
# transceiver to allow remote base like operations for use with Allstar or
# other digital modes. It is primarily targeted at the Raspberry Pi but being
# in Python allows it to be built and run on multiple platforms including
# Windows and Linux.
#
# This is targeting Python3 and you must install the pyserial libraries by
# issuing "pip3 install pyserial"
### Some global variables most for configuration and operation modes
usage = """
Arguments passed in can be:
ser xxx
Where xxx is the name for the serial port appropriate for the OS.
For example "ser COM3" for Windows or "ser /dev/tty0" for linux.
NOTE - must be first argument if used. Environment variable
"TM271Aser" or "TM281Aser: is read if it exists as the default
port to use.
mem xxx
Where xxx is up to a 3 digit memory number
vfo xxxxxxxxxx{-|+}
Where xxxxxxxxxx is the 10 digit frequency in Hz.
        If the leading character is not "0", a zero is prepended as the GHz digit.
        If fewer than 10 digits are supplied, "0"s are appended to the end to make 10 digits.
Thus you can enter 0147330000 or 14733 for the same thing.
The optional + or - sets the offset
This command clears any tone setting, set desired tone afterwards
tone {x}xx.x
Where {x}xx.x is a 2 or 3 digit whole number followed by a decimal.
For example tone 141.3
Note these must match exactly the standard tones
ctcss {x}xx.x
Where {x}xx.x is a 2 or 3 digit whole number followed by a decimal.
For example tone 141.3
Note these must match exactly the standard tones
pow [h|l]
Set transmit power to high or low (h or l)
freq
Read frequency from display suitable for use with TTS.
Multiple arguments can be passed like "mem 33 freq" to change to a memory
and read back what the frequency is. Or "vfo 147330+ tone 100.0".
"""
serialName=os.getenv("TM271Aser")
if serialName is None:
serialName=os.getenv("TM281Aser")
if serialName is None:
serialName = "/dev/ttyUSB0"
verbose=0
radioID = ""
CTCSS_Tones = { # dictionary for tone to control number for the radio
"67.0" : "00",
"69.3" : "01",
"71.9" : "02",
"74.4" : "03",
"77.0" : "04",
"79.7" : "05",
"82.5" : "06",
"85.4" : "07",
"88.5" : "08",
"91.5" : "09",
"94.8" : "10",
"97.4" : "11",
"100.0" : "12",
"103.5" : "13",
"107.2" : "14",
"110.9" : "15",
"114.8" : "16",
"118.8" : "17",
"123.0" : "18",
"127.3" : "19",
"131.8" : "20",
"136.5" : "21",
"141.3" : "22",
"146.2" : "23",
"151.4" : "24",
"156.7" : "25",
"162.2" : "26",
"167.9" : "27",
"173.8" : "28",
"179.9" : "29",
"186.2" : "30",
"192.8" : "31",
"203.5" : "32",
"206.5" : "33",
"210.7" : "34",
"218.1" : "35",
"225.7" : "36",
"229.1" : "37",
"233.6" : "38",
"241.8" : "39",
"250.3" : "40",
"254.1" : "41"
}
### Some functions we'll use
# Send and check for same thing to echo, try to resync if needed.
def sendAndWait(data):
cnt = 50
while 1:
if cnt == 0:
return "ERR"
cnt -= 1
ser.read(1000)
ser.write((data + "\r").encode())
rtn = ser.readline().decode()
if rtn[0:2] == data[0:2]:
break
# Sometimes the radio gets out of sync and will return ?, E or the tail of something else...
# It has not taken the command if it doesn't echo it back.
if verbose >= 2:
print("Retrying - Sent: " + data + " Got: " + rtn)
# time.sleep(0.25)
ser.write(("\r").encode())
ser.read(1000) # force timeout to flush buffers
ser.read(1000) # force timeout to flush buffers
if verbose >= 2:
print(rtn)
return rtn
# Select a memory channel. Should be 3 digits but will fix it up if not
def memorySelect(mem):
data = "VM 1"
sendAndWait(data)
    if len(mem) > 3: # sanity check in case more digits are passed in than the radio can handle
        mem = mem[-3:] # keep only the last three digits
while len(mem) < 3: # radio requires 3 digit memory numbers
mem = "0" + mem
data="MR " + mem
sendAndWait(data)
return
# Select and set the vfo frequency passed in as string.
# freq should be 10 digits as Hz. as in 0147330000
# An appended + or - is used to signify offset
# VF format: (spaces only to align with description, omit when sending to radio)
# 3 14 16 18 20 22 24 26 29 32 36 45 47
# VF 0147330000, 0, 0, 0, 1, 0, 0, 13, 13,056,00600000,0 ,0
# freq,step,shift,reverse,Tone,CTCSS,DCS,ENC,DEC,DCS,Offset ,Narrow,BeatShift
def vfoSelect(freq):
data = "VM 0"
sendAndWait(data)
current = sendAndWait("VF")
if current[-1] == "\r":
current = current[0:-1]
if freq[-1] == "-":
shift = "2"
freq=freq[0:-1]
elif freq[-1] == "+":
shift = "1"
freq=freq[0:-1]
else:
shift = "0"
if freq[0] != "0":
freq = "0" + freq
if len(freq) > 10:
freq = freq[0:10]
while len(freq) < 10:
freq = freq + "0"
data = current[0:3] + freq + ",0," + shift + current[17:20] + "0,0,0" + current[25:]
sendAndWait(data)
return
# Set the tone parameters for the current VFO setting. Reads what is in the radio,
# makes the changes, then writes it back.
# VF format: (spaces only to align with description, omit when sending to radio)
# 3 14 16 18 20 22 24 26 29 32 36 45 47
# VF 0147330000, 0, 0, 0, 1, 0, 0, 13, 13,056,00600000,0 ,0
# freq,step,shift,reverse,Tone,CTCSS,DCS,ENC,DEC,DCS,Offset ,Narrow,BeatShift
def vfoTone(toneFreq, tx, rx):
if rx == 1: #there can only be one
tx = 0
current = sendAndWait("VF")
if current[-1] == "\r":
current = current[0:-1]
if toneFreq == "0": #tone of zero to turn off tone
tx=0
rx=0
theToneNumber = "00"
else:
theToneNumber = CTCSS_Tones[toneFreq]
if verbose >= 2:
print( "Tone set to: " + theToneNumber)
data = current[0:20] + str(tx) + "," + str(rx) + ",0," + theToneNumber + "," + theToneNumber + current[31:]
if verbose >= 2:
print("Setting: " + data)
sendAndWait(data)
return
def powerSelect(pow):
pow = pow.lower()[0:1]
if pow == "h":
sendAndWait("PC 0")
elif pow == "l":
sendAndWait("PC 2")
return
# Read radio frequency
def getFreq():
rtn = sendAndWait("FQ")
# rtn will be "FQ 0147330000,0"
mhz = rtn[4:7]
khz = rtn[7:13]
print(mhz + "." + khz)
# Initialize the serial port as global variable ser
def serialInit(serPort):
ser = serial.Serial(
port= serPort, #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
rtscts=False,
timeout=0.100
)
time.sleep(0.5) # mostly needed on Windows to allow port to settle in background
return ser
#### Start of execution
i=1
ser = None
if (len(sys.argv) > i) and ((sys.argv[i].lower())[0:2] == "-v"):
# verbose must be first
verbose = len(sys.argv[i]) - 1
i += 1
print ("Verbose: " + str(verbose))
try:
# serial init must happen first or second
if (len(sys.argv) > i) and (sys.argv[i].lower() == "ser"):
serialName = sys.argv[i+1]
i += 2
ser = serialInit(serialName)
radioID = sendAndWait("ID")
except:
print("Could not open: " + serialName)
sys.exit(1)
while i < len(sys.argv):
if sys.argv[i].lower() == "mem":
memorySelect(sys.argv[i+1])
i += 2
elif sys.argv[i].lower() == "vfo":
vfoSelect(sys.argv[i+1])
i += 2
elif sys.argv[i].lower() == "tone":
vfoTone(sys.argv[i+1], 1, 0)
i += 2
elif sys.argv[i].lower() == "ctcss":
vfoTone(sys.argv[i+1], 0, 1)
i += 2
elif sys.argv[i].lower()[0:3] == "pow":
powerSelect(sys.argv[i+1])
i += 2
elif sys.argv[i].lower()[0:4] == "freq":
getFreq()
i += 1
elif sys.argv[i].lower() == "help":
print(usage)
break
else:
print ("Error input:" + sys.argv[i])
break
# while
if ser is not None:
ser.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/19 9:27
# @Author : NingAnMe <ninganme@qq.com>
import argparse
from dateutil.relativedelta import relativedelta
import time
from datetime import datetime
import os
import requests
from lib.lib_cimiss import get_cimiss_ssi_ssh_tem_data
from config import DATA_OBS_DIR
def download_cimiss(ymd):
    print(f"({datetime.now()}) Starting CIMISS data download: {ymd}")
out_dir = os.path.join(DATA_OBS_DIR, ymd)
if not os.path.isdir(out_dir):
        print(f"Creating directory: {out_dir}")
os.makedirs(out_dir)
elif len(os.listdir(out_dir)) >= 2000:
        print(f"At least 2000 files already present, skipping re-download: {out_dir}")
return True
else:
        print(f"Fewer than 2000 files present, deleting existing files and re-downloading: {out_dir}")
os.system("rm -rf {}".format(os.path.join(DATA_OBS_DIR, ymd)))
success = False
try_download = 0
while not success:
try_download += 1
        print(f"Download attempt {try_download}")
try:
success = get_cimiss_ssi_ssh_tem_data(ymd, out_dir, copy_file=False)
except requests.exceptions.ReadTimeout as why:
print(why)
pass
if try_download >= 3:
os.system("rm -rf {}".format(os.path.join(DATA_OBS_DIR, ymd)))
            print(f"({datetime.now()}) Failed to download CIMISS data: {ymd}")
break
if success:
        print(f"({datetime.now()}) Successfully downloaded CIMISS data: {out_dir}")
return success
def download_yesterday():
    print(f"({datetime.now()}) Starting real-time CIMISS download")
while True:
date_now = datetime.now() - relativedelta(days=1)
ymd_now = date_now.strftime("%Y%m%d")
if date_now.minute % 15 == 5:
try:
os.system("rm -rf {}".format(os.path.join(DATA_OBS_DIR, ymd_now)))
success = download_cimiss(ymd_now)
if success:
time.sleep(60)
except Exception as why:
print(why)
else:
time.sleep(60)
def download_today():
    print(f"({datetime.now()}) Starting real-time CIMISS download")
tmp_minute = -1
tmp_hour = -1
while True:
date_now = datetime.now()
ymd_now = date_now.strftime("%Y%m%d")
if date_now.minute % 20 == 5 and date_now.minute != tmp_minute:
try:
tmp_minute = date_now.minute
os.system("rm -rf {}".format(os.path.join(DATA_OBS_DIR, ymd_now)))
success = download_cimiss(ymd_now)
if success:
time.sleep(60)
except Exception as why:
print(why)
else:
time.sleep(20)
date_now = datetime.now() - relativedelta(days=1)
ymd_now = date_now.strftime("%Y%m%d")
hours = {5, 6, 7}
if date_now.hour in hours and date_now.hour != tmp_hour:
try:
tmp_hour = date_now.hour
os.system("rm -rf {}".format(os.path.join(DATA_OBS_DIR, ymd_now)))
success = download_cimiss(ymd_now)
if success:
time.sleep(60)
except Exception as why:
print(why)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download CIMISS')
    parser.add_argument('--datetime_start', '-s', help='Start time, YYYYmmddHHMMSS (e.g. 20190101000000)')
    parser.add_argument('--datetime_end', '-e', help='End time, YYYYmmddHHMMSS (e.g. 20190101235959)')
args = parser.parse_args()
if args.datetime_start is not None and args.datetime_end is not None:
datetime_start = datetime.strptime(args.datetime_start, "%Y%m%d%H%M%S")
datetime_end = datetime.strptime(args.datetime_end, "%Y%m%d%H%M%S")
datetime_now = datetime_start
while datetime_now <= datetime_end:
download_cimiss(datetime_now.strftime("%Y%m%d"))
datetime_now += relativedelta(days=1)
else:
download_today()
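# Hedged usage sketch (not part of the original script): a historical backfill
# for January 2019 could be run roughly as
#     python download_cimiss.py -s 20190101000000 -e 20190131235959
# (the script file name is an assumption); without -s/-e the script enters the
# download_today() polling loop instead.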
|
#!/usr/bin/python
from __future__ import with_statement
import sys
import os
default_path = '/usr/local/id/streaming_pb_downloader/current/configs'
default_filename = 'event_name_list.txt'
if len(sys.argv) > 2:
print('Usage:\n{0}\nor\n{0} output_path'.format(sys.argv[0]))
sys.exit(0)
if len(sys.argv) == 2:
default_path=sys.argv[1]
output_file = os.path.join(os.path.normpath(default_path), default_filename)
with open(output_file, 'w') as fileobj:
fileobj.write('ID-MOTION\nID-TAMPERING\nID-DI-ON\nID-DI-OFF\nID-SYS\nID-VLOSS\n')
print('The default event name list is generated in ' + output_file)
|
from jinja2 import Environment, FileSystemLoader
def main(frank, output_dir):
print "Executing python codegen: main()", frank
# load up the jinja template
# env = Environment(loader=FileSystemLoader(""))
env = Environment(loader=FileSystemLoader(__path__))
client = env.get_template("client.jinja")
rendered_client = client.render(frank=frank)
# copy frankdux.types over
# the client is going to reuse all our metaclass stuff
loc = "{}/client.py".format(output_dir)
fp = open(loc, 'w')
fp.write(rendered_client)
fp.write("\n")
fp.close()
# setup.py
setup = env.get_template("setup.jinja")
rendered_setup = setup.render(frank=frank)
loc = "{}/setup.py".format(output_dir)
fp = open(loc, 'w')
    fp.write(rendered_setup)
fp.write("\n")
fp.close()
# make sure we've got an empty __init__ so it's a real package
loc = "{}/__init__.py".format(output_dir)
open(loc, 'w').close()
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
from oslo_log import log
import pymongo
from panko import storage
from panko.storage.mongo import utils as pymongo_utils
from panko.storage import pymongo_base
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""Put the event data into a MongoDB database."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url, conf):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that otherwise we overflow the MongoDB instance with new
# connection since we instantiate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(
url,
conf.database.max_retries,
conf.database.retry_interval)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
@staticmethod
def update_ttl(ttl, ttl_index_name, index_field, coll):
"""Update or create time_to_live indexes.
:param ttl: time to live in seconds.
:param ttl_index_name: name of the index we want to update or create.
:param index_field: field with the index that we need to update.
:param coll: collection which indexes need to be updated.
"""
indexes = coll.index_information()
if ttl <= 0:
if ttl_index_name in indexes:
coll.drop_index(ttl_index_name)
return
if ttl_index_name in indexes:
return coll.database.command(
'collMod', coll.name,
index={'keyPattern': {index_field: pymongo.ASCENDING},
'expireAfterSeconds': ttl})
coll.create_index([(index_field, pymongo.ASCENDING)],
expireAfterSeconds=ttl,
name=ttl_index_name)
def upgrade(self):
# create collection if not present
        if 'event' not in self.db.collection_names():
            self.db.create_collection('event')
# Establish indexes
# NOTE(idegtiarov): This indexes cover get_events, get_event_types, and
# get_trait_types requests based on event_type and timestamp fields.
self.db.event.create_index(
[('event_type', pymongo.ASCENDING),
('timestamp', pymongo.ASCENDING)],
name='event_type_idx'
)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
def clear_expired_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event)
LOG.info("Clearing expired event data is based on native "
"MongoDB time to live feature and going in background.")
|
from functools import lru_cache
from typing import Tuple, List, Dict
import math
from .RayInfluenceModel import RayGridInfluenceModel, Ray
class WideRayGridInfluenceModel(RayGridInfluenceModel):
"""
Calculates the weights of the grid cells by calculating the distance of the center of the cell to the ray.
"""
max_distance = 1.0
@lru_cache(maxsize=512)
def getInfluencesForRay(self, ray: Ray) -> List[Tuple[Tuple[int, int], float]]:
# find relevant x coordinates
start_x = ray.start_x
end_x = ray.end_x
if ray.dx < 0:
start_x += self.max_distance
end_x -= self.max_distance
else:
start_x -= self.max_distance
end_x += self.max_distance
start_i = self.gridDefinition.getIatX(start_x)
end_i = self.gridDefinition.getIatX(end_x)
values: List[Tuple[Tuple[int, int], float]] = []
c = ray.c
# direction in which to count
idir = int(math.copysign(1, end_i-start_i))
# including end
for i in range(start_i, end_i+idir, idir):
x = self.gridDefinition.getXatI(i)
start_y = self.gridDefinition.startY
end_y = self.gridDefinition.endY
if abs(ray.dx) > 0:
start_y = max(
self.gridDefinition.startY, min(
self.gridDefinition.endY,
(self.max_distance * ray.length - c - ray.dy*x) / -ray.dx))
end_y = max(
self.gridDefinition.startY, min(
self.gridDefinition.endY,
(- self.max_distance * ray.length - c - ray.dy*x) / -ray.dx))
start_j = self.gridDefinition.getJatY(start_y)
end_j = self.gridDefinition.getJatY(end_y)
jdir = int(math.copysign(1, end_j-start_j))
for j in range(start_j, end_j+jdir, jdir):
y = self.gridDefinition.getYatJ(j)
p = ray.closest_point_on_line(x, y)
dx = abs(p[0]-x)
dy = abs(p[1]-y)
dl = ray.distance_of_point_along_ray(p, relative=True)
# limit ray to from LED to sensor
if not 0 < dl < 1:
continue
influence = self.getInfluenceFromDistance(dx, dy, dl, ray)
values.append(((i, j), influence))
return values
def getInfluenceFromDistance(self, dx: float, dy: float, dl: float, ray: Ray) -> float:
"""
Returns the influence of a cell given the minimal distance to the direct ray
dx and dy as separate parameters so possible non-square grids can be handled
(if the grid-size is relevant to the influence function)
:param dx: x distance to ray
:param dy: y distance to ray
:param dl: position along the ray; 0.0 = led; 1.0 = sensor
:param ray: the ray currently in question
:return: intensity
"""
return 1/(1 + 0.1 * ((dx**2 + dy**2)*100)**1.5) / ray.length
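# Hedged worked example (not part of the original module): for a ray of unit
# length the weight above is 1.0 at zero offset, about 0.91 at a perpendicular
# distance of 0.1, roughly 0.56 at 0.2, and only about 0.01 at the
# max_distance of 1.0, so cells close to the LED-sensor line dominate.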
|
# Generated by Django 2.2.5 on 2019-09-06 16:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('search', '0001_initial'),
('team', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wordmodel',
name='related_team',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.TeamModel'),
),
migrations.AddField(
model_name='newsmodel',
name='contain_words',
field=models.ManyToManyField(through='search.FreqModel', to='search.WordModel'),
),
migrations.AddField(
model_name='newsmodel',
name='related_teams',
field=models.ManyToManyField(through='team.RelationModel', to='team.TeamModel'),
),
migrations.AddField(
model_name='freqmodel',
name='news',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='search.NewsModel'),
),
migrations.AddField(
model_name='freqmodel',
name='word',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='search.WordModel'),
),
]
|
import re
import itertools
regex = r"<x=(-*\d+),\sy=(-*\d+),\sz=(-*\d+)>"
p = re.compile(regex)
def moon(p_x, p_y, p_z, v_x, v_y, v_z):
return {"pos": {"x": p_x, "y": p_y, "z": p_z}, "vel": {"x": v_x, "y": v_y, "z": v_z}}
a = []
for line in open('day12-2019.txt').read().split('\n'):
x, y, z = p.match(line).groups()
a.append(moon(int(x), int(y), int(z), 0, 0, 0))
def simulate_one_step(moons):
for i in range(len(moons)):
for j in range(i + 1, len(moons)):
moon = moons[i]
other = moons[j]
updated_moon, updated_other = apply_gravity(
moon, other)
moons[i] = updated_moon
moons[j] = updated_other
for i in range(len(moons)):
moons[i] = apply_velocity(moons[i])
def apply_gravity(one, another):
return (moon(one["pos"]["x"],
one["pos"]["y"],
one["pos"]["z"],
one["vel"]["x"] + gravity(one["pos"]["x"],
another["pos"]["x"]),
one["vel"]["y"] + gravity(one["pos"]["y"],
another["pos"]["y"]),
one["vel"]["z"] + gravity(one["pos"]["z"],
another["pos"]["z"])),
moon(another["pos"]["x"],
another["pos"]["y"],
another["pos"]["z"],
another["vel"]["x"] + gravity(another["pos"]["x"],
one["pos"]["x"]),
another["vel"]["y"] + gravity(another["pos"]["y"],
one["pos"]["y"]),
another["vel"]["z"] + gravity(another["pos"]["z"],
one["pos"]["z"]))
)
def apply_velocity(m):
return moon(m["pos"]["x"] + m["vel"]["x"],
m["pos"]["y"] + m["vel"]["y"],
m["pos"]["z"] + m["vel"]["z"],
m["vel"]["x"],
m["vel"]["y"],
m["vel"]["z"],
)
def gravity(a, b):
if a < b:
return 1
elif a == b:
return 0
else:
return -1
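# Hedged worked example (not part of the original solution): gravity() pulls
# each pair of moons one step toward each other per axis, e.g. for x positions
# 3 and 5, gravity(3, 5) == +1 for the first moon and gravity(5, 3) == -1 for
# the second, while equal coordinates leave both velocities unchanged.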
for i in range(1000):
simulate_one_step(a)
def total_energy(m):
return (abs(m["pos"]["x"]) + abs(m["pos"]["y"]) + abs(m["pos"]["z"])) * \
(abs(m["vel"]["x"]) + abs(m["vel"]["y"]) + abs(m["vel"]["z"]))
print(sum(map(total_energy, a)))
|
from . import *
from ..fetchers import OAIFetcher
from ..parsers import OAIParser
class OAIController(CorpusController):
'''CorpusController designed for the OAI database
'''
# Default start date for batch retrieval
DATE_FROM = datetime.date(2010, 1, 1)
# Default time window for batch retrieval
DATE_INCR = datetime.timedelta(days=1)
def batch(self, date_from=None, date_incr=DATE_INCR, **kwargs):
'''Retrieve, parse, and push a batch of documents.
Args:
date_from (datetime.date): Start date
date_incr (datetime.timedelta): Time window
kwargs: Optional OAI parameters
'''
#TODO: save kwargs to query metadata
# Get start date
if not date_from:
date_from = self.database.last_date_range('oai')[0]['until'] + \
datetime.timedelta(days=1)
# Get end date
if date_incr:
date_until = date_from + date_incr
else:
date_until = None
# Fetch batch of articles
#fetcher = fetchers.OAIFetcher.OAIFetcher()
fetcher = OAIFetcher.OAIFetcher()
doc_batches = fetcher.fetch_batch(date_from, date_until, **kwargs)
# Initialize parser
parser = OAIParser.OAIParser()
#parser = parsers.OAIParser()
# Loop over batches of articles
for doc_batch in doc_batches:
# Loop over articles in batch
for doc in doc_batch:
# Parse document
parsed_docs = parser.parse_document(doc)
# Send parsed articles to DB
for doc in parsed_docs:
self.database.add_or_update(doc)
# Update date range
self.database.add_date_range('oai', date_from, date_until)
|
# Backward compatibility with Python 2.
from __future__ import print_function, absolute_import, division
# Suppress all warnings.
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib import legacy_seq2seq
import numpy as np
class Model:
"""Multi-layer Recurrent Neural Networks (LSTM, RNN) for character-level language models.
To learn more about character-rnn,
Visit Andrej Karpathy's [char-rnn](https://github.com/karpathy/char-rnn).
Arguments:
args {argparse.ArgumentParser} -- Command line arguments from train.py
Keyword Arguments:
training {bool} -- Training mode. (default: {True})
Raises:
ValueError -- Model type not supported. Supported types include:
RNN, LSTM, GRU and NAS.
"""
def __init__(self, args, training=True):
self.args = args
# Set batch size & sequence length to 1 if not in training mode.
if not training:
args.batch_size = 1
args.seq_length = 1
# Recurrent Architecture.
if args.model.lower() == 'rnn':
cell_fn = rnn.BasicRNNCell
elif args.model.lower() == 'lstm':
cell_fn = rnn.BasicLSTMCell
elif args.model.lower() == 'gru':
cell_fn = rnn.GRUCell
elif args.model.lower() == 'nas':
cell_fn = rnn.NASCell
else:
raise ValueError("Model type not supported.")
# Construct the hidden layers' cell.
cell = None
cells = []
for _ in range(args.num_layers):
cell = cell_fn(args.rnn_size)
# Add dropout only during training.
if training and (args.input_keep_prob < 1.0 or args.output_keep_prob < 1.0):
cell = rnn.DropoutWrapper(cell,
input_keep_prob=args.input_keep_prob,
output_keep_prob=args.output_keep_prob)
# Append the hidden cell.
cells.append(cell)
# Recurrent Cell.
        self.cell = cell = rnn.MultiRNNCell(cells=cells, state_is_tuple=True)
# Model placeholders.
self.input_data = tf.placeholder(dtype=tf.int32, shape=[args.batch_size, args.seq_length], name="input_data")
self.targets = tf.placeholder(dtype=tf.int32, shape=[args.batch_size, args.seq_length], name="targets")
self.initial_state = cell.zero_state(batch_size=args.batch_size, dtype=tf.float32)
# Recurrent Neural Net Language Modelling.
with tf.variable_scope('rnnlm'):
softmax_W = tf.get_variable(name='softmax_W', shape=[args.rnn_size, args.vocab_size])
softmax_b = tf.get_variable(name='softmax_b', shape=[args.vocab_size])
# Embeddings.
embedding = tf.get_variable('embedding', shape=[args.vocab_size, args.rnn_size])
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
# Dropout input embeddings.
if training:
inputs = tf.nn.dropout(inputs, keep_prob=args.input_keep_prob)
# Split & reshape inputs.
inputs = tf.split(value=inputs, num_or_size_splits=args.seq_length, axis=1)
inputs = [tf.squeeze(input_, axis=[1]) for input_ in inputs]
def loop(prev, _):
"""Function to be performed at each recurrent layer.
This function will be applied to the i-th output in order to generate the i+1-st input, and
decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used
for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Arguments:
prev {tf.Tensor} -- prev is a 2D Tensor of shape [batch_size x output_size].
_ {tf.Tensor} -- i is an integer, the step number (when advanced control is needed).
Returns:
{tf.Tensor} -- A 2D Tensor of shape [batch_size, input_size] which represents
the embedding matrix of the predicted next character.
"""
prev = tf.matmul(prev, softmax_W) + softmax_b
prev_symbol = tf.stop_gradient(input=tf.arg_max(prev, dimension=1))
                return tf.nn.embedding_lookup(embedding, prev_symbol)
# Decoder.
outputs, prev_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, cell,
loop_function=loop if not training else None,
scope='rnnlm')
outputs = tf.reshape(tf.concat(outputs, axis=1), shape=[-1, args.rnn_size])
# Fully connected & softmax layer.
self.logits = tf.matmul(outputs, softmax_W) + softmax_b
self.probs = tf.nn.softmax(self.logits, name="probs")
# Loss function.
with tf.variable_scope('loss'):
seq_loss = legacy_seq2seq.sequence_loss_by_example(
logits=[self.logits],
targets=[tf.reshape(self.targets, shape=[-1])],
weights=[tf.ones(shape=[args.batch_size * args.seq_length])])
self.loss = tf.reduce_sum(seq_loss) / args.batch_size / args.seq_length
self.final_state = prev_state
self.lr = tf.Variable(0.0, trainable=False, name="learning_rate")
# Trainable variables & gradient clipping.
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(t_list=tf.gradients(self.loss, tvars),
clip_norm=args.grad_clip)
# Optimizer.
with tf.variable_scope("optimizer"):
self.global_step = tf.Variable(0, trainable=False, name="global_step")
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
# Train ops.
self.train_op = optimizer.apply_gradients(grads_and_vars=zip(grads, tvars),
global_step=self.global_step,
name="train_op")
# Tensorboard.
tf.summary.histogram('logits', self.logits)
tf.summary.histogram('seq_loss', seq_loss)
tf.summary.scalar('loss', self.loss)
def sample(self, sess: tf.Session, chars: tuple, vocab: dict,
num: int = 200, prime: str = 'The', sampling_type: int = 1):
"""Sample from the prediction probability one character at a time.
Arguments:
sess {tf.Session} -- Session containing the default graph.
chars {tuple} -- List of characters in the vocab.
vocab {dict} -- Mapping from character to id. Dictionary containing characters & corresponding numeric
value.
Keyword Arguments: num {int} -- Number of character to predict. (default: {200}) prime {str} -- Beginning of
prediction sequence. (default: {'The'}) sampling_type {int} -- Description of how to choose the top most
        likely character. Options are 0, 1, & 2. (default: {1})
Returns:
ret {str} -- Sequence containing the prediction of the `num` characters.
"""
# Initial cell state. TODO: Change dtype=tf.float32
# Predict final state given input data & prev state.
state = sess.run(self.cell.zero_state(batch_size=1, dtype=tf.float32))
for char in prime[:-1]:
# Input data: one char at a time.
x = np.zeros(shape=(1, 1))
x[0, 0] = vocab[char]
# Given input data & initial state, predict final state.
feed_dict = {self.input_data: x, self.initial_state: state}
[state] = sess.run([self.final_state], feed_dict=feed_dict)
def weighted_pick(weights):
c = np.cumsum(weights)
s = np.sum(weights)
return int(np.searchsorted(c, np.random.rand(1) * s))
# Initial character.
ret = prime
char = prime[-1]
# Prediction loop.
for i in range(num):
x = np.zeros(shape=(1, 1))
x[0, 0] = vocab[char]
# Predict probability of next word & prev state.
feed_dict = {self.input_data: x, self.initial_state: state}
[probs, state] = sess.run([self.probs, self.final_state], feed_dict=feed_dict)
p = probs[0]
if sampling_type == 0:
sample = np.argmax(p)
elif sampling_type == 2:
if char == ' ':
sample = weighted_pick(p)
else:
sample = np.argmax(p)
else: # Sampling type = 1 (default)
sample = weighted_pick(p)
# Get the character representation of sampled character.
pred = chars[sample]
ret += pred
char = pred
return ret
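# Hedged usage sketch (not part of the original file): after restoring a trained
# checkpoint, generation looks roughly like
#     model = Model(saved_args, training=False)
#     with tf.Session() as sess:
#         saver.restore(sess, checkpoint_path)   # saver/checkpoint_path assumed
#         text = model.sample(sess, chars, vocab, num=500, prime='The ')
# where chars/vocab are the tuple and dict produced by the training pipeline.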
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
from django.forms.models import modelformset_factory
from leads.models import Lead
from contacts.forms import ContactForm
from common.models import User, Address, Comment, Team
from common.utils import LEAD_STATUS, LEAD_SOURCE, INDCHOICES, TYPECHOICES, COUNTRIES
from leads.forms import LeadCommentForm, LeadForm
from accounts.forms import AccountForm
from common.forms import BillingAddressForm, ShippingAddressForm
from common.utils import STAGES
from opportunity.forms import OpportunityForm
# NOTE: ShippingAddressForm, STAGES and OpportunityForm are referenced in
# leads_convert() below; their import locations are assumed here.
from accounts.models import Account
from planner.models import Event, Reminder
from planner.forms import ReminderForm
# CRUD Operations Start
@login_required
def leads_list(request):
lead_obj = Lead.objects.all()
page = request.POST.get('per_page')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
city = request.POST.get('city')
email = request.POST.get('email')
if first_name:
lead_obj = Lead.objects.filter(first_name__icontains=first_name)
if last_name:
lead_obj = Lead.objects.filter(last_name__icontains=last_name)
if city:
lead_obj = Lead.objects.filter(address=Address.objects.filter
(city__icontains=city))
if email:
lead_obj = Lead.objects.filter(email__icontains=email)
return render(request, 'leads/leads.html', {
'lead_obj': lead_obj, 'per_page': page})
@login_required
def add_lead(request):
accounts = Account.objects.all()
users = User.objects.filter(is_active=True).order_by('email')
teams = Team.objects.all()
assignedto_list = request.POST.getlist('assigned_to')
teams_list = request.POST.getlist('teams')
lead_account = request.POST.get('account_name')
lead_email = request.POST.get('email')
lead_phone = request.POST.get('phone')
form = LeadForm(assigned_to=users)
address_form = BillingAddressForm()
if request.method == 'POST':
form = LeadForm(request.POST, assigned_to=users)
address_form = BillingAddressForm(request.POST)
if form.is_valid() and address_form.is_valid():
lead_obj = form.save(commit=False)
address_object = address_form.save()
lead_obj.address = address_object
lead_obj.created_by = request.user
lead_obj.save()
lead_obj.assigned_to.add(*assignedto_list)
lead_obj.teams.add(*teams_list)
if request.POST.get('status') == "converted":
Account.objects.create(
created_by=request.user, name=lead_account,
email=lead_email, phone=lead_phone
)
if request.POST.get("savenewform"):
return HttpResponseRedirect(reverse("leads:add_lead"))
else:
return HttpResponseRedirect(reverse('leads:list'))
else:
return render(request, 'leads/create_lead.html', {
'lead_form': form, 'address_form': address_form,
'accounts': accounts, 'countries': COUNTRIES,
'teams': teams, 'users': users,
'status': LEAD_STATUS, 'source': LEAD_SOURCE,
'assignedto_list': assignedto_list, 'teams_list': teams_list})
else:
return render(request, 'leads/create_lead.html', {
'lead_form': form, 'address_form': address_form,
'accounts': accounts, 'countries': COUNTRIES, 'teams': teams,
'users': users, 'status': LEAD_STATUS, 'source': LEAD_SOURCE,
'assignedto_list': assignedto_list, 'teams_list': teams_list})
@login_required
@csrf_exempt
def view_lead(request, lead_id):
lead_record = get_object_or_404(Lead, id=lead_id)
comments = Comment.objects.filter(lead__id=lead_id).order_by('-id')
meetings = Event.objects.filter(Q(created_by=request.user) | Q(updated_by=request.user),
event_type='Meeting', attendees_leads=lead_record).order_by('-id')
calls = Event.objects.filter(Q(created_by=request.user) | Q(updated_by=request.user),
event_type='Call', attendees_leads=lead_record).order_by('-id')
RemindersFormSet = modelformset_factory(Reminder, form=ReminderForm, can_delete=True)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '10',
}
reminder_form_set = RemindersFormSet(data)
return render(request, "leads/view_leads.html", {
"lead_record": lead_record, 'status': LEAD_STATUS, 'countries': COUNTRIES,
'comments': comments, 'reminder_form_set': reminder_form_set,
'meetings': meetings, 'calls': calls
})
@login_required
def edit_lead(request, lead_id):
lead_obj = get_object_or_404(Lead, id=lead_id)
address_obj = get_object_or_404(Address, id=lead_obj.address.id)
accounts = Account.objects.all()
users = User.objects.filter().order_by('email')
form = LeadForm(instance=lead_obj, assigned_to=users)
address_form = BillingAddressForm(instance=address_obj)
assignedto_list = request.POST.getlist('assigned_to')
teams_list = request.POST.getlist('teams')
lead_account = request.POST.get('account_name')
lead_email = request.POST.get('email')
lead_phone = request.POST.get('phone')
teams = Team.objects.all()
if request.method == 'POST':
form = LeadForm(request.POST, instance=lead_obj, assigned_to=users)
address_form = BillingAddressForm(request.POST, instance=address_obj)
if request.POST.get('status') == "converted":
form.fields['account_name'].required = True
else:
form.fields['account_name'].required = False
if form.is_valid() and address_form.is_valid():
dis_address_obj = address_form.save()
lead_obj = form.save(commit=False)
lead_obj.address = dis_address_obj
lead_obj.created_by = request.user
lead_obj.save()
lead_obj.assigned_to.clear()
lead_obj.assigned_to.add(*assignedto_list)
lead_obj.teams.clear()
lead_obj.teams.add(*teams_list)
if request.POST.get('status') == "converted":
Account.objects.create(
created_by=request.user, name=lead_account,
email=lead_email, phone=lead_phone
)
return HttpResponseRedirect(reverse('leads:list'))
else:
return render(request, 'leads/create_lead.html', {
'lead_obj': lead_obj,
'lead_form': form,
'address_form': address_form,
'accounts': accounts, 'countries': COUNTRIES,
'teams': teams, 'users': users,
'status': LEAD_STATUS, 'source': LEAD_SOURCE,
'assignedto_list': assignedto_list, 'teams_list': teams_list})
else:
return render(request, 'leads/create_lead.html', {
'lead_form': form, 'address_form': address_form,
'lead_obj': lead_obj, 'address_obj': address_obj,
'accounts': accounts, 'countries': COUNTRIES, 'teams': teams,
'users': users, 'status': LEAD_STATUS, 'source': LEAD_SOURCE,
'assignedto_list': assignedto_list, 'teams_list': teams_list})
@login_required
def remove_lead(request, lead_id):
lead_obj = get_object_or_404(Lead, id=lead_id)
lead_obj.delete()
return HttpResponseRedirect(reverse('leads:list'))
# CRUD Operations Ends
# Leads Conversion Functionality Starts
# The Below View Should be Discussed for Functionality
@login_required
def leads_convert(request, pk):
account_form = AccountForm()
billing_form = BillingAddressForm()
shipping_form = ShippingAddressForm(prefix='ship')
lead_objects = Lead.objects.all()
lead_obj = Lead.objects.get(id=pk)
accounts = Account.objects.all()
contact_form = ContactForm()
opportunity_form = OpportunityForm()
teams = Team.objects.all()
if request.method == "POST":
if request.POST.get('accountname') == "on":
account_form = AccountForm(request.POST)
billing_form = BillingAddressForm(request.POST)
shipping_form = ShippingAddressForm(request.POST, prefix='ship')
if account_form.is_valid() and billing_form.is_valid() and shipping_form.is_valid():
billing_object = billing_form.save()
shipping_object = shipping_form.save()
account_object = account_form.save(commit=False)
account_object.billing_address = billing_object
account_object.shipping_address = shipping_object
account_object.created_by = request.user
account_object.save()
lead_obj.delete()
return HttpResponseRedirect(reverse('leads:list'))
else:
street1 = request.POST.get('street')
city1 = request.POST.get('city')
state1 = request.POST.get('state')
postcode1 = request.POST.get('postcode')
country1 = request.POST.get('country')
shipdata = {'street1': street1, 'city1': city1, 'state1': state1,
'postcode1': postcode1, 'country1': country1}
return render(request, 'leads/checkbox.html', {
'account_form': account_form, 'form1': billing_form, 'form2': shipping_form,
'form5': COUNTRIES, 'stages': STAGES, 'acc_error': account_form.errors,
'shipdata': shipdata, 'sources': LEAD_SOURCE, 'industries': INDCHOICES,
'teams': teams, 'task': lead_objects, 'post': lead_obj,
'accounts': accounts,
'counties': COUNTRIES})
if request.POST.get('contactname') == "on":
contact_form = ContactForm(request.POST)
address_form = BillingAddressForm(request.POST)
if contact_form.is_valid() and address_form.is_valid():
address_obj = address_form.save()
contact_obj = contact_form.save(commit=False)
contact_obj.address = address_obj
contact_obj.created_by = request.user
contact_obj.save()
return HttpResponseRedirect(reverse('contacts:list'))
else:
return render(request, 'leads/checkbox.html', {
'post': lead_obj, 'accounts': accounts, 'teams': teams,
'contact_form': contact_form, 'address_form': address_form})
if request.POST.get('opportunityname') == "on":
opportunity_form = OpportunityForm(request.POST)
if opportunity_form.is_valid():
opportunity_form.save()
return HttpResponseRedirect(reverse('opportunities:list'))
else:
return render(request, 'leads/checkbox.html', {
'post': lead_obj, 'accounts': accounts, 'sources': LEAD_SOURCE,
'teams': teams, 'stages': STAGES, 'opportunity_form': opportunity_form})
else:
return render(request, 'leads/checkbox.html', {
'form': account_form, 'form1': billing_form, 'form2': shipping_form,
'form5': COUNTRIES, 'industries': INDCHOICES, 'teams': teams,
'task': lead_objects, 'counties': COUNTRIES,
'post': lead_obj, 'accounts': accounts})
# Leads Conversion Functionality Ends
# Comments Section Start
@login_required
def add_comment(request):
if request.method == 'POST':
lead = get_object_or_404(Lead, id=request.POST.get('leadid'))
if request.user in lead.assigned_to.all() or request.user == lead.created_by:
form = LeadCommentForm(request.POST)
if form.is_valid():
lead_comment = form.save(commit=False)
lead_comment.comment = request.POST.get('comment')
lead_comment.commented_by = request.user
lead_comment.lead = lead
lead_comment.save()
data = {
"comment_id": lead_comment.id, "comment": lead_comment.comment,
"commented_on": lead_comment.commented_on,
"commented_by": lead_comment.commented_by.email
}
return JsonResponse(data)
else:
return JsonResponse({"error": form['comment'].errors})
else:
data = {'error': "You Dont Have permissions to Comment"}
return JsonResponse(data)
@login_required
def edit_comment(request):
if request.method == "POST":
comment = request.POST.get('comment')
comment_id = request.POST.get("commentid")
lead_comment = get_object_or_404(Comment, id=comment_id)
form = LeadCommentForm(request.POST)
if request.user == lead_comment.commented_by:
if form.is_valid():
lead_comment.comment = comment
lead_comment.save()
data = {"comment": lead_comment.comment, "commentid": comment_id}
return JsonResponse(data)
else:
return JsonResponse({"error": form['comment'].errors})
else:
return JsonResponse({"error": "You dont have authentication to edit"})
else:
return render(request, "404.html")
@login_required
def remove_comment(request):
if request.method == 'POST':
comment_id = request.POST.get('comment_id')
comment = get_object_or_404(Comment, id=comment_id)
if request.user == comment.commented_by:
comment.delete()
data = {"cid": comment_id}
return JsonResponse(data)
else:
return JsonResponse({"error": "You Dont have permisions to delete"})
else:
return HttpResponse("Something Went Wrong")
# Comments Section End
# Other Views
@login_required
def get_leads(request):
if request.method == 'GET':
leads = Lead.objects.all()
return render(request, 'leads/leads_list.html', {'leads': leads})
else:
        return HttpResponse('Invalid method or not authenticated in get_leads')
|
import logging
import inspect
from ..core.memory import MemoryException, FileMap, AnonMap
from .helpers import issymbolic
######################################################################
# Abstract classes for capstone/unicorn based cpus
# no emulator by default
from unicorn import *
from unicorn.x86_const import *
from unicorn.arm_const import *
from capstone import *
from capstone.arm import *
from capstone.x86 import *
logger = logging.getLogger("EMULATOR")
class UnicornEmulator(object):
'''
Helper class to emulate a single instruction via Unicorn.
'''
def __init__(self, cpu):
self._cpu = cpu
text = cpu.memory.map_containing(cpu.PC)
# Keep track of all memory mappings. We start with just the text section
self._should_be_mapped = {
text.start: (len(text), UC_PROT_READ | UC_PROT_EXEC)
}
# Keep track of all the memory Unicorn needs while executing this
# instruction
self._should_be_written = {}
def reset(self):
self._emu = self._unicorn()
self._to_raise = None
def _unicorn(self):
if self._cpu.arch == CS_ARCH_ARM:
return Uc(UC_ARCH_ARM, UC_MODE_ARM)
elif self._cpu.arch == CS_ARCH_X86:
if self._cpu.mode == CS_MODE_32:
return Uc(UC_ARCH_X86, UC_MODE_32)
elif self._cpu.mode == CS_MODE_64:
return Uc(UC_ARCH_X86, UC_MODE_64)
raise RuntimeError("Unsupported architecture")
def _create_emulated_mapping(self, uc, address):
'''
Create a mapping in Unicorn and note that we'll need it if we retry.
:param uc: The Unicorn instance.
:param address: The address which is contained by the mapping.
:rtype Map
'''
m = self._cpu.memory.map_containing(address)
permissions = UC_PROT_NONE
if 'r' in m.perms:
permissions |= UC_PROT_READ
if 'w' in m.perms:
permissions |= UC_PROT_WRITE
if 'x' in m.perms:
permissions |= UC_PROT_EXEC
uc.mem_map(m.start, len(m), permissions)
self._should_be_mapped[m.start] = (len(m), permissions)
return m
def get_unicorn_pc(self):
if self._cpu.arch == CS_ARCH_ARM:
return self._emu.reg_read(UC_ARM_REG_R15)
elif self._cpu.arch == CS_ARCH_X86:
if self._cpu.mode == CS_MODE_32:
return self._emu.reg_read(UC_X86_REG_EIP)
elif self._cpu.mode == CS_MODE_64:
return self._emu.reg_read(UC_X86_REG_RIP)
def _hook_xfer_mem(self, uc, access, address, size, value, data):
'''
Handle memory operations from unicorn.
'''
assert access in (UC_MEM_WRITE, UC_MEM_READ, UC_MEM_FETCH)
if access == UC_MEM_WRITE:
self._cpu.write_int(address, value, size*8)
# If client code is attempting to read a value, we need to bring it
# in from Manticore state. If we try to mem_write it here, Unicorn
# will segfault. We add the value to a list of things that need to
# be written, and ask to restart the emulation.
elif access == UC_MEM_READ:
value = self._cpu.read_bytes(address, size)
if address in self._should_be_written:
return True
self._should_be_written[address] = value
self._should_try_again = True
return False
return True
def _hook_unmapped(self, uc, access, address, size, value, data):
'''
We hit an unmapped region; map it into unicorn.
'''
try:
m = self._create_emulated_mapping(uc, address)
except MemoryException as e:
self._to_raise = e
self._should_try_again = False
return False
self._should_try_again = True
return False
def _interrupt(self, uc, number, data):
'''
Handle software interrupt (SVC/INT)
'''
from ..core.cpu.abstractcpu import Interruption
self._to_raise = Interruption(number)
return True
def _to_unicorn_id(self, reg_name):
# TODO(felipe, yan): Register naming is broken in current unicorn
# packages, but works on unicorn git's master. We leave this hack
# in until unicorn gets updated.
if unicorn.__version__ <= '1.0.0' and reg_name == 'APSR':
reg_name = 'CPSR'
if self._cpu.arch == CS_ARCH_ARM:
return globals()['UC_ARM_REG_' + reg_name]
elif self._cpu.arch == CS_ARCH_X86:
# TODO(yan): This needs to handle AF register
return globals()['UC_X86_REG_' + reg_name]
else:
# TODO(yan): raise a more appropriate exception
raise TypeError
def emulate(self, instruction):
'''
Emulate a single instruction.
'''
# The emulation might restart if Unicorn needs to bring in a memory map
# or bring a value from Manticore state.
while True:
self.reset()
# Establish Manticore state, potentially from past emulation
# attempts
for base in self._should_be_mapped:
size, perms = self._should_be_mapped[base]
self._emu.mem_map(base, size, perms)
for address, values in self._should_be_written.items():
for offset, byte in enumerate(values, start=address):
if issymbolic(byte):
from ..core.cpu.abstractcpu import ConcretizeMemory
raise ConcretizeMemory(offset, 8,
"Concretizing for emulation")
self._emu.mem_write(address, ''.join(values))
# Try emulation
self._should_try_again = False
self._step(instruction)
if not self._should_try_again:
break
def _step(self, instruction):
'''
A single attempt at executing an instruction.
'''
registers = set(self._cpu.canonical_registers)
# Refer to EFLAGS instead of individual flags for x86
if self._cpu.arch == CS_ARCH_X86:
# The last 8 canonical registers of x86 are individual flags; replace
# with the eflags
registers -= set(['CF','PF','AF','ZF','SF','IF','DF','OF'])
registers.add('EFLAGS')
# XXX(yan): This concretizes the entire register state. This is overly
# aggressive. Once capstone adds consistent support for accessing
# referred registers, make this only concretize those registers being
# read from.
for reg in registers:
val = self._cpu.read_register(reg)
if issymbolic(val):
from ..core.cpu.abstractcpu import ConcretizeRegister
raise ConcretizeRegister(reg, "Concretizing for emulation.",
policy='ONE')
self._emu.reg_write(self._to_unicorn_id(reg), val)
# Bring in the instruction itself
text_bytes = self._cpu.read_bytes(self._cpu.PC, instruction.size)
self._emu.mem_write(self._cpu.PC, ''.join(text_bytes))
self._emu.hook_add(UC_HOOK_MEM_READ_UNMAPPED, self._hook_unmapped)
self._emu.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED, self._hook_unmapped)
self._emu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self._hook_unmapped)
self._emu.hook_add(UC_HOOK_MEM_READ, self._hook_xfer_mem)
self._emu.hook_add(UC_HOOK_MEM_WRITE, self._hook_xfer_mem)
self._emu.hook_add(UC_HOOK_INTR, self._interrupt)
saved_PC = self._cpu.PC
try:
self._emu.emu_start(self._cpu.PC, self._cpu.PC+instruction.size, count=1)
except UcError as e:
            # We request re-execution by signaling an error; if we didn't set
            # _should_try_again, it was likely an actual error
if not self._should_try_again:
raise
if self._should_try_again:
return
if logger.isEnabledFor(logging.DEBUG):
logger.debug("="*10)
for register in self._cpu.canonical_registers:
logger.debug("Register % 3s Manticore: %08x, Unicorn %08x",
register, self._cpu.read_register(register),
self._emu.reg_read(self._to_unicorn_id(register)) )
logger.debug(">"*10)
# Bring back Unicorn registers to Manticore
for reg in registers:
val = self._emu.reg_read(self._to_unicorn_id(reg))
self._cpu.write_register(reg, val)
        # Unicorn hack. On single step unicorn won't advance the PC register
mu_pc = self.get_unicorn_pc()
if saved_PC == mu_pc:
self._cpu.PC = saved_PC + instruction.size
# Raise the exception from a hook that Unicorn would have eaten
if self._to_raise:
raise self._to_raise
return
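# Hedged note (not part of the original module): emulate() implements a retry
# protocol -- the unmapped-memory and read hooks record what Unicorn was
# missing in _should_be_mapped/_should_be_written and set _should_try_again,
# and the while-loop in emulate() replays the same instruction after copying
# that state in, so one instruction may be attempted several times before it
# either completes or re-raises the recorded exception.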
|
#!/usr/bin/env python3
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from tqdm import tqdm
from util.constants import PLEARN_ACTIONS, plearn_action_to_text
from mivp_agent.util.data_structures import LimitedHistory
def plot_rewards(reward_function, defender=False):
# Init lists to graph
x_pos = []
y_pos = []
v_pos = []
for x in tqdm(range(-85, 85, 4)):
for y in range(-115, 20, 4):
# Find the max value across all headings
max_value = None
for action in PLEARN_ACTIONS:
fake_state = {
'NAV_X': x,
'NAV_Y': y,
'NAV_HEADING': PLEARN_ACTIONS[action]['course'],
'TAGGED': False,
'NODE_REPORTS': {
'evan': {
'NAV_X': 200,
'NAV_Y': 200,
'NAV_HEADING': 200,
}
}
}
value = reward_function(fake_state)
if max_value is None or value > max_value:
max_value = value
v_pos.append(max_value.item(0))
x_pos.append(x)
y_pos.append(y)
fig = plt.figure()
ax = plt.axes(projection='3d')
# Do the plotting
ax.plot([56,-83,-53,82,56], [16,-49,-114,-56,16], 'red', linewidth=4)
ax.plot_trisurf(x_pos, y_pos, v_pos)
plt.show()
class TestGrapher:
def __init__(self, save_dir=None):
# Parse args
self.save_dir = save_dir
# Setup matplotlib
matplotlib.use('TkAgg')
plt.ion()
# Configure axes
self.fig, self.axs = plt.subplots(2,2)
self.axs[0, 0].set_title("Success Precent")
self.success, = self.axs[0,0].plot([], [], '-go')
self.axs[0,0].set_ylim(-5,100)
self.axs[0, 1].set_title("Min Dist to Flag")
self.min_dist, = self.axs[0,1].plot([], [], '-bo')
self.axs[1, 0].set_title("Avg Durration")
self.avg_duration, = self.axs[1,0].plot([], [], '-mo')
self.axs[1,0].set_ylim(0,100)
self.other, = self.axs[1,1].plot([], [], '-ro')
        # Stylistic details
self.fig.tight_layout(pad=2.0)
self.fig.set_size_inches(8, 7)
self.fig.canvas.manager.set_window_title('pLearn Tester')
# Create data structures
self.iters = []
self.success_data = []
self.min_dist_data = []
self.avg_duration_data = []
self.other_data = []
self.max_iter = -1
# Show graph just for the nice-ness factor :)
self._plot()
def add_iteration(self, iter, success_pcnt, min_dist, avg_duration, other, plot=True):
self.iters.append(iter)
self.success_data.append(success_pcnt)
self.min_dist_data.append(min_dist)
self.avg_duration_data.append(avg_duration)
self.other_data.append(other)
if iter > self.max_iter:
self.max_iter = iter
if plot:
self._plot()
def _plot(self):
        right_bound = max(self.max_iter, 1)  # so matplotlib doesn't yell about set_xlim(0, 0)
self.success.set_data(self.iters, self.success_data)
self.axs[0,0].set_xlim(0, right_bound)
self.min_dist.set_data(self.iters, self.min_dist_data)
self.axs[0,1].relim()
self.axs[0,1].set_xlim(0, right_bound)
self.axs[0,1].autoscale()
self.avg_duration.set_data(self.iters, self.avg_duration_data)
self.axs[1,0].set_xlim(0, right_bound)
self.other.set_data(self.iters, self.other_data)
self.axs[1,1].relim()
self.axs[1,1].set_xlim(0, right_bound)
self.axs[1,1].autoscale()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if self.save_dir != None:
plt.savefig(os.path.join(self.save_dir, 'test_graph.png'))
class DebugGrapher:
FRAME_SIZE = 25
def __init__(self, save_dir=None):
# Parse args
self.save_dir = save_dir
# Setup matplotlib
matplotlib.use('TkAgg')
plt.ion()
# Create data structures
self.data_entries = len(PLEARN_ACTIONS) + 2 # 2 for iters and td_data
self.history = LimitedHistory(self.FRAME_SIZE, self.data_entries)
self.episode_iters = []
self.expected_reward = []
# Configure figure
# Gridspec reference: https://matplotlib.org/stable/tutorials/intermediate/gridspec.html
self.fig = plt.figure(constrained_layout=True)
gs = self.fig.add_gridspec(2, 2)
self.ax1 = self.fig.add_subplot(gs[0,:]) # First row all columns
self.ax2 = self.fig.add_subplot(gs[1,0])
self.ax3 = self.fig.add_subplot(gs[1,1])
        # Stylistic details
self.fig.tight_layout(pad=2.0)
self.fig.set_size_inches(8, 7)
self.fig.canvas.manager.set_window_title('pLearn Debugging Charts')
# Setup lines
self.ax1.set_title("~Relative~ Action Value")
self.action_lines = {}
self.action_labels = {}
for a in PLEARN_ACTIONS:
self.action_lines[a], = self.ax1.plot([], [])
self.action_labels[a] = self.ax1.text(0, 0, "")
self.ax2.set_title("Expected Reward")
self.reward, = self.ax2.plot(self.episode_iters, self.expected_reward)
self.ax3.set_title("Loop Time (in seconds)")
self.td, = self.ax3.plot([], [],)
# Show graph just for the nice-ness factor :)
self._plot()
def add_iteration(self, iter, action_values, episode_iters, expected_reward, td, plot=True):
# Store reward info
self.episode_iters = episode_iters
self.expected_reward = expected_reward
# Construct data frame
frame_data = [iter, td]
for a in PLEARN_ACTIONS:
frame_data.append(action_values[a])
# Push to history
self.history.push_frame(np.array(frame_data))
# Plot
if plot:
self._plot()
def _plot(self):
# Get data from history
iters = self.history.entry_history(0)
td = self.history.entry_history(1)
a_values = self.history.select_history([2,3,4,5,6,7], scale=1.0)
for i, a in enumerate(PLEARN_ACTIONS):
# Set line data
if a_values is not None:
self.action_lines[a].set_data(iters, a_values[:,i])
# Reset labels
self.action_labels[a].set_visible(False)
self.action_labels[a] = self.ax1.text(
iters[0]+3, # X position
a_values[:,i][0], # Y position
f'{plearn_action_to_text(a)} {a}')
self.reward.set_data(self.episode_iters, self.expected_reward)
self.td.set_data(iters, td)
# Rescale
x_min = 0
x_max = 1
try:
x_min = iters.min()
x_max = iters.max()+35
except ValueError:
pass
self.ax1.relim()
self.ax1.autoscale()
self.ax1.set_xlim(x_min, x_max)
self.ax2.relim()
self.ax2.autoscale()
self.ax3.relim()
self.ax3.autoscale()
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if self.save_dir != None:
plt.savefig(os.path.join(self.save_dir, 'debug_graph.png'))
'''
# Update the value for each action
for a in PLEARN_ACTIONS:
# Normalize the data between 0 and 1
self.action_lines[a].set_data(self.iters, self.action_data[a])
# Update the action plot window
self.axs[0].relim()
self.axs[0].set_yscale('log')
self.axs[0].autoscale()
self.td.set_data(self.iters, self.td_data)
self.axs[1].relim()
self.axs[1].autoscale()'''
|
from collections import Counter
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
from sparkcc import CCIndexWarcSparkJob
from word_count import WordCountJob
class CCIndexWordCountJob(CCIndexWarcSparkJob, WordCountJob):
""" Word count (frequency list) from WARC records matching a SQL query
on the columnar URL index """
name = "CCIndexWordCount"
records_parsing_failed = None
records_non_html = None
def init_accumulators(self, sc):
super(CCIndexWordCountJob, self).init_accumulators(sc)
self.records_parsing_failed = sc.accumulator(0)
self.records_non_html = sc.accumulator(0)
def log_aggregators(self, sc):
super(CCIndexWordCountJob, self).log_aggregators(sc)
self.log_aggregator(sc, self.records_parsing_failed,
'records failed to parse = {}')
self.log_aggregator(sc, self.records_non_html,
'records not HTML = {}')
@staticmethod
def reduce_by_key_func(a, b):
# sum values of tuple <term_frequency, document_frequency>
return ((a[0] + b[0]), (a[1] + b[1]))
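    # Hedged worked example (not part of the original job): each value is a
    # (term_frequency, document_frequency) pair, so ('the', (3, 1)) from one
    # page and ('the', (5, 1)) from another reduce to ('the', (8, 2)).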
def html_to_text(self, page, record):
try:
encoding = record.rec_headers['WARC-Identified-Content-Charset']
if not encoding:
for encoding in EncodingDetector(page, is_html=True).encodings:
# take the first detected encoding
break
soup = BeautifulSoup(page, 'lxml', from_encoding=encoding)
for script in soup(['script', 'style']):
script.extract()
return soup.get_text(' ', strip=True)
except Exception as e:
self.get_logger().error("Error converting HTML to text for {}: {}",
record.rec_headers['WARC-Target-URI'], e)
self.records_parsing_failed.add(1)
return ''
def process_record(self, record):
if record.rec_type != 'response':
# skip over WARC request or metadata records
return
if not self.is_html(record):
self.records_non_html.add(1)
return
page = record.content_stream().read()
text = self.html_to_text(page, record)
words = map(lambda w: w.lower(),
WordCountJob.word_pattern.findall(text))
for word, count in Counter(words).items():
yield word, (count, 1)
if __name__ == '__main__':
job = CCIndexWordCountJob()
job.run()
|
import pytest
import pathlib
from align.schema.subcircuit import SubCircuit
from align.schema.parser import SpiceParser
# WARNING: Parser capitalizes everything internally as SPICE is case-insensitive
# Please formulate tests accordingly
@pytest.fixture
def setup_basic():
return 'X1 a b testdev x=1f y=0.1'
@pytest.fixture
def setup_multiline():
return '''
X1 a b testdev x =1f y= 0.1
X2 a b testdev x = {capval*2}
'''
@pytest.fixture
def setup_realistic():
return '''
R1 vcc outplus 1e4
R2 vcc outminus 1e4
M1 outplus inplus src 0 NMOS l=0.014u nfin=2
M2 outminus inminus src 0 NMOS l=0.014u nfin=2
C1 outplus 0 1e-12
C2 outminus 0 1e-12
'''
@pytest.fixture
def setup_annotation():
return '''
.subckt diffamp vcc outplus outminus inplus src 0 inminus
* This is one awesome diffamp
* Subcircuit constraints can be directly specified here
* @: Order(instances=['R2', 'M1'], direction='left_to_right')
* @: Order(instances=['M1', 'M2'], direction='left_to_right')
R1 vcc outplus 1e4; Or even here! Amazing !
R2 vcc outminus 1e4; @: Order(instances=['R1', 'R2'], direction='left_to_right')
M1 outplus inplus src 0 NMOS l=0.014u nfin=2
M2 outminus inminus src 0 NMOS l=0.014u nfin=2
.ends
'''
@pytest.fixture
def parser():
parser = SpiceParser()
return parser
def test_lexer_basic(setup_basic):
str_ = setup_basic
types = ['NAME', 'NAME', 'NAME', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER']
assert [tok.type for tok in SpiceParser._generate_tokens(str_)] == types
def test_lexer_with_comments1(setup_basic):
str_ = '''* Some comment here
X1 a b testdev; COMMENT ABOUT M1 pins
; SOME MORE COMMENTS ABOUT PARAMETERS
+ x=1f y=0.1; AND A FW MORE FOR GOOD MEASURE
'''
tokens = list(SpiceParser._generate_tokens(str_))
assert tokens.pop(0).type == 'NEWL'
assert all(tok1.type == tok2.type and tok1.value == tok2.value for tok1, tok2 in zip(tokens, SpiceParser._generate_tokens(setup_basic))), tokens
def test_lexer_with_comments2(setup_basic):
str_ = '''; Some comment here
X1 a b testdev; COMMENT ABOUT M1 pins
* SOME MORE COMMENTS ABOUT PARAMETERS
+ x=1f y=0.1; AND A FW MORE FOR GOOD MEASURE
'''
tokens = list(SpiceParser._generate_tokens(str_))
assert tokens.pop(0).type == 'NEWL'
assert all(tok1.type == tok2.type and tok1.value == tok2.value for tok1, tok2 in zip(tokens, SpiceParser._generate_tokens(setup_basic))), tokens
def test_lexer_multiline(setup_multiline):
str_ = setup_multiline
types = ['NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NAME', 'EQUALS', 'EXPR', 'NEWL']
assert [tok.type for tok in SpiceParser._generate_tokens(str_)] == types
def test_lexer_annotation(setup_annotation):
str_ = setup_annotation
types = ['NEWL', 'DECL', 'NAME', 'NAME', 'NAME', 'NAME', 'NAME', 'NAME', 'NUMBER', 'NAME',
'ANNOTATION', 'ANNOTATION', 'NEWL',
'NAME', 'NAME', 'NAME', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NUMBER', 'ANNOTATION', 'NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NUMBER', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NUMBER', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER', 'NEWL',
'DECL', 'NEWL']
assert [tok.type for tok in SpiceParser._generate_tokens(str_)] == types
def test_lexer_realistic(setup_realistic):
str_ = setup_realistic
types = ['NEWL',
'NAME', 'NAME', 'NAME', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NUMBER', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NAME', 'NAME', 'NUMBER', 'NAME', 'NAME', 'EQUALS', 'NUMBER', 'NAME', 'EQUALS', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NUMBER', 'NUMBER', 'NEWL',
'NAME', 'NAME', 'NUMBER', 'NUMBER', 'NEWL']
assert [tok.type for tok in SpiceParser._generate_tokens(str_)] == types
def test_parser_basic(setup_basic, parser):
parser.library['TESTDEV'] = SubCircuit(name='TESTDEV', pins=['+', '-'], parameters={'X':'1F', 'Y':'0.1'})
parser.parse(setup_basic)
assert len(parser.circuit.elements) == 1
assert parser.circuit.elements[0].name == 'X1'
assert parser.circuit.elements[0].model.name == 'TESTDEV'
assert parser.circuit.nets == ['A', 'B']
def test_parser_multiline(setup_multiline, parser):
parser.library['TESTDEV'] = SubCircuit(name='TESTDEV', pins=['+', '-'], parameters={'X':'1F', 'Y':'0.1'})
parser.parse(setup_multiline)
assert len(parser.circuit.elements) == 2
assert parser.circuit.elements[0].name == 'X1'
assert parser.circuit.elements[1].name == 'X2'
assert parser.circuit.elements[0].model.name == 'TESTDEV'
assert parser.circuit.elements[1].model.name == 'TESTDEV'
assert parser.circuit.nets == ['A', 'B']
def test_parser_realistic(setup_realistic, parser):
parser.parse(setup_realistic)
assert len(parser.circuit.elements) == 6, parser.circuit.elements
assert [x.name for x in parser.circuit.elements] == ['R1', 'R2', 'M1', 'M2', 'C1', 'C2'], parser.circuit.elements
assert len(parser.circuit.nets) == 7, parser.circuit.nets
assert parser.circuit.nets == ['VCC', 'OUTPLUS', 'OUTMINUS', 'INPLUS', 'SRC', '0', 'INMINUS'], parser.circuit.nets
def test_parser_annotation(setup_annotation, parser):
parser.parse(setup_annotation)
assert 'DIFFAMP' in parser.library
assert len(parser.library['DIFFAMP'].elements) == 4
assert [x.name for x in parser.library['DIFFAMP'].elements] == ['R1', 'R2', 'M1', 'M2'], parser.library['DIFFAMP'].elements
assert len(parser.library['DIFFAMP'].nets) == 7, parser.library['DIFFAMP'].nets
assert parser.library['DIFFAMP'].nets == ['VCC', 'OUTPLUS', 'OUTMINUS', 'INPLUS', 'SRC', '0', 'INMINUS'], parser.circuit.nets
assert len(parser.library['DIFFAMP'].constraints) == 3
def test_subckt_decl(setup_realistic, parser):
parser.parse(f'''
.subckt diffamp vcc outplus outminus inplus src 0 inminus
.param res = 100
{setup_realistic}
.ends
X1 vcc outplus outminus inplus src 0 inminus diffamp res=200
''')
assert 'DIFFAMP' in parser.library
assert len(parser.library['DIFFAMP'].elements) == 6
assert len(parser.circuit.elements) == 1
assert parser.circuit.elements[0].model.name == 'DIFFAMP'
def test_model(parser):
parser.parse('.MODEL nmos_rvt nmos KP=0.5M VT0=2')
assert 'NMOS_RVT' in parser.library
assert list(parser.library['NMOS_RVT'].parameters.keys()) == ['W', 'L', 'NFIN', 'KP', 'VT0']
def test_ota_cir_parsing(parser):
with open((pathlib.Path(__file__).parent.parent / 'files' / 'ota.cir').resolve()) as fp:
parser.parse(fp.read())
assert 'OTA' in parser.library
assert len(parser.library['OTA'].elements) == 10
def test_ota_sp_parsing(parser):
with open((pathlib.Path(__file__).parent.parent / 'files' / 'ota.sp').resolve()) as fp:
parser.parse(fp.read())
assert 'OTA' in parser.library
assert len(parser.library['OTA'].elements) == 10
def test_basic_template_parsing(parser):
libsize = len(parser.library)
with open((pathlib.Path(__file__).parent.parent / 'files' / 'basic_template.sp').resolve()) as fp:
parser.parse(fp.read())
assert len(parser.library) - libsize == 31
|
import torch
import torch.nn as nn
from torch.nn import LSTM, LSTMCell, Linear, Parameter
class InteractorwoLSTM(nn.Module):
def __init__(self, hidden_size_textual: int, hidden_size_visual: int,
hidden_size_ilstm: int):
"""
:param input_size:
:param hidden_size:
"""
super(InteractorwoLSTM, self).__init__()
# represented by W_S, W_R, W_V with bias b
self.projection_S = Linear(hidden_size_textual, hidden_size_ilstm, bias=True)
self.projection_V = Linear(hidden_size_visual, hidden_size_ilstm, bias=True)
#self.projection_R = Linear(hidden_size_ilstm, hidden_size_ilstm, bias=True)
# parameter w with bias c
self.projection_w = Linear(hidden_size_ilstm, 1, bias=True)
self.hidden_size_textual = hidden_size_textual
self.hidden_size_visual = hidden_size_visual
self.hidden_size_ilstm = hidden_size_ilstm
# self.iLSTM = LSTMCell(input_size=hidden_size_textual,
# hidden_size=hidden_size_ilstm)
def get_mask_from_sequence_lengths(self, sequence_lengths: torch.Tensor, max_length: int):
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
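    # Hedged worked example (not in the original): sequence_lengths=[2, 3] with
    # max_length=3 yields the mask [[1, 1, 0], [1, 1, 1]], i.e. ones over real
    # tokens and zeros over padding.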
def masked_softmax(self, vector: torch.Tensor, mask: torch.Tensor, dim: int = -1, memory_efficient: bool = False, mask_fill_value: float = -1e32):
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result + 1e-13
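    # Hedged illustration (not in the original): with vector=[2.0, 1.0, 3.0] and
    # mask=[1, 1, 0], the default path softmaxes [2.0, 1.0, 0.0], zeroes the
    # masked entry and renormalises, so the padded position receives
    # (numerically) zero probability instead of sharing mass with real tokens.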
def forward(self, h_s: torch.Tensor, h_v: torch.Tensor, lengths=None,):
"""
:param h_v: with shape (n_batch, T, hidden_size_visual)
:param h_s: with shape (n_batch, N, hidden_size_textual)
:return: outputs of the iLSTM with shape (n_batch, T, hidden_size_ilstm)
"""
n_batch, T, N = h_v.shape[0], h_v.shape[1], h_s.shape[1]
# h_r_{t-1} in the paper
# h_r_prev = torch.zeros([n_batch, self.hidden_size_ilstm], device=self.device)
# c_r_prev = torch.zeros([n_batch, self.hidden_size_ilstm], device=self.device)
token_mask = self.get_mask_from_sequence_lengths(lengths,N) #(n_batch, N)
outputs = []
attention_weights = []
for t in range(T):
beta_t = self.projection_w(torch.tanh(self.projection_S(h_s) +
self.projection_V(h_v[:, t, :]).unsqueeze(dim=1))
).squeeze(dim=2) # shape (n_batch, N)
# alpha_t = torch.softmax(beta_t, dim=1) # shape: (n_batch, N)
alpha_t = self.masked_softmax(beta_t,token_mask,dim=1)
# computing H_t_s with shape (n_batch, hidden_size_textual)
H_t_s = torch.bmm(h_s.permute(0, 2, 1), alpha_t.unsqueeze(dim=2)).squeeze(dim=2)
#r_t = torch.cat([h_v[:, t, :], H_t_s], dim=1) # shape (n_batch, hidden_size_textual+hidden_size_visual)
# r_t = h_v[:, t, :] - H_t_s
# computing h_r_new and c_r_new with shape (n_batch, hidden_size_ilstm)
# h_r_new, c_r_new = self.iLSTM(r_t, (h_r_prev, c_r_prev))
outputs.append(H_t_s.unsqueeze(dim=1))
# h_r_prev, c_r_prev = h_r_new, c_r_new
# attention_weights.append(alpha_t.unsqueeze(dim=1))
return torch.cat(outputs, dim=1)
@property
def device(self) -> torch.device:
"""
Determine which device to place the Tensors upon, CPU or GPU.
"""
return self.projection_S.weight.device
|
from flask import Flask
from flask import request
app = Flask(__name__)
# level 0
@app.route('/')
def home_page():
html = """
<head>
    <title>My Website</title>
</head>
<body>
    This is my website
</body>
"""
return html
# level 1
@app.route('/<name>')
def sayhello(name):
html = f"""
<h3>hello, {name}</h3>
"""
return html
from flask import redirect
@app.route('/baidu')
def tobaidu():
return redirect('http://www.baidu.com')
from flask import render_template
@app.route('/render')
def render():
data = {'Name': "Mory", 'Age': 24}
return render_template('mysite.html', **data)
@app.route('/renderlist')
def renderlist():
data = "Ann Mory Jenny".split()
return render_template('mysitelist.html', names=data)
if __name__ == '__main__':
app.run(host='localhost', port=8000, debug=True)
|
# 2-step process: take an image of the background, then select a colour for the cloth.
#code for image of the background
import cv2
#This is my webcam
cap = cv2.VideoCapture(0)
#back is what camera is reading
while cap.isOpened():
ret,back=cap.read() #Here i am simply reading from my webcam
if ret: #same as writing if ret == True
cv2.imshow("image",back)
        if cv2.waitKey(5) == ord('q'):  # ord() gives the unicode value of 'q', i.e. when 'q' is
            cv2.imwrite('image.jpg', back)  # pressed on the keyboard, capture and save the image
            break
cap.release()
cv2.destroyAllWindows()
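# Illustrative sketch for step 2 (not part of the original script): once the background
# image is saved, the cloth colour is typically selected by converting each frame to HSV
# and thresholding a colour range. The bounds below are assumptions for a red cloth and
# would need tuning for a different colour:
#   import numpy as np
#   frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#   lower, upper = np.array([0, 120, 70]), np.array([10, 255, 255])
#   cloth_mask = cv2.inRange(frame_hsv, lower, upper)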
|
# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
from cobl.source_scripts.handle_duplicate_sources import handle_sources, \
sourcesExist
sources_changes = {'merge': {},
'delete': [],
'deprecate': [35, 191, 139, 362]
}
def forwards_func(apps, schema_editor):
Source = apps.get_model('lexicon', 'Source')
if sourcesExist(sources_changes, Source):
handle_sources(sources_changes)
def reverse_func(apps, schema_editor):
print('Reverse of 306_0131_deprecate_sources does nothing.')
class Migration(migrations.Migration):
dependencies = [('lexicon', '306_0130_auto_20161104_1721')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
import pandas as pd
def load_data(portfolio_data_absolute_path="/home/chris/Dropbox/Finance/data/portfolio_trades.ods",
stock_data_absolute_path="/home/chris/Dropbox/Finance/data/stock_trades.ods",
income_data_absolute_path="/home/chris/Dropbox/Finance/data/income.ods",
etf_master_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/master_data_stocks.ods",
stock_price_data_absolute_path="/home/chris/Dropbox/Finance/data/generated/stock_prices.ods",
cashflow_path = "/home/chris/Dropbox/Finance/data/data_cashflow/bilanz_full.csv",
crypto_path = "/home/chris/Dropbox/Finance/data/crypto/crypto_trades_manual.ods",
include_speculation=False):
"""
Needs odfpy library to load .ods files!
Loads all necessary data sources of the given portfolio: ETF savings portfolio data, speculation data
(stocks, cryptos, etc).
    :param portfolio_data_absolute_path: path to source data of the ETF portfolio (filetype: .ods)
    :param stock_data_absolute_path: path to source data of the speculation (stock) portfolio (filetype: .ods)
    :param income_data_absolute_path: path to income data (filetype: .ods)
    :param etf_master_data_absolute_path: path to master data of ETFs (read with pd.read_csv)
    :param stock_price_data_absolute_path: path to price data of ETFs (read with pd.read_csv)
    :param cashflow_path: csv file of cashflow data
    :param crypto_path: path to manually tracked crypto deposits and trades (filetype: .ods)
    :param include_speculation: whether orders of the speculation portfolio should be included in the output
    :return: tuple of pd.DataFrames with portfolio transactions and master data
"""
orders_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Buys")
dividends_portfolio = pd.read_excel(portfolio_data_absolute_path, engine="odf", sheet_name="Dividends")
orders_speculation = pd.read_excel(stock_data_absolute_path, engine="odf", sheet_name="Buys")
income = pd.read_excel(income_data_absolute_path, engine="odf")
stock_prices = pd.read_csv(stock_price_data_absolute_path)
etf_master = pd.read_csv(etf_master_data_absolute_path)
cashflow_init = pd.read_csv(cashflow_path)
df_crypto_deposits = pd.read_excel(crypto_path, engine="odf", sheet_name="Deposits", skiprows=2, usecols="A:G")
df_crypto_trades = pd.read_excel(crypto_path, engine="odf", sheet_name="Trades", skiprows=1)
    if include_speculation:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
orders_speculation, df_crypto_deposits, df_crypto_trades))
else:
return ((etf_master, orders_portfolio, dividends_portfolio, income, stock_prices, cashflow_init,
None, df_crypto_deposits, df_crypto_trades))
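# Illustrative usage sketch (not part of the original module); the path below is an
# assumption and only shows how the returned tuple is meant to be unpacked:
#   (etf_master, orders_portfolio, dividends_portfolio, income, stock_prices,
#    cashflow_init, orders_speculation, crypto_deposits, crypto_trades) = load_data(
#       portfolio_data_absolute_path="portfolio_trades.ods", include_speculation=True)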
def cleaning_cashflow(df_input: pd.DataFrame) -> pd.DataFrame:
"""
Data cleaning and preprocessing of cashflow data.
:param df_input: Multiple toshl monthly-exports appended into a single dataframe
:return: preprocessed dataframe
"""
import numpy as np
assert df_input.drop("Description",
axis=1).isna().sum().sum() == 0, \
f"There are NaN values in inputfile: {path_data}{filename_cashflow}"
### Data cleaning
df_init = df_input.copy()
df_init['Date'] = pd.to_datetime(df_init['Date'], format='%m/%d/%y')
df_init.drop(columns=['Account', 'Currency', 'Main currency', 'Description'], inplace=True)
df_init['Expense amount'] = df_init['Expense amount'].str.replace(',', '')
df_init['Income amount'] = df_init['Income amount'].str.replace(',', '').astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].str.replace(',', '')
df_init['Expense amount'] = df_init['Expense amount'].astype(np.float64)
df_init['In main currency'] = df_init['In main currency'].astype(np.float64)
### Preprocessing of cashflow amounts
df_init['Amount'] = pd.Series([-y if x > 0. else y
for x, y in zip(df_init['Expense amount'],
df_init['In main currency']
)
]
)
assert df_init[(~df_init["Income amount"].isin(["0.0", "0"])) &
(df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Income amount does not match with main currency amount!"
assert df_init[(~df_init["Expense amount"].isin(["0.0", "0"])) &
(-df_init["In main currency"] != df_init["Amount"])
].count().sum() == 0, "Expense amount does not match with main currency amount!"
### Remap all tags with category "Urlaub" to "old-tag, Urlaub" and map afterwards all double-tags
### containing "Urlaub" to the Urlaub tag
df_init.loc[df_init["Category"] == "Urlaub", "Tags"] = df_init["Tags"].apply(lambda tag: tag + ", Urlaub")
df_init["split_tags"] = df_init["Tags"].apply(lambda x: x.split(","))
assert df_init[df_init["split_tags"].apply(len) > 1]["split_tags"].apply(lambda x: \
"Urlaub" in [s.strip() for s in x]
).all() == True,\
'Some entries with multiple tags do not contain "Urlaub"! Mapping not possible!'
df_init.loc[df_init["split_tags"].apply(len) > 1, "Tags"] = "Urlaub"
df_init = df_init[["Date", "Category", "Tags", "Amount"]]
return(df_init)
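# Illustrative example (not in the original code) of the sign convention applied above:
# a row with Expense amount = 12.5 and In main currency = 12.5 ends up with Amount = -12.5,
# while a pure income row keeps its positive In main currency value as Amount.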
def split_cashflow_data(df_cleaned: pd.DataFrame) -> pd.DataFrame:
"""
Splits whole cashflow data into incomes and expenses and groups it monthly and sums amounts per tag
:param df_cleaned: Cleaned dataframe of cashflow
:return: Tuple of dataframes holding incomes and expenses, each grouped by month
"""
needed_columns = ["Tags", "Date", "Amount"]
assert set(needed_columns).intersection(set(df_cleaned.columns)) == set(needed_columns), \
"Columns missing! Need: {0}, Have: {1}".format(needed_columns, list(df_cleaned.columns))
df_grouped = df_cleaned.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
incomes = df_grouped[df_grouped["Amount"] > 0.].copy()
expenses = df_grouped[df_grouped["Amount"] <= 0.].copy()
return((incomes, expenses))
def preprocess_cashflow(df: pd.DataFrame) -> pd.DataFrame:
"""
Remap tags of input data to custom categories, and change the format of the dataframe in order to
    easily do computations and plots of the cashflow data.
:param df: Dataframe, holding either incomes or expenses (cleaned) and grouped by month (tags as rows)
    :return: dataframe, where each row consists of cashflow data of a month; each column represents a
custom category
"""
assert isinstance(df.index, pd.core.indexes.multi.MultiIndex) and \
set(df.index.names) == set(["Date", "Tags"]) and \
list(df.columns) == ["Amount"], "Dataframe is not grouped by month!"
### Define custom categories for all tags of Toshl: Make sure category names differ from tag-names,
### otherwise column is dropped and aggregate is wrong
category_dict = {
"home": ['rent', 'insurance', 'Miete'],
"food_healthy": ['restaurants', 'Lebensmittel', 'groceries', 'Restaurants', 'Restaurant Mittag'],
"food_unhealthy": ['Fast Food', 'Süßigkeiten'],
"alcoholic_drinks": ['alcohol', 'Alkohol'],
"non-alcoholic_drinks": ['Kaffee und Tee', 'Erfrischungsgetränke', 'coffee & tea', 'soft drinks'],
"travel_vacation": ['sightseeing', 'Sightseeing', 'Beherbergung', 'accommodation', 'Urlaub'],
"transportation": ['bus', 'Bus', 'taxi', 'Taxi', 'metro', 'Metro', 'Eisenbahn', 'train', 'car',
'Auto', 'parking', 'airplane', 'fuel', 'Flugzeug'],
"sports": ['training', 'Training', 'MoTu', 'Turnier', 'sport equipment', 'Billard', 'Konsum Training'],
"events_leisure_books_abos": ['events', 'Events', 'adult fun', 'Spaß für Erwachsene', 'games', 'sport venues',
'membership fees', 'apps', 'music', 'books'],
"clothes_medicine": ['clothes', 'accessories', 'cosmetics', 'medicine', 'hairdresser',
'medical services', 'medical servies', "shoes"],
"private_devices": ['devices', 'bike', 'bicycle', 'movies & TV', 'mobile phone', 'home improvement',
'internet', 'landline phone', 'furniture'],
"presents": ['birthday', 'X-Mas'],
"other": ['wechsel', 'income tax', 'tuition', 'publications', 'Spende'],
"stocks": ['equity purchase'],
#### Income categories
"compensation_caution": ["Entschädigung"],
"salary": ["Salary", "Gehalt Vorschuss"],
"present": ["Geschenk"],
"tax_compensation": ["Kirchensteuer Erstattung", "Steuerausgleich"],
"investment_profit": ["Investing"]
}
from functools import reduce
category_list = reduce(lambda x, y: x + y, category_dict.values())
### Need another format of the table, fill NaNs with zero and drop level 0 index "Amount"
pivot_init = df.unstack()
pivot_init.fillna(0, inplace=True)
pivot_init.columns = pivot_init.columns.droplevel()
#### Extract expenses and incomes from building-upkeep (caution) when switching flats
if 'building upkeep' in pivot_init.columns:
building_upkeep = pivot_init['building upkeep']
pivot_init.drop(columns=['building upkeep'], inplace=True)
elif 'Wechsel' in pivot_init.columns:
building_upkeep = pivot_init['Wechsel']
pivot_init.drop(columns=['Wechsel'], inplace=True)
else:
building_upkeep = None
### Apply custom category definition to dataframe
not_categorized = [tag for tag in pivot_init.columns if tag not in category_list]
assert len(not_categorized) == 0, "There are some tags, which are not yet categorized: {}".format(not_categorized)
pivot = pivot_init.copy()
for category, tag_list in category_dict.items():
tag_list_in_data = list(set(tag_list).intersection(set(pivot.columns)))
pivot[category] = pivot[tag_list_in_data].sum(axis=1)
pivot.drop(columns=tag_list_in_data, inplace=True)
### Keep only categories with non-zero total amount in dataframe
category_sum = pivot.sum().reset_index()
nonzero_categories = list(category_sum[category_sum[0] != 0.]["Tags"])
pivot = pivot[nonzero_categories]
return((building_upkeep, pivot))
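# Illustrative example (not in the original code) of the category remapping above:
# a monthly pivot with tag columns ['rent', 'groceries', 'Fast Food'] is collapsed into
# the custom columns 'home', 'food_healthy' and 'food_unhealthy', each holding the
# summed amount of its tags for that month; the original tag columns are dropped.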
def combine_incomes(toshl_income, excel_income):
"""
Combines two data sources of incomes: toshl incomes and incomes from cashflow excel.
:param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting)
:param excel_income: Raw excel income data
:return: Total income data
"""
df_in = toshl_income.reset_index().copy()
df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x)
df_in2 = excel_income.copy()
df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date",
"Art": "Tags",
"Betrag": "Amount"}).dropna()
df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y")
df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x)
df_income = pd.concat([df_in, df_in2], ignore_index=True)
assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!"
df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
return(df_income)
def preprocess_prices(df_prices: pd.DataFrame) -> pd.DataFrame:
"""
Preprocessing of price dataframe. Get latest available price.
:param df_prices: Needed columns: ISIN, Price, Datum, Currency
:return: dataframe containing prices of stocks defined by ISIN on latest available date
"""
dfp = df_prices.copy()
assert dfp["Currency"].drop_duplicates().count() == 1, "Multiple currencies used for price data!"
assert dfp["Currency"].iloc[0] == "EUR", "Currency is not Euro!"
dfp["Date"] = pd.to_datetime(dfp["Date"], format="%d.%m.%Y")
latest_date = dfp["Date"].max()
df_current_prices = dfp[dfp["Date"] == latest_date].reset_index(drop=True)
return(df_current_prices)
def preprocess_orders(df_orders: pd.DataFrame) -> pd.DataFrame:
"""
Set datatypes of columns and split input into dividends transactions and savings-plan transactions.
:param df_orders: Includes all transaction data of the portfolio, all columns in list portfolio_columns
need to be present, Kommentar column needs to be either "monatlich" (transaction of the
savings plan, an ETF is bought) or "Dividende" (income)
:return: tuple of orders- and dividend transaction entries
"""
orders_portfolio = df_orders.copy()
portfolio_columns = ["Index", "Datum", "Kurs", "Betrag", "Kosten", "Anbieter", "Name", "ISIN"]
new_portfolio_columns = ["Index", "Date", "Price", "Investment", "Ordercost", "Depotprovider", "Name", "ISIN"]
rename_columns = {key: value for key, value in zip(portfolio_columns, new_portfolio_columns)}
orders_portfolio = orders_portfolio.rename(columns=rename_columns)
assert set(orders_portfolio.columns).intersection(set(new_portfolio_columns)) == set(new_portfolio_columns), \
"Some necessary columns are missing in the input dataframe!"
### Keep only valid entries
orders_portfolio = orders_portfolio[~orders_portfolio["Investment"].isna()]
orders_portfolio = orders_portfolio[orders_portfolio["Art"] == "ETF Sparplan"]
orders_portfolio = orders_portfolio[new_portfolio_columns]
orders_portfolio = orders_portfolio[~orders_portfolio["Date"].isna()]
orders_portfolio["Date"] = pd.to_datetime(orders_portfolio["Date"], format="%d.%m.%Y")
orders_portfolio["Index"] = orders_portfolio["Index"].astype(int)
assert (orders_portfolio[orders_portfolio["Investment"] > 0.].count() != 0).any() == False, \
"Positive Einträge im Orderportfolio!"
orders_portfolio["Investment"] = -orders_portfolio["Investment"]
orders_portfolio["Ordercost"] = -orders_portfolio["Ordercost"]
return (orders_portfolio)
def preprocess_etf_masterdata(df_master: pd.DataFrame) -> pd.DataFrame:
"""
Convert columns "physical" and "Acc" to booleans and map all entries in "Region" containing "Emerging" to "Emerging"
:param df_master: Master data of all ETFs, columns in etf_columns are required
:return: preprocessed dataframe
"""
etf_master = df_master.copy()
etf_columns = ["Type", "Name", "ISIN", "Region", "Replikationsmethode", "Ausschüttung", "TER%"]
new_etf_columns = ["Type", "Name", "ISIN", "Region", "Replicationmethod", "Distributing", "TER%"]
etf_master = etf_master.rename(columns={key: value for key, value in zip(etf_columns, new_etf_columns)})
assert set(etf_master.columns).intersection(set(new_etf_columns)) == set(new_etf_columns), \
"Some necessary columns are missing in the input dataframe!"
etf_master = etf_master[new_etf_columns]
etf_master["Replicationmethod"] = etf_master["Replicationmethod"].map(lambda x: "Physical" \
if x[:8] == "Physisch" else "Synthetic")
etf_master["Distributing"] = etf_master["Distributing"].map(lambda x: "Distributing" \
if x == "Ausschüttend" else "Accumulating")
etf_master["Region"] = etf_master["Region"].fillna("").map(lambda x: "Emerging" if "Emerging" in x else x)
return (etf_master)
def preprocess_crypto_data(df_deposits, df_trades_init):
"""
Preprocessing of crypto deposits and trade history. Check if trade data is consistent.
:param df_deposits: Holds all deposits of cryptocurrencies.
:param df_trades_init: Table of all trades between different cryptocurrencies.
:return: tuple of cleaned deposits and trade overview dataframes
"""
df_deposits["date"] = pd.to_datetime(df_deposits["date"])
df_deposits = df_deposits[~df_deposits["currency"].isna()]
price_tolerance = 1e-8
for idx in df_trades_init.index:
if df_trades_init["exchange 1"].iloc[idx] != df_trades_init["exchange 2"].iloc[idx]:
if abs(df_trades_init["amount_spent"].iloc[idx] - df_trades_init["amount_gained"].iloc[idx] -
df_trades_init["fee"].iloc[idx]) > price_tolerance:
print("Error in data! Amount spent does not equal gained with fees!")
gain_columns = {
"date": "date",
"amount_gained": "amount",
"currency gained": "currency",
"exchange 2": "exchange"
}
spent_columns = {
"date": "date",
"amount_spent": "amount",
"currency spent": "currency",
"exchange 1": "exchange"
}
df_trades_cleaned = df_trades_init[gain_columns.keys()].rename(columns=gain_columns)
df_spent = df_trades_init[spent_columns.keys()].rename(columns=spent_columns)
df_spent["amount"] *= -1
    df_trades = pd.concat([df_trades_cleaned, df_spent], ignore_index=True)
return((df_deposits, df_trades))
def compute_crypto_portfolio(df_deposits_init, df_trades_init):
"""
Combines deposits and trades into a single dataframe and compute portfolio of currencies.
:param df_deposits: preprocessed deposits of cryptocurrencies
:param df_trades: preprocessed trades of cryptocurrencies
:return: portfolio of cryptocurrencies: exchange, currency, amount
"""
df_deposits = df_deposits_init[["exchange", "currency", "amount"]].copy()
df_trades = df_trades_init[["exchange", "currency", "amount"]].copy()
    df_all = pd.concat([df_deposits, df_trades], ignore_index=True)
crypto_portfolio = df_all.groupby(["exchange", "currency"]).sum().reset_index()
return(crypto_portfolio)
def compute_crypto_portfolio_value(portfolio: pd.DataFrame, prices: pd.DataFrame) -> pd.DataFrame:
"""
Combines current crypto-price data with portfolio and computes value per exchange/currency.
Adds the overall portfolio value with exchange-name "Overall" to the data.
:param portfolio: Holds crypto portfolio data (exchange, currency, amount)
:param prices: Holds prices and masterdata of cryptos (name, symbol, price)
:return: Value of portfolio per cryptocurrency
"""
portfolio_all = portfolio.merge(prices, left_on="currency", right_on="symbol").copy()
portfolio_all = portfolio_all[["exchange", "currency", "name", "amount", "price"]]
portfolio_all.loc[:, "value"] = round(portfolio_all["amount"] * portfolio_all["price"], 3)
portfolio_all = portfolio_all.drop("price", axis=1)
portfolio_overall = portfolio_all.groupby(["currency", "name"]).sum().reset_index()
portfolio_overall["exchange"] = "Overall"
    portfolio_value = pd.concat([portfolio_all, portfolio_overall], ignore_index=True, sort=False)
return(portfolio_value)
def enrich_orders(df_orders, df_etf):
"""
Join ETF master data to transaction data of ETFs.
:param df_orders: ETF transaction data
:param df_etf: ETF master data
:return:
"""
join_columns_etf_master = ["ISIN", "Type", "Region", "Replicationmethod", "Distributing", "TER%"]
orders_etf = df_orders.merge(df_etf[join_columns_etf_master].drop_duplicates(),
how="inner",
left_on="ISIN",
right_on="ISIN").copy()
assert (orders_etf[orders_etf["Region"].isna()][["ISIN", "Name"]].drop_duplicates().count() > 0).any() == False, \
"No ETF master data!"
return (orders_etf)
def get_current_portfolio(df_orders: pd.DataFrame) -> pd.DataFrame:
"""
Gets transactions of latest executed monthly savings plan of ETF portfolio.
:param df_orders: ETF transaction data
:return:
"""
portfolio = df_orders.copy()
last_execution_index = portfolio["Index"].max()
portfolio = portfolio[portfolio["Index"] == last_execution_index].reset_index(drop=True).drop("Index", axis=1)
return (portfolio)
def compute_percentage_per_group(df: pd.DataFrame, group_names: list, compute_columns:list, agg_functions:list) -> list:
"""
Computes len(group_names) aggregations of input dataframe df according to the given agg_functions wrt to the
specified columns in compute_columns.
These three lists need to have the same length!
Currently only sum() as aggregate function is available.
:param df: pd.DataFrame, that needs to have all columns specified in group_names, compute_columns
:param group_names: list of grouping columns
:param compute_columns: list of columns along which groupby computation should be done
:param agg_functions: list of aggregate functions, which are applied to compute_columns
:return result_list: list of resulting dataframes after groupby aggregation
"""
all_columns = set(df.columns)
all_needed_columns = set(group_names).union(set(compute_columns))
assert all_columns.intersection(all_needed_columns) == all_needed_columns, "Columns not present!"
assert len(group_names) == len(compute_columns), "Number of grouping columns does not match compute columns!"
assert len(group_names) == len(
agg_functions), "Number of grouping columns does not match number of aggregate functions!"
df_copy = df.copy()
result_list = []
for idx, group in enumerate(group_names):
compute_col = compute_columns[idx]
agg_func = agg_functions[idx]
if agg_func == "sum":
df_grouped = df_copy[[group, compute_col]].groupby([group]).sum()
total_sum = df_copy[compute_col].sum()
df_grouped["Percentage"] = round(df_grouped[compute_col] / total_sum, 3) * 100
result_list.append(df_grouped.reset_index())
return (result_list)
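# Illustrative usage sketch (not part of the original module); the column names below are
# assumptions chosen to match the docstring above:
#   df_example = pd.DataFrame({"Region": ["EU", "EU", "US"], "Value": [10., 30., 60.]})
#   (by_region,) = compute_percentage_per_group(df_example, ["Region"], ["Value"], ["sum"])
#   # by_region then holds the Value sum per Region plus a "Percentage" column (EU: 40.0, US: 60.0).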
def get_portfolio_value(df_trx: pd.DataFrame, df_prices: pd.DataFrame) -> pd.DataFrame:
"""
Computes the current value of each stock given in the transaction list by using most recent price data.
:param df_trx: dataframe containing all transactions
:param df_prices: dataframe containing historic price data
:return:
"""
if (df_trx.isna().sum()>0).any():
print("Some entries contain NaN values! The statistics might be wrong!")
print(df_trx.isna().sum())
needed_columns_trx = set(["Investment", "Price", "ISIN"])
needed_columns_prices = set(["Price", "ISIN"])
assert needed_columns_trx.intersection(set(df_trx.columns)) == needed_columns_trx, \
"One of the following columns are missing in df_trx: {}".format(needed_columns_trx)
assert needed_columns_prices.intersection(set(df_prices.columns)) == needed_columns_prices, \
"One of the following columns are missing in df_prices: {}".format(needed_columns_prices)
df = df_trx.copy()
dfp = df_prices.copy()
### Compute amount of stocks bought
df["Amount"] = df["Investment"] / df["Price"]
### Drop price of orderdata, which is the price at which a stock was bought --> here we use the current price
df = df.drop("Price", axis=1)
df_portfolio = df.merge(dfp, how="left", left_on="ISIN", right_on="ISIN", suffixes=["", "_y"])\
.rename(columns={"Date_y": "last_price_update"})
assert (df_portfolio["Price"].isna().sum()>0).any() == False, "Prices are missing for a transaction!"
df_portfolio["Value"] = round(df_portfolio["Amount"] * df_portfolio["Price"], 2)
return (df_portfolio)
def filter_portfolio_date(portfolio: pd.DataFrame, offset_months: int) -> pd.DataFrame:
"""
Filters the dataframe, portfolio, to all entries that occur after today's date minus offset_months.
:param portfolio: Needs column Date
:param offset_months: Offset of how many months into the past the output of the dataframe should contain.
:return: dataframe filtered up to offset_months into the past
"""
from datetime import date
assert "Date" in portfolio.columns, 'Column "Date" is missing in input dataframe!'
date_today = pd.Timestamp(date.today())
if offset_months == -1:
return(portfolio)
else:
date_offset = pd.DateOffset(months=offset_months)
portfolio_date_filtered = portfolio[portfolio["Date"] >= date_today - date_offset]
return(portfolio_date_filtered)
def filter_portfolio_stock(portfolio: pd.DataFrame, stock_name: str) -> pd.DataFrame:
"""
Filters the dataframe, portfolio, to the given stock_name.
:param portfolio: Dataframe holding transactions
:param stock_name: Name of the stock, to which the dataframe should be filtered.
:return: dataframe filtered on the specified stock name
"""
assert "Name" in portfolio.columns, 'Column "Name" is missing in input dataframe!'
return(portfolio[portfolio["Name"] == stock_name])
def prepare_orderAmounts_prices(orders: pd.DataFrame):
"""
Extracts a dataframe of buy-prices for each stock at each date. Additionally prepare order-dataframe
with amount of stocks at each date.
:param orders: Holds price and investmentamount data for each stock at every date.
:return: Tuple of orders (including amount of stocks) and prices.
"""
prices = orders[["Date", "Name", "Price"]]
necessary_columns = ["Date", "Name", "Investment", "Ordercost", "Amount"]
df_orders = orders.drop_duplicates().copy()
df_orders["Amount"] = df_orders["Investment"] / df_orders["Price"]
df_orders = df_orders[necessary_columns]
return((df_orders, prices))
def prepare_timeseries(orders: pd.DataFrame):
"""
Computes timeseries chart (value/investment vs date) for all stocks in the portfolio.
Computes timeseries chart for overall portfolio (sum of all stock values at given date) and adds it
to the dataframe.
:param orders: dataframe, containing Investmentamount, ordercost and price for each stock per transactiondate
:return:
"""
necessary_columns = ["Date", "Name", "Investment", "Price", "Ordercost"]
assert set(orders.columns).intersection(set(necessary_columns)) == set(necessary_columns), \
"Necessary columns missing in order data for timeseries preparation!"
orders["Amount"] = orders["Investment"]/orders["Price"]
### Map each transaction-date to the beginning of the month for easier comparison
orders["Date"] = orders["Date"].apply(lambda date: pd.offsets.MonthBegin().rollback(date))
### Prepare master data of all stocks and dates in order history
### TODO: Refine all data preprocessing to just once define master data for all needed tasks
all_stocks = pd.DataFrame(orders["Name"].drop_duplicates()).copy()
all_stocks["key"] = 0
all_dates = pd.DataFrame(orders["Date"].drop_duplicates()).copy()
all_dates["key"] = 0
all_combinations = pd.merge(all_dates, all_stocks, on='key').drop("key", axis=1)
### Prepare dataframe, that gets converted to a timeseries, it has entries of all stocks, that were
### bought in the past at each transaction-date (stock data for stocks, which were not bought at that date,
### is filled with 0 to enable correct computation of cumsum()
group_columns = ["Investment", "Ordercost", "Amount"]
df_init = all_combinations.merge(orders[["Date", "Name"] + group_columns], how="left",
left_on=["Date", "Name"],
right_on=["Date", "Name"]
).fillna(0).copy()
price_lookup = orders[["Date", "Name", "Price"]].copy()
### Compute cumsum() per stockgroup and rejoin date
df_grouped = df_init.sort_values("Date").groupby("Name").cumsum()
df_grouped_all = df_init.merge(df_grouped, how="left",
left_index=True,
right_index=True,
suffixes=["_init", None]
)
df_grouped_all = df_grouped_all.drop(["Investment_init", "Ordercost_init", "Amount_init"], axis=1)
### Rejoin prices and compute values for each stock at each date, fill values of stocks, which were not
### bought at that date again with 0s
df_grouped_all = df_grouped_all.merge(price_lookup, how="left",
left_on=["Date", "Name"],
right_on=["Date", "Name"],
suffixes=[None, "_y"]
)
df_grouped_all["Value"] = df_grouped_all["Amount"] * df_grouped_all["Price"]
df_grouped_all = df_grouped_all.drop(["Amount", "Price"], axis=1)#.fillna(0)
### Finally sum over stock values at each date to arrive at timeseries format
df_overall = df_grouped_all.sort_values("Date").set_index("Date")\
.drop(["Name"], axis=1)\
.groupby("Date").sum() \
.reset_index()
df_overall["Name"] = "Overall Portfolio"
df_timeseries = pd.concat([df_grouped_all, df_overall], ignore_index=True, sort=False)
return(df_timeseries)
|
import os
from PyQt4.QtCore import QDir, QFile, QSettings
from whoosh import fields
__appname__ = 'mikidown'
__version__ = '0.3.1'
class Setting():
def __init__(self, notebooks):
# Index directory of whoosh, located in notebookPath.
self.schema = fields.Schema(
path = fields.TEXT(stored=True),
title = fields.TEXT(stored=True),
content = fields.TEXT(stored=True))
self.notebookName = notebooks[0][0]
self.notebookPath = notebooks[0][1]
self.notePath = os.path.join(self.notebookPath, "notes")
self.htmlPath = os.path.join(self.notebookPath, "html", "notes")
self.indexdir = os.path.join(self.notePath, ".indexdir")
self.attachmentPath = os.path.join(self.notebookPath, "attachments")
self.configfile = os.path.join(self.notebookPath, "notebook.conf")
cssPath = os.path.join(self.notebookPath, "css")
self.cssfile = os.path.join(cssPath, "notebook.css")
self.qsettings = QSettings(self.configfile, QSettings.NativeFormat)
if os.path.exists(self.configfile):
self.extensions = readListFromSettings(self.qsettings,
"extensions")
self.fileExt = self.qsettings.value("fileExt")
self.attachmentImage = self.qsettings.value("attachmentImage")
self.attachmentDocument = self.qsettings.value("attachmentDocument")
self.version = self.qsettings.value("version")
self.geometry = self.qsettings.value("geometry")
self.windowstate = self.qsettings.value("windowstate")
else:
self.extensions = []
self.fileExt = ""
self.attachmentImage = []
self.attachmentDocument = []
self.version = None
self.geometry = None
self.windowstate = None
# Default enabled python-markdown extensions.
# http://pythonhosted.org/Markdown/extensions/index.html
if not self.extensions:
self.extensions = [
'nl2br' # newline to break
, 'strkundr' # bold-italics-underline-delete style
, 'codehilite' # code syntax highlight
, 'fenced_code' # code block
, 'headerid' # add id to headers
, 'headerlink' # add anchor to headers
, 'footnotes'
]
writeListToSettings(self.qsettings, "extensions", self.extensions)
# Default file extension name
if not self.fileExt:
self.fileExt = ".md"
self.qsettings.setValue("fileExt", self.fileExt)
# Image file types that will be copied to attachmentDir
# Inserted as image link
if not self.attachmentImage:
self.attachmentImage = [".jpg", ".jpeg", ".png", ".gif", ".svg"]
self.qsettings.setValue("attachmentImage", self.attachmentImage)
# Document file types that will be copied to attachmentDir
# Inserted as link
if not self.attachmentDocument:
self.attachmentDocument = [".pdf", ".doc", ".odt"]
self.qsettings.setValue("attachmentDocument", self.attachmentDocument)
# Migrate notebookPath to v0.3.0 folder structure
if not self.version:
notebookDir = QDir(self.notebookPath)
# move all markdown files to notes/
dirList = notebookDir.entryList(QDir.Dirs | QDir.NoDotAndDotDot)
if 'css' in dirList:
dirList.remove('css')
fileList = notebookDir.entryList(['*.md', '*.mkd', '*.markdown'])
notebookDir.mkdir('notes')
for d in dirList + fileList:
notebookDir.rename(d, os.path.join('notes', d))
# remove .indexdir folder
oldIndexDir = QDir(os.path.join(self.notebookPath, '.indexdir'))
indexFileList = oldIndexDir.entryList()
for f in indexFileList:
oldIndexDir.remove(f)
notebookDir.rmdir('.indexdir')
# rename notes.css to css/notebook.css
oldCssFile = os.path.join(self.notebookPath, 'notes.css')
QDir().mkpath(cssPath)
if os.path.exists(oldCssFile):
QFile.rename(oldCssFile, self.cssfile)
self.version = '0'
def saveGeometry(self, geometry):
self.qsettings.setValue("geometry", geometry)
def saveWindowState(self, state):
self.qsettings.setValue("windowstate", state)
def recentViewedNotes(self):
return readListFromSettings(self.qsettings, "recentViewedNoteList")
def updateRecentViewedNotes(self, notesList):
writeListToSettings(self.qsettings, "recentViewedNoteList", notesList)
def readListFromSettings(settings, key):
if not settings.contains(key):
return []
value = settings.value(key)
if isinstance(value, str):
return [value]
else:
return value
def writeListToSettings(settings, key, value):
if len(value) >= 1:
settings.setValue(key, value)
else:
settings.remove(key)
|
import asyncio
from perftests.helpers import Time, client
print('Running rest_implementation benchmark with async_v20 version', client.version)
async def rest_implementation(repeats):
for _ in range(repeats):
account = await client.account()
account.json()
loop = asyncio.get_event_loop()
with Time() as t:
loop.run_until_complete(rest_implementation(1000))
|
#!/usr/bin/env python3
import sys, getopt, os
from shutil import copyfile
def main(argv):
    # Fall back to the default template directory if NEWT_TEMPLATE_PATH is unset or empty.
    if os.environ.get('NEWT_TEMPLATE_PATH', '') == '':
template_path = "/opt/newt/templates"
else:
template_path = os.environ['NEWT_TEMPLATE_PATH']
template = ''
filename = ''
EXECUTABLE = False
try:
opts, args = getopt.getopt(argv,"ht:xo:",["template=","output="])
except getopt.GetoptError:
print('newt -t <template> [-o <outputfile>] [-x]')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('newt -t <template> [-o <outputfile>] [-x]')
sys.exit()
elif opt in ("-t", "--template"):
template = arg
elif opt in ("-o", "--output"):
filename = arg
elif opt in ("-x"):
EXECUTABLE = True
if template == '':
print('newt -t <template> [-o <outputfile>] [-x]')
sys.exit(2)
if filename == '':
filename = template
available_templates = {
'c':'.c',
'python2':'.py',
'python3':'.py',
'bash':'.sh'
}
if template not in available_templates:
print("ERROR: Template not found.")
sys.exit(2)
try:
#tf = open(template_path+"/"+template+".template","r")
full_template_filename = template_path+"/"+template+".template"
extension = available_templates[template]
copyfile(full_template_filename,filename+extension)
if EXECUTABLE:
os.chmod(filename+extension,0o755)
    except OSError:
        print("ERROR: Could not access " + full_template_filename)
sys.exit(2)
if __name__ == "__main__":
main(sys.argv[1:])
|
def cci_parens(n):
    """Print all valid combinations of n pairs of parentheses using a breadth-first
    expansion over partial strings stored as [combination, opened_count, closed_count]."""
    ls = [['(', 1, 0]]
while(True):
if(len(ls) == 0):
break
new_ls = []
for elem in ls:
l = elem[1]
r = elem[2]
comb = elem[0]
if(l == n):
print(comb + (')'*(l-r)))
continue
if(r < l):
new_ls.append([comb+')',l,r+1])
new_ls.append([comb+'(',l+1,r])
del ls
ls = new_ls
cci_parens(3)
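# Illustrative note (not part of the original script): for n = 3 the call above is expected
# to print the five valid combinations
#   ((())), (()()), (())(), ()(()), ()()()
# (the exact print order depends on the breadth-first expansion in cci_parens).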
|
import os
import json
import time
import pandas as pd
from haystack import Label, MultiLabel, Answer
from haystack.utils import launch_es, fetch_archive_from_http, print_answers
from haystack.document_stores import ElasticsearchDocumentStore
from haystack import Document, Pipeline
from haystack.nodes.retriever import EmbeddingRetriever
from haystack.nodes import TableReader, FARMReader, RouteDocuments, JoinAnswers, ParsrConverter
def tutorial15_tableqa():
# Recommended: Start Elasticsearch using Docker via the Haystack utility function
launch_es()
## Connect to Elasticsearch
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document")
## Add Tables to DocumentStore
# Let's first fetch some tables that we want to query
# Here: 1000 tables + texts
doc_dir = "data/tutorial15"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/table_text_dataset.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# Add the tables to the DocumentStore
def read_tables(filename):
processed_tables = []
with open(filename) as tables:
tables = json.load(tables)
for key, table in tables.items():
current_columns = table["header"]
current_rows = table["data"]
current_df = pd.DataFrame(columns=current_columns, data=current_rows)
document = Document(content=current_df, content_type="table", id=key)
processed_tables.append(document)
return processed_tables
tables = read_tables(f"{doc_dir}/tables.json")
document_store.write_documents(tables, index="document")
### Retriever
    # Retrievers help narrow down the scope for the Reader to a subset of tables where a given question could be answered.
    # They use simple but fast algorithms.
#
# **Here:** We use the EmbeddingRetriever capable of retrieving relevant content among a database
# of texts and tables using dense embeddings.
retriever = EmbeddingRetriever(
document_store=document_store,
embedding_model="deepset/all-mpnet-base-v2-table",
model_format="sentence_transformers",
)
# Add table embeddings to the tables in DocumentStore
document_store.update_embeddings(retriever=retriever)
## Alternative: BM25Retriever
# from haystack.nodes.retriever import BM25Retriever
# retriever = BM25Retriever(document_store=document_store)
# Try the Retriever
from haystack.utils import print_documents
retrieved_tables = retriever.retrieve("Who won the Super Bowl?", top_k=5)
# Get highest scored table
print(retrieved_tables[0].content)
### Reader
# The TableReader is based on TaPas, a transformer-based language model capable of grasping the two-dimensional structure of a table.
    # It scans the tables returned by the retriever and extracts the answer.
# The available TableReader models can be found [here](https://huggingface.co/models?pipeline_tag=table-question-answering&sort=downloads).
#
# **Notice**: The TableReader will return an answer for each table, even if the query cannot be answered by the table.
# Furthermore, the confidence scores are not useful as of now, given that they will *always* be very high (i.e. 1 or close to 1).
reader = TableReader(model_name_or_path="google/tapas-base-finetuned-wtq", max_seq_len=512)
# Try the TableReader on one Table
table_doc = document_store.get_document_by_id("36964e90-3735-4ba1-8e6a-bec236e88bb2")
print(table_doc.content)
prediction = reader.predict(query="Who played Gregory House in the series House?", documents=[table_doc])
print_answers(prediction, details="minimum")
### Pipeline
    # The Retriever and the Reader can be combined into a pipeline in order to first retrieve relevant tables
# and then extract the answer.
#
# **Notice**: Given that the `TableReader` does not provide useful confidence scores and returns an answer
    # for each of the tables, the sorting of the answers might not be helpful.
table_qa_pipeline = Pipeline()
table_qa_pipeline.add_node(component=retriever, name="EmbeddingRetriever", inputs=["Query"])
table_qa_pipeline.add_node(component=reader, name="TableReader", inputs=["EmbeddingRetriever"])
prediction = table_qa_pipeline.run("When was Guilty Gear Xrd : Sign released?")
print_answers(prediction, details="minimum")
### Pipeline for QA on Combination of Text and Tables
# We are using one node for retrieving both texts and tables, the EmbeddingRetriever.
# In order to do question-answering on the Documents coming from the EmbeddingRetriever, we need to route
# Documents of type "text" to a FARMReader ( or alternatively TransformersReader) and Documents of type
# "table" to a TableReader.
text_reader = FARMReader("deepset/roberta-base-squad2")
# In order to get meaningful scores from the TableReader, use "deepset/tapas-large-nq-hn-reader" or
# "deepset/tapas-large-nq-reader" as TableReader models. The disadvantage of these models is, however,
# that they are not capable of doing aggregations over multiple table cells.
table_reader = TableReader("deepset/tapas-large-nq-hn-reader")
route_documents = RouteDocuments()
join_answers = JoinAnswers()
text_table_qa_pipeline = Pipeline()
text_table_qa_pipeline.add_node(component=retriever, name="EmbeddingRetriever", inputs=["Query"])
text_table_qa_pipeline.add_node(component=route_documents, name="RouteDocuments", inputs=["EmbeddingRetriever"])
text_table_qa_pipeline.add_node(component=text_reader, name="TextReader", inputs=["RouteDocuments.output_1"])
text_table_qa_pipeline.add_node(component=table_reader, name="TableReader", inputs=["RouteDocuments.output_2"])
text_table_qa_pipeline.add_node(component=join_answers, name="JoinAnswers", inputs=["TextReader", "TableReader"])
# Add texts to the document store
def read_texts(filename):
processed_passages = []
with open(filename) as passages:
passages = json.load(passages)
for key, content in passages.items():
document = Document(content=content, content_type="text", id=key)
processed_passages.append(document)
return processed_passages
passages = read_texts(f"{doc_dir}/texts.json")
document_store.write_documents(passages)
# Example query whose answer resides in a text passage
predictions = text_table_qa_pipeline.run(query="Which country does the film Macaroni come from?")
# We can see both text passages and tables as contexts of the predicted answers.
print_answers(predictions, details="minimum")
# Example query whose answer resides in a table
predictions = text_table_qa_pipeline.run(query="Who was Thomas Alva Edison?")
# We can see both text passages and tables as contexts of the predicted answers.
print_answers(predictions, details="minimum")
### Evaluation
# To evaluate our pipeline, we can use haystack's evaluation feature. We just need to convert our labels into `MultiLabel` objects and the `eval` method will do the rest.
def read_labels(filename, tables):
processed_labels = []
with open(filename) as labels:
labels = json.load(labels)
for table in tables:
if table.id not in labels:
continue
label = labels[table.id]
label = Label(
query=label["query"],
document=table,
is_correct_answer=True,
is_correct_document=True,
answer=Answer(answer=label["answer"]),
origin="gold-label",
)
processed_labels.append(MultiLabel(labels=[label]))
return processed_labels
table_labels = read_labels(f"{doc_dir}/labels.json", tables)
passage_labels = read_labels(f"{doc_dir}/labels.json", passages)
eval_results = text_table_qa_pipeline.eval(table_labels + passage_labels, params={"top_k": 10})
# Calculating and printing the evaluation metrics
print(eval_results.calculate_metrics())
## Adding tables from PDFs
    # It can sometimes be hard to provide your data in the form of a pandas DataFrame.
# For this case, we provide the `ParsrConverter` wrapper that can help you to convert, for example, a PDF file into a document that you can index.
os.system("docker run -d -p 3001:3001 axarev/parsr")
time.sleep(30)
os.system("wget https://www.w3.org/WAI/WCAG21/working-examples/pdf-table/table.pdf")
converter = ParsrConverter()
docs = converter.convert("table.pdf")
tables = [doc for doc in docs if doc.content_type == "table"]
print(tables)
if __name__ == "__main__":
tutorial15_tableqa()
# This Haystack script was made with love by deepset in Berlin, Germany
# Haystack: https://github.com/deepset-ai/haystack
# deepset: https://deepset.ai/
|
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="gpu_slic",
version="0.0.1a3",
python_requires=">=3.6",
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True,
install_requires=[
"numpy",
"jinja2",
"scikit-image",
"cupy>=7.0.0,<8.0.0",
],
# metadata to display on PyPI
author="Omar Elamin",
author_email="omar.elamin@diamond.ac.uk",
description="CUDA implementation of the SLIC segmentaion algorithm.",
keywords="segmentation fast cuda slic clustering kmeans",
url="https://gitlab.stfc.ac.uk/RosalindFranklinInstitute/cuda-slic", # project home page, if any
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
options={"bdist_wheel": {"universal": "1"}},
# could also include long_description, download_url, etc.
long_description=long_description,
long_description_content_type="text/markdown",
)
|
from typing import List, Tuple, Union
import numpy as np
from pgdrive.component.lane.circular_lane import CircularLane
from pgdrive.component.lane.straight_lane import StraightLane
from pgdrive.constants import LineType
from pgdrive.utils.utils import import_pygame
PositionType = Union[Tuple[float, float], np.ndarray]
pygame = import_pygame()
COLOR_BLACK = pygame.Color("black")
class ObservationWindow:
def __init__(self, max_range, resolution):
self.max_range = max_range
self.resolution = resolution
self.receptive_field = None
self.receptive_field_double = None
self.canvas_rotate = None
self.canvas_uncropped = pygame.Surface(
(int(self.resolution[0] * np.sqrt(2)) + 1, int(self.resolution[1] * np.sqrt(2)) + 1)
)
self.canvas_display = pygame.Surface(self.resolution)
self.canvas_display.fill(COLOR_BLACK)
def reset(self, canvas_runtime):
canvas_runtime.fill(COLOR_BLACK)
# Assume max_range is only the radius!
self.receptive_field_double = (
int(canvas_runtime.pix(self.max_range[0] * np.sqrt(2))) * 2,
int(canvas_runtime.pix(self.max_range[1] * np.sqrt(2))) * 2
)
self.receptive_field = (
int(canvas_runtime.pix(self.max_range[0])) * 2, int(canvas_runtime.pix(self.max_range[1])) * 2
)
self.canvas_rotate = pygame.Surface(self.receptive_field_double)
self.canvas_rotate.fill(COLOR_BLACK)
self.canvas_display.fill(COLOR_BLACK)
self.canvas_uncropped.fill(COLOR_BLACK)
def _blit(self, canvas, position):
self.canvas_rotate.blit(
canvas, (0, 0), (
position[0] - self.receptive_field_double[0] / 2, position[1] - self.receptive_field_double[1] / 2,
self.receptive_field_double[0], self.receptive_field_double[1]
)
)
def _rotate(self, heading):
rotation = np.rad2deg(heading) + 90
scale = self.canvas_uncropped.get_size()[0] / self.canvas_rotate.get_size()[0]
return pygame.transform.rotozoom(self.canvas_rotate, rotation, scale)
def _crop(self, new_canvas):
size = self.canvas_display.get_size()
self.canvas_display.blit(
new_canvas,
(0, 0),
(
new_canvas.get_size()[0] / 2 - size[0] / 2, # Left
new_canvas.get_size()[1] / 2 - size[1] / 2, # Top
size[0], # Width
size[1] # Height
)
)
def render(self, canvas, position, heading):
# Prepare a runtime canvas for rotation. Assume max_range is only the radius, not diameter!
self._blit(canvas, position)
# Rotate the image so that ego is always heading top
new_canvas = self._rotate(heading)
# Crop the rotated image and then resize to the desired resolution
self._crop(new_canvas)
return self.canvas_display
def get_observation_window(self):
return self.canvas_display
def get_size(self):
assert self.canvas_rotate is not None
return self.canvas_rotate.get_size()
def get_screen_window(self):
return self.get_observation_window()
class WorldSurface(pygame.Surface):
"""
A pygame Surface implementing a local coordinate system so that we can move and zoom in the displayed area.
From highway-env, See more information on its Github page: https://github.com/eleurent/highway-env.
"""
BLACK = (0, 0, 0)
GREY = (100, 100, 100)
GREEN = (50, 200, 0)
YELLOW = (200, 200, 0)
WHITE = (255, 255, 255)
INITIAL_SCALING = 5.5
INITIAL_CENTERING = [0.5, 0.5]
SCALING_FACTOR = 1.3
MOVING_FACTOR = 0.1
LANE_LINE_COLOR = (35, 35, 35)
def __init__(self, size: Tuple[int, int], flags: object, surf: pygame.SurfaceType) -> None:
surf.fill(pygame.Color("Black"))
super().__init__(size, flags, surf)
self.raw_size = size
self.raw_flags = flags
self.raw_surface = surf
self.origin = np.array([0, 0])
self.scaling = self.INITIAL_SCALING
self.centering_position = self.INITIAL_CENTERING
self.fill(self.BLACK)
def pix(self, length: float) -> int:
"""
Convert a distance [m] to pixels [px].
:param length: the input distance [m]
:return: the corresponding size [px]
"""
return int(length * self.scaling)
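    # Illustrative example (not in the original code): with the default INITIAL_SCALING
    # of 5.5 px/m, pix(2.0) returns int(2.0 * 5.5) == 11 pixels.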
def pos2pix(self, x: float, y: float) -> Tuple[int, int]:
"""
Convert two world coordinates [m] into a position in the surface [px]
:param x: x world coordinate [m]
:param y: y world coordinate [m]
:return: the coordinates of the corresponding pixel [px]
"""
return self.pix(x - self.origin[0]), self.pix(y - self.origin[1])
def vec2pix(self, vec: PositionType) -> Tuple[int, int]:
"""
Convert a world position [m] into a position in the surface [px].
:param vec: a world position [m]
:return: the coordinates of the corresponding pixel [px]
"""
return self.pos2pix(vec[0], vec[1])
def is_visible(self, vec: PositionType, margin: int = 50) -> bool:
"""
Is a position visible in the surface?
:param vec: a position
:param margin: margins around the frame to test for visibility
:return: whether the position is visible
"""
x, y = self.vec2pix(vec)
return -margin < x < self.get_width() + margin and -margin < y < self.get_height() + margin
def move_display_window_to(self, position: PositionType) -> None:
"""
Set the origin of the displayed area to center on a given world position.
:param position: a world position [m]
"""
self.origin = position - np.array(
[
self.centering_position[0] * self.get_width() / self.scaling,
self.centering_position[1] * self.get_height() / self.scaling
]
)
def handle_event(self, event: pygame.event.EventType) -> None:
"""
Handle pygame events for moving and zooming in the displayed area.
:param event: a pygame event
"""
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_l:
self.scaling *= 1 / self.SCALING_FACTOR
if event.key == pygame.K_o:
self.scaling *= self.SCALING_FACTOR
if event.key == pygame.K_m:
self.centering_position[0] -= self.MOVING_FACTOR
if event.key == pygame.K_k:
self.centering_position[0] += self.MOVING_FACTOR
def copy(self):
ret = WorldSurface(size=self.raw_size, flags=self.raw_flags, surf=self.raw_surface)
ret.origin = self.origin
ret.scaling = self.scaling
ret.centering_position = self.centering_position
ret.blit(self, (0, 0))
return ret
class VehicleGraphics:
RED = (255, 100, 100)
GREEN = (50, 200, 0)
BLUE = (100, 200, 255)
YELLOW = (200, 200, 0)
BLACK = (60, 60, 60)
PURPLE = (200, 0, 150)
DEFAULT_COLOR = YELLOW
EGO_COLOR = GREEN
font = None
@classmethod
def display(
cls, vehicle, surface, color, heading, label: bool = False, draw_countour=False, contour_width=1
) -> None:
"""
Display a vehicle on a pygame surface.
The vehicle is represented as a colored rotated rectangle.
:param vehicle: the vehicle to be drawn
:param surface: the surface to draw the vehicle on
:param label: whether a text label should be rendered
"""
if not surface.is_visible(vehicle.position):
return
w = surface.pix(vehicle.WIDTH)
h = surface.pix(vehicle.LENGTH)
position = [*surface.pos2pix(vehicle.position[0], vehicle.position[1])]
angle = np.rad2deg(heading)
box = [pygame.math.Vector2(p) for p in [(-h / 2, -w / 2), (-h / 2, w / 2), (h / 2, w / 2), (h / 2, -w / 2)]]
box_rotate = [p.rotate(angle) + position for p in box]
pygame.draw.polygon(surface, color=color, points=box_rotate)
if draw_countour:
            pygame.draw.polygon(surface, cls.BLACK, box_rotate, width=contour_width)
# Label
if label:
if cls.font is None:
cls.font = pygame.font.Font(None, 15)
text = "#{}".format(id(vehicle) % 1000)
text = cls.font.render(text, 1, (10, 10, 10), (255, 255, 255))
surface.blit(text, position)
@classmethod
    def get_color(cls, vehicle) -> Tuple[int, int, int]:
if vehicle.crashed:
color = cls.RED
else:
color = cls.BLUE
return color
class LaneGraphics:
"""A visualization of a lane."""
STRIPE_SPACING: float = 5
""" Offset between stripes [m]"""
STRIPE_LENGTH: float = 3
""" Length of a stripe [m]"""
STRIPE_WIDTH: float = 0.3
""" Width of a stripe [m]"""
LANE_LINE_WIDTH: float = 1
@classmethod
def display(cls, lane, surface, two_side=True, color=None) -> None:
"""
Display a lane on a surface.
:param lane: the lane to be displayed
:param surface: the pygame surface
:param two_side: draw two sides of the lane, or only one side
"""
side = 2 if two_side else 1
stripes_count = int(2 * (surface.get_height() + surface.get_width()) / (cls.STRIPE_SPACING * surface.scaling))
s_origin, _ = lane.local_coordinates(surface.origin)
s0 = (int(s_origin) // cls.STRIPE_SPACING - stripes_count // 2) * cls.STRIPE_SPACING
for side in range(side):
if lane.line_types[side] == LineType.BROKEN:
cls.striped_line(lane, surface, stripes_count, s0, side, color=color)
# circular side or continuous, it is same now
elif lane.line_types[side] == LineType.CONTINUOUS and isinstance(lane, CircularLane):
cls.continuous_curve(lane, surface, stripes_count, s0, side, color=color)
elif lane.line_types[side] == LineType.SIDE and isinstance(lane, CircularLane):
cls.continuous_curve(lane, surface, stripes_count, s0, side, color=color)
# the line of continuous straight and side straight is same now
elif (lane.line_types[side] == LineType.CONTINUOUS) and isinstance(lane, StraightLane):
cls.continuous_line(lane, surface, stripes_count, s0, side, color=color)
elif (lane.line_types[side] == LineType.SIDE) and isinstance(lane, StraightLane):
cls.continuous_line(lane, surface, stripes_count, s0, side, color=color)
# special case
elif lane.line_types[side] == LineType.NONE:
continue
else:
raise ValueError("I don't know how to draw this line type: {}".format(lane.line_types[side]))
@classmethod
def striped_line(cls, lane, surface, stripes_count: int, longitudinal: float, side: int, color=None) -> None:
"""
Draw a striped line on one side of a lane, on a surface.
:param lane: the lane
:param surface: the pygame surface
:param stripes_count: the number of stripes to draw
:param longitudinal: the longitudinal position of the first stripe [m]
:param side: which side of the road to draw [0:left, 1:right]
"""
starts = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING
ends = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_LENGTH
lats = [(side - 0.5) * lane.width_at(s) for s in starts]
cls.draw_stripes(lane, surface, starts, ends, lats, color=color)
@classmethod
def continuous_curve(cls, lane, surface, stripes_count: int, longitudinal: float, side: int, color=None) -> None:
"""
Draw a striped line on one side of a lane, on a surface.
:param lane: the lane
:param surface: the pygame surface
:param stripes_count: the number of stripes to draw
:param longitudinal: the longitudinal position of the first stripe [m]
:param side: which side of the road to draw [0:left, 1:right]
"""
starts = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING
ends = longitudinal + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_SPACING
lats = [(side - 0.5) * lane.width_at(s) for s in starts]
cls.draw_stripes(lane, surface, starts, ends, lats, color=color)
@classmethod
def continuous_line(cls, lane, surface, stripes_count: int, longitudinal: float, side: int, color=None) -> None:
"""
Draw a continuous line on one side of a lane, on a surface.
:param lane: the lane
:param surface: the pygame surface
:param stripes_count: the number of stripes that would be drawn if the line was striped
:param longitudinal: the longitudinal position of the start of the line [m]
:param side: which side of the road to draw [0:left, 1:right]
"""
starts = [longitudinal + 0 * cls.STRIPE_SPACING]
ends = [longitudinal + stripes_count * cls.STRIPE_SPACING + cls.STRIPE_LENGTH]
lats = [(side - 0.5) * lane.width_at(s) for s in starts]
cls.draw_stripes(lane, surface, starts, ends, lats, color=color)
@classmethod
def draw_stripes(cls, lane, surface, starts: List[float], ends: List[float], lats: List[float], color=None) -> None:
"""
Draw a set of stripes along a lane.
:param lane: the lane
:param surface: the surface to draw on
:param starts: a list of starting longitudinal positions for each stripe [m]
:param ends: a list of ending longitudinal positions for each stripe [m]
:param lats: a list of lateral positions for each stripe [m]
"""
if color is None:
color = surface.LANE_LINE_COLOR
starts = np.clip(starts, 0, lane.length)
ends = np.clip(ends, 0, lane.length)
for k, _ in enumerate(starts):
if abs(starts[k] - ends[k]) > 0.5 * cls.STRIPE_LENGTH:
pygame.draw.line(
surface, color, (surface.vec2pix(lane.position(starts[k], lats[k]))),
(surface.vec2pix(lane.position(ends[k], lats[k]))),
max(surface.pix(cls.STRIPE_WIDTH), surface.pix(cls.LANE_LINE_WIDTH))
)
@classmethod
def simple_draw(cls, lane, surface, color=(255, 255, 255)):
from pgdrive.component.blocks.pg_block import PGBlock
segment_num = int(lane.length / PGBlock.CIRCULAR_SEGMENT_LENGTH)
width = lane.width
for segment in range(segment_num):
p_1 = lane.position(segment * PGBlock.CIRCULAR_SEGMENT_LENGTH, -width / 2)
p_2 = lane.position(segment * PGBlock.CIRCULAR_SEGMENT_LENGTH, width / 2)
p_3 = lane.position((segment + 1) * PGBlock.CIRCULAR_SEGMENT_LENGTH, width / 2)
p_4 = lane.position((segment + 1) * PGBlock.CIRCULAR_SEGMENT_LENGTH, -width / 2)
pygame.draw.polygon(
surface, color,
[surface.pos2pix(*p_1),
surface.pos2pix(*p_2),
surface.pos2pix(*p_3),
surface.pos2pix(*p_4)]
)
# # for last part
p_1 = lane.position(segment_num * PGBlock.CIRCULAR_SEGMENT_LENGTH, -width / 2)
p_2 = lane.position(segment_num * PGBlock.CIRCULAR_SEGMENT_LENGTH, width / 2)
p_3 = lane.position(lane.length, width / 2)
p_4 = lane.position(lane.length, -width / 2)
pygame.draw.polygon(
surface, color,
[surface.pos2pix(*p_1),
surface.pos2pix(*p_2),
surface.pos2pix(*p_3),
surface.pos2pix(*p_4)]
)
class ObservationWindowMultiChannel:
CHANNEL_NAMES = ["road_network", "traffic_flow", "target_vehicle", "past_pos"]
def __init__(self, names, max_range, resolution):
assert isinstance(names, list)
        assert set(names).issubset(self.CHANNEL_NAMES), \
            "Unknown channel name(s): {}".format(set(names) - set(self.CHANNEL_NAMES))
self.sub_observations = {
k: ObservationWindow(max_range=max_range, resolution=resolution)
for k in ["traffic_flow", "target_vehicle"]
}
self.sub_observations["road_network"] = ObservationWindow(
max_range=max_range,
resolution=(resolution[0] * 2, resolution[1] * 2)
# max_range=max_range, resolution=resolution
)
self.resolution = (resolution[0] * 2, resolution[1] * 2)
self.canvas_display = None
def get_canvas_display(self):
if self.canvas_display is None:
self.canvas_display = pygame.Surface(self.resolution)
self.canvas_display.fill(COLOR_BLACK)
return self.canvas_display
def reset(self, canvas_runtime):
for k, sub in self.sub_observations.items():
sub.reset(canvas_runtime)
def render(self, canvas_dict, position, heading):
assert isinstance(canvas_dict, dict)
assert set(canvas_dict.keys()) == set(self.sub_observations.keys())
ret = dict()
for k, canvas in canvas_dict.items():
ret[k] = self.sub_observations[k].render(canvas, position, heading)
return self.get_observation_window(ret)
def get_observation_window(self, canvas_dict=None):
if canvas_dict is None:
canvas_dict = {k: v.get_observation_window() for k, v in self.sub_observations.items()}
return canvas_dict
def get_size(self):
return next(iter(self.sub_observations.values())).get_size()
def get_screen_window(self):
canvas = self.get_canvas_display()
ret = self.get_observation_window()
for k in ret.keys():
if k == "road_network":
continue
ret[k] = pygame.transform.scale2x(ret[k])
        def _draw(canvas, key, color):
            # Some channels (e.g. "navigation") are not rendered by every
            # configuration; skip missing keys instead of raising a KeyError.
            if key not in ret:
                return
            mask = pygame.mask.from_threshold(ret[key], (0, 0, 0, 0), (10, 10, 10, 255))
            mask.to_surface(canvas, setcolor=None, unsetcolor=color)
_draw(canvas, "navigation", pygame.Color("Blue"))
_draw(canvas, "road_network", pygame.Color("White"))
_draw(canvas, "traffic_flow", pygame.Color("Red"))
_draw(canvas, "target_vehicle", pygame.Color("Green"))
return canvas
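# Hedged usage sketch (illustrative only; ``ObservationWindow``, ``COLOR_BLACK``
# and the pygame canvases come from the surrounding module, and the parameter
# values below are made up):
#
#   obs = ObservationWindowMultiChannel(
#       names=["road_network", "traffic_flow", "target_vehicle"],
#       max_range=50, resolution=(84, 84))
#   obs.reset(canvas_runtime)
#   frames = obs.render(
#       {"road_network": road_canvas,
#        "traffic_flow": traffic_canvas,
#        "target_vehicle": ego_canvas},
#       position=vehicle_position, heading=vehicle_heading)
#
# ``render`` expects one canvas per sub-observation key and returns a dict of
# per-channel observation windows; ``get_screen_window`` composites them into a
# single colored surface for display.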
|
"""
A Python wrapper for Transmission's RPC interface.
>>> from transmission import Transmission
>>> client = Transmission()
>>> client('torrent-get', ids=range(1,11), fields=['name'])
{u'torrents': [
{u'name': u'Elvis spotted in Florida.mov'},
{u'name': u'Bigfoot sings the hits'},
# ...
{u'name': u'a-song-of-ice-and-fire_final-chapter.txt'}
]}
"""
__version__ = '0.7-dev'
import json
import requests
from .json_utils import (TransmissionJSONEncoder, TransmissionJSONDecoder)
CSRF_ERROR_CODE = 409
UNAUTHORIZED_ERROR_CODE = 401
CSRF_HEADER = 'X-Transmission-Session-Id'
class BadRequest(Exception):
pass
class Unauthorized(Exception):
pass
class Transmission(object):
def __init__(self, host='localhost', port=9091, path='/transmission/rpc',
username=None, password=None, ssl=False, timeout=None):
"""
Initialize the Transmission client.
The default host, port and path are all set to Transmission's
default.
"""
self.url = "http://%s:%d%s" % (host, port, path)
if ssl:
self.url = "https://%s:%d%s" % (host, port, path)
self.headers = {}
self.tag = 0
self.timeout = timeout
self.auth = None
if username or password:
self.auth = (username, password)
def __call__(self, method, **kwargs):
"""
Send request to Transmission's RPC interface.
"""
response = self._make_request(method, **kwargs)
return self._deserialize_response(response)
def _make_request(self, method, **kwargs):
body = json.dumps(self._format_request_body(method, **kwargs), cls=TransmissionJSONEncoder)
response = requests.post(self.url, data=body, headers=self.headers,
auth=self.auth, verify=False, timeout=self.timeout)
if response.status_code == CSRF_ERROR_CODE:
self.headers[CSRF_HEADER] = response.headers[CSRF_HEADER]
return self._make_request(method, **kwargs)
elif response.status_code == UNAUTHORIZED_ERROR_CODE:
raise Unauthorized("Check Username and Password")
return response
def _format_request_body(self, method, **kwargs):
"""
Create a request object to be serialized and sent to Transmission.
"""
fixed = {}
# As Python can't accept dashes in kwargs keys, replace any
# underscores with them here.
for k, v in kwargs.items():
fixed[k.replace('_', '-')] = v
return {"method": method, "tag": self.tag, "arguments": fixed}
def _deserialize_response(self, response):
"""
Return the response generated by the request object, raising
BadRequest if there were any problems.
"""
doc = json.loads(response.text, cls=TransmissionJSONDecoder)
if doc['result'] != 'success':
raise BadRequest("Request failed: '%s'" % doc['result'])
if doc['tag'] != self.tag:
raise BadRequest("Tag mismatch: (got %d, expected %d)" % (doc['tag'], self.tag))
else:
self.tag += 1
if 'arguments' in doc:
return doc['arguments'] or None
return None
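# Hedged usage sketch (illustrative only; assumes a local Transmission daemon
# listening on its default host/port with no authentication):
#
#   >>> client = Transmission()
#   >>> response = client('torrent-get', ids=range(1, 11), fields=['name'])
#   >>> for torrent in (response or {}).get('torrents', []):
#   ...     print(torrent['name'])
#
# A 409 response is handled transparently: the session id from the
# X-Transmission-Session-Id header is stored and the request is retried.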
|
import collections.abc
import json
import random
import string
import urllib.request
from datetime import datetime, timedelta
from os import urandom
from pathlib import Path
from flask import Flask, jsonify, request
from flask_marshmallow import Marshmallow
from flask_sharded_sqlalchemy import BindKeyPattern
from flask_sharded_sqlalchemy import ShardedSQLAlchemy as SQLAlchemy
from osnk.http.auth import EmailAuthentication, TokenAuthentication
from osnk.validations import requires
def post(url, **kwargs):
headers = {'User-Agent': 'Kyak/0.0.0'}
if 'json' in kwargs:
data = json.dumps(kwargs['json']).encode()
headers['Content-Type'] = 'application/json'
elif 'data' in kwargs:
data = kwargs['data']
else:
data = None
req = urllib.request.Request(url, data, headers, method='POST')
urllib.request.urlopen(req)
def tree(keytree, value, dict_class=dict):
try:
k = next(keytree)
except StopIteration:
return value
else:
        return dict_class({k: tree(keytree, value, dict_class)})
def merge(a, b, dict_class=dict, inplace=False):
if not inplace:
a = a.copy()
for k, v in b.items():
if isinstance(v, dict):
a[k] = merge(a.get(k, dict_class()), v, dict_class, inplace=True)
else:
a[k] = v
return a
def update(d, u):
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
def normalize(d, dict_class=dict):
if isinstance(d, dict):
if all(x.isdigit() for x in d.keys()):
return [normalize(v, dict_class) for k, v in d.items()]
else:
return dict_class({k: normalize(v, dict_class)
for k, v in d.items()})
return d
def load(f, dict_class=dict):
loaded = dict_class()
for line in f:
s = line.split('#', 1)[0]
if s:
kv = s.split(' ', 1)
if len(kv) == 2:
k, v = [x.strip() for x in kv]
t = tree(iter(k.split('.')), v, dict_class)
merge(loaded, t, dict_class, inplace=True)
return normalize(loaded, dict_class)
def config(path, dict_class=dict):
with open(path) as f:
return load(f, dict_class)
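# Hedged example of the config format ``load`` parses (the values below are made
# up): each line is "<dot.separated.key> <value>", "#" starts a comment, and
# sibling keys that are all digits are normalized into a list.
#
#   >>> import io
#   >>> sample = io.StringIO(
#   ...     "title Kyak  # display name\n"
#   ...     "databases.accounts.0 sqlite:///accounts0.db\n"
#   ...     "databases.accounts.1 sqlite:///accounts1.db\n")
#   >>> load(sample)
#   {'title': 'Kyak', 'databases': {'accounts': ['sqlite:///accounts0.db', 'sqlite:///accounts1.db']}}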
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def rand16hex():
return urandom(16).hex()
conf = config(Path.home() / '.kyak', dict_class=AttrDict)
binds = {}
for k, v in conf.databases.items():
if isinstance(v, list):
binds.update({':'.join([k, str(i)]): bind for i, bind in enumerate(v)})
else:
binds.update({k + ':0': v})
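# Hedged example (illustrative values): with
#   conf.databases = {'accounts': ['sqlite:///a0.db', 'sqlite:///a1.db'],
#                     'aliases': 'sqlite:///al.db'}
# the loop above produces
#   binds = {'accounts:0': 'sqlite:///a0.db',
#            'accounts:1': 'sqlite:///a1.db',
#            'aliases:0': 'sqlite:///al.db'}
# which matches the BindKeyPattern(r'...:\d+') declarations on the models below.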
app = Flask(__name__)
app.config['SQLALCHEMY_BINDS'] = binds
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = app.debug
db = SQLAlchemy(app)
ma = Marshmallow(app)
class Account(db.Model):
__tablename__ = 'accounts'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
id = db.Column(db.String(16), primary_key=True)
type = db.Column(db.String(16), nullable=False, default='personal')
name = db.Column(db.String(), nullable=False)
email = db.Column(db.String(), nullable=True)
address = db.Column(db.String(), nullable=True)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Access(db.Model):
__tablename__ = 'accesses'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
owner = db.Column(db.String(16), primary_key=True)
access = db.Column(db.String(), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Hook(db.Model):
__tablename__ = 'hooks'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
type = db.Column(db.String(16), primary_key=True)
url = db.Column(db.String(), unique=True, nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Offer(db.Model):
__tablename__ = 'offers'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
offeror = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Contract(db.Model):
__tablename__ = 'contracts'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
type = db.Column(db.String(16), nullable=False)
since = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
until = db.Column(db.DateTime, nullable=True)
payment_terms = db.Column(db.String(), nullable=True)
deposit = db.Column(db.Integer, nullable=True)
contractor_signed = db.Column(db.DateTime, nullable=True)
contractee_signed = db.Column(db.DateTime, nullable=True)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Term(db.Model):
__tablename__ = 'terms'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
cc = db.Column(db.String(32), primary_key=True, default=rand16hex)
order = db.Column(db.Integer, nullable=False)
title = db.Column(db.String(), nullable=False)
description = db.Column(db.String(), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class TimeAndMaterialsPrice(db.Model):
__tablename__ = 'time_and_materials_prices'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
cc = db.Column(db.String(32), primary_key=True, default=rand16hex)
until = db.Column(db.Float, nullable=False)
price = db.Column(db.Float, nullable=False, default=0)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class TimeAndMaterialsActivity(db.Model):
__tablename__ = 'time_and_materials_activities'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
cc = db.Column(db.String(32), primary_key=True, default=rand16hex)
day = db.Column(db.Integer, nullable=False)
since = db.Column(db.DateTime, nullable=True)
until = db.Column(db.DateTime, nullable=True)
description = db.Column(db.String(), nullable=True)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Payment(db.Model):
__tablename__ = 'payments'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
amount = db.Column(db.Float, nullable=False)
tax = db.Column(db.Float, nullable=False)
requested = db.Column(db.DateTime, nullable=True)
paid = db.Column(db.DateTime, nullable=True)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class ContractTemplates(db.Model):
__tablename__ = 'contract_templates'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
cc = db.Column(db.String(32), primary_key=True, default=rand16hex)
title = db.Column(db.String(), nullable=False)
description = db.Column(db.String(), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class TermTemplates(db.Model):
__tablename__ = 'term_templates'
__bind_key__ = BindKeyPattern(r'accounts:\d+')
account_id = db.Column(db.String(16), primary_key=True)
contractor = db.Column(db.String(16), primary_key=True)
contractee = db.Column(db.String(16), primary_key=True)
c = db.Column(db.String(32), primary_key=True, default=rand16hex)
cc = db.Column(db.String(32), primary_key=True, default=rand16hex)
title = db.Column(db.String(), nullable=False)
description = db.Column(db.String(), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class Alias(db.Model):
__tablename__ = 'aliases'
__bind_key__ = BindKeyPattern(r'aliases:\d+')
id = db.Column(db.String(16), primary_key=True)
account_id = db.Column(db.String(16), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=True)
@classmethod
def __hash_id__(cls, ident):
return ord(ident[0][0])
class AccountSchema(ma.ModelSchema):
class Meta:
model = Account
def get_system_contracts(*, contractors=None, when=None):
    # Evaluate the default timestamp at call time, not at import time.
    if when is None:
        when = datetime.now()
    return None
@app.before_first_request
def init():
db.create_all()
@app.route('/')
def index():
return Path('index.html').read_text().replace('{{ title }}', conf.title)
secret = conf.secret.encode()
auth = EmailAuthentication(secret, scheme='Hook')
hook_token = TokenAuthentication(secret)
account_token = TokenAuthentication(secret)
@auth.authorization
@hook_token.authorization
@account_token.authorization
def authorization(header):
return request.headers.get(header)
@auth.authenticate
@hook_token.authenticate
@account_token.authenticate
def authenticate(header, scheme):
return jsonify('Unauthorized'), 401, {header: scheme}
@hook_token.payload_from_bytes
@account_token.payload_from_bytes
def token_payload_from_bytes(b):
return json.loads(b.decode())
@hook_token.payload_to_bytes
@account_token.payload_to_bytes
def token_payload_to_bytes(payload):
return json.dumps(payload).encode()
@hook_token.confirm
def hook_token_confirm(payload):
return 'hook' in payload
@account_token.confirm
def account_token_confirm(payload):
return 'account' in payload
@app.route('/auth', methods=['POST'])
def post_auth():
expires = datetime.now() + timedelta(hours=1)
aid_or_hook = request.form['id']
if aid_or_hook.startswith('http'):
url = aid_or_hook
hook = Hook.query.filter_by(url=url).first()
if hook:
payload = {'account': hook.account_id}
else:
payload = {'hook': url}
else:
aid = aid_or_hook
        hook = Hook.query.filter_by(account_id=aid, type='auth').first()
        if hook is None:
            return jsonify('Not Found'), 404
        payload = {'account': hook.account_id}
        url = hook.url
s = string.ascii_uppercase + string.digits
password = ''.join(random.choices(s, k=8))
encoded = json.dumps(payload).encode()
hint = auth.hint([(aid_or_hook, password)], expires, encoded)
post(url, json={'text': password})
return jsonify(hint)
@app.route('/token', methods=['GET'])
@requires(auth)
def get_token():
expires = datetime.now() + timedelta(hours=1)
payload = json.loads(auth.payload)
if hook_token_confirm(payload):
t, token = 'hook', hook_token
else:
t, token = 'account', account_token
return jsonify(type=t, value=token.build(expires, payload))
@app.route('/accounts', methods=['POST'])
@requires(hook_token | account_token)
def post_accounts(passed):
aid = request.form['id']
if aid == 'me':
        return jsonify('Bad Request'), 400
account = Account()
account.id = aid
account.name = request.form['name']
if hook_token in passed:
hook = Hook()
hook.account_id = account.id
hook.type = 'auth'
hook.url = hook_token.payload['hook']
db.session.add(hook)
else:
account.type = 'corporate'
access = Access()
access.account_id = account.id
access.owner = account_token.payload['account']
access.access = 'Allow full access'
db.session.add(access)
account.email = request.form.get('email')
account.address = request.form.get('address')
db.session.add(account)
db.session.commit()
return jsonify(AccountSchema().dump(account).data)
@app.route('/accounts/<aid>')
@requires(account_token)
def get_accounts(aid):
if aid == 'me':
account = Account.query.get(account_token.payload['account'])
else:
account = Account.query.get(aid)
return jsonify(AccountSchema().dump(account).data)
@app.route('/accounts/<aid>', methods=['DELETE'])
@requires(account_token)
def delete_accounts(aid):
if aid != account_token.payload['account']:
return jsonify('Forbidden'), 403
account = Account.query.get(aid)
db.session.delete(account)
db.session.commit()
return jsonify(AccountSchema().dump(account).data)
|
#!/usr/bin/python3
# The shared ASTM-file parser base class is imported from here
import astm_file2mysql_general as astmg
import sys, logging, time
# MySQL credentials live in /var/gmcs_config, outside this repository
sys.path.append('/var/gmcs_config')
import astm_var
log=1
my_host='127.0.0.1'
my_user=astm_var.my_user
my_pass=astm_var.my_pass
my_db='biochemistry'
inbox='/root/ashish.data/'
archived='/root/ashish.arch/'
log_filename='/var/log/ashish.log'
logging.basicConfig(filename=log_filename,level=logging.DEBUG)
if log==0:
logging.disable(logging.CRITICAL)
class old_LIS(astmg.astm_file):
def send_to_mysql(self):
        print(self.final_data)
while True:
m=old_LIS(inbox,archived)
if(m.get_first_file()):
m.analyse_file()
#print(m.relevant_data)
m.mk_tuple()
m.send_to_mysql()
time.sleep(1)
|
from itertools import groupby
class Solution:
def countAndSay(self, n):
def gen(s):
return "".join(str(len(list(g))) + k for k, g in groupby(s))
s, i = "1", 1
while i < n:
s = gen(s)
i += 1
return s
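if __name__ == "__main__":
    # Illustrative check (not part of the original snippet): the sequence reads
    # "1" -> "11" ("one 1") -> "21" ("two 1s") -> "1211" ("one 2, one 1").
    solution = Solution()
    assert solution.countAndSay(1) == "1"
    assert solution.countAndSay(4) == "1211"
    print("count-and-say examples pass")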
|
import json

import matplotlib.pyplot as plt

history_path = 'outputs/ResUNet.json'
with open(history_path, "r") as history_file:
    history = json.load(history_file)

plt.plot(history['train'])
plt.plot(history['valid'])
plt.legend(['train loss', 'valid loss'])
plt.show()
|
from selenium import webdriver
from PIL import Image
from io import BytesIO
import time
import re
import argparse
import os
import json
from utils import generate_img_filename
from templates import template_path
def get_args():
example_text = '''
examples:
python openassessit/%(capture_element_pic)s --input-file="/abs/path/to/lighthouse-report.json" --assets-dir="/abs/path/to/assets" --sleep=1 --driver=firefox
''' % {'capture_element_pic': os.path.basename(__file__)}
parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--input-file', help='Use absolute path to the lighthouse json report')
parser.add_argument('-a', '--assets-dir', help='Use absolute path to /assets dir')
parser.add_argument('-s', '--sleep', type=float, help='Number of seconds to wait before taking screenshots')
parser.add_argument('-d', '--driver', choices=['firefox', 'chrome'], help='Name of the webdriver.')
return parser.parse_args()
def get_firefox_driver():
options = webdriver.FirefoxOptions()
options.add_argument('--headless')
return webdriver.Firefox(firefox_options=options)
def get_chrome_driver():
options = webdriver.ChromeOptions()
options.add_argument('--headless')
return webdriver.Chrome(chrome_options=options)
def capture_screenshot(assets_dir, url, sleep, driver):
driver.get(url)
time.sleep(sleep)
driver.set_window_size(1400, 700)
Image.open(BytesIO(driver.get_screenshot_as_png())).save(os.path.join(assets_dir,'screenshot.png'))
print(os.path.join(assets_dir,'screenshot.png'))
def capture_element_pic(input_file, assets_dir, url, elem_identifier, sleep, driver):
driver.get(url)
time.sleep(sleep) # wait for page to load a bit
driver.set_window_size(1400, driver.execute_script("return document.body.parentNode.scrollHeight"))
try:
elem = driver.find_element_by_css_selector(elem_identifier) # find element
location = elem.location
size = elem.size
im = Image.open(BytesIO(driver.get_screenshot_as_png())) # uses PIL library to open image in memory
im = im.crop((location['x'] -25,
location['y'],
location['x'] + size['width'] + 25,
location['y'] + size['height']
))
elem_image_name = generate_img_filename(url, elem_identifier)
im.save(os.path.join(assets_dir,elem_image_name)) # saves new cropped image
print(os.path.join(assets_dir,elem_image_name))
except Exception as ex:
print(ex)
def identifier_generator(data, *auditref_whitelist):
for sel in auditref_whitelist:
audit = data.get('audits', {}).get(sel)
if audit is None:
print("Invalid audit id: %s" % sel)
continue
for item in audit.get('details', {}).get('items', []):
if item['node']['selector'] == ':root':
print('Selector returned as ":root", no image will be created.') # If Axe returns ":root" it does not create a helpful screenshot
else:
yield item['node']['selector']
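# Hedged sketch of the Lighthouse report shape this generator walks (key names
# follow the access pattern above; the selector values are made up):
#
#   {"audits": {"image-alt": {"details": {"items": [
#       {"node": {"selector": "img.hero"}},
#       {"node": {"selector": ":root"}}]}}}}
#
# identifier_generator(data, "image-alt") would then yield "img.hero" and skip
# the ":root" entry, printing a note instead.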
def main():
args = get_args()
input_file = args.input_file
assets_dir = args.assets_dir
sleep = args.sleep
if args.driver == 'firefox':
driver = get_firefox_driver()
elif args.driver == 'chrome':
driver = get_chrome_driver()
else:
raise ValueError("Driver must be one of: firefox, chrome")
try:
with open(input_file) as json_file:
data = json.load(json_file)
capture_screenshot(assets_dir, data['finalUrl'], sleep, driver)
for sel in identifier_generator(data, 'color-contrast', 'link-name', 'button-name', 'image-alt', 'input-image-alt', 'label', 'accesskeys', 'frame-title'):
capture_element_pic(input_file, assets_dir, data['finalUrl'], sel, sleep, driver)
finally:
driver.quit()
if __name__ == '__main__':
main()
|
from electrum_ltc.i18n import _
fullname = _('Satochip 2FA')
description = ' '.join([
_("This plugin allows the use of a second factor to authorize transactions on a Satochip hardware wallet."),
_("It sends and receives transaction challenge and response."),
_("Data is encrypted and stored on a remote server.")
])
available_for = ['qt']
|
from django.db import models
from django.utils.timezone import now
class Company(models.Model):
class CompanyStatus(models.TextChoices):
LAYOFFS = "Layoffs"
HIRING_FREEZE = "Hiring Freeze"
HIRING = "Hiring"
name = models.CharField(max_length=30, unique=True)
status = models.CharField(choices=CompanyStatus.choices, default=CompanyStatus.HIRING, max_length=30)
last_update = models.DateTimeField(default=now, editable=True)
application_link = models.URLField(blank=True)
notes = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
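# Hedged usage sketch (illustrative only; "Acme" is a made-up name):
#
#   company = Company.objects.create(name="Acme")
#   company.status       # "Hiring", the CompanyStatus.HIRING default
#   company.last_update  # django.utils.timezone.now() evaluated at creation
#   str(company)         # "Acme"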
|
from unittest import TestCase
from mock import Mock, patch
class TestAPIConnection(TestCase):
def setUp(self):
self._requests_patcher = patch('broadstreetads.requests')
self.requests = self._requests_patcher.start()
def tearDown(self):
self._requests_patcher.stop()
def one(self, access_token='123', host='api.broadstreetads.com'):
from broadstreetads import APIConnection
        return APIConnection(access_token)
def test_5XX_get(self):
from broadstreetads import APIServerError
api = self.one()
self.requests.get().status_code = 504
self.assertRaises(APIServerError, api.get, '/whatever')
def test_5XX_post(self):
from broadstreetads import APIServerError
api = self.one()
self.requests.post().status_code = 504
self.assertRaises(APIServerError, api.post, '/whatever', {})
def test_5XX_delete(self):
from broadstreetads import APIServerError
api = self.one()
self.requests.delete().status_code = 504
self.assertRaises(APIServerError, api.delete, '/whatever')
def test_5XX_patch(self):
from broadstreetads import APIServerError
api = self.one()
self.requests.patch().status_code = 504
self.assertRaises(APIServerError, api.patch, '/whatever', {})
|
import plato_fit_integrals.initialise.create_ecurve_workflows as ecurves
import matplotlib.pyplot as plt
def plotFittedIntsVsInitial(integInfo,coeffsToTablesObj):
initInts = coeffsToTablesObj._integHolder.getIntegTableFromInfoObj(integInfo,inclCorrs=False).integrals
fitInts = coeffsToTablesObj._integHolder.getIntegTableFromInfoObj(integInfo,inclCorrs=True).integrals
figA = plt.figure()
axA = figA.add_subplot(1,1,1)
axA.plot(initInts[:,0],initInts[:,1])
axA.plot(fitInts[:,0],fitInts[:,1])
return figA
def plotDissocCurvesInitVsFinal(structList, initEnergies, finalEnergies):
xData = _getDistsFromUCellStructsList(structList)
figA = plt.figure()
axA = figA.add_subplot(1,1,1)
axA.scatter(xData,initEnergies)
axA.scatter(xData,finalEnergies)
return figA
def _getDistsFromUCellStructsList(structList):
allDists = list()
for x in structList:
assert len(x.cartCoords)==2, "Only dimers are supported"
currDist = ecurves.getSepTwoAtomsInUnitCellStruct(x)
allDists.append(currDist)
return allDists
|