# Scrape Temptalia brands and check if MongoDB already has them; if not, insert them
from html_scraping import Temptalia_Scrapping
from mongodb import Makeup_MongoDB
from color_analyse import Color_Class
from color_analyse import Color_Analysis
from write_results import Write_Results_Class
from os import system, name
from subprocess import call
import os
import io
from PIL import Image
from array import array
import sys
import math
import time
from bs4 import BeautifulSoup
import requests
import re
#~/.bashrc or ~/.bash_aliases
#alias python=python3
#run source ~/.bashrc or source ~/.bash_aliases
def Insert_New_Brands():
print("Get all brands from Temptalia")
AllBrands = Temptalia_Scrapping.Get_Brands()
#print(AllBrands)
#print(Makeup_MongoDB.Contain_Brand(AllBrands[0]))
print("Check if brand is already in db and insert")
for brand in AllBrands:
print(brand.name)
brand_exist = Makeup_MongoDB.Contain_Brand(brand.name, brand.id)
if not brand_exist:
insertid = Makeup_MongoDB.Insert_Brand(brand.name, brand.id)
print(insertid)
else:
print("exists")
def clear():
_ = call('clear' if os.name == 'posix' else 'cls')
def Insert_New_Eyeshadows():
branddata = Makeup_MongoDB.Get_All_Brands()
for brand in branddata:
print(brand)
totalpages = Temptalia_Scrapping.Get_Nav_Pages(brand["temptalia_id"])
alleyeshadows = []
#for pageindex in range(1, totalpages + 1):
for pageindex in range(1,2):
alleyeshadows = alleyeshadows + Temptalia_Scrapping.Get_Eyeshadow(brand["name"], brand["temptalia_id"], pageindex)
for eyeshadow in alleyeshadows:
exist = Makeup_MongoDB.Contain_Eyeshadow(eyeshadow.brand, eyeshadow.name)
if not exist:
insertid = Makeup_MongoDB.Insert_Eyeshadow(eyeshadow)
print(insertid)
else:
print("Already exists")
def Calculate_RGB(filename, filenamejs, filenamejson):
Brands = Makeup_MongoDB.Get_All_Brands()
fieldnames = ["Brand", "FoundIn", "Name", "MiddleRGB", "AvgRGB","ModeCount", "ModeRGB","BorderDistance","Finish", "ColorRGB", "ColorAvgRGB", "ColorModeRGB" ]
Write_Results_Class.Write_To_XSLX_Title(fieldnames, filename)
Write_Results_Class.Initialize_JS_File(filenamejs)
Write_Results_Class.Initialize_JSON_File(filenamejson)
for brand in Brands:
print(brand)
Eyeshadows = Makeup_MongoDB.Get_All_Eyeshadows_By_Brand(brand["name"])
#check four corners and middle
count = 0
errorcolors = []
error = 0
colorRows = []
for eyeshadow in Eyeshadows:
print(eyeshadow["name"])
try:
image = Image.open(io.BytesIO(eyeshadow["byte"]))
except Exception:
count+= 1
errorcolors.append(eyeshadow["name"])
continue
width, height = image.size
middle = middlex, middley = width // 2, height // 2  # getpixel expects integer (x, y), i.e. (column, row)
middlergb = image.getpixel(middle)
#borderdistance = Calculate_Image_Box(image, middle, middlehsv)
borderdistance = Color_Analysis.Calculate_Image_Box(image, middle)
if borderdistance == 0:
count += 1
errorcolors.append(eyeshadow["name"])
else:
results = Color_Analysis.AVG_Image_RGB(image, middle, borderdistance)
MIN, MAX = Color_Analysis.Min_Max_RGB(image, middle, borderdistance)
ModeCount, ModeRGB = Color_Analysis.Calculate_Mode_RGB(image,middle, borderdistance)
RowDict = {}
RowDict["Brand"] = eyeshadow["brand"]
RowDict["FoundIn"] = eyeshadow["foundin"]
RowDict["Name"] = eyeshadow["name"]
RowDict["MiddleRGB"] = middlergb
RowDict["AvgRGB"] = results
RowDict["ModeCount"] = ModeCount
RowDict["ModeRGB"] = ModeRGB
RowDict["BorderDistance"] = borderdistance
RowDict["Finish"] = eyeshadow["finish"]
colorRows.append(RowDict)
print('writing brand to excel')
Write_Results_Class.Write_To_XSLX_RGB(colorRows, filename)
print('writing brand to js file')
Write_Results_Class.Write_To_JS(brand, colorRows, filenamejs)
print('writing eyeshadow detail to json objects')
Write_Results_Class.Write_To_JSon_Objects(colorRows, filenamejson)
print("Total " + str(count))
print(errorcolors)
print("Error " + str(error))
Write_Results_Class.Write_To_CSV(fieldnames, colorRows)
Write_Results_Class.Close_JSON_File(filenamejson)
#workbook.close()
def Find_Finishes():
Brands = Makeup_MongoDB.Get_All_Brands()
#for brandindex in range(0,1):
# brand = Brands[brandindex]
for brand in Brands:
Eyeshadows = Makeup_MongoDB.Get_All_Eyeshadows_By_Brand(brand["name"])
#for eyeshadowindex in range(0,3):
#eyeshadow = Eyeshadows[eyeshadowindex]
for eyeshadow in Eyeshadows:
print(eyeshadow["src"])
print(eyeshadow["name"])
finish = Temptalia_Scrapping.Get_Eyeshadow_Finish(eyeshadow["src"])
Makeup_MongoDB.Update_Eyeshadow_Finish(brand["name"], eyeshadow["name"], finish)
def Welcome_Screen():
print('###########################################')
print('## Welcome to Temptalia Eyeshadow DB ##')
print('###########################################')
def Print_Menu():
print('Menu')
print('1. Exit')
print('2. Delete Brands Collection')
print('3. Screen Scrape Brands')
print('4. Delete Eyeshadows Collection')
print('5. Screen Scrape Eyeshadows')
print('6. Print All Eyeshadows')
print('7. Convert Eyeshadow Byte To Img')
print('8. Analyze Eyeshadows and Save to File')
print('9. Copy Collection')
print('10. Save Brands to JS File')
print('11. Find Finishes and save to MongoDB')
if __name__ == "__main__":
clear()
Welcome_Screen()
Print_Menu()
while True:
user_input = input('What would you like to do? ')
if user_input == "Menu" or user_input=="menu":
Print_Menu()
elif user_input == "1" or user_input == "Exit" or user_input == "exit" or user_input == "quit":
print('Goodbye!')
break
elif user_input == "2":
Makeup_MongoDB.Delete_Brands_Collection()
print('Deleted all brands from MongoDB')
elif user_input == "3":
print('Inserting Brands')
Insert_New_Brands()
elif user_input == "4":
Makeup_MongoDB.Delete_Eyeshadow_Collection()
elif user_input == "5":
Insert_New_Eyeshadows()
elif user_input == "6":
Eyeshadows = Makeup_MongoDB.Get_All_Eyeshadows()
for es in Eyeshadows:
print(es)
elif user_input == "7":
Eyeshadows = Makeup_MongoDB.Get_All_Eyeshadows()
#put everything in brands folder first
if not os.path.isdir("Brands"):
os.mkdir("Brands")
for eyeshadow in Eyeshadows:
brand = eyeshadow["brand"]
filepath = "Brands/{0}".format(brand)
if not os.path.isdir(filepath):
os.mkdir(filepath)
try:
image = Image.open(io.BytesIO(eyeshadow["byte"]))
image.save(filepath + "/" + eyeshadow["name"] + ".jpg")
print("Saved " + eyeshadow["name"] + ".jpg")
except Exception:
print("Could not save " + eyeshadow["name"] + ".jpg")
elif user_input == "8":
filename = input("Enter Results File Name (Default is Results):")
if len(filename) == 0:
filename = "Results"
filenamejs = "{0}.js".format(filename)
filenamejson = "{0}_EyeshadowDetail.js".format(filename)
filename = "{0}.xlsx".format(filename)
Calculate_RGB(filename, filenamejs, filenamejson)
elif user_input == "9":
from_collection = input('From Collection: ')
to_collection = input('To Collection: ')
Makeup_MongoDB.Copy_Collection(from_collection, to_collection)
print('Complete Copying Collection')
elif user_input == "10":
Brands = Makeup_MongoDB.Get_All_Brands()
Write_Results_Class.Write_Brands_To_JS(Brands)
print("Complete")
elif user_input == "11":
Find_Finishes()
elif user_input == "reset":
python = sys.executable
os.execl(python, python, * sys.argv)
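# The Color_Analysis helpers called in Calculate_RGB (Calculate_Image_Box, AVG_Image_RGB,
# Min_Max_RGB, Calculate_Mode_RGB) live in color_analyse and are not shown here. As a rough
# illustration only (an assumption, not the project's actual implementation), averaging the
# RGB values inside a square box around the swatch centre could look something like this:
def average_rgb_in_box(image, center, borderdistance):
    """Average the (R, G, B) values in a square box of half-width borderdistance around center.

    Illustrative sketch assuming an RGB PIL image; not the real Color_Analysis helper.
    """
    cx, cy = int(center[0]), int(center[1])
    totals = [0, 0, 0]
    count = 0
    for x in range(cx - borderdistance, cx + borderdistance + 1):
        for y in range(cy - borderdistance, cy + borderdistance + 1):
            pixel = image.getpixel((x, y))
            for channel in range(3):
                totals[channel] += pixel[channel]
            count += 1
    return tuple(total // count for total in totals)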
|
from abc import ABC, abstractmethod
from typing import NoReturn
class DistributionInterface(ABC):
@abstractmethod
def __init__(self, mean: float, stdev: float, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def __add__(self, other):
raise NotImplementedError
@abstractmethod
def __repr__(self):
raise NotImplementedError
@abstractmethod
def read(self, file: str) -> NoReturn:
raise NotImplementedError
@abstractmethod
def calculate_mean(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def calculate_stdev(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def pdf(self, *args, **kwargs) -> float:
raise NotImplementedError
@abstractmethod
def plot(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def plot_pdf(self, *args, **kwargs):
raise NotImplementedError
class Distribution(DistributionInterface, ABC):
def __init__(self, mean=0., stdev=1., *args, **kwargs):
"""General distribution parent class.
Args:
mean (float): The mean value of the distribution.
stdev (float): The standard deviation of the distribution.
"""
self.mean = None
self.set_mean(mean)
self.stdev = None
self.set_stdev(stdev)
self.data = []
def set_mean(self, new_value: float) -> NoReturn:
"""Mutator method realizes encapsulation of mean attribute.
Args:
new_value (float): New value of mean attribute.
Returns:
NoReturn
"""
self.mean = new_value
def get_mean(self) -> float:
"""Accessor method realizes encapsulation of mean attribute.
Returns:
float: The mean value of the distribution.
"""
return self.mean
def set_stdev(self, new_value: float) -> NoReturn:
"""Mutator method realizes encapsulation of stdev attribute.
Args:
new_value (float): New value of stdev attribute.
Returns:
NoReturn
"""
self.stdev = new_value
def get_stdev(self) -> float:
"""Accessor method realizes encapsulation of stdev attribute.
Returns:
float: The stdev value of the distribution.
"""
return self.stdev
def get_data(self) -> list:
"""Accessor method realizes encapsulation of data attribute.
Returns:
list: Data sample.
"""
return self.data
def read(self, file_name: str) -> NoReturn:
"""Method to read data from a text file.
Args:
file_name (str): Name of a file to read.
Returns:
NoReturn
"""
data = []
with open(file_name) as file:
while line := file.readline():
line = int(line.rstrip())
data.append(line)
self.data = data
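# A minimal sketch (not part of this module) of how the abstract interface above is meant to
# be filled in by a concrete subclass, here a Gaussian distribution. The formulas are the usual
# textbook ones and the plotting methods are left as stubs; nothing below is taken from this file.
import math


class Gaussian(Distribution):
    def __add__(self, other):
        # Sum of two independent Gaussians: means add, variances add.
        return Gaussian(self.get_mean() + other.get_mean(),
                        math.sqrt(self.get_stdev() ** 2 + other.get_stdev() ** 2))

    def __repr__(self):
        return f"Gaussian(mean={self.get_mean()}, stdev={self.get_stdev()})"

    def calculate_mean(self):
        self.set_mean(sum(self.data) / len(self.data))
        return self.get_mean()

    def calculate_stdev(self, sample=True):
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        self.set_stdev(math.sqrt(sum((x - mean) ** 2 for x in self.data) / n))
        return self.get_stdev()

    def pdf(self, x):
        return (1.0 / (self.get_stdev() * math.sqrt(2 * math.pi))
                * math.exp(-0.5 * ((x - self.get_mean()) / self.get_stdev()) ** 2))

    def plot(self):
        pass  # a real subclass would typically draw a histogram of self.data here

    def plot_pdf(self):
        pass  # likewise, typically a line plot of self.pdf over a range of x values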
|
from .business_delegate import ServiceDelegate
class Client(object):
def __init__(self, business_delegate: ServiceDelegate):
self.business_delegate = business_delegate
def to_call(self):
return self.business_delegate.to_call()
|
import unittest
from nymms import utils
class TestBase(unittest.TestCase):
def test_load_class_from_string(self):
from logging import Logger
cls = utils.load_object_from_string('logging.Logger')
self.assertIs(cls, Logger)
def test_parse_time(self):
base_time = 1415311935
self.assertEqual(utils.parse_time('+60s', base_time).timestamp,
base_time + 60)
self.assertEqual(utils.parse_time('+10m', base_time).timestamp,
base_time + (60 * 10))
self.assertEqual(utils.parse_time('+10h', base_time).timestamp,
base_time + (60 * 60 * 10))
self.assertEqual(utils.parse_time('-10d', base_time).timestamp,
base_time - (10 * 60 * 60 * 24))
with self.assertRaises(ValueError):
utils.parse_time('+2000xxx')
|
import csv
import logging
import secrets
import string
from camps.mixins import CampViewMixin
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
from django.views.generic import CreateView, DeleteView, ListView, UpdateView
from oauth2_provider.views.generic import ProtectedResourceView
from utils.mixins import RaisePermissionRequiredMixin, UserIsObjectOwnerMixin
from .mixins import DectRegistrationViewMixin
from .models import DectRegistration
logger = logging.getLogger("bornhack.%s" % __name__)
class DectExportView(
CampViewMixin, RaisePermissionRequiredMixin, ProtectedResourceView
):
"""
CSV export for the POC team / DECT system
"""
permission_required = "camps.pocteam_permission"
def get(self, request, *args, **kwargs):
response = HttpResponse(content_type="text/csv")
response[
"Content-Disposition"
] = f'attachment; filename="{self.camp.slug}-dect-export-{timezone.now()}.csv"'
writer = csv.writer(response)
writer.writerow(
[
"number",
"letters",
"description",
"activation_code",
"publish_in_phonebook",
]
)
for dect in DectRegistration.objects.filter(camp=self.camp):
writer.writerow(
[
dect.number,
dect.letters,
dect.description,
dect.activation_code,
dect.publish_in_phonebook,
]
)
return response
class PhonebookListView(CampViewMixin, ListView):
"""
Our phonebook view currently only shows DectRegistration entries,
but could later be extended to show maybe GSM or other kinds of
phone numbers.
"""
model = DectRegistration
template_name = "phonebook.html"
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.filter(publish_in_phonebook=True)
class DectRegistrationListView(LoginRequiredMixin, CampViewMixin, ListView):
model = DectRegistration
template_name = "dectregistration_list.html"
def get_queryset(self, *args, **kwargs):
"""
Show only DectRegistration entries belonging to the current user
"""
qs = super().get_queryset(*args, **kwargs)
return qs.filter(user=self.request.user)
class DectRegistrationCreateView(LoginRequiredMixin, CampViewMixin, CreateView):
model = DectRegistration
fields = ["number", "letters", "description", "publish_in_phonebook"]
template_name = "dectregistration_form.html"
def form_valid(self, form):
dect = form.save(commit=False)
dect.camp = self.camp
dect.user = self.request.user
try:
dect.clean_number()
except ValidationError as E:
form.add_error("number", E)
return super().form_invalid(form)
try:
dect.clean_letters()
except ValidationError as E:
form.add_error("letters", E)
return super().form_invalid(form)
# this check needs to be in this form, but not in model.save(), because then we can't save service numbers from the admin
if len(dect.number) < 4:
form.add_error(
"number",
ValidationError(
"Numbers with fewer than 4 digits are reserved for special use"
),
)
return super().form_invalid(form)
# generate a 10 digit activation code for this dect registration?
if not dect.activation_code:
dect.activation_code = "".join(
secrets.choice(string.digits) for i in range(10)
)
# all good, save and return to list
dect.save()
messages.success(
self.request,
"New DECT registration created successfully. Call the activation number from your handset to activate it!",
)
return redirect(
reverse(
"phonebook:dectregistration_list", kwargs={"camp_slug": self.camp.slug}
)
)
class DectRegistrationUpdateView(
LoginRequiredMixin, DectRegistrationViewMixin, UserIsObjectOwnerMixin, UpdateView
):
model = DectRegistration
fields = ["letters", "description", "publish_in_phonebook"]
template_name = "dectregistration_form.html"
def form_valid(self, form):
dect = form.save(commit=False)
# check if the letters match the DECT number
try:
dect.clean_letters()
except ValidationError as E:
form.add_error("letters", E)
return super().form_invalid(form)
# save and return
dect.save()
messages.success(
self.request, "Your DECT registration has been updated successfully"
)
return redirect(
reverse(
"phonebook:dectregistration_list", kwargs={"camp_slug": self.camp.slug}
)
)
class DectRegistrationDeleteView(
LoginRequiredMixin, DectRegistrationViewMixin, UserIsObjectOwnerMixin, DeleteView
):
model = DectRegistration
template_name = "dectregistration_delete.html"
def get_success_url(self):
messages.success(
self.request,
f"Your DECT registration for number {self.get_object().number} has been deleted successfully",
)
return reverse(
"phonebook:dectregistration_list", kwargs={"camp_slug": self.camp.slug}
)
|
import csv
import copy
import sys
from Aries.table import TableCSVFile
class VCFVariant:
def __init__(self, chrom, pos, ref, alt, info=None, annotation=None, raw_data=None):
self.chrom = chrom
self.pos = pos
self.ref = ref
self.alt = alt
self.raw_data = raw_data
self.rs_id = None
self.annotation = annotation if annotation is not None else dict()
self.info = info if info is not None else dict()
@property
def key(self):
return "%s:g.%s%s>%s" % (self.chrom, self.pos, self.ref, self.alt)
def __str__(self):
return self.key
class VariantsFile:
def __init__(self, uri):
self.uri = uri
self.file_obj = None
# header_line should NOT contain the ending line break
self.header_line = None
self.read_headers()
def variant_key(self, line):
"""Returns a unique identifier for a variant represented by
a line or an object containing all information of a variant in the file.
The format of the identifier must be:
"[Chromosome]:g.[Position][Ref]>[Alt]"
Chromosome should be a number, X or Y, without the "chr"
This unique identifier should be consistent for all sub-classes.
The same variant should have the same identifier regardless of the file format.
Args:
line: A text line or an object containing all information of a variant
"""
raise NotImplementedError
def build_index(self):
return dict()
def read_headers(self):
raise NotImplementedError
def write_headers(self, *args, **kwargs):
raise NotImplementedError
@property
def lines(self):
with open(self.uri, 'r') as f:
for line in f:
yield line
# Handles the open and close of the file
def __enter__(self):
self.file_obj = open(self.uri, 'r')
return self.file_obj
def __exit__(self, exc_type, exc_val, exc_tb):
self.file_obj.close()
class VCFVariants(VariantsFile):
FILTER_COLUMN = 6
def __init__(self, uri):
self.meta = dict()
self.headers = []
# Caches the number of total variants
self.__count = None
super().__init__(uri)
def variants(self):
for line in self.lines:
if line.startswith("#"):
continue
key = self.variant_key(line)
val = line
yield key, val
@staticmethod
def parse_meta(meta_line):
# strip the trailing line break so structured "<...>" values can be detected and parsed
arr = meta_line.strip().split("=", 1)
key = arr[0]
val = arr[1]
if val.startswith("<") and val.endswith(">"):
pairs = val[1:-1].split(",")
val = dict()
for pair in pairs:
arr = pair.split("=", 1)
k = arr[0]
v = arr[1]
val[k] = v
return key, val
def read_headers(self):
for line in self.lines:
if not line.startswith("#"):
break
if line.startswith("##"):
key, val = self.parse_meta(line)
val_list = self.meta.get(key, [])
val_list.append(val)
self.meta[key] = val_list
continue
if line.startswith("#"):
self.header_line = line.rstrip("\n")
def write_headers(self, to_file, meta=None):
"""
Args:
to_file:
meta:
Returns:
"""
# Write meta data
if meta is None:
meta = self.meta
for key, val_list in meta.items():
for val in val_list:
if isinstance(val, dict):
value = "<" + ",".join(["%s=%s" % (k, v) for k, v in val.items()]) + ">"
else:
value = val
to_file.write("%s=%s\n" % (key, value))
# Write header line
to_file.write(self.header_line + "\n")
def variant_key(self, line):
arr = line.split()
if len(arr) < 5:
return None
chromosome = arr[0].replace("chr", "")
position = arr[1]
ref = arr[3]
alt = arr[4]
return "%s:g.%s%s>%s" % (chromosome, position, ref, alt)
def build_index(self):
whitelist_dict = {}
for line in self.lines:
if line.startswith("#"):
continue
key = self.variant_key(line)
whitelist_dict[key] = line.strip()
return whitelist_dict
def apply_filter(self, output_vcf_path, filter_id, filter_description, filter_func, passed_only=False):
meta = copy.deepcopy(self.meta)
filter_list = meta.get("FILTER", [])
filter_list.append({
"ID": filter_id,
"Description": filter_description
})
with open(output_vcf_path, 'w') as output_vcf:
self.write_headers(output_vcf, meta)
counter = 0
for key, variant in self.variants():
counter += 1
columns = variant.split("\t")
passed = filter_func(key, variant)
if not passed:
if passed_only:
continue
# Filter not passed
if columns[self.FILTER_COLUMN].strip() in ["PASS", ".", ""]:
columns[self.FILTER_COLUMN] = filter_id
else:
columns[self.FILTER_COLUMN] = columns[self.FILTER_COLUMN] + "," + filter_id
output_vcf.write("\t".join(columns))
self.__count = counter
print("%d total variants in VCF." % counter)
return VCFVariants(output_vcf_path)
def count(self):
if self.__count is None:
self.__count = sum(1 for _ in self.variants())
return self.__count
class CSVVariants(VariantsFile):
header_keys = ["chr", "start", "end", "ref", "alt"]
def __init__(self, uri, **kwargs):
self.table = TableCSVFile(uri, **kwargs)
self.delimiter = self.table.kwargs.get("delimiter")
if not self.delimiter:
self.delimiter = self.table.kwargs.get("dialect").delimiter
self.columns = dict()
super().__init__(uri)
self.headers = self.table.headers
self.header_line = self.delimiter.join(["\"%s\"" % h for h in self.table.headers])
def variant_key(self, row):
columns = self.columns
return "%s:g.%s%s>%s" % (
str(row[columns.get("chr")]).replace("chr", ""),
row[columns.get("start")],
row[columns.get("ref")],
row[columns.get("alt")],
)
def read_headers(self):
headers = [str(h).lower() for h in self.table.headers]
self.columns = {headers[i]: i for i in range(len(headers))}
for key in self.header_keys:
if key not in self.columns:
raise AttributeError("Column %s not found in %s." % (key, self.table.headers))
def write_headers(self, to_file):
to_file.write(self.header_line + "\n")
def build_index(self):
whitelist_dict = {
"header": self.header_line
}
for i, row in enumerate(self.table.rows):
if i <= self.table.header_row:
continue
if row[self.columns.get("start")] != row[self.columns.get("end")]:
print("Variant has different start and end")
key = self.variant_key(row)
whitelist_dict[key] = self.delimiter.join(["\"%s\"" % r for r in row]).strip()
return whitelist_dict
class WhitelistFilter:
# This file contains a subset of the whitelist
whitelist_output_filename = "whitelist_subset.csv"
# This file contains a subset of the VCF
vcf_output_filename = "variants_subset.vcf"
def __init__(self, variant_calls):
"""
Args:
variant_calls (VariantsFile):
"""
self.variant_calls = variant_calls
self.index = variant_calls.build_index()
self.in_whitelist = set()
def filter_variant(self, key, variant):
if key in self.index:
self.in_whitelist.add(key)
return True
return False
def print_passed(self):
print(self.variant_calls.header_line)
counter = 0
for key in self.in_whitelist:
print(self.index[key])
counter += 1
if counter >= 100:
print("... and %s more..." % (len(self.in_whitelist) - counter))
break
def save_passed(self, to_file_path):
with open(to_file_path, "w") as f:
self.variant_calls.write_headers(f)
for key in self.in_whitelist:
f.write(self.index[key] + "\n")
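# Illustrative usage only; the file names below are placeholders, not paths used by this module.
if __name__ == "__main__":
    # Index a whitelist given as a CSV with chr/start/end/ref/alt columns, then flag every
    # VCF variant that is missing from it via apply_filter().
    whitelist = CSVVariants("whitelist.csv")
    calls = VCFVariants("calls.vcf")
    wl_filter = WhitelistFilter(whitelist)
    calls.apply_filter(
        "filtered.vcf",
        filter_id="not_in_whitelist",
        filter_description="Variant not present in the whitelist",
        filter_func=wl_filter.filter_variant,
    )
    print("%d variants matched the whitelist." % len(wl_filter.in_whitelist))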
|
#!/usr/bin/python
#By Sun Jinyuan and Cui Yinglu, 2021
import pandas as pd
import argparse
parser = argparse.ArgumentParser(description=
'Process files from previous foldx scan')
parser.add_argument("-sn", '--subdirnum', help="Total number of subdirectories")
parser.add_argument("-fn", '--fxoutname', help="Average_BuildModel_<pdbfilename>.fxout")
parser.add_argument("-c", '--cutoff',help="Cutoff of ddg")
args = parser.parse_args()
fxoutname = args.fxoutname
subnum = int(args.subdirnum)
cutoff = float(args.cutoff)
df_average_lst = []
for num in range(subnum):
num += 1
fxout_name = "Subdirectory"+str(num)+"/"+fxoutname
df_average = pd.read_table(fxout_name, sep='\t',skiprows=8)
df_average_lst.append(df_average)
df_list_lst = []
for num in range(subnum):
num += 1
list_name = "test/Subdirectory"+str(num)+"/List_Mutations_readable.txt"
df_list = pd.read_table(list_name, sep=" ",header=None)
df_list_lst.append(df_list)
df_average_all = pd.concat(df_average_lst, axis=0, ignore_index=True)
#df_average.head()
df_list_all = pd.concat(df_list_lst, axis=0, ignore_index=True)
df_o = df_average_all.iloc[:, 0:3].join(df_list_all)
odict = {'mutation':[],'energy':[],'SD':[],'position':[]}
for i in range(df_o.shape[0]):
odict['mutation'].append(str(df_o[1][i])+str(df_o[2][i])+str(df_o[3][i]))
odict['position'].append(str(df_o[2][i]))
odict['energy'].append(df_o['total energy'][i])
odict['SD'].append(df_o['SD'][i])
CompleteList_df = pd.DataFrame(odict)
CompleteList_SortedByEnergy_df = CompleteList_df.sort_values('energy').reset_index(drop=True)
def BestPerPosition(df):
position_list = []
length = df.shape[0]
for i in range(length):
if df['position'][i] in position_list:
df = df.drop(index=i)
else:
position_list.append(df['position'][i])
return df.reset_index(drop=True)
def BelowCutOff(df,cutoff):
#position_list = []
length = df.shape[0]
for i in range(length):
if float(df['energy'][i]) > float(cutoff):
df = df.drop(index=i)
else:
continue
return df.reset_index(drop=True)
BestPerPosition_SortedByEnergy_df = BestPerPosition(CompleteList_SortedByEnergy_df)
BestPerPosition_df = BestPerPosition(CompleteList_SortedByEnergy_df)
BelowCutOff_df = BelowCutOff(CompleteList_df,-1)
BelowCutOff_SortedByEnergy_df = BelowCutOff(CompleteList_SortedByEnergy_df,-1)
BestPerPositionBelowCutOff_SortedByEnergy_df = BelowCutOff(BestPerPosition_SortedByEnergy_df,-1)
BestPerPositionBelowCutOff_df = BelowCutOff(BestPerPosition_df,-1)
def variablename(var):
import itertools
return [tpl[0] for tpl in filter(lambda x: var is x[1], globals().items())]
def out_tab_file(df):
df_name = variablename(df)[0]
filename = "MutationsEnergies_"+df_name[:-3]+".tab"
with open(filename,"w+") as of:
of.write(df.to_csv(columns=['mutation', 'energy', 'SD'], sep='\t', index=False))
out_tab_file(CompleteList_df)
out_tab_file(CompleteList_SortedByEnergy_df)
out_tab_file(BestPerPosition_SortedByEnergy_df)
out_tab_file(BestPerPosition_df)
out_tab_file(BelowCutOff_df)
out_tab_file(BelowCutOff_SortedByEnergy_df)
out_tab_file(BestPerPositionBelowCutOff_SortedByEnergy_df)
out_tab_file(BestPerPositionBelowCutOff_df)
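# A tiny self-contained check of the two helpers above (values are made up, for illustration
# only): keep the best, i.e. lowest-energy, mutation per position, then apply a ddG cutoff.
if __name__ == "__main__":
    toy = pd.DataFrame({
        'mutation': ['GA12W', 'GA12L', 'SA45M'],
        'energy': [-2.1, -0.3, 0.5],
        'SD': [0.10, 0.20, 0.10],
        'position': ['A12', 'A12', 'A45'],
    }).sort_values('energy').reset_index(drop=True)
    print(BestPerPosition(toy))   # one row per position; A12 keeps the -2.1 mutation
    print(BelowCutOff(toy, -1))   # only mutations with energy <= -1 survive the cutoff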
|
# Copyright The IETF Trust 2007, 2009, All Rights Reserved
from django.conf import settings
from django.conf.urls import patterns, include, handler500, handler404
from django.contrib import admin
from django.views.generic import TemplateView
from ietf.liaisons.sitemaps import LiaisonMap
from ietf.ipr.sitemaps import IPRMap
from ietf import api
# import debug_toolbar
admin.autodiscover()
api.autodiscover()
# sometimes, this code gets called more than once, which is an issue
# that seems impossible to work around.
try:
admin.site.disable_action('delete_selected')
except KeyError:
pass
sitemaps = {
'liaison': LiaisonMap,
'ipr': IPRMap,
}
if hasattr(settings, 'IS_CODESTAND_APP'):
handler500 = 'ietf.codestand.views.handler500'
handler404 = 'ietf.codestand.views.handler404'
urlpatterns = patterns('',
(r'^$', 'ietf.doc.views_search.frontpage'),
(r'^accounts/', include('ietf.ietfauth.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^ann/', include('ietf.nomcom.redirect_ann_urls')),
(r'^community/', include('ietf.community.urls')),
(r'^accounts/settings/', include('ietf.cookies.urls')),
(r'^doc/', include('ietf.doc.urls')),
(r'^drafts/', include('ietf.doc.redirect_drafts_urls')),
(r'^mailtrigger/',include('ietf.mailtrigger.urls')),
(r'^feed/', include('ietf.feed_urls')),
(r'^group/', include('ietf.group.urls')),
(r'^help/', include('ietf.help.urls')),
(r'^idtracker/', include('ietf.doc.redirect_idtracker_urls')),
(r'^iesg/', include('ietf.iesg.urls')),
(r'^ipr/', include('ietf.ipr.urls')),
(r'^liaison/', include('ietf.liaisons.urls')),
(r'^list/', include('ietf.mailinglists.urls')),
(r'^meeting/', include('ietf.meeting.urls')),
(r'^nomcom/', include('ietf.nomcom.urls')),
(r'^person/', include('ietf.person.urls')),
(r'^release/', include('ietf.release.urls')),
(r'^secr/', include('ietf.secr.urls')),
(r'^sitemap-(?P<section>.+).xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
(r'^sitemap.xml$', 'django.contrib.sitemaps.views.index', { 'sitemaps': sitemaps}),
(r'^submit/', include('ietf.submit.urls')),
(r'^sync/', include('ietf.sync.urls')),
(r'^stream/', include('ietf.group.urls_stream')),
(r'^templates/', include('ietf.dbtemplate.urls')),
(r'^(?P<group_type>(wg|rg|ag|team|dir|area))/', include('ietf.group.urls_info')),
# Redirects
(r'^(?P<path>public)/', include('ietf.redirects.urls')),
# Google webmaster tools verification url
(r'^googlea30ad1dacffb5e5b.html', TemplateView.as_view(template_name='googlea30ad1dacffb5e5b.html')),
)
if settings.IS_CODESTAND_APP:
urlpatterns += patterns('',
(r'^codestand/', include('ietf.codestand.urls')),
(r'^codestand/matches/', include('ietf.codestand.matches.urls')),
(r'^codestand/requests/', include('ietf.codestand.requests.urls')),
(r'^codestand/accounts/', include('ietf.codestand.accounts.urls')),
)
# if settings.DEBUG:
# urlpatterns += patterns('',
# (r'^codestand/__debug__/', include(debug_toolbar.urls)),
# )
# Endpoints for Tastypie's REST API
urlpatterns += patterns('',
(r'^api/v1/?$', api.top_level),
)
for n,a in api._api_list:
urlpatterns += patterns('',
(r'^api/v1/', include(a.urls)),
)
# This is needed to serve files during testing
#if settings.SERVER_MODE in ('development', 'test'):
# urlpatterns += ( staticfiles_urlpatterns()
# + patterns('',
# (r'^_test500/$', lambda x: None),
# (r'^environment/$', 'ietf.help.views.environment'),
# ## maybe preserve some static legacy URLs ?
# (r'^(?P<path>(?:images|css|js)/.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT+'ietf/'}),
# )
# )
# This is needed to serve files which are not handled by collectstatic :
if settings.SERVER_MODE in ('development', 'test'):
urlpatterns += patterns('',
(r'^(?P<path>(?:images|css|js|test|static|fonts|other)/.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
# (r'^(?P<path>autocomplete/(?:img|css|js)/.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^(?P<path>admin/(?:img|css|js)/.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^(?P<path>secretariat/(img|css|js)/.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^(?P<path>robots\.txt)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT+"dev/"}),
(r'^_test500/$', lambda x: None),
(r'^environment/$', 'ietf.help.views.environment'),
)
|
"""Home Assistant Switcher Component."""
from asyncio import QueueEmpty, TimeoutError as Asyncio_TimeoutError, wait_for
from datetime import datetime, timedelta
from logging import getLogger
from typing import Dict, Optional
import voluptuous as vol
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import EventType, HomeAssistantType
_LOGGER = getLogger(__name__)
DOMAIN = 'switcher_kis'
CONF_DEVICE_ID = 'device_id'
CONF_DEVICE_PASSWORD = 'device_password'
CONF_PHONE_ID = 'phone_id'
DATA_DEVICE = 'device'
SIGNAL_SWITCHER_DEVICE_UPDATE = 'switcher_device_update'
ATTR_AUTO_OFF_SET = 'auto_off_set'
ATTR_ELECTRIC_CURRENT = 'electric_current'
ATTR_REMAINING_TIME = 'remaining_time'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_PHONE_ID): cv.string,
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_DEVICE_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistantType, config: Dict) -> bool:
"""Set up the switcher component."""
from aioswitcher.bridge import SwitcherV2Bridge
phone_id = config[DOMAIN][CONF_PHONE_ID]
device_id = config[DOMAIN][CONF_DEVICE_ID]
device_password = config[DOMAIN][CONF_DEVICE_PASSWORD]
v2bridge = SwitcherV2Bridge(
hass.loop, phone_id, device_id, device_password)
await v2bridge.start()
async def async_stop_bridge(event: EventType) -> None:
"""On homeassistant stop, gracefully stop the bridge if running."""
await v2bridge.stop()
hass.async_add_job(hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_stop_bridge))
try:
device_data = await wait_for(
v2bridge.queue.get(), timeout=5.0, loop=hass.loop)
except (Asyncio_TimeoutError, RuntimeError):
_LOGGER.exception("failed to get response from device")
await v2bridge.stop()
return False
hass.data[DOMAIN] = {
DATA_DEVICE: device_data
}
hass.async_create_task(async_load_platform(
hass, SWITCH_DOMAIN, DOMAIN, None, config))
@callback
def device_updates(timestamp: Optional[datetime]) -> None:
"""Use for updating the device data from the queue."""
if v2bridge.running:
try:
device_new_data = v2bridge.queue.get_nowait()
if device_new_data:
async_dispatcher_send(
hass, SIGNAL_SWITCHER_DEVICE_UPDATE, device_new_data)
except QueueEmpty:
pass
async_track_time_interval(hass, device_updates, timedelta(seconds=4))
return True
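# A minimal sketch (an assumption, not part of this component) of how a platform module could
# consume the updates dispatched above through SIGNAL_SWITCHER_DEVICE_UPDATE.
from homeassistant.helpers.dispatcher import async_dispatcher_connect  # noqa: E402


def log_device_updates(hass: HomeAssistantType) -> None:
    """Subscribe to device updates and log each pushed state (illustration only)."""
    @callback
    def handle_update(device_data) -> None:
        _LOGGER.debug("Received switcher device update: %s", device_data)

    async_dispatcher_connect(hass, SIGNAL_SWITCHER_DEVICE_UPDATE, handle_update)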
|
import tempfile
import numpy as np
import du
from du.numpy_utils import (expand_dim,
memmap_concatenate,
constant_value_array)
from du._test_utils import equal
def test_expand_dim():
z = np.zeros((2, 2))
x = np.arange(4).reshape(2, 2)
assert np.alltrue(np.vstack([x, z]) == expand_dim(x, 4, axis=0))
assert np.alltrue(np.hstack([x, z]) == expand_dim(x, 4, axis=1))
def test_memmap_concatenate():
x = np.random.randn(3, 3)
l = [x, x, x]
with tempfile.TemporaryFile() as f:
assert np.alltrue(memmap_concatenate(f, l, 0) == np.concatenate(l, 0))
assert np.alltrue(memmap_concatenate(f, l, 1) == np.concatenate(l, 1))
def test_constant_value_array():
equal(7.5, constant_value_array(1.5, 5).sum())
equal(5, constant_value_array(1.5, 5, int).sum())
def test_generator_std():
x1 = np.random.rand(100, 12)
x2 = 0.32 * np.random.rand(100, 34) + 2.0
x3 = 1.5 * np.random.rand(100, 100) - 1.0
xs = [x1, x2, x3]
np.testing.assert_allclose(du.numpy_utils.generator_std(xs, axis=1),
np.concatenate(xs, axis=1).std(axis=1))
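# For context, a reference implementation consistent with test_expand_dim above (an
# illustration of the expected behaviour, not the actual du.numpy_utils code):
def _expand_dim_reference(x, new_size, axis=0):
    """Zero-pad x along axis so that its length on that axis becomes new_size."""
    pad_shape = list(x.shape)
    pad_shape[axis] = new_size - x.shape[axis]
    return np.concatenate([x, np.zeros(pad_shape, dtype=x.dtype)], axis=axis)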
|
"""cubelab_notebook - Jupyter Notebook-based web frontend for CubeViz operations"""
__version__ = '0.1.0'
__author__ = 'Nicholas Earl <nearl@stsci.edu>'
__all__ = []
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-12 18:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mathswizard', '0030_auto_20180330_1159'),
]
operations = [
migrations.AddField(
model_name='studentprofile',
name='completed',
field=models.IntegerField(default='1'),
),
migrations.AddField(
model_name='studentprofile',
name='levelprog',
field=models.IntegerField(default='0'),
),
migrations.AddField(
model_name='studentprofile',
name='skipped',
field=models.IntegerField(default='0'),
),
migrations.AlterField(
model_name='studentprofile',
name='cof',
field=models.IntegerField(default='0'),
),
migrations.AlterField(
model_name='studentprofile',
name='focus',
field=models.CharField(choices=[('Addition', 'Addition'), ('Multiplication', 'Multiplication'), ('Subtraction', 'Subtraction')], default='Addition', max_length=100),
),
migrations.AlterField(
model_name='studentprofile',
name='level',
field=models.IntegerField(default='1'),
),
]
|
import copy
import threading
import queue
class SingleTaskListener():
def __init__(self):
self.ev = threading.Event()
def wait(self):
self.ev.wait()
def notify(self, _id, _data):
self.ev.set()
class MultipleTaskListener():
def __init__(self, count):
self.count = 0
self.expected = count
self.ev = threading.Event()
def wait(self):
self.ev.wait()
def notify(self, _id, _data):
self.count += 1
if self.count == self.expected:
self.ev.set()
class TaskFuture():
def __init__(self):
self.data = None
def notify(self, _id, _data):
if _data is not None:
self.data = copy.deepcopy(_data)
class ThreadedWorkQueue(object):
def __init__(self):
self.queue = queue.Queue()
self.thread_handle = None
self.work_lock = threading.Lock()
self.tasks = {}
self.payload = {}
self.context_id = None
self.context_copy = None
self.listeners = None
def start(self):
self.work_lock.acquire()
self.thread_handle = threading.Thread(target=lambda e: e.thread_run_loop(), args=(self,), daemon=True)
self.thread_handle.start()
self.work_lock.release()
def _stop_active_task(self, _id):
pass
def stop(self):
self.work_lock.acquire()
if self.thread_handle == None:
self.work_lock.release()
return
#flush queue
_tasks = self.tasks
_payload = self.payload
self.tasks = {}
self.payload = {}
self.queue.put(None)
if self.context_id != None:
self.tasks[self.context_id] = _tasks[self.context_id]
self.payload[self.context_id] = _payload[self.context_id]
del _tasks[self.context_id]
del _payload[self.context_id]
self._stop_active_task(self.context_id)
for i,v in _tasks.items():
self._task_removed(i, copy.deepcopy(v), _payload[i])
self.work_lock.release()
self.thread_handle.join()
self.thread_handle = None
# Discard a queued item. The item must not have started yet; if it has, the discard will fail.
def remove(self, _id):
self.work_lock.acquire()
if _id != self.context_id:
self._try_remove_task(_id)
self.work_lock.release()
def _task_removed(self, _id, _data, _payload):
pass
def _try_remove_task(self, _id):
data = self.tasks.get(_id,None)
if data != None:
_p = self.payload[_id]
del self.tasks[_id]
del self.payload[_id]
self._task_removed(_id, copy.deepcopy(data), _p)
#add item to queue
def add(self, _id, _item_dict, _payload):
self.work_lock.acquire()
if self.tasks == None or _id in self.tasks:
self.work_lock.release()
return False
self.tasks[_id] = _item_dict
self.payload[_id] = _payload
self.work_lock.release()
self.queue.put(_id)
return True
def query_items(self):
self.work_lock.acquire()
result = copy.deepcopy(self.tasks)
if self.context_copy != None:
result[self.context_id] = copy.deepcopy(self.context_copy)
self.work_lock.release()
return result
def query_status(self):
status = None
tasks = None
active = None
self.work_lock.acquire()
if (self.thread_handle != None):
status = "active"
else:
status = "inactive"
tasks = copy.deepcopy(self.tasks)
if self.context_copy != None:
active = copy.deepcopy(self.context_copy)
self.work_lock.release()
return {
"status" : status,
"queue" : tasks,
"active" : active
}
def is_active(self):
self.work_lock.acquire()
if self.thread_handle == None:
self.work_lock.release()
return False
result = len(self.tasks)
self.work_lock.release()
if result > 0:
return True
return False
def wait(self):
self.work_lock.acquire()
sz = len(self.tasks)
if sz > 0:
ev = threading.Event()
func = lambda: ev.set()
if self.listeners == None:
self.listeners = [func]
else:
self.listeners.append(func)
self.work_lock.release()
ev.wait()
return
self.work_lock.release()
def prepare_task(self, _id, _itm):
return copy.deepcopy(_itm), self.payload.get(_id, None)
def execute_active_task(self, _id, _payload):
pass
def task_finished(self, _id, _task_copy, _payload):
del self.tasks[_id]
del self.payload[_id]
if len(self.tasks) == 0 and self.listeners != None:
l = self.listeners
self.listeners = None
for func in l:
func()
def acquire_active_context(self):
self.work_lock.acquire()
return self.context_copy
def release_active_context(self, _ctx):
self.work_lock.release()
def thread_run_loop(self):
while True:
_id = self.queue.get()
if _id == None:
break
self.work_lock.acquire()
_item = self.tasks.get(_id,None)
if _item != None:
_work_item_copy, _exec_payload = self.prepare_task(_id, _item)
self.context_id = _id
self.context_copy = _work_item_copy
self.work_lock.release()
self.execute_active_task(_id, _exec_payload)
self.work_lock.acquire()
self.context_id = None
self.context_copy = None
self.task_finished(_id, _work_item_copy, _exec_payload)
#else: item could be removed before it was processed
self.work_lock.release()
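# A minimal sketch of how this queue is meant to be specialised: subclass it and override
# execute_active_task() with the real work. The names and payloads below are illustrative only.
if __name__ == "__main__":
    class PrintingWorkQueue(ThreadedWorkQueue):
        def execute_active_task(self, _id, _payload):
            # _payload is whatever was handed to add(); here it is simply printed.
            print("running task", _id, "with payload", _payload)

    q = PrintingWorkQueue()
    q.start()
    q.add("task-1", {"state": "queued"}, {"value": 42})
    q.add("task-2", {"state": "queued"}, {"value": 43})
    q.wait()   # block until both tasks have been executed
    q.stop()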
|
from spock.plugins import DefaultPlugins
class PluginLoader:
def __init__(self, client, settings):
self.plugins = settings['plugins']
del settings['plugins']
self.plugin_settings = settings['plugin_settings']
del settings['plugin_settings']
self.announce = {}
self.extensions = {
'Client': client,
'Settings': settings
}
for plugin in self.plugins:
if hasattr(plugin, 'pl_announce'):
for ident in plugin.pl_announce:
self.announce[ident] = plugin
# Make an attempt at providing the reg_event_handler API
# But we can't guarantee it will be there (Ha!)
event = self.requires('Event')
self.reg_event_handler = event.reg_event_handler if event else None
while self.plugins:
plugin = self.plugins.pop()
plugin(self, self.plugin_settings.get(plugin, None))
def requires(self, ident):
if ident not in self.extensions:
if ident in self.announce:
plugin = self.announce[ident]
self.plugins.remove(plugin)
plugin(self, self.plugin_settings.get(plugin, None))
else:
return None
return self.extensions[ident]
def provides(self, ident, obj):
self.extensions[ident] = obj
#2 values = Attribute&Setting name, default value
#3 values = Attribute name, setting name, default value
default_settings = [
('plugins', DefaultPlugins),
('plugin_settings', {}),
('mc_username', 'username', 'Bot'),
('mc_password', 'password', ''),
('authenticated', True),
('thread_workers', 5),
('bufsize', 4096),
('sock_quit', True),
('sess_quit', True),
]
for index, setting in enumerate(default_settings):
if len(setting) == 2:
default_settings[index] = (setting[0], setting[0], setting[1])
class Client:
def __init__(self, **kwargs):
#Grab some settings
settings = kwargs.get('settings', {})
final_settings = {}
for setting in default_settings:
val = kwargs.get(setting[1], settings.get(setting[1], setting[2]))
final_settings[setting[0]] = val
PluginLoader(self, final_settings)
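# Illustrative only: keyword arguments are matched against the *setting* name (the second
# element of each normalised default_settings tuple), so 'username' maps to 'mc_username'.
# 'ExampleBot' and the authenticated flag below are made-up values, not project defaults.
if __name__ == "__main__":
    Client(username='ExampleBot', authenticated=False)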
|
import json
import os
import confuse
import boto3
from botocore.exceptions import ClientError
from cachetools import Cache
class MissingCredentials(Exception):
"""
Credentials are missing, see the error output to find possible causes
"""
pass
class CredentialProvider:
credentials = None
cache = Cache(maxsize=10)
def __init__(self, account='default', credentials=None):
self.account = account
self.read_credentials = [
self.from_env,
self.from_secrets,
self.read_config
]
if credentials:
self.credentials = self.Config(**credentials)
missing = self.credentials.check_config()
if len(missing):
raise MissingCredentials('The following configuration parameters are missing: {}'.format(missing))
else:
self.load_credentials()
def load_credentials(self):
for read_method in self.read_credentials:
if read_method():
return True
def from_secrets(self):
if not os.environ.get('SP_API_AWS_SECRET_ID', None):
return
try:
client = boto3.client('secretsmanager')
response = client.get_secret_value(
SecretId=os.environ.get('SP_API_AWS_SECRET_ID')
)
secret = json.loads(response.get('SecretString'))
account_data = dict(
refresh_token=secret.get('SP_API_REFRESH_TOKEN'),
lwa_app_id=secret.get('LWA_APP_ID'),
lwa_client_secret=secret.get('LWA_CLIENT_SECRET'),
aws_secret_key=secret.get('SP_API_SECRET_KEY'),
aws_access_key=secret.get('SP_API_ACCESS_KEY'),
role_arn=secret.get('SP_API_ROLE_ARN')
)
self.cache['account_data'] = json.dumps(account_data)
except ClientError as client_error:
return
else:
self.credentials = self.Config(**account_data)
return len(self.credentials.check_config()) == 0
def from_env(self):
try:
account_data = json.loads(self.cache['account_data'])
except KeyError:
account_data = dict(
refresh_token=self._get_env('SP_API_REFRESH_TOKEN'),
lwa_app_id=self._get_env('LWA_APP_ID'),
lwa_client_secret=self._get_env('LWA_CLIENT_SECRET'),
aws_secret_key=self._get_env('SP_API_SECRET_KEY'),
aws_access_key=self._get_env('SP_API_ACCESS_KEY'),
role_arn=self._get_env('SP_API_ROLE_ARN')
)
self.credentials = self.Config(**account_data)
return len(self.credentials.check_config()) == 0
def _get_env(self, key):
return os.environ.get('{}_{}'.format(key, self.account),
os.environ.get(key))
def read_config(self):
try:
config = confuse.Configuration('python-sp-api')
config_filename = os.path.join(config.config_dir(), 'credentials.yml')
config.set_file(config_filename)
account_data = config[self.account].get()
self.credentials = self.Config(**account_data)
missing = self.credentials.check_config()
if len(missing):
raise MissingCredentials('The following configuration parameters are missing: {}'.format(missing))
except confuse.exceptions.NotFoundError:
raise MissingCredentials('The account {} was not setup in your configuration file.'.format(self.account))
except confuse.exceptions.ConfigReadError:
raise MissingCredentials(
'Neither environment variables nor a config file were found. '
'Please set the correct variables, or use a config file (credentials.yml). '
'See https://confuse.readthedocs.io/en/latest/usage.html#search-paths for search paths.'
)
else:
return True
class Config:
def __init__(self,
refresh_token,
lwa_app_id,
lwa_client_secret,
aws_access_key,
aws_secret_key,
role_arn
):
self.refresh_token = refresh_token
self.lwa_app_id = lwa_app_id
self.lwa_client_secret = lwa_client_secret
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.role_arn = role_arn
def check_config(self):
errors = []
for k, v in self.__dict__.items():
if not v and k != 'refresh_token':
errors.append(k)
return errors
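# Illustrative usage only; the credential values below are placeholders, not working keys.
if __name__ == "__main__":
    provider = CredentialProvider(credentials=dict(
        refresh_token="<refresh-token>",
        lwa_app_id="<lwa-app-id>",
        lwa_client_secret="<lwa-client-secret>",
        aws_access_key="<aws-access-key>",
        aws_secret_key="<aws-secret-key>",
        role_arn="<role-arn>",
    ))
    # Passing an explicit dict skips the env-var, AWS Secrets Manager and credentials.yml
    # lookups; if any required value were blank, __init__ would raise MissingCredentials.
    print(provider.credentials.lwa_app_id)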
|
'''Implements the `java_compiler_toolchain` rule.
Java compiler toolchain instances are created with `java_compiler_toolchain`
rule instances. A separate `toolchain` rule instance is used to declare a
`java_compiler_toolchain` instance has the type
`@dwtj_rules_java//java/toolchains/java_compiler_toolchain:toolchain_type`.
See [the Bazel Toolchain documentation][1] for more information.
An example might look something like this:
```build
java_compiler_toolchain(
name = "_my_javac",
javac_executable = ":my_javac_executable",
)
toolchain(
name = "my_javac",
exec_compatible_with = [
...
],
target_compatible_with = [
...
],
toolchain = ":_my_javac",
toolchain_type = "@dwtj_rules_java//java/toolchains/java_compiler_toolchain:toolchain_type",
)
```
[1]: https://docs.bazel.build/versions/3.4.0/toolchains.html
'''
JavaCompilerToolchainInfo = provider(
doc = "Specifies the tools, scripts, and configuration needed to compile and JAR Java targets.",
fields = {
"javac_executable": "A `javac` executable file (in the host configuration).",
"jar_executable": "A `jar` executable file (in the host configuration).",
"compile_java_jar_script_template": "A template for a script which is used to compile Java sources to a JAR file.",
"class_path_separator": "The class path separator to use when invoking this `javac` executable."
},
)
def _java_compiler_toolchain_impl(ctx):
java_compiler_toolchain_info = JavaCompilerToolchainInfo(
javac_executable = ctx.file.javac_executable,
jar_executable = ctx.file.jar_executable,
compile_java_jar_script_template = ctx.file._compile_java_jar_script_template,
class_path_separator = ctx.attr.class_path_separator,
)
toolchain_info = platform_common.ToolchainInfo(
java_compiler_toolchain_info = java_compiler_toolchain_info,
)
return [
toolchain_info,
java_compiler_toolchain_info,
]
java_compiler_toolchain = rule(
implementation = _java_compiler_toolchain_impl,
attrs = {
"javac_executable": attr.label(
allow_single_file = True,
mandatory = True,
executable = True,
cfg = "host",
),
"jar_executable": attr.label(
allow_single_file = True,
mandatory = True,
executable = True,
cfg = "host",
),
# NOTE(dwtj): This seems like a somewhat roundabout way to make this
# template available for instantiation in the `compile_java_jar()`
# helper function, but I haven't yet figured out another way to do it
# which resolves the label to a `File`.
# TODO(dwtj): Try the `Label()` constructor.
"_compile_java_jar_script_template": attr.label(
default = "//java:common/actions/TEMPLATE.compile_java_jar.sh",
allow_single_file = True,
),
"class_path_separator": attr.string(
default = ":", # Defaults to the Unix-like class-path separator.
)
},
provides = [JavaCompilerToolchainInfo]
)
|
# Creating a system of authenticating users based on EEG data collected from the Physionet EEGBCI Dataset
import os
import keras
import mne
from mne.preprocessing import ICA, create_ecg_epochs
from mne import pick_types
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
import tensorflow.keras.metrics
import numpy as np
from collections import Counter
import math
from sklearn.utils import resample
from keras import metrics
from imblearn.over_sampling import SMOTE
from sklearn.metrics import precision_recall_curve
#Create the task lists each corresponding to a motor movement
runs_set = []
task1 = [3,7,11]
task2 = [4,8,12]
task3 = [5,9,13]
task4 = [6,10,14]
# ICA function that converts raw data to processed data
def getIca(subject, runs):
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw)
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.rename_channels(lambda x: x.strip('.'))
raw.filter(14., 30.)
picks = pick_types(raw.info, eeg=True)
ica = ICA(n_components=15, random_state=97)
ica.fit(raw)
raw.load_data()
icaArray = ica.get_components()
return icaArray
# Create the motor movement passkeys from the processed data
for i in range(1,108):
hand_set = []
feet_set = []
for r in range(3):
runA = getIca(i, task1[r]) #ica for run in task1
runB = getIca(i, task3[r]) #ica for run in task3
hand = np.concatenate((runA,runB),axis=1) # person's task 1 then 3
runC = getIca(i, task2[r])
runD = getIca(i, task4[r])
feet = np.concatenate((runC,runD),axis=1)
hand_set.append(hand)
feet_set.append(feet)
for i in range(15):
hand = hand_set[np.random.randint(0,3)]
feet = feet_set[np.random.randint(0,3)]
hand2 = hand_set[np.random.randint(0,3)]
runs_set.append(np.concatenate((hand,feet,hand2),axis=1))
# 2. SMOTE resample to increase the amount of training data
data = runs_set
# the smote function
def smote_resample(x,y):
smote = SMOTE(random_state=42, sampling_strategy='minority')
sx, sy = smote.fit_resample(x, y)
return sx, sy
# Get x and y for the classifier
x,y = [],[]
for i in range(len(data)):
features = np.array([feature for feature in data[i][0]]).flatten()
x.append(features)
if i<15:
y.append(1)
else:
y.append(0)
print(y)
# Use smote
sx, sy = smote_resample(x,y)
# Classification
print(Counter(sy))
x_train, x_test, y_train, y_test = train_test_split(sx, sy, test_size=0.3)
x_train = normalize(np.asarray(x_train))
x_test = normalize(np.asarray(x_test))
print(x_train)
y_train = np.asarray(y_train).astype('int32').reshape((-1, 1))
y_test = np.asarray(y_test).astype('int32').reshape((-1, 1))
from tensorflow.keras.models import Sequential
model = Sequential([
Dense(30, activation='relu'),
Dropout(0.75),
Dense(15, activation='relu'),
Dropout(0.375),
Dense(10, activation='relu'),
Dense(1, activation='sigmoid')])
model.compile(optimizer='Adam',loss='binary_crossentropy', metrics=['accuracy','TruePositives','TrueNegatives','FalsePositives','FalseNegatives'])
model.fit(x_train, y_train, epochs=30, validation_data=(x_test, y_test), verbose=2)
# Sequential.predict_classes was removed in newer Keras releases; threshold the sigmoid output instead
y_pred = (model.predict(x_test) > 0.5).astype("int32")
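# Summarise performance with the sklearn helpers imported above (an illustrative follow-up;
# the 0.5 threshold and this choice of summary are assumptions, not tuned values).
print(classification_report(y_test.ravel(), y_pred.ravel()))
precision, recall, thresholds = precision_recall_curve(y_test.ravel(), model.predict(x_test).ravel())
print("first few precision/recall pairs along the curve:", list(zip(precision, recall))[:5])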
|
import plotly
import plotly.graph_objects as go
import dash
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import ipdb
from igraph import Graph
import networkx as nx
import colorlover as cl
from itertools import chain, combinations
from collections import Counter, OrderedDict
from functools import reduce
import json
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
import app_layout
graph = Graph.Read_Ncol("data/gov_to_gov/edges.txt")
start_vertex = "cdc.gov"
start_sensitivity = 0.75
def labeled_pagerank(graph):
result = zip(graph.vs["name"], graph.pagerank())
return Counter(dict(result))
original_pagerank = labeled_pagerank(graph)
rankings = {
page: rank + 1 for rank, (page, score) in enumerate(original_pagerank.most_common())
}
def base_normalize(sub, orig, sensitivity=0.75):
return sub / (orig ** sensitivity)
def relative_pagerank(subgraph, normalize=base_normalize, sensitivity=0.75):
subgraph_pagerank = labeled_pagerank(subgraph)
# for each vertex v, normalize its subgraph pagerank by its original pagerank
# according to the normalization function
return Counter(
{
v: normalize(subgraph_pagerank[v], original_pagerank[v], sensitivity)
for v in subgraph_pagerank.keys()
}
)
def get_adjacent_subgraph(vertex, mode="ALL", include_self=False):
vertex_id = graph.vs.find(name=vertex).index
adjacent_vertices = graph.neighbors(vertex, mode=mode)
if not include_self:
proper_adjacents = [v for v in adjacent_vertices if v != vertex_id]
return graph.subgraph(proper_adjacents)
else:
adjacent_vertices.append(vertex_id)
return graph.subgraph(adjacent_vertices)
def adjacent_pagerank(vertex, mode="ALL", normalize=base_normalize, sensitivity=0.75):
subgraph = get_adjacent_subgraph(vertex, mode=mode)
return relative_pagerank(subgraph, normalize=normalize, sensitivity=sensitivity)
def processed_pagerank(
vertex, mode="ALL", n=10, normalize=base_normalize, sensitivity=0.75
):
vertex_ranks = adjacent_pagerank(
vertex, mode=mode, normalize=normalize, sensitivity=sensitivity
).most_common(n)
vertices, scores = zip(*vertex_ranks)
scores = divide_by_max(scores)
return list(vertices), list(scores)
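# Quick illustration (not used by the Dash callbacks below): the module-level start_vertex and
# start_sensitivity feed straight into processed_pagerank to get the top adjacent pages and
# their normalised scores.
if __name__ == "__main__":
    top_pages, top_scores = processed_pagerank(start_vertex, mode="IN", n=5,
                                               sensitivity=start_sensitivity)
    for page, score in zip(top_pages, top_scores):
        print(f"{page}: {score:.3f}")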
def cocitation(g, vertices):
A = np.array(g.get_adjacency().data)
v_ids = [g.vs.find(name=v).index for v in vertices]
return {
(g.vs[i]["name"], g.vs[j]["name"]): A[i] @ A[j]
for i, j in combinations(v_ids, 2)
}
def biblio(g, vertices):
A = np.array(g.get_adjacency().data)
v_ids = [g.vs.find(name=v).index for v in vertices]
return {
(g.vs[i]["name"], g.vs[j]["name"]): A[:, i] @ A[:, j]
for i, j in combinations(v_ids, 2)
}
def get_hyperlink(website):
return f"<a href='https://{website}'> {website}</a>"
def divide_by_max(X):
A = np.array(list(X))
m = np.max(A)
A = 1 / m * A
return A
def list_concat(lists):
return reduce(lambda a, b: a + b, lists, [])
def get_starting_positions(vertices):
if not vertices:
raise ValueError("There are no vertices")
y_coords = np.linspace(0.5, -0.5, len(vertices))
return {
vertex: (np.random.uniform(-0.01, 0.01), y)
for vertex, y in zip(vertices, y_coords)
}
def top_edges_for_vertex(v, edge_weights):
vertex_edge_weights = Counter(
{edge: weight for edge, weight in edge_weights.items() if v in edge}
)
top_edges_for_vertex = [edge for edge, weight in vertex_edge_weights.most_common(2)]
return top_edges_for_vertex
def get_positions(edge_weights, subgraph_vertices, mode):
starting_positions = get_starting_positions(subgraph_vertices)
if len(subgraph_vertices) > 1:
weighted_edgelist = [(u, v, w) for (u, v), w in edge_weights.items()]
g = nx.Graph()
g.add_weighted_edges_from(weighted_edgelist)
positions = nx.spring_layout(
g, weight="weight", scale=1 / 2, pos=starting_positions
).values()
else:
positions = starting_positions.values()
if mode == "OUT":
positions = [p + np.array([2.5, 0]) for p in positions]
return positions
def get_subgraph_edge_weights(vertex, adjacent_subgraph, subgraph_vertices, mode):
all_edge_weights = biblio(adjacent_subgraph, subgraph_vertices + [vertex])
adjacent_edge_weights = {
edge: max(weight, 0.1)
for edge, weight in all_edge_weights.items()
if vertex in edge
}
local_edge_weights = {
edge: weight for edge, weight in all_edge_weights.items() if vertex not in edge
}
top_local_edges = list_concat(
top_edges_for_vertex(v, local_edge_weights) for v in subgraph_vertices
)
top_local_edge_weights = {
edge: max(weight, 0.1)
for edge, weight in local_edge_weights.items()
if edge in top_local_edges
}
return top_local_edge_weights, adjacent_edge_weights
def get_node_data(vertex, subgraph_vertices, positions, sizes, mode):
subgraph_node_data = {
v: {"x": x, "y": y, "size": size, "type": mode.lower()}
for v, (x, y), size in zip(subgraph_vertices, positions, sizes)
}
center_node_data = {vertex: {"x": 1.25, "y": 0, "size": 1.1, "type": "center"}}
node_data = {**subgraph_node_data, **center_node_data}
return node_data
def get_normalized_edge_weights(local_edge_weights, adjacent_edge_weights):
edge_weights = {**local_edge_weights, **adjacent_edge_weights}
m = max(edge_weights.values())
normalized_edge_weights = {
edge: weight / m for edge, weight in edge_weights.items()
}
return normalized_edge_weights
def get_edge_data(normalized_edge_weights, node_data):
edge_data = [
{"source": source, "target": target, "weight": weight}
for (source, target), weight in normalized_edge_weights.items()
]
for edge in edge_data:
edge["u_x"] = node_data[edge["source"]]["x"]
edge["u_y"] = node_data[edge["source"]]["y"]
edge["v_x"] = node_data[edge["target"]]["x"]
edge["v_y"] = node_data[edge["target"]]["y"]
return edge_data
def get_graph_info(subgraph, mode):
mode_text = "incoming" if mode == "IN" else "outgoing"
n_vertices = subgraph.vcount() - 1
return f"Number of {mode_text} edges: {n_vertices}"
def get_subgraph_data(vertex, mode="IN", sensitivity=0.75, n=6):
# ipdb.set_trace(context=10)
subgraph_vertices, sizes = processed_pagerank(
vertex, mode=mode, sensitivity=sensitivity, n=n
)
adjacent_subgraph = get_adjacent_subgraph(vertex, mode=mode, include_self=True)
local_edge_weights, adjacent_edge_weights = get_subgraph_edge_weights(
vertex, adjacent_subgraph, subgraph_vertices, mode
)
positions = get_positions(local_edge_weights, subgraph_vertices, mode)
node_data = get_node_data(vertex, subgraph_vertices, positions, sizes, mode)
normalized_edge_weights = get_normalized_edge_weights(
local_edge_weights, adjacent_edge_weights
)
edge_data = get_edge_data(normalized_edge_weights, node_data)
info = get_graph_info(adjacent_subgraph, mode)
return node_data, edge_data, info
def make_edge_trace(edge):
return go.Scatter(
x=(edge["u_x"], edge["v_x"], None),
y=(edge["u_y"], edge["v_y"], None),
line=dict(width=5 * edge["weight"], color="#999"),
showlegend=False,
text=f"{edge['source']} -> {edge['target']}",
hoverinfo="none",
mode="lines",
)
def get_node_df(node_data):
df = pd.DataFrame(node_data).T.reset_index()
n_colors = len(node_data.keys())
n_colors = str(max(3, n_colors))
colors = cl.scales[n_colors]["qual"]["Paired"]
df["color"] = colors[: len(df)]
df["label_y"] = [0.5 - 0.2 * i for i in range(len(df))]
    # np.linspace(-0.5, 0.5, len(df)) would space the labels evenly; unused here
df = df.rename(columns={"index": "name"})
df["link"] = df.name.apply(get_hyperlink)
return df
def get_half_of_graph(node_df, edge_data):
node_traces = [
go.Scatter(
x=node_df.x,
y=node_df.y,
mode="markers",
hoverinfo="text",
customdata=node_df.name,
text=node_df.name,
legendgroup="nodes",
            marker=dict(size=0.25 * node_df["size"], color=node_df.color),
line_width=2,
showlegend=False,
)
]
edge_traces = [make_edge_trace(edge) for edge in edge_data]
return node_traces, edge_traces
def get_legend(df, info):
legend_trace = go.Scatter(
x=[0] * len(df[:-1]),
        y=df[:-1].label_y,
text=df[:-1].link,
marker=(dict(size=20, color=df[:-1].color)),
showlegend=False,
mode="markers+text",
textposition="middle right",
hoverinfo="none",
)
invis_trace = go.Scatter(
x=[8] * len(df[:-1]),
        y=df[:-1].label_y,
name="incoming",
text=df[:-1].link,
marker=(dict(size=0, color=df[:-1].color)),
showlegend=False,
mode="none",
hoverinfo="none",
)
return {
"data": [legend_trace, invis_trace],
"layout": go.Layout(
title=info,
hovermode="closest",
xaxis={"showgrid": False, "zeroline": False, "showticklabels": False},
yaxis={"showgrid": False, "zeroline": False, "showticklabels": False},
),
}
def get_state(vertex, sensitivity, cache):
if (f"{vertex}, {sensitivity}") in cache:
(
in_node_data,
in_edge_data,
left_info,
out_node_data,
out_edge_data,
right_info,
) = cache[f"{vertex}, {sensitivity}"]
else:
in_node_data, in_edge_data, left_info = get_subgraph_data(
vertex, mode="IN", sensitivity=sensitivity
)
out_node_data, out_edge_data, right_info = get_subgraph_data(
vertex, mode="OUT", sensitivity=sensitivity
)
cache[f"{vertex}, {sensitivity}"] = (
in_node_data,
in_edge_data,
left_info,
out_node_data,
out_edge_data,
right_info,
)
in_node_df = get_node_df(in_node_data)
left_node_traces, left_edge_traces = get_half_of_graph(in_node_df, in_edge_data)
out_node_df = get_node_df(out_node_data)
right_node_traces, right_edge_traces = get_half_of_graph(out_node_df, out_edge_data)
left_legend = get_legend(in_node_df, left_info)
right_legend = get_legend(out_node_df, right_info)
graph = {
"data": left_edge_traces
+ right_edge_traces
+ left_node_traces
+ right_node_traces,
"layout": go.Layout(
hovermode="closest",
xaxis={"showgrid": False, "zeroline": False, "showticklabels": False},
yaxis={"showgrid": False, "zeroline": False, "showticklabels": False},
),
}
title = f"#### [{vertex}](http://vertex): pagerank {rankings[vertex]} of {len(rankings)}"
return (title, left_info, left_legend, graph, right_info, right_legend, cache)
external_stylesheets = [
dbc.themes.BOOTSTRAP,
"https://codepen.io/chriddyp/pen/bWLwgP.css",
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = app_layout.layout
app.title = 'Web Flyover'
@app.callback(
[
Output("title-text", "children"),
Output("left-info-panel", "children"),
Output("left-legend-panel", "figure"),
Output("graph", "figure"),
Output("right-info-panel", "children"),
Output("right-legend-panel", "figure"),
Output("cache", "children"),
],
[
Input("graph", "clickData"),
Input("back-button", "n_clicks"),
Input("sensitivity-slider", "value"),
Input("node-search", "value"),
],
[
State("left-legend-panel", "figure"),
State("graph", "figure"),
State("right-legend-panel", "figure"),
State("cache", "children"),
State("history", "children"),
],
)
def update_view(
clickData,
n_clicks,
sensitivity,
searched_node,
left_legend,
graph,
right_legend,
cache,
history,
):
history = json.loads(history) if history else []
cache = json.loads(cache) if cache else {}
inputs = dash.callback_context.inputs
if app_start(inputs):
state = get_state(start_vertex, start_sensitivity, cache)
else:
trigger = dash.callback_context.triggered[0]["prop_id"].split(".")[0]
if trigger == "graph" and clickData["points"][0].get("customdata"):
new_vertex = clickData["points"][0]["customdata"].split()[0]
state = get_state(new_vertex, sensitivity, cache)
elif trigger == "back-button" and len(history) > 1:
previous_vertex = history[-2]
state = get_state(previous_vertex, sensitivity, cache)
elif trigger == "sensitivity-slider":
current_vertex = history[-1]
state = get_state(current_vertex, sensitivity, cache)
elif trigger == "node-search" and searched_node:
new_vertex = searched_node
state = get_state(new_vertex, sensitivity, cache)
else:
raise PreventUpdate
(title, left_info, left_legend, graph, right_info, right_legend, cache) = state
return (
title,
"", # left_info
left_legend,
graph,
"", # right_info
right_legend,
json.dumps(cache),
)
@app.callback(
Output("sensitivity-display", "children"), [Input("sensitivity-slider", "value")]
)
def update_sensitivity_display(value):
return f"Sensitivity: {value:.2f}"
@app.callback(
Output("history", "children"),
[
Input("graph", "clickData"),
Input("back-button", "n_clicks"),
Input("node-search", "value"),
],
[State("history", "children")],
)
def update_history(clickData, n_clicks, searched_node, history):
if history:
history = json.loads(history)
else:
history = []
inputs = dash.callback_context.inputs
if app_start(inputs):
history.append(start_vertex)
else:
trigger = dash.callback_context.triggered[0]["prop_id"].split(".")[0]
if trigger == "graph" and clickData["points"][0].get("customdata"):
new_point = clickData["points"][0]["customdata"].split()[0]
history.append(new_point)
elif trigger == "back-button":
if len(history) > 1:
history.pop()
elif trigger == "node-search" and searched_node:
history.append(searched_node)
return json.dumps(history)
@app.callback(
Output("input-data", "children"),
[
Input("graph", "clickData"),
Input("back-button", "n_clicks"),
Input("sensitivity-slider", "value"),
Input("node-search", "value"),
],
[State("input-data", "children")],
)
def dump_input_data(clickData, n_clicks, sensitivity, searched_node, data):
data = json.loads(data) if data else []
inputs = dash.callback_context.inputs
inputs["app_start"] = app_start(inputs)
data.append(inputs)
return json.dumps(data, indent=2)
@app.callback(
Output("trigger-data", "children"),
[
Input("graph", "clickData"),
Input("back-button", "n_clicks"),
Input("sensitivity-slider", "value"),
Input("node-search", "value"),
],
[State("trigger-data", "children")],
)
def dump_trigger_data(clickData, n_clicks, sensitivity, searched_node, data):
data = json.loads(data) if data else []
trigger = dash.callback_context.triggered
data.append(trigger)
return json.dumps(data, indent=2)
# dash.callback_context.triggered[0]["prop_id"].split(".")[0]
@app.callback(Output("click-data", "children"), [Input("graph", "clickData")])
def dump_click_data(clickData):
return json.dumps(clickData, indent=2)
def app_start(inputs):
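    # True only on the very first callback invocation: no graph click yet, the
    # sensitivity slider still at its default (assumed 0.75 here), no back-button
    # press, and the search box still holding the initial start_vertex.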
graph_clicked = bool(inputs.get("graph.clickData"))
sensitivity_adj = bool(
inputs.get("sensitivity-slider.value")
and inputs.get("sensitivity-slider.value") != 0.75
)
back_pressed = bool(inputs.get("back-button.n_clicks"))
node_been_searched = inputs.get("node-search.value") != start_vertex
if not any([graph_clicked, sensitivity_adj, back_pressed, node_been_searched]):
return True
return False
if __name__ == "__main__":
app.run_server(debug=False)
|
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Tests for batch execution of jobs on AWS"""
import pennylane as qml
import pytest
from braket.aws import AwsDevice
from pennylane import numpy as np
from braket.pennylane_plugin import BraketAwsQubitDevice, BraketLocalQubitDevice
@pytest.mark.parametrize("shots", [None])
def test_batch_execution_of_gradient(device, shots, mocker):
"""Test that the output of a parallelized execution of batch circuits to evaluate the
gradient is correct in comparison to default.qubit."""
qubits = 2
layers = 2
dev_aws = device(qubits)
if isinstance(dev_aws, BraketLocalQubitDevice):
pytest.skip("Parallelized batch execution is only supported on the remote AWS device")
dev_aws._parallel = True
dev_default = qml.device("default.qubit", wires=qubits)
def func(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=range(qubits))
return qml.expval(qml.PauliZ(0))
qnode_aws = qml.QNode(func, dev_aws)
qnode_default = qml.QNode(func, dev_default)
weights = qml.init.strong_ent_layers_uniform(layers, qubits)
dfunc_aws = qml.grad(qnode_aws)
dfunc_default = qml.grad(qnode_default)
spy1 = mocker.spy(BraketAwsQubitDevice, "execute")
spy2 = mocker.spy(BraketAwsQubitDevice, "batch_execute")
spy3 = mocker.spy(AwsDevice, "run_batch")
res_aws = dfunc_aws(weights)
res_default = dfunc_default(weights)
assert np.allclose(res_aws, res_default)
spy1.assert_called_once() # For a forward pass
spy2.assert_called_once()
spy3.assert_called_once()
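    # StronglyEntanglingLayers has qubits * layers * 3 trainable weights, and the
    # parameter-shift rule evaluates two shifted circuits per weight.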
expected_circuits = qubits * layers * 3 * 2
assert len(spy2.call_args_list[0][0][1]) == expected_circuits
@pytest.mark.parametrize("shots", [None])
def test_batch_execution_of_gradient_torch(device, shots, mocker):
"""Test that the output of a parallelized execution of batch circuits to evaluate the
gradient is correct in comparison to default.qubit when using the torch interface."""
try:
import torch
except ImportError:
pytest.skip("This test requires installation of torch")
qubits = 2
layers = 2
dev_aws = device(qubits)
if isinstance(dev_aws, BraketLocalQubitDevice):
pytest.skip("Parallelized batch execution is only supported on the remote AWS device")
dev_aws._parallel = True
dev_default = qml.device("default.qubit", wires=qubits)
def func(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=range(qubits))
return qml.expval(qml.PauliZ(0))
qnode_aws = qml.QNode(func, dev_aws, interface="torch")
qnode_default = qml.QNode(func, dev_default, interface="torch")
weights_aws = torch.tensor(
qml.init.strong_ent_layers_uniform(layers, qubits, seed=1967), requires_grad=True
)
weights_default = torch.tensor(
qml.init.strong_ent_layers_uniform(layers, qubits, seed=1967), requires_grad=True
)
spy1 = mocker.spy(BraketAwsQubitDevice, "execute")
spy2 = mocker.spy(BraketAwsQubitDevice, "batch_execute")
spy3 = mocker.spy(AwsDevice, "run_batch")
out_aws = qnode_aws(weights_aws)
out_default = qnode_default(weights_default)
out_aws.backward()
out_default.backward()
res_aws = weights_aws.grad
res_default = weights_default.grad
assert np.allclose(res_aws, res_default)
spy1.assert_called_once() # For a forward pass
spy2.assert_called_once()
spy3.assert_called_once()
expected_circuits = qubits * layers * 3 * 2
assert len(spy2.call_args_list[0][0][1]) == expected_circuits
@pytest.mark.parametrize("shots", [None])
def test_batch_execution_of_gradient_tf(device, shots, mocker):
"""Test that the output of a parallelized execution of batch circuits to evaluate the
gradient is correct in comparison to default.qubit when using the tf interface."""
tf = pytest.importorskip("tensorflow", minversion="2.4")
qubits = 2
layers = 2
dev_aws = device(qubits)
if isinstance(dev_aws, BraketLocalQubitDevice):
pytest.skip("Parallelized batch execution is only supported on the remote AWS device")
dev_aws._parallel = True
dev_default = qml.device("default.qubit", wires=qubits)
def func(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=range(qubits))
return qml.expval(qml.PauliZ(0))
qnode_aws = qml.QNode(func, dev_aws, interface="tf")
qnode_default = qml.QNode(func, dev_default, interface="tf")
weights_aws = tf.Variable(qml.init.strong_ent_layers_uniform(layers, qubits, seed=1967))
weights_default = tf.Variable(qml.init.strong_ent_layers_uniform(layers, qubits, seed=1967))
spy1 = mocker.spy(BraketAwsQubitDevice, "execute")
spy2 = mocker.spy(BraketAwsQubitDevice, "batch_execute")
spy3 = mocker.spy(AwsDevice, "run_batch")
with tf.GradientTape() as tape:
out_aws = qnode_aws(weights_aws)
res_aws = tape.gradient(out_aws, weights_aws)
with tf.GradientTape() as tape:
out_default = qnode_default(weights_default)
res_default = tape.gradient(out_default, weights_default)
assert np.allclose(res_aws, res_default)
spy1.assert_called_once() # For a forward pass
spy2.assert_called_once()
spy3.assert_called_once()
expected_circuits = qubits * layers * 3 * 2
assert len(spy2.call_args_list[0][0][1]) == expected_circuits
|
# Generated by Django 2.0.9 on 2018-12-18 13:38
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20181218_0846'),
]
operations = [
migrations.AddField(
model_name='user',
name='address',
field=models.TextField(blank=True, default='', verbose_name='address'),
),
migrations.AlterField(
model_name='user',
name='birthday',
field=models.DateField(blank=True, default=datetime.date(2018, 12, 18), verbose_name='birthday'),
),
]
|
#!/bin/env python
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError,\
SchemaMissingKeyError
from genie.libs.parser.iosxe.show_run import ShowRunPolicyMap, ShowRunInterface
class TestShowRunPolicyMap(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"policy_map": {
"L3VPN-0_in": {
"class": {
"HEY_in": {
"police": {
"cir_bps": "365",
"pir_bps": "235",
"conformed": "transmit",
"exceeded": "drop"
}
},
"OSPF": {
"police": {
"cir_bps": "543",
"pir_bps": "876",
"conformed": "transmit",
"exceeded": "drop"
}
},
"class-default": {
"police": {
"cir_bps": "2565",
"cir_bc_bytes": "4234",
"conformed": "transmit",
"exceeded": "drop"
},
"service_policy": "child"
}
}
}
}
}
golden_output = {'execute.return_value': '''\
show run policy-map L3VPN-0_in
Building configuration...
Current configuration : 56 bytes
!
policy-map L3VPN-0_in
class HEY_in
police cir 365 pir 235 conform-action transmit exceed-action drop
class OSPF
police cir 543 pir 876 conform-action transmit exceed-action drop
class class-default
police cir 2565 bc 4234 conform-action transmit exceed-action drop
service-policy child
!
end
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowRunPolicyMap(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(name= 'L3VPN-0_in')
def test_golden(self):
self.device1 = Mock(**self.golden_output)
obj = ShowRunPolicyMap(device=self.device1)
parsed_output = obj.parse(name= 'L3VPN-0_in')
self.assertEqual(parsed_output,self.golden_parsed_output)
class TestShowRunInterface(unittest.TestCase):
maxDiff = None
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'interfaces': {
'GigabitEthernet1/0/13': {
'authentication_control_direction': 'in',
'authentication_event_fail_action': 'next-method',
'authentication_fallback': 'dot1x',
'authentication_host_mode': 'multi-auth',
'authentication_order': 'dot1x mab',
'authentication_periodic': True,
'authentication_port_control': 'auto',
'authentication_priority': 'dot1x mab',
'authentication_timer_inactivity': '65535',
'authentication_timer_reauthenticate_server': True,
'authentication_violation': 'restrict',
'description': 'ISE Controlled Port',
'dot1x_pae_authenticator': True,
'dot1x_timeout_quiet_period': '5',
'dot1x_timeout_server_timeout': '10',
'dot1x_timeout_tx_period': '5',
'ip_arp_inspection_limit_rate': '1024',
'ip_dhcp_snooping_limit_rate': '100',
'load_interval': '30',
'mab': True,
'snmp_trap_link_status': False,
'snmp_trap_mac_notification_change_added': True,
'snmp_trap_mac_notification_change_removed': True,
'spanning_tree_bpduguard': 'enable',
'spanning_tree_portfast': True,
'switchport_access_vlan': '70',
'switchport_mode': 'access',
'switchport_nonegotiate': 'nonegotiate',
},
},
}
golden_output = {'execute.return_value': '''\
#show running-config interface Gi1/0/13
Building configuration...
Current configuration : 914 bytes
!
interface GigabitEthernet1/0/13
description ISE Controlled Port
switchport access vlan 70
switchport mode access
switchport nonegotiate
ip arp inspection limit rate 1024
load-interval 30
authentication control-direction in
authentication event fail action next-method
authentication host-mode multi-auth
authentication order dot1x mab
authentication priority dot1x mab
authentication port-control auto
authentication periodic
authentication timer reauthenticate server
authentication timer inactivity 65535
authentication violation restrict
authentication fallback dot1x
mab
snmp trap mac-notification change added
snmp trap mac-notification change removed
no snmp trap link-status
dot1x pae authenticator
dot1x timeout quiet-period 5
dot1x timeout server-timeout 10
dot1x timeout tx-period 5
spanning-tree portfast
spanning-tree bpduguard enable
ip dhcp snooping limit rate 100
end
'''}
golden_parsed_output1 = {
'interfaces': {
'GigabitEthernet0': {
'description': '"Boot lan interface"',
'ipv4': {
'ip': '10.1.21.249',
'netmask': '255.255.255.0',
},
'negotiation_auto': True,
'vrf': 'Mgmt-intf',
},
},
}
golden_output1 = {'execute.return_value': '''\
#show running-config interface GigabitEthernet0
Building configuration...
Current configuration : 150 bytes
!
interface GigabitEthernet0
description "Boot lan interface"
vrf forwarding Mgmt-intf
ip address 10.1.21.249 255.255.255.0
negotiation auto
end
'''}
golden_parsed_output2 = {
'interfaces': {
'Port-channel1.100': {
'encapsulation_dot1q': '201',
'ipv4': {
'ip': '202.0.0.1',
'netmask': '255.255.255.0',
},
'ipv6': ['2002::1/112'],
'ipv6_ospf': {
'1': {
'area': '0',
},
},
},
},
}
golden_output2 = {'execute.return_value': '''\
interface Port-channel1.100
encapsulation dot1Q 201
ip address 202.0.0.1 255.255.255.0
ipv6 address 2002::1/112
ipv6 ospf 1 area 0
end
'''}
golden_parsed_output3 = {
'interfaces': {
'GigabitEthernet0/0/3': {
'ip_ospf': {
'2': {
'area': '0',
},
},
'ipv4': {
'ip': '99.99.110.1',
'netmask': '255.255.255.0',
},
'ipv6': ['2003::1/112'],
'ipv6_ospf': {
'1': {
'area': '0',
},
},
'negotiation_auto': True,
},
},
}
golden_output3 = {'execute.return_value': '''\
interface GigabitEthernet0/0/3
ip address 99.99.110.1 255.255.255.0
ip ospf 2 area 0
negotiation auto
ipv6 address 2003::1/112
ipv6 ospf 1 area 0
end
'''}
golden_parsed_output4 = {
'interfaces': {
'GigabitEthernet0/0/0.101': {
'encapsulation_dot1q': '101',
'ipv4': {
'ip': '201.0.0.1',
'netmask': '255.255.255.0',
},
'ipv6': ['2001::1/112'],
'ipv6_enable': True,
'ipv6_ospfv3': {
'1': {
'area': '0',
},
},
'vrf': 'VRF1',
},
},
}
golden_output4 = {'execute.return_value': '''\
interface GigabitEthernet0/0/0.101
encapsulation dot1Q 101
vrf forwarding VRF1
ip address 201.0.0.1 255.255.255.0
ipv6 address 2001::1/112
ipv6 enable
ospfv3 1 ipv6 area 0
end
'''}
golden_parsed_output5 = {
'interfaces': {
'Loopback1': {
'ipv4': {
'ip': '200.1.0.2',
'netmask': '255.255.255.0',
},
'ipv6': ['1:1:1::1/64', '2000:1::2/112'],
'vrf': 'VRF1',
},
},
}
golden_output5 = {'execute.return_value': '''\
interface Loopback1
vrf forwarding VRF1
ip address 200.1.0.2 255.255.255.0
ipv6 address 1:1:1::1/64
ipv6 address 2000:1::2/112
end
'''}
golden_parsed_output6 = {
'interfaces': {
'GigabitEthernet0/0/0': {
'carrier_delay': ['up 60', 'down 60'],
'ipv6': ['1::1/112'],
'negotiation_auto': True,
},
},
}
golden_output6 = {'execute.return_value': '''\
interface GigabitEthernet0/0/0
no ip address
carrier-delay up 60
carrier-delay down 60
negotiation auto
ipv6 address 1::1/112
end
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowRunInterface(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='GigabitEthernet0/0/0')
def test_golden(self):
self.device1 = Mock(**self.golden_output)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='Gi1/0/13')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden1(self):
self.device1 = Mock(**self.golden_output1)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='GigabitEthernet0')
self.assertEqual(parsed_output,self.golden_parsed_output1)
def test_golden2(self):
self.device1 = Mock(**self.golden_output2)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='Port-channel1.100')
self.assertEqual(parsed_output,self.golden_parsed_output2)
def test_golden3(self):
self.device1 = Mock(**self.golden_output3)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='GigabitEthernet0/0/3')
self.assertEqual(parsed_output,self.golden_parsed_output3)
def test_golden4(self):
self.device1 = Mock(**self.golden_output4)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='GigabitEthernet0/0/0.101')
self.assertEqual(parsed_output,self.golden_parsed_output4)
def test_golden5(self):
self.device1 = Mock(**self.golden_output5)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='Loopback1')
self.assertEqual(parsed_output,self.golden_parsed_output5)
def test_golden6(self):
self.device1 = Mock(**self.golden_output6)
obj = ShowRunInterface(device=self.device1)
parsed_output = obj.parse(interface='GigabitEthernet0/0/0')
self.assertEqual(parsed_output,self.golden_parsed_output6)
if __name__ == '__main__':
unittest.main()
|
import pigpio
import time
import cwiid
import os
import sys
from timeout import timeout, TimeoutError
pi = pigpio.pi()
is_debug = "debug" in sys.argv
class Skateboard(object):
motor = 18
led = 17
button = 27
min_speed = 2000
max_speed = 1000
servo_smooth = 1.8
smooth_sleep = 0.005
accel_sleep = 0.08
def __init__(self):
pi.set_PWM_frequency(Skateboard.motor, 50)
pi.set_mode(Skateboard.led, pigpio.OUTPUT)
pi.set_mode(Skateboard.button, pigpio.INPUT)
pi.set_pull_up_down(Skateboard.button, pigpio.PUD_UP)
self.__speed = 1500
self.speed=1500
@property
def speed(self):
return self.__speed
@speed.setter
def speed(self, value):
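        # Clamp to the servo's pulse-width range, then ramp toward the target in
        # servo_smooth-microsecond steps so the ESC never sees an abrupt jump.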
value = max(min(value, Skateboard.min_speed), Skateboard.max_speed)
while abs(value-self.__speed) > Skateboard.servo_smooth:
            direction = (value > self.__speed) - (value < self.__speed)  # cmp() equivalent; works on Python 2 and 3
self.__speed += direction * Skateboard.servo_smooth
pi.set_servo_pulsewidth(Skateboard.motor, self.__speed)
time.sleep(Skateboard.smooth_sleep)
pi.set_servo_pulsewidth(Skateboard.motor, value)
self.__speed = value
time.sleep(Skateboard.accel_sleep)
def blinky(self,times,period):
        for i in range(times):
pi.write(self.led,1)
time.sleep(period)
pi.write(self.led,0)
time.sleep(period)
def connection_process(self):
connected = False
while not connected:
self.blinky(5,0.4)
try:
                self.wii = cwiid.Wiimote(bdaddr="bluetooth address here")  # e.g. 00:1F:C5:86:3E:85
connected = True
self.blinky(40,0.03)
self.wii.rpt_mode = cwiid.RPT_BTN
self.wii.rumble = 1
time.sleep(1)
self.wii.rumble = 0
except RuntimeError:
pass
def run_process(self):
pi.write(self.led, 1)
self.get_status()
if self.status_button:
self.wii.rumble=1
time.sleep(2)
self.wii.rumble=0
raise RuntimeError("Status Button")
if (self.buttons & cwiid.BTN_B):
self.speed = 1500
time.sleep(0.5)
if (self.buttons & cwiid.BTN_DOWN):
self.speed += 1
if (self.buttons & cwiid.BTN_UP):
self.speed -= 1
if (self.buttons & cwiid.BTN_A):
self.speed = 1000
if (self.buttons & cwiid.BTN_PLUS):
Skateboard.accel_sleep += 0.025
time.sleep(0.5)
if Skateboard.accel_sleep >= 0.1:
Skateboard.accel_sleep = 0.1
print(Skateboard.accel_sleep)
if (self.buttons & cwiid.BTN_MINUS):
Skateboard.accel_sleep -= 0.025
time.sleep(0.5)
if Skateboard.accel_sleep <= 0:
Skateboard.accel_sleep = 0
print(Skateboard.accel_sleep)
@timeout(0.4)
def get_status(self):
self.buttons = self.wii.state['buttons']
self.status_button = not pi.read(Skateboard.button)
### Main Program ###
skate = Skateboard()
skate.blinky(20,0.05)
skate.connection_process()
while True:
try:
skate.run_process()
# print(skate.speed)
except KeyboardInterrupt:
raise
except:
skate.speed = 1500
if is_debug:
raise
else:
os.system("poweroff")
|
"""
Motion Decoder model for OmniDet.
# author: Varun Ravi Kumar <rvarun7777@gmail.com>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import numpy as np
import torch
import torch.nn as nn
from models.normnet_decoder import PixelShuffleICNR, conv3x3
from models.semantic_decoder import convblock
class MotionDecoder(nn.Module):
def __init__(self, num_ch_enc, n_classes=2, siamese_net=False):
super().__init__()
self.n_classes = n_classes
self.num_ch_enc = num_ch_enc # [64, 64, 128, 256, 512]
# [64, 64, 128, 256, 512] for motion_decoder and [128, 128, 256, 512, 1024] for siamese net
self.num_ch_enc = num_ch_enc if not siamese_net else self.num_ch_enc * 2
self.num_ch_dec = np.array([16, 32, 64, 128, 256]) if not siamese_net else np.array([16, 32, 64, 128, 256]) * 2
# decoder
self.upconv_4_0 = convblock(self.num_ch_enc[-1], self.num_ch_dec[4])
self.upconv_4_1 = convblock(self.num_ch_dec[4] + self.num_ch_enc[3], self.num_ch_dec[4])
self.upconv_3_0 = convblock(self.num_ch_dec[4], self.num_ch_dec[3])
self.upconv_3_1 = convblock(self.num_ch_dec[3] + self.num_ch_enc[2], self.num_ch_dec[3])
self.upconv_2_0 = convblock(self.num_ch_dec[3], self.num_ch_dec[2])
self.upconv_2_1 = convblock(self.num_ch_dec[2] + self.num_ch_enc[1], self.num_ch_dec[2])
self.upconv_1_0 = convblock(self.num_ch_dec[2], self.num_ch_dec[1])
self.upconv_1_1 = convblock(self.num_ch_dec[1] + self.num_ch_enc[0], self.num_ch_dec[1])
self.upconv_0_0 = convblock(self.num_ch_dec[1], self.num_ch_dec[0])
self.upconv_0_1 = convblock(self.num_ch_dec[0], self.num_ch_dec[0])
self.motion_conv_0 = conv3x3(self.num_ch_dec[0], self.n_classes)
self.shuffle_conv_4_0 = PixelShuffleICNR(self.num_ch_dec[4], self.num_ch_dec[4] * 4)
self.shuffle_conv_3_0 = PixelShuffleICNR(self.num_ch_dec[3], self.num_ch_dec[3] * 4)
self.shuffle_conv_2_0 = PixelShuffleICNR(self.num_ch_dec[2], self.num_ch_dec[2] * 4)
self.shuffle_conv_1_0 = PixelShuffleICNR(self.num_ch_dec[1], self.num_ch_dec[1] * 4)
self.shuffle_conv_0_0 = PixelShuffleICNR(self.num_ch_dec[0], self.num_ch_dec[0] * 4)
def forward(self, input_features):
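        # U-Net-style decoding: each stage upsamples with PixelShuffleICNR and
        # concatenates the matching encoder feature map as a skip connection.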
outputs = dict()
x = input_features[-1]
x = self.upconv_4_0(x)
x = self.shuffle_conv_4_0(x)
x = torch.cat((x, input_features[3]), dim=1)
x = self.upconv_4_1(x)
x = self.upconv_3_0(x)
x = self.shuffle_conv_3_0(x)
x = torch.cat((x, input_features[2]), dim=1)
x = self.upconv_3_1(x)
x = self.upconv_2_0(x)
x = self.shuffle_conv_2_0(x)
x = torch.cat((x, input_features[1]), dim=1)
x = self.upconv_2_1(x)
x = self.upconv_1_0(x)
x = self.shuffle_conv_1_0(x)
x = torch.cat((x, input_features[0]), dim=1)
x = self.upconv_1_1(x)
x = self.upconv_0_0(x)
x = self.shuffle_conv_0_0(x)
if torch.onnx.is_in_onnx_export():
return self.motion_conv_0(x)
else:
outputs[("motion", 0)] = self.motion_conv_0(x)
return outputs
|
from argparse import ArgumentParser
from threading import Lock
from queue import Queue
from smartmirror.messages_handler import MessagesHandler
from smartmirror.ui_thread import UiThread
from smartmirror.uc_thread import UcThread
from smartmirror.Logger import Logger, init_logger
"""
Init program properties
- parse command-line arguments
- initialize the Logger to write to a file and to standard output
"""
def init_properties():
parser = ArgumentParser(
prog='smartmirror',
description='Smart Mirror program',
epilog='more detailed information in README.md file https://github.com/not4juu/SmartMirror'
)
parser.add_argument('-v', '--verbose', action='count', help='show verbose logs on console')
parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
init_logger(logs_to_file=True, verbose=args.verbose)
Logger.debug('Initialization of properties finish successfully')
"""
Init program threads:
- user interface thread
- user command thread
"""
def init_program_threads():
message_queue = Queue()
message_locker = Lock()
message_handler = MessagesHandler(messages_queue=message_queue, messages_locker=message_locker)
main_ui_thread = UiThread(messages_handler=message_handler)
main_ui_thread.start()
main_uc_thread = UcThread(messages_handler=message_handler)
main_uc_thread.start()
message_queue.join()
Logger.debug('Threads starts successfully')
"""
Main function called by the program
"""
def main():
init_properties()
init_program_threads()
if __name__ == "__main__":
main()
Logger.info(__name__ + " ends")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
app.modules.auth.permissions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Permission definitions and mapping module. It establishes the default
role-to-permission relations; after changing them, run
`inv app.db.update-app-permissions`
to refresh the permission relations.
"""
class ROLES:
    """Role field definitions"""
SuperUser = "SuperUser"
User = "User"
UserManager = "UserManager"
EmailTemplateManager = "EmailTemplateManager"
# End Of ROLES
class PERMISSIONS:
    """Permission field definitions"""
SuperUser = "SuperPrivilege"
User = "UserPrivilege"
# UserManager
GroupAdd = "GroupAddPrivilege"
GroupDelete = "GroupDeletePrivilege"
GroupEdit = "GroupEditPrivilege"
GroupQuery = "GroupQueryPrivilege"
UserEdit = "UserEditPrivilege"
# EmailTemplateManager
EmailTemplateAdd = "EmailTemplateAddPrivilege"
EmailTemplateEdit = "EmailTemplateEditPrivilege"
EmailTemplateDelete = "EmailTemplateDeletePrivilege"
EmailTemplateQuery = "EmailTemplateQueryPrivilege"
# End Of PERMISSIONS
# Default role-to-permission mapping
DEFAULT_ROLES_PERMISSIONS_MAPPING = {
ROLES.SuperUser: [
PERMISSIONS.SuperUser,
PERMISSIONS.User,
        # User management
PERMISSIONS.GroupAdd,
PERMISSIONS.GroupDelete,
PERMISSIONS.GroupEdit,
PERMISSIONS.GroupQuery,
PERMISSIONS.UserEdit,
        # Email template management
PERMISSIONS.EmailTemplateAdd,
PERMISSIONS.EmailTemplateDelete,
PERMISSIONS.EmailTemplateEdit,
PERMISSIONS.EmailTemplateQuery,
# End Of SuperUser
],
ROLES.UserManager: [
PERMISSIONS.GroupAdd,
PERMISSIONS.GroupDelete,
PERMISSIONS.GroupEdit,
PERMISSIONS.GroupQuery,
PERMISSIONS.UserEdit,
],
ROLES.EmailTemplateManager: [
PERMISSIONS.EmailTemplateAdd,
PERMISSIONS.EmailTemplateDelete,
PERMISSIONS.EmailTemplateEdit,
PERMISSIONS.EmailTemplateQuery,
],
# End Of Permissions Mapping
}
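# A minimal usage sketch (illustrative only, not part of the original module):
# the mapping can be walked to answer "which permissions does a role grant?".
# The helper name below is hypothetical.
def permissions_for_role(role_name):
    """Return the permission list for a role, or an empty list if unknown."""
    return DEFAULT_ROLES_PERMISSIONS_MAPPING.get(role_name, [])
# Example: permissions_for_role(ROLES.UserManager) -> the group/user privileges.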
|
from time import sleep
from time import time
import threading
import pygame
import random
import subprocess
import hashlib
import datetime
from Security.SerialReader import SerialReader
from Security.PhoneDetect import PhoneDetect
from Security.Camera import Camera
from Messenger.Messenger import Messenger
class Security:
armed = False
alert = False
key1 = False
key2 = False
statusModify = 0
camera = ""
firstMotion = 0
lastAlert = 0
armedTime = 0
lastMessageSent = 0
def __init__(self):
self.camera = Camera()
self.key1 = PhoneDetect.key1
self.key2 = PhoneDetect.key2
if self.key1==False and self.key2==False:
self.armed = True
self.camera.open()
else:
self.camera.close()
while True:
timestamp = int(time())
if PhoneDetect.key1!=self.key1:
self.statusModify = timestamp
self.key1 = PhoneDetect.key1
if PhoneDetect.key2!=self.key2:
self.statusModify = timestamp
self.key2 = PhoneDetect.key2
if self.statusModify == timestamp:
if self.key1==False and self.key2==False:
self.armed = True
self.armedTime = timestamp
self.camera.open()
else:
if self.armed:
if self.armedTime < timestamp - 600:
                        Messenger.sendMessage("", "Alarm disarmed")
self.armed = False
self.armedTime = 0
self.camera.close()
if self.armed and SerialReader.motion:
if self.armedTime == timestamp-600:
                    Messenger.sendMessage("", "Alarm active!")
if self.armedTime < timestamp-600:
if self.lastAlert < timestamp-10:
self.lastAlert = timestamp
self.mp3(["mp3/alert.mp3"])
delta = datetime.datetime.now() + datetime.timedelta(0, 1)
img = "webc_" + delta.strftime("%Y-%m-%d_%H-%M-%S") + ".png"
md5text = "showDreyaImage" + img
md5 = hashlib.md5(md5text.encode()).hexdigest()
                        Messenger.sendMessage("", "Motion detected in the apartment! http://remote.host.com/watchtheimage.php?id=" + md5)
self.upload()
sleep(1)
def mp3(self, mp3s):
mp3 = random.choice(mp3s)
# pygame.init()
# pygame.mixer.init()
# pygame.mixer.music.load(mp3)
# pygame.mixer.music.play()
def upload(self):
threading.Thread(target = upload).start()
def upload():
bashCommand = "/Dreya/upload.sh"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
|
import numpy as np
import matplotlib.pyplot as plt
plt.figure()
data = np.loadtxt("temperaturas.dat")
plt.imshow(data)  # assumption: temperaturas.dat holds a 2D temperature field; use plt.plot(data) if it is 1D
plt.savefig("calor.png")
plt.show()
|
import django.forms as forms
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib import admin
from django.template import Template, Context, RequestContext
from polymorphic.admin import (
PolymorphicParentModelAdmin, PolymorphicChildModelAdmin, PolymorphicChildModelFilter,
)
from reversion.admin import VersionAdmin
from website.apps.core.admin import TrackedModelAdmin
from website.apps.survey.models import Question, OptionQuestion, Section
from website.apps.survey.models import (
Response, OptionResponse, FloatResponse, IntegerResponse, TextResponse,
)
class QuestionChildAdmin(TrackedModelAdmin, VersionAdmin, PolymorphicChildModelAdmin):
base_model = Question
class CategoryForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
section = forms.ModelChoiceField(Section.objects)
subsection = forms.ModelChoiceField(Section.objects)
def change_category(request, queryset, message_user):
form = None
if 'cancel' in request.POST:
message_user(request, 'Canceled change section(s)')
return
elif 'change' in request.POST:
# do the categorization
form = CategoryForm(request.POST)
if form.is_valid():
section = form.cleaned_data['section']
subsection = form.cleaned_data['subsection']
for link in queryset:
                link.section = section
                link.subsection = subsection
link.save()
message_user(
request,
Template('Successfully changed section(s) and subsection(s)').render(
Context({'count': queryset.count(), 'category': section})))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = CategoryForm(
initial={
'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render_to_response(
'admin/change.html',
RequestContext(
request,
{'links': queryset, 'form': form, 'path': request.get_full_path()}))
class NumberForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
offset = forms.IntegerField()
def question_number(request, queryset, message_user):
form = None
if 'cancel' in request.POST:
message_user(request, 'Canceled changing question number')
return
elif 'change' in request.POST:
form = NumberForm(request.POST)
if form.is_valid():
offset = form.cleaned_data['offset']
for link in queryset:
link.number += offset
link.save()
message_user(
request,
Template('Successfully changed question numbers').render(
Context({'count': queryset.count()})))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = NumberForm(
initial={
'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render_to_response(
'admin/categorize.html',
RequestContext(
request,
{'links': queryset, 'form': form, 'path': request.get_full_path()}))
class QuestionAdmin(TrackedModelAdmin, VersionAdmin, PolymorphicParentModelAdmin):
actions = ['changeCategory', 'questionNumber']
base_model = Question
list_filter = (PolymorphicChildModelFilter,)
ordering = ('number',)
child_models = [
(Question, QuestionChildAdmin),
(OptionQuestion, QuestionChildAdmin),
]
def questionNumber(self, request, queryset): # pragma: no cover
return question_number(request, queryset, self.message_user)
questionNumber.short_description = 'Change question number(s)'
def changeCategory(self, request, queryset): # pragma: no cover
return change_category(request, queryset, self.message_user)
changeCategory.short_description = 'Change section'
class ResponseAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_filter = ('author', 'question', 'culture')
ordering = ('id',)
search_fields = ('codersnotes',)
list_display = ('question', 'culture', 'author')
class FloatResponseAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_filter = ('author', 'question', 'culture')
ordering = ('id',)
search_fields = ('codersnotes',)
list_display = ('question', 'culture', 'author', 'response')
class IntegerResponseAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_filter = ('author', 'question', 'culture')
ordering = ('id',)
search_fields = ('codersnotes',)
list_display = ('question', 'culture', 'author', 'response')
class TextResponseAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_filter = ('author', 'question', 'culture')
ordering = ('id',)
search_fields = ('codersnotes',)
list_display = ('question', 'culture', 'author', 'response')
class OptionResponseAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_filter = ('author', 'question', 'culture')
ordering = ('id',)
search_fields = ('codersnotes',)
list_display = ('question', 'culture', 'author', 'response')
admin.site.register(Response, ResponseAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(FloatResponse, FloatResponseAdmin)
admin.site.register(IntegerResponse, IntegerResponseAdmin)
admin.site.register(TextResponse, TextResponseAdmin)
admin.site.register(OptionResponse, OptionResponseAdmin)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .dedicated_cloud_node import *
from .dedicated_cloud_service import *
from .get_dedicated_cloud_node import *
from .get_dedicated_cloud_service import *
from .get_virtual_machine import *
from .virtual_machine import *
from ._inputs import *
from . import outputs
|
"""Parser for loading config file
"""
import configparser
import os.path
from utils.consts import *
def load_configs(config_file, config_section, config_keys):
"""Loads configurations from config file based on given config section and values
Arguments:
        config_file {string} -- path to the configuration file
        config_section {string} -- configuration section to read
        config_keys {string[]} -- list of keys to read from the section
Returns:
dict<string, string> -- map containing configuration for given section
"""
if not os.path.isfile(config_file):
return None
config = configparser.ConfigParser()
config.read(config_file)
if config_section not in config:
return None
for key in config_keys:
if key not in config[config_section]:
print(key)
return None
return config[config_section]
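# Hedged usage sketch (the file name, section, and keys below are assumptions,
# not part of the original project):
#
#     settings = load_configs("app.cfg", "database", ["host", "port"])
#     if settings is None:
#         raise SystemExit("missing or incomplete configuration")
#     print(settings["host"], settings["port"])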
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 19:28:35 2021
@author: Arya
"""
# imports
import cv2
import glob
import skimage.transform as trans
import numpy as np
import model
from tensorflow.keras.models import save_model
# loading data
x_train = []
y_train = []
x_test = []
y_test = []
path = "D:/BTproject/Dataset/no/"
images = glob.glob(path + "*.jpg")
images.sort()
for x in images:
image = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
image = image / 255
image = trans.resize(image,(256,256))
y_train.append(0)
x_train.append(image)
path = "D:/BTproject/Dataset/yes/"
images = glob.glob(path + "*.jpg")
images.sort()
for x in images:
image = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
image = image / 255
image = trans.resize(image,(256,256))
y_train.append(1)
x_train.append(image)
path = "D:/BTproject/Dataset/test/no/"
images = glob.glob(path + "*.jpg")
images.sort()
for x in images:
image = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
image = image / 255
image = trans.resize(image,(256,256))
y_test.append(0)
x_test.append(image)
path = "D:/BTproject/Dataset/test/yes/"
images = glob.glob(path + "*.jpg")
images.sort()
for x in images:
image = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
image = image / 255
image = trans.resize(image,(256,256))
y_test.append(1)
x_test.append(image)
del(x, image, images, path)
# preparing data
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = np.reshape(x_train, (len(x_train), 256, 256, 1))
x_test = np.reshape(x_test, (len(x_test), 256, 256, 1))
# train model
BTcnn = model.cnn_bt()
BTcnn.summary()
BTcnn.fit(x_train,
y_train,
epochs=15,
batch_size=4,
verbose=1)
save_model(BTcnn, 'BTcnnModel.h5')
# Evaluate the model on test set
score = BTcnn.evaluate(x_test,
y_test,
verbose=0)
# Print test accuracy
print('\n', 'Test accuracy:', score[1])
|
from .xpath_condition import XPathCondition
from .xpath_condition_equals import XPathConditionEquals
from .xpath_condition_contains import XPathConditionContains
from .xpath_condition_starts_with import XPathConditionStartsWith
from .xpath_attribute_value import XpathAttributeValue
from .xpath_attribute_value_equals import XpathAttributeValueEquals
from .xpath_attribute_value_contains import XpathAttributeValueContains
from .xpath_attribute_value_starts_with import XpathAttributeValueStartsWith
from .enums import *
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from keystoneauth1 import session as ksa_session
import mock
import os_client_config
from openstack import connection
from openstack import exceptions
from openstack import profile
from openstack import session
from openstack.tests.unit import base
CONFIG_AUTH_URL = "http://127.0.0.1:5000/v2.0"
CONFIG_USERNAME = "BozoTheClown"
CONFIG_PASSWORD = "TopSecret"
CONFIG_PROJECT = "TheGrandPrizeGame"
CONFIG_CACERT = "TrustMe"
CLOUD_CONFIG = """
clouds:
sample:
region_name: RegionOne
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
insecure:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: True
cacert:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: False
""".format(auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME,
password=CONFIG_PASSWORD, project=CONFIG_PROJECT,
cacert=CONFIG_CACERT)
class TestConnection(base.TestCase):
@mock.patch("openstack.session.Session")
def test_other_parameters(self, mock_session_init):
mock_session_init.return_value = mock_session_init
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(profile=mock_profile, authenticator='2',
verify=True, cert='cert', user_agent='1')
args = {'auth': '2', 'user_agent': '1', 'verify': True, 'cert': 'cert'}
mock_session_init.assert_called_with(mock_profile, **args)
self.assertEqual(mock_session_init, conn.session)
def test_session_provided(self):
mock_session = mock.Mock(spec=session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(session=mock_session,
profile=mock_profile,
user_agent='1')
self.assertEqual(mock_session, conn.session)
def test_ksa_session_provided(self):
mock_session = mock.Mock(spec=ksa_session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
self.assertRaises(exceptions.SDKException, connection.Connection,
session=mock_session, profile=mock_profile,
user_agent='1')
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_create_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_loader = mock.Mock()
mock_options = [
mock.Mock(dest="auth_url"),
mock.Mock(dest="password"),
mock.Mock(dest="username"),
]
mock_loader.get_options = mock.Mock(return_value=mock_options)
mock_loader.load_from_options = mock.Mock(return_value=mock_plugin)
mock_get_plugin.return_value = mock_loader
auth_args = {
'auth_url': '0',
'username': '1',
'password': '2',
}
conn = connection.Connection(auth_plugin='v2password', **auth_args)
mock_get_plugin.assert_called_with('v2password')
mock_loader.load_from_options.assert_called_with(**auth_args)
self.assertEqual(mock_plugin, conn.authenticator)
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_default_plugin(self, mock_get_plugin):
connection.Connection()
self.assertTrue(mock_get_plugin.called)
self.assertEqual(mock_get_plugin.call_args, mock.call("password"))
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_pass_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_get_plugin.return_value = None
conn = connection.Connection(authenticator=mock_plugin)
self.assertFalse(mock_get_plugin.called)
self.assertEqual(mock_plugin, conn.authenticator)
def test_create_session(self):
auth = mock.Mock()
prof = profile.Profile()
conn = connection.Connection(authenticator=auth, profile=prof)
self.assertEqual(auth, conn.authenticator)
self.assertEqual(prof, conn.profile)
self.assertEqual('openstack.telemetry.alarm.v2._proxy',
conn.alarm.__class__.__module__)
self.assertEqual('openstack.cluster.v1._proxy',
conn.cluster.__class__.__module__)
self.assertEqual('openstack.compute.v2._proxy',
conn.compute.__class__.__module__)
self.assertEqual('openstack.database.v1._proxy',
conn.database.__class__.__module__)
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
self.assertEqual('openstack.image.v2._proxy',
conn.image.__class__.__module__)
self.assertEqual('openstack.network.v2._proxy',
conn.network.__class__.__module__)
self.assertEqual('openstack.object_store.v1._proxy',
conn.object_store.__class__.__module__)
self.assertEqual('openstack.load_balancer.v2._proxy',
conn.load_balancer.__class__.__module__)
self.assertEqual('openstack.orchestration.v1._proxy',
conn.orchestration.__class__.__module__)
self.assertEqual('openstack.telemetry.v2._proxy',
conn.telemetry.__class__.__module__)
self.assertEqual('openstack.workflow.v2._proxy',
conn.workflow.__class__.__module__)
def _prepare_test_config(self):
# Create a temporary directory where our test config will live
# and insert it into the search path via OS_CLIENT_CONFIG_FILE.
config_dir = self.useFixture(fixtures.TempDir()).path
config_path = os.path.join(config_dir, "clouds.yaml")
with open(config_path, "w") as conf:
conf.write(CLOUD_CONFIG)
self.useFixture(fixtures.EnvironmentVariable(
"OS_CLIENT_CONFIG_FILE", config_path))
def test_from_config_given_data(self):
self._prepare_test_config()
data = os_client_config.OpenStackConfig().get_one_cloud("sample")
sot = connection.from_config(cloud_config=data)
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_name(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="sample")
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_options(self):
self._prepare_test_config()
version = "100"
class Opts(object):
compute_api_version = version
sot = connection.from_config(cloud_name="sample", options=Opts)
pref = sot.session.profile.get_filter("compute")
# NOTE: Along the way, the `v` prefix gets added so we can build
# up URLs with it.
self.assertEqual("v" + version, pref.version)
def test_from_config_verify(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="insecure")
self.assertFalse(sot.session.verify)
sot = connection.from_config(cloud_name="cacert")
self.assertEqual(CONFIG_CACERT, sot.session.verify)
def test_authorize_works(self):
fake_session = mock.Mock(spec=session.Session)
fake_headers = {'X-Auth-Token': 'FAKE_TOKEN'}
fake_session.get_auth_headers.return_value = fake_headers
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertEqual('FAKE_TOKEN', res)
def test_authorize_silent_failure(self):
fake_session = mock.Mock(spec=session.Session)
fake_session.get_auth_headers.return_value = None
fake_session.__module__ = 'openstack.session'
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertIsNone(res)
|
#!/usr/bin/env python3
import unittest
from text_to_image.utilities import convert_char_to_int
class ConvertCharToIntTestCase(unittest.TestCase):
def test_letters_to_8bit_value(self):
letters = sorted(("a", "b", "c", "i", "h", "o", "v", "x", "y", "q",
"A", "B", "C", "D", "I", "P", "Q", "R", "X", "Z", "T", "E", "F"))
for letter in letters:
self.assertEqual(ord(letter), convert_char_to_int(letter, limit=256))
def test_numbers_to_8bit_value(self):
for no in range(0, 10):
self.assertEqual(ord(str(no)), convert_char_to_int(str(no), limit=256))
def test_last_char_value_for_7_8_16bit_numbers(self):
self.assertEqual(1, convert_char_to_int(chr(128), limit=128))
self.assertEqual(1, convert_char_to_int(chr(256), limit=256))
self.assertEqual(1, convert_char_to_int(chr(65536), limit=65536))
    def test_raises_error_when_char_length_not_1(self):
self.assertRaises(TypeError, convert_char_to_int, "abc")
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import random
random.seed(1) # comment-out this line to change sequence each time
# Write a program that stores random DNA sequence in a string
# The sequence should be 30 nt long
# On average, the sequence should be 60% AT
# Calculate the actual AT fraction while generating the sequence
# Report the length, AT fraction, and sequence
#create a variable for DNA
# every time the loop runs, I need to add an A/T or a G/C base
"""
for i in range(30):
r = random.random()
if r >0.6:
print(i,'G')
elif r<0.6:
print(i,'A')
else:
print(i,'?')
#Not good: if r were exactly 0.6 neither branch would print a base; it only works here because r never happens to be exactly 0.6.
Another workable solution to this problem:
import random
random.seed(1)
dna=""
for i in range(30):
r = random.random()
if r < 0.3: dna += 'A' #A is 30% while T is 30% so AT is 60%
elif r < 0.6: dna += 'T'
elif r < 0.8: dna += 'C'
else : dna += 'G'
print(dna)
"""
dna =""
for i in range(30):
    if random.random() > 0.6:  # 40% of the time add G/C, so the sequence averages 60% AT
        if random.random() > 0.5:  # decide whether it is going to be G or C
            dna += 'G'
        else:
            dna += 'C'
    else:
        if random.random() > 0.5:  # decide whether it is going to be A or T
            dna += 'A'  # clearer than the commented-out version above
        else:
            dna += 'T'
at = 0
for i in range(len(dna)):
if dna[i] == 'A': at += 1
elif dna[i] == 'T': at += 1
print(len(dna),at/len(dna))
print(dna)
"""
python3 22atseq.py
30 0.6666666666666666 ATTACCGTAATCTACTATTAAGTCACAACC
"""
|
# -*- coding: utf-8 -*-
"""
Copyright () 2019
All rights reserved
FILE: duration_calculate.py
AUTHOR: tianyuningmou
DATE CREATED: @Time : 2019-06-23 15:49
DESCRIPTION: .
VERSION: : #1
CHANGED By: : tianyuningmou
CHANGE: :
MODIFIED: : @Time : 2019-06-23 15:49
"""
import datetime
from math import *
class DurationCalculate(object):
"""
    Algorithms for summing multiple time intervals, using two pointers; O(n) time, O(1) space
"""
@classmethod
def duration_calculate_to_seconds(cls, durations, date_time):
"""
        Sum multiple time intervals, counting overlaps only once, and convert to seconds
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
:return:
"""
return cls.duration_calculate(durations, date_time).seconds
@classmethod
def duration_calculate_to_minutes(cls, durations, date_time, *args):
"""
        Sum multiple time intervals, counting overlaps only once, and convert to minutes
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
        :param args: rounding mode --> floor, round, ceil
:return:
"""
        if not args or args[0] == 'ceil':
return ceil(cls.duration_calculate(durations, date_time).seconds / 60.0)
elif args[0] == 'round':
return round(cls.duration_calculate(durations, date_time).seconds / 60.0)
elif args[0] == 'floor':
return floor(cls.duration_calculate(durations, date_time).seconds / 60.0)
else:
            return "Please pass 'ceil', 'round' or 'floor', or omit the argument"
@classmethod
def duration_calculate_to_hours(cls, durations, date_time, *args):
"""
        Sum multiple time intervals, counting overlaps only once, and convert to hours
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
        :param args: rounding mode --> floor, round, ceil
:return:
"""
        if not args or args[0] == 'ceil':
return ceil(cls.duration_calculate(durations, date_time).seconds / 3600.0)
elif args[0] == 'round':
return round(cls.duration_calculate(durations, date_time).seconds / 3600.0)
elif args[0] == 'floor':
return floor(cls.duration_calculate(durations, date_time).seconds / 3600.0)
else:
            return "Please pass 'ceil', 'round' or 'floor', or omit the argument"
@classmethod
def duration_calculate_to_days(cls, durations, date_time):
"""
        Sum multiple time intervals, counting overlaps only once, and convert to days
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
:return:
"""
return cls.duration_calculate(durations, date_time).days
@classmethod
def duration_calculate_to_months(cls, durations, date_time, *args):
"""
        Sum multiple time intervals, counting overlaps only once, and convert to months
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
        :param args: rounding mode --> floor, round, ceil
:return:
"""
        if not args or args[0] == 'ceil':
return ceil(cls.duration_calculate_to_days(durations, date_time) / 30.0)
elif args[0] == 'round':
return round(cls.duration_calculate_to_days(durations, date_time) / 30.0)
elif args[0] == 'floor':
return floor(cls.duration_calculate_to_days(durations, date_time) / 30.0)
else:
            return "Please pass 'ceil', 'round' or 'floor', or omit the argument"
@classmethod
def duration_calculate(cls, durations, date_time=None):
"""
        Core algorithm for summing the time intervals
        :param durations:
            example: [[t1, t2], [t3, t4], ...]
        :param date_time: cut-off time; optional
:return:
"""
if len(durations) == 1:
return durations[0][1] - durations[0][0]
start = 0
end = 1
        duration = durations[0][0] - durations[0][0]  # zero of the matching type (0 for numbers, timedelta(0) for datetimes)
if not date_time:
if isinstance(durations[0][0], datetime.datetime):
date_time = datetime.datetime.now()
else:
                raise IOError('Please provide a cut-off value for date_time')
while end < len(durations):
if durations[start][0] > date_time:
break
if durations[start][1] > date_time:
duration += date_time - durations[start][0]
break
else:
if durations[start][1] > durations[end][1]:
end += 1
elif durations[end][0] < durations[start][1] < durations[end][1]:
duration += durations[end][0] - durations[start][0]
if end == len(durations) - 1:
duration += durations[end][1] - durations[end][0]
start = end
end += 1
else:
duration += durations[start][1] - durations[start][0]
if end == len(durations) - 1:
duration += durations[end][1] - durations[end][0]
start = end
end += 1
return duration
if __name__ == '__main__':
print(DurationCalculate.duration_calculate([[1, 5], [2, 3], [6, 9]], date_time=10))
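    # A second, hedged example with datetime intervals (values are illustrative):
    # the overlap between 10:00-10:30 and 10:20-10:50 is counted only once,
    # giving 50 minutes = 3000 seconds.
    # t = datetime.datetime
    # print(DurationCalculate.duration_calculate_to_seconds(
    #     [[t(2019, 6, 23, 10, 0), t(2019, 6, 23, 10, 30)],
    #      [t(2019, 6, 23, 10, 20), t(2019, 6, 23, 10, 50)]],
    #     date_time=t(2019, 6, 23, 11, 0)))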
|
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
def send_email(user, request, mail_subject, message_template):
current_site = get_current_site(request)
message = render_to_string(message_template, {
'user': user,
'domain': current_site.domain,
'pk': user.id,
'token': default_token_generator.make_token(user),
})
EmailMessage(
mail_subject,
message,
to=(user.email,)
).send()
|
from __future__ import absolute_import
try:
import tensorflow as tf
except ImportError:
# The lack of tensorflow will be caught by the low-level routines.
pass
class Pyramid(object):
"""A tensorflow representation of a transform domain signal.
An interface-compatible version of
:py:class:`dtcwt.Pyramid` where the initialiser
arguments are assumed to be :py:class:`tf.Variable` instances.
The attributes defined in :py:class:`dtcwt.Pyramid`
are implemented via properties. The original tf arrays may be accessed
via the ``..._op(s)`` attributes.
.. py:attribute:: lowpass_op
A tensorflow tensor that can be evaluated in a session to return
the coarsest scale lowpass signal for the input, X.
    .. py:attribute:: highpasses_ops
A tuple of tensorflow tensors, where each element is the complex
subband coefficients for corresponding scales finest to coarsest.
.. py:attribute:: scales_ops
*(optional)* A tuple where each element is a tensorflow tensor
containing the lowpass signal for corresponding scales finest to
coarsest. This is not required for the inverse and may be *None*.
"""
def __init__(self, lowpass, highpasses, scales=None, numpy=False):
self.lowpass_op = lowpass
self.highpasses_ops = highpasses
self.scales_ops = scales
self.numpy = numpy
@property
def lowpass(self):
if not hasattr(self, '_lowpass'):
if self.lowpass_op is None:
self._lowpass = None
else:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._lowpass = sess.run(self.lowpass_op)
return self._lowpass
@property
def highpasses(self):
if not hasattr(self, '_highpasses'):
if self.highpasses_ops is None:
self._highpasses = None
else:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._highpasses = \
tuple(sess.run(x) for x in self.highpasses_ops)
return self._highpasses
@property
def scales(self):
if not hasattr(self, '_scales'):
if self.scales_ops is None:
self._scales = None
else:
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._scales = tuple(sess.run(x) for x in self.scales_ops)
return self._scales
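# Hedged usage sketch (assumes TensorFlow 1.x graph mode, which the
# Session-based properties above require; the shapes are arbitrary examples):
#   lowpass_var = tf.Variable(tf.zeros((32, 32)))
#   highpass_vars = (tf.Variable(tf.zeros((16, 16, 6), dtype=tf.complex64)),)
#   p = Pyramid(lowpass_var, highpass_vars)
#   p.lowpass  # opens a session, initialises variables and returns a numpy array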
|
#---------------------------
# Title: File orthogrid_op.py
#---------------------------
############################
# IMPORT MODULES
############################
import bpy
from bpy.types import (
Panel,
Operator,
)
from bpy.props import IntProperty, BoolProperty, StringProperty, PointerProperty
from . orthogrid_props import *
##############################
# OPERATORS
##############################
def add_orthogrid(self, context):
context = bpy.context
scn = context.scene
pos = scn.cursor.location
#gsx = scn.orthogrid_props.gridsize_x
    bpy.ops.mesh.primitive_grid_add(
        size=10,
        x_subdivisions=scn.orthogrid_props.gridsubdiv_x,
        y_subdivisions=scn.orthogrid_props.gridsubdiv_y,
        #view_align=False,
        enter_editmode=False,
        location=pos)
class SCENE_OT_add_orthogrid(Operator):
"""Create a new Mesh Object"""
bl_idname = "scene.add_orthogrid"
bl_label = "Add Grid Object"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
add_orthogrid(self, context)
context = bpy.context
scn = context.scene
        # shift the mesh in edit mode so the object origin sits at the bottom-left corner
bpy.ops.object.editmode_toggle()
bpy.ops.transform.translate(value=(0.5, 0, 0), proportional_size=1)
bpy.ops.transform.translate(value=(0, 0.5, 0), proportional_size=1)
bpy.ops.mesh.delete(type='ONLY_FACE')
bpy.ops.object.editmode_toggle()
# Add object options
obj = context.active_object
obj.show_axis = scn.orthogrid_props.show_axis
obj.show_texture_space = scn.orthogrid_props.show_space
obj.show_name = scn.orthogrid_props.show_name
obj.name = scn.orthogrid_props.grid_name
# array duplicate over x
bpy.ops.object.modifier_add(type='ARRAY')
obj.modifiers["Array"].name = "multi4x"
obj.modifiers["multi4x"].count = scn.orthogrid_props.gridsize_x
obj.modifiers["multi4x"].use_merge_vertices = True
obj.modifiers["multi4x"].show_on_cage = True
obj.modifiers["multi4x"].show_expanded = False
# array duplicate over y
bpy.ops.object.modifier_add(type='ARRAY')
obj.modifiers["Array"].name = "multi4y"
obj.modifiers["multi4y"].relative_offset_displace[0] = 0
obj.modifiers["multi4y"].relative_offset_displace[1] = 1
obj.modifiers["multi4y"].count = scn.orthogrid_props.gridsize_y
obj.modifiers["multi4y"].use_merge_vertices = True
obj.modifiers["multi4y"].show_on_cage = True
obj.modifiers["multi4y"].show_expanded = False
# apply modifiers
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="multi4x")
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="multi4y")
return {'FINISHED'}
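# Hedged usage sketch: registration normally happens elsewhere in the add-on
# (e.g. in its __init__.py), along the lines of:
#   bpy.utils.register_class(SCENE_OT_add_orthogrid)
#   bpy.ops.scene.add_orthogrid()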
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ContractResourceAssetAvailiabilityCodesCode(GenericTypeCode):
"""
ContractResourceAssetAvailiabilityCodes
From: http://hl7.org/fhir/asset-availability in valuesets.xml
This value set has asset availability codes.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://hl7.org/fhir/asset-availability
"""
codeset: FhirUri = "http://hl7.org/fhir/asset-availability"
class ContractResourceAssetAvailiabilityCodesCodeValues:
"""
To be completed
From: http://hl7.org/fhir/asset-availability in valuesets.xml
"""
Lease = ContractResourceAssetAvailiabilityCodesCode("lease")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 12:59:20 2018
@author: gamer
"""
from ale_python_interface import ALEInterface
from gym.spaces import Discrete, Box  # assumed source of the Discrete/Box spaces used below
import utils.env as utils
import numpy as np
import collections
OPTIONS = {"IMAGES_SIZE":(80,80)}
CROP = {"breakout":(32,10,8,8)}
class ALE(ALEInterface):
    def __init__(self, game_name, num_frames=4, render=True):
        # num_frames was referenced but never defined; it is added here as a
        # parameter, and the default of 4 stacked frames is an assumption.
        super(ALE, self).__init__()
        self.crop = CROP[game_name]
        self.num_frames = num_frames
        self.load_rom(game_name, render)
        self.load_params()
        self._actions_raw = self.getMinimalActionSet().tolist()
        self.action_space = Discrete(len(self._actions_raw))
        self.observation_space = Box(0, 255, OPTIONS["IMAGES_SIZE"] + (self.num_frames,))
def load_params(self):
self._start_lives = self.lives()
self._current_state = np.zeros(self.observation_space.shape)
def load_rom(self,rom_file,render):
self.setInt(str.encode('random_seed'), 123)
self.setFloat(str.encode('repeat_action_probability'), 0.0)
self.setBool(str.encode('sound'), False)
self.setBool(str.encode('display_screen'), render)
self.loadROM(str.encode("./roms/"+utils.game_name(rom_file)))
    def get_current_state(self):
        up, down, left, right = self.crop
        return utils.process_frame(self.getScreenRGB()[up:-down, left:-right], OPTIONS["IMAGES_SIZE"])
    def step(self, action):
        assert action in range(self.action_space.n), "Action not available"
        # map the Discrete index onto the ALE minimal action set
        reward = self.act(self._actions_raw[action])
state = self.get_current_state()
return state, reward, self.lives() != self._start_lives, None
def reset(self):
self.reset_game()
self.load_params()
return self.get_current_state()
def clone(self):
env = self.cloneSystemState()
env.params()
return env
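# Hedged usage sketch (assumes a Breakout ROM under ./roms/ and that gym is
# installed for the Discrete/Box spaces imported above):
#   env = ALE("breakout", num_frames=4, render=False)
#   state = env.reset()
#   state, reward, done, _ = env.step(env.action_space.sample())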
|
import collections
import csv
import os
import re
import subprocess
import sys
import tempfile
def rename_benchmark(bm):
    if "hd-d0" in bm:
        return "%02d-d0" % int(re.match(r"\(.+ ([0-9]+)\)", bm).group(1))
    elif "hd-d5" in bm:
        return "%02d-d5" % int(re.match(r"\(.+ ([0-9]+)\)", bm).group(1))
    elif "array-search" in bm:
        return "arraysearch-%02d" % int(re.match(r"\(.+ ([0-9]+)\)", bm).group(1))
    elif "inversek2j-theta1" in bm:
        return "inversek2j-1"
    elif "inversek2j-theta2" in bm:
        return "inversek2j-2"
    elif "qm" in bm:
        return "%s" % re.match(r"\(qm (.+)\)", bm).group(1)
    else:
        return bm[1:-1]  # strip the surrounding parens of the s-expr
f = open("%s.out.csv" % sys.argv[1])
rdr = csv.DictReader(f)
f2 = open("%s.csv" % sys.argv[1], "w")
f2.write("\"benchmark\",\"group\",\"time\",\"timeout\"\n")
for row in rdr:
f2.write("%s,%s,%s,%s\n" % (rename_benchmark(row["benchmark"]), row["group"],
row["time"], row["timeout"]))
f2.close()
f.close()
fR = tempfile.NamedTemporaryFile(mode="w")
fR.write('''
library(ggplot2)
library(grid)
library(reshape2)
library(gridExtra)
library(scales)
df <- read.csv("%s")
df$timeout_label = sapply(df$timeout, function(x) if (x=="True") return("*") else return(""))
df$group <- factor(df$group, c("arraysearch", "qm", "hd-d0", "hd-d5", "parrot"))
levels(df$group)[levels(df$group)=="hd-d0"] <- "Hacker's Delight d0"
levels(df$group)[levels(df$group)=="hd-d5"] <- "Hacker's Delight d5"
levels(df$group)[levels(df$group)=="parrot"] <- "Parrot"
levels(df$group)[levels(df$group)=="arraysearch"] <- "Array Search"
levels(df$group)[levels(df$group)=="qm"] <- "CIA"
clean_names <- gsub("arraysearch-0(.)", "arraysearch-\\\\1", df$benchmark)
labels <- setNames(clean_names, df$benchmark)
print(labels)
p <- ggplot(df, aes(x=benchmark, y=time))
p <- p + geom_bar(stat="identity", fill="#356384", width=0.85)
p <- p + facet_grid(. ~ group, scales="free_x", space="free_x")
p <- p + geom_text(aes(label=timeout_label, x=benchmark, y=time+1), size=3)
p <- p + theme_bw(9)
p <- p + theme(plot.margin=unit(c(0.2, 0.2, 0, 0), "cm"))
p <- p + scale_y_log10(expand=c(0,0), breaks=c(10, 100, 1000, 10000), limits=c(1, 20000))
p <- p + scale_x_discrete(labels=labels)
p <- p + labs(x="Benchmark", y="Solving time (secs)")
p <- p + theme(legend.position="none")
p <- p + theme(axis.text.x=element_text(angle=90, vjust=0.5, hjust=1.0, size=5,margin=margin(0)))
p <- p + theme(strip.background=element_rect(fill="#eeeeee", size=0.4, colour="#aaaaaa"))
p <- p + theme(panel.border=element_rect(fill=NA, size=0.4, colour="#aaaaaa"))
p <- p + theme(axis.ticks.x=element_blank())
ggsave("./%s.pdf", p, width=7, height=2.16)
''' % (f2.name, sys.argv[1]))
fR.flush()
subprocess.check_call(["Rscript", fR.name])
fR.close()
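# Hedged usage sketch (the script name below is hypothetical): invoked as
#   python plot_benchmarks.py results
# it reads results.out.csv, writes the cleaned results.csv, and renders
# results.pdf via Rscript.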
|
import os
import os.path
import errno
################################################################
def create_dir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
################################################################
def file_exists(path):
return os.path.isfile(path)
|
import os, logging, json, threading, pygame, random, math, sys
from random import randint
import punchykickgravityflipwarz.colours
from punchykickgravityflipwarz.sprite_sheet import SpriteSheet
from punchykickgravityflipwarz.entity import Entity
from punchykickgravityflipwarz.vector import Vector
from punchykickgravityflipwarz.item import *
logger = logging.getLogger(__name__)
TILE_SIZE = 32
PLAYER_JUMP_SPEED = 10
PLAYER_SPEED = 5
PLAYER_RADIUS = 15
PLAYER_DIAMETER = 2 * PLAYER_RADIUS
RESET_GAME = False
class Player(Entity):
def __init__(self, name, controls, sprite_sheet_file_name, game, location = (0,0)):
super().__init__(location[0], location[1], TILE_SIZE, TILE_SIZE)
logger.debug(f"Creating player [{name}].")
self.name = name
self.lives = 3
self.controls = controls
self.vel_x = 0
self.vel_y = 0
self.acc_x = 0
self.direction = 0
self.game = game
self.item_type = Gun(self, self.game)
self.game.item_types.add(self.item_type)
self.item_type.rect = self.rect
# Setup the sprites/animation.
sheet = SpriteSheet(os.path.join('punchykickgravityflipwarz', 'resources', sprite_sheet_file_name))
super().add_sprite("stood_right", sheet, (0, 0, TILE_SIZE, TILE_SIZE))
super().add_sprite("stood_left", sheet, (3*TILE_SIZE, TILE_SIZE, TILE_SIZE, TILE_SIZE))
super().add_sprites("walking_right", sheet, (TILE_SIZE, 0, TILE_SIZE, TILE_SIZE), 3, (TILE_SIZE, 0))
super().add_sprites("walking_left", sheet, (2*TILE_SIZE, TILE_SIZE, TILE_SIZE, TILE_SIZE), 3, (-TILE_SIZE, 0))
super().set_sprite("stood_right")
super().update_animation()
def capture_inputs(self):
keys = pygame.key.get_pressed()
if self.controls.joystick is not None:
joystick = self.controls.joystick
hats = joystick.get_hat(0)
self.key_up = hats[1]==1
self.key_down = hats[1]==-1
self.key_left = hats[0]==-1
self.key_right = hats[0]==1
if (joystick.get_numbuttons()>2):
self.key_space = joystick.get_button( 1 )==1
else:
self.key_space = keys[self.controls.getKeys()["space"]]
elif self.controls.network is not None:
self.key_up = self.controls.network["state"]["up"]
self.key_down = self.controls.network["state"]["down"]
self.key_left = self.controls.network["state"]["left"]
self.key_right = self.controls.network["state"]["right"]
self.key_space = self.controls.network["state"]["a"]
else:
self.key_up = keys[self.controls.keys["up"]]
self.key_down = keys[self.controls.keys["down"]]
self.key_left = keys[self.controls.keys["left"]]
self.key_right = keys[self.controls.keys["right"]]
self.key_space = keys[self.controls.keys["space"]]
def update(self):
self.capture_inputs()
# items
if self.item_type is not None:
self.item_type.update()
if self.key_space:
item_finished, items = self.item_type.action()
if item_finished:
self.item_type = None
for item in items:
self.game.items.add(item)
self.death()
# Left/right movement
if self.key_left:
self.vel_x = -PLAYER_SPEED
super().set_sprite("walking_left")
self.direction = 0
elif self.key_right:
super().set_sprite("walking_right")
self.direction = 1
self.vel_x = PLAYER_SPEED
else:
self.vel_x = 0
if self.direction == 0:
super().set_sprite("stood_left")
elif self.direction == 1:
super().set_sprite("stood_right")
self.vel_x += self.acc_x
self.acc_x = 0
self.rect.x += self.vel_x
tile_hit_list = pygame.sprite.spritecollide(self, self.game.world.tiles, False)
for tile in tile_hit_list:
if self.vel_x > 0:
self.rect.right = tile.rect.left
elif self.vel_x < 0:
self.rect.left = tile.rect.right
# up/down movement
if self.game.world.gravity < 0:
sign = -1
else:
sign = 1
if self.vel_y == 0:
self.vel_y = 1 * sign
else:
self.vel_y += self.game.world.gravity
if self.key_up:
self.rect.y += sign*2
tile_hit_list = pygame.sprite.spritecollide(self, self.game.world.tiles, False)
self.rect.y -= sign*2
if len(tile_hit_list) > 0:
self.vel_y = -1 * sign * PLAYER_JUMP_SPEED
self.rect.y += self.vel_y
tile_hit_list = pygame.sprite.spritecollide(self, self.game.world.tiles, False)
for tile in tile_hit_list:
if self.vel_y > 0:
self.rect.bottom = tile.rect.top
elif self.vel_y < 0:
self.rect.top = tile.rect.bottom
self.vel_y = 0
super().update_animation()
    def death(self):
        global RESET_GAME
        if self.rect.x < 0 or self.rect.x > 1920 or self.rect.y > 1200:
            self.lives -= 1
            print(f"{self.name} died, {self.lives} lives remaining")
            RESET_GAME = True
def show(self, screen):
super().show()
|
import os
import sys
import logging
from typing import Iterable, Type
from types import TracebackType
import discord
def catch_all(type_: Type[BaseException], value: BaseException, traceback: TracebackType) -> None:
# Log an error for Discord
logging.error("Uncaught exception:", exc_info=(type_, value, traceback))
# Pass to default handler (prints, exits, etc.)
sys.__excepthook__(type_, value, traceback)
class DiscordLogHandler(logging.Handler):
def __init__(self, webhook_id: str, webhook_token: str) -> None:
super().__init__()
self.webhook = discord.Webhook.partial(webhook_id, webhook_token,
adapter=discord.RequestsWebhookAdapter())
def emit(self, record: logging.LogRecord) -> None:
fmt = self.format(record)
as_code = "\n" in fmt
for part in self._message_parts(fmt, as_code):
self.webhook.send(part)
@staticmethod
def _message_parts(msg: str, as_code: bool, max_len: int = 2000) -> Iterable[str]:
if as_code:
return (f'```{msg[start:start+max_len-6]}```'
for start in range(0, len(msg), max_len - 6))
return (msg[start:start + max_len] for start in range(0, len(msg), max_len))
def setup_log_handler(debug: bool = False) -> bool:
if not sys.argv[0].endswith('gunicorn'):
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format='[%(asctime)s] [%(levelname)-8s] %(message)s', level=level
)
logging.info('Logging started for \'%s\' at log level %s', sys.argv[1], level)
# Set up Discord logger so we can see errors
discord_webhook_id = os.environ.get('DISCORD_WEBHOOK_ID')
discord_webhook_token = os.environ.get('DISCORD_WEBHOOK_TOKEN')
if discord_webhook_id and discord_webhook_token:
handler = DiscordLogHandler(discord_webhook_id, discord_webhook_token)
handler.setLevel(logging.ERROR)
logging.getLogger('').addHandler(handler)
return True
return False
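# Hedged usage sketch (wiring catch_all into sys.excepthook is one possible
# way to route uncaught exceptions through the handler configured above):
#   if setup_log_handler(debug=True):
#       sys.excepthook = catch_all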
|
"""Step definitions for behavioural tests."""
from behave import given, then # pylint:disable=no-name-in-module
@given(u"the user visits {path}")
def visit(context, path):
"""Code for the user visiting a path on the web site."""
url = "http://localhost:8000{}".format(path)
context.response = context.client.get(url, follow_redirects=True)
@then(u"the user sees {some_string}")
def sees(context, some_string):
"""Code to assert that the user sees some string in the response."""
response_string = context.response.get_data(as_text=True)
fail_msg = "{} not in {}".format(some_string, response_string)
assert some_string in response_string, fail_msg
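# Hedged example of a feature scenario these steps would match (path and
# expected string are hypothetical):
#   Given the user visits /
#   Then the user sees Welcome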
|
import datetime
from bs4 import BeautifulSoup
import utils
import requests
URL = 'https://www.sports-reference.com/cbb/boxscores/index.cgi?month=MONTH&day=DAY&year=YEAR'
DATA_FOLDER = utils.DATA_FOLDER
def scrape_scores(date_obj):
'''
scrape and return stats in the form of a list of lists where each sublist is information from a single game played on the specified day
'''
day, month, year = str(date_obj.day), str(date_obj.month), str(date_obj.year)
day_stats = []
url = URL.replace("DAY", day).replace("MONTH", month).replace("YEAR", year)
data = requests.get(url).content
table_divs = BeautifulSoup(data,'html.parser').find_all("div", {'class': 'game_summary'})
this_day_string = date_obj.strftime('%Y%m%d')
print(this_day_string)
for div in table_divs:
tables = div.find('tbody')
rows = tables.find_all('tr')
stats = [1 if len(rows) == 3 else 0]
for row in rows[:2]:
datapts = row.find_all('td')[:2]
stats.append(datapts[0].find('a').text)
stats.append(datapts[1].text)
day_stats.append(stats + [this_day_string])
return day_stats
def scrape_by_day(file_start, scrape_start, scrape_end, all_data):
'''
scrape data from scrape_start to end, append it to all_data, and save this new file as a csv
file_start: specifies a string to use for save name purposes. This is the date ('YYYYMMDD') on which the data file begins
scrape_start: different from the file start, this is the date object of the day on which to start scraping new data
scrape_end: this is the date object specifying what the last day to scrape should be
all_data: this is a list of all the existing data that the new data will be appended to
'''
new_data = []
i = scrape_start
this_month = scrape_start.month
while i <= scrape_end:
if i.month in [5, 6, 7, 8, 9, 10]:
i += datetime.timedelta(days = 1)
continue
new_data.extend(scrape_scores(i))
i += datetime.timedelta(days = 1)
if i.month != this_month:
this_month = i.month
print(len(new_data), "games recorded")
all_data.extend(new_data)
new_data = []
utils.save_data(DATA_FOLDER + file_start + "-" + (i - datetime.timedelta(days = 1)).strftime('%Y%m%d') + ".csv", all_data)
print(len(new_data), "games recorded")
all_data.extend(new_data)
utils.save_data(DATA_FOLDER + file_start + "-" + (i - datetime.timedelta(days = 1)).strftime('%Y%m%d') + ".csv", all_data)
return all_data
def main(file_start, scrape_start, scrape_end, data_filepath = False):
    '''
    performs some preliminary setup work and calls functions to scrape data in the specified date range and add it to an already existing file of data (if specified)
    file_start: specifies a string to use for save-name purposes. This is the date ('YYYYMMDD') on which the data file begins
    scrape_start: different from the file start, this is the date string ('YYYYMMDD') of the day on which to start scraping new data
    scrape_end: this is a string date ('YYYYMMDD') specifying what the last day to scrape should be
    data_filepath: specifies where to find the existing data to append new data to. If not specified, the scraped data is saved as a standalone file
    '''
start = datetime.datetime.strptime(scrape_start, "%Y%m%d")
end = datetime.datetime.strptime(scrape_end, "%Y%m%d")
    if data_filepath:
all_data = utils.read_csv(data_filepath)
if all_data[-1][-1] >= scrape_end:
print('data already updated')
return
else:
all_data = []
scrape_by_day(file_start, start, end, all_data)
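# Hedged usage sketch (the file names below are hypothetical):
#   main("20171101", "20180101", "20180131",
#        data_filepath=DATA_FOLDER + "20171101-20171231.csv")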
|
# -*- coding: utf-8 -*-
print '-' * 10
print "List"
a = ['0', '1', '2', '3', '4', '5']
print "a = %r" % a
print "len(a) = %r" % len(a)
print "a[0] = %r, %s" % (a[0], a[0])
print "a[-len(a)] = %r, %s" % (a[-len(a)], a[-len(a)])
print "a[0:3] = %r" % a[0:3]
print "a[::2] = %r" % a[::2]
a.append('7')
print "a = %r" % a
a.insert(len(a)-1, '6')
print "a = %r" % a
a.insert(6, 'x')
print "a = %r" % a
del(a[6])
print "a = %r" % a
a.pop()
print "a = %r" % a
a.pop(6)
print "a = %r" % a
print "\n" * 3
print '-' * 10
print "tuple"
b = (1, 2, [1,2])
print "b =", b
b[2][0] = 2
print "b = ", b
try:
    b.append(3)  # tuples are immutable, so this raises AttributeError
except:
    print "Error"
|
#!/usr/bin/env python
"""
Python interface to MAGMA toolkit.
"""
from __future__ import absolute_import, division, print_function
import sys
import ctypes
import atexit
import numpy as np
from . import cuda
# Load MAGMA library:
if 'linux' in sys.platform:
_libmagma_libname_list = ['libmagma.so']
elif sys.platform == 'darwin':
_libmagma_libname_list = ['magma.so', 'libmagma.dylib']
elif sys.platform == 'win32':
_libmagma_libname_list = ['magma.dll']
else:
raise RuntimeError('unsupported platform')
_load_err = ''
for _lib in _libmagma_libname_list:
try:
_libmagma = ctypes.cdll.LoadLibrary(_lib)
except OSError:
_load_err += ('' if _load_err == '' else ', ') + _lib
else:
_load_err = ''
break
if _load_err:
raise OSError('%s not found' % _load_err)
c_int_type = ctypes.c_longlong
# Exceptions corresponding to various MAGMA errors:
_libmagma.magma_strerror.restype = ctypes.c_char_p
_libmagma.magma_strerror.argtypes = [c_int_type]
def magma_strerror(error):
"""
Return string corresponding to specified MAGMA error code.
"""
return _libmagma.magma_strerror(error)
class MagmaError(Exception):
def __init__(self, status, info=None):
self._status = status
self._info = info
errstr = "%s (Code: %d)" % (magma_strerror(status), status)
super(MagmaError,self).__init__(errstr)
def magmaCheckStatus(status):
"""
Raise an exception corresponding to the specified MAGMA status code.
"""
if status != 0:
raise MagmaError(status)
# Utility functions:
_libmagma.magma_version.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
def magma_version():
"""
Get MAGMA version.
"""
majv = c_int_type()
minv = c_int_type()
micv = c_int_type()
_libmagma.magma_version(ctypes.byref(majv),
ctypes.byref(minv), ctypes.byref(micv))
return (majv.value, minv.value, micv.value)
# MAGMA below 1.4.0 uses LAPACK-style char constants, while MAGMA 1.5+ uses
# numeric constants. These dicts are filled in magma_init() and can convert
# between the two modes accordingly:
_bool_conversion = {}
_order_conversion = {}
_trans_conversion = {}
_uplo_conversion = {}
_diag_conversion = {}
_side_conversion = {}
_norm_conversion = {}
_dist_conversion = {}
_sym_conversion = {}
_pack_conversion = {}
_vec_conversion = {}
_range_conversion = {}
_vect_conversion = {}
_direct_conversion = {}
_storev_conversion = {}
_libmagma.magma_bool_const.restype = c_int_type
_libmagma.magma_bool_const.argtypes = [ctypes.c_char]
_libmagma.magma_order_const.restype = c_int_type
_libmagma.magma_order_const.argtypes = [ctypes.c_char]
_libmagma.magma_norm_const.restype = c_int_type
_libmagma.magma_norm_const.argtypes = [ctypes.c_char]
_libmagma.magma_dist_const.restype = c_int_type
_libmagma.magma_dist_const.argtypes = [ctypes.c_char]
_libmagma.magma_sym_const.restype = c_int_type
_libmagma.magma_sym_const.argtypes = [ctypes.c_char]
_libmagma.magma_pack_const.restype = c_int_type
_libmagma.magma_pack_const.argtypes = [ctypes.c_char]
_libmagma.magma_vect_const.restype = c_int_type
_libmagma.magma_vect_const.argtypes = [ctypes.c_char]
_libmagma.magma_range_const.restype = c_int_type
_libmagma.magma_range_const.argtypes = [ctypes.c_char]
_libmagma.magma_direct_const.restype = c_int_type
_libmagma.magma_direct_const.argtypes = [ctypes.c_char]
_libmagma.magma_storev_const.restype = c_int_type
_libmagma.magma_storev_const.argtypes = [ctypes.c_char]
_libmagma.magma_vec_const.restype = c_int_type
_libmagma.magma_vec_const.argtypes = [ctypes.c_char]
_libmagma.magma_uplo_const.restype = c_int_type
_libmagma.magma_uplo_const.argtypes = [ctypes.c_char]
_libmagma.magma_side_const.restype = c_int_type
_libmagma.magma_side_const.argtypes = [ctypes.c_char]
_libmagma.magma_trans_const.restype = c_int_type
_libmagma.magma_trans_const.argtypes = [ctypes.c_char]
_libmagma.magma_diag_const.restype = c_int_type
_libmagma.magma_diag_const.argtypes = [ctypes.c_char]
_libmagma.magma_init.restype = int
def magma_init():
"""
Initialize MAGMA.
"""
global _bool_conversion
global _order_conversion
global _trans_conversion
global _uplo_conversion
global _diag_conversion
global _side_conversion
global _norm_conversion
global _dist_conversion
global _sym_conversion
global _pack_conversion
global _vec_conversion
global _range_conversion
global _vect_conversion
global _direct_conversion
global _storev_conversion
status = _libmagma.magma_init()
magmaCheckStatus(status)
v = magma_version()
if v >= (1, 5, 0):
for c in [b'n', b'N', b'y', b'Y']:
_bool_conversion.update({c: _libmagma.magma_bool_const(c)})
_bool_conversion.update({c.decode(): _libmagma.magma_bool_const(c)})
for c in [b'r', b'R', b'c', b'C']:
_order_conversion.update({c: _libmagma.magma_order_const(c)})
_order_conversion.update({c.decode(): _libmagma.magma_order_const(c)})
for c in [b'O', b'o', b'1', b'2', b'F', b'f', b'E', b'e', b'I', b'i',b'M',b'm']:
_norm_conversion.update({c: _libmagma.magma_norm_const(c)})
_norm_conversion.update({c.decode(): _libmagma.magma_norm_const(c)})
for c in [b'U', b'u', b'S', b's', b'N', b'n']:
_dist_conversion.update({c: _libmagma.magma_dist_const(c)})
_dist_conversion.update({c.decode(): _libmagma.magma_dist_const(c)})
for c in [b'H', b'h', b'S', b's', b'N', b'n', b'P', b'p']:
_sym_conversion.update({c: _libmagma.magma_sym_const(c)})
_sym_conversion.update({c.decode(): _libmagma.magma_sym_const(c)})
        for c in [b'N', b'n', b'U', b'u', b'L', b'l', b'C', b'c', b'R', b'r', b'B', b'b', b'Q', b'q', b'Z', b'z']:
_pack_conversion.update({c: _libmagma.magma_pack_const(c)})
_pack_conversion.update({c.decode(): _libmagma.magma_pack_const(c)})
for c in [b'N', b'n', b'V', b'v', b'I', b'i', b'A', b'a', b'S', b's',b'O',b'o']:
_vec_conversion.update({c: _libmagma.magma_vec_const(c)})
_vec_conversion.update({c.decode(): _libmagma.magma_vec_const(c)})
for c in [ b'V', b'v', b'I', b'i', b'A', b'a']:
_range_conversion.update({c: _libmagma.magma_range_const(c)})
_range_conversion.update({c.decode(): _libmagma.magma_range_const(c)})
for c in [b'q', b'Q', b'p', b'P']:
_vect_conversion.update({c: _libmagma.magma_vect_const(c)})
_vect_conversion.update({c.decode(): _libmagma.magma_vect_const(c)})
for c in [b'f', b'F', b'B', b'b']:
_direct_conversion.update({c: _libmagma.magma_direct_const(c)})
_direct_conversion.update({c.decode(): _libmagma.magma_direct_const(c)})
for c in [b'c', b'C', b'r', b'R']:
_storev_conversion.update({c: _libmagma.magma_storev_const(c)})
_storev_conversion.update({c.decode(): _libmagma.magma_storev_const(c)})
for c in [b'l', b'L', b'u', b'U']:
_uplo_conversion.update({c: _libmagma.magma_uplo_const(c)})
_uplo_conversion.update({c.decode(): _libmagma.magma_uplo_const(c)})
for c in [b'l', b'L', b'r', b'R', b'b', b'B']:
_side_conversion.update({c: _libmagma.magma_side_const(c)})
_side_conversion.update({c.decode(): _libmagma.magma_side_const(c)})
for c in [b'n', b'N', b't', b'T', b'c', b'C']:
_trans_conversion.update({c: _libmagma.magma_trans_const(c)})
_trans_conversion.update({c.decode(): _libmagma.magma_trans_const(c)})
for c in [b'N', b'n', b'U', b'u']:
_diag_conversion.update({c: _libmagma.magma_diag_const(c)})
_diag_conversion.update({c.decode(): _libmagma.magma_diag_const(c)})
else:
for c in ['l', 'L', 'u', 'U']:
_uplo_conversion.update({c: c})
for c in ['n', 'N', 'a', 'A', 'o', 'O', 's', 'S', 'i', 'I', 'v', 'V']:
_vec_conversion.update({c: c})
for c in ['l', 'L', 'r', 'R', 'b', 'B']:
            _side_conversion.update({c: c})
for c in ['n', 'N', 't', 'T', 'c', 'C']:
_trans_conversion.update({c:c})
for c in ['n', 'N', 'u', 'U']:
_diag_conversion.update({c:c})
_libmagma.magma_finalize.restype = int
def magma_finalize():
"""
Finalize MAGMA.
"""
status = _libmagma.magma_finalize()
magmaCheckStatus(status)
_libmagma.magma_getdevice_arch.restype = int
def magma_getdevice_arch():
"""
Get device architecture.
"""
return _libmagma.magma_getdevice_arch()
_libmagma.magma_getdevice.argtypes = [ctypes.c_void_p]
def magma_getdevice():
"""
Get current device used by MAGMA.
"""
dev = c_int_type()
_libmagma.magma_getdevice(ctypes.byref(dev))
return dev.value
_libmagma.magma_setdevice.argtypes = [c_int_type]
def magma_setdevice(dev):
"""
Get current device used by MAGMA.
"""
_libmagma.magma_setdevice(dev)
def magma_device_sync():
"""
Synchronize device used by MAGMA.
"""
_libmagma.magma_device_sync()
# BLAS routines
# ISAMAX, IDAMAX, ICAMAX, IZAMAX
_libmagma.magma_isamax.restype = int
_libmagma.magma_isamax.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_isamax(n, dx, incx, queue):
"""
Index of maximum magnitude element.
"""
return _libmagma.magma_isamax(n, int(dx), incx, queue)
_libmagma.magma_idamax.restype = int
_libmagma.magma_idamax.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_idamax(n, dx, incx, queue):
"""
Index of maximum magnitude element.
"""
return _libmagma.magma_idamax(n, int(dx), incx, queue)
_libmagma.magma_icamax.restype = int
_libmagma.magma_icamax.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_icamax(n, dx, incx, queue):
"""
Index of maximum magnitude element.
"""
return _libmagma.magma_icamax(n, int(dx), incx, queue)
_libmagma.magma_izamax.restype = int
_libmagma.magma_izamax.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_izamax(n, dx, incx, queue):
"""
Index of maximum magnitude element.
"""
return _libmagma.magma_izamax(n, int(dx), incx, queue)
# ISAMIN, IDAMIN, ICAMIN, IZAMIN
_libmagma.magma_isamin.restype = int
_libmagma.magma_isamin.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_isamin(n, dx, incx, queue):
"""
Index of minimum magnitude element.
"""
return _libmagma.magma_isamin(n, int(dx), incx, queue)
_libmagma.magma_idamin.restype = int
_libmagma.magma_idamin.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_idamin(n, dx, incx, queue):
"""
Index of minimum magnitude element.
"""
return _libmagma.magma_idamin(n, int(dx), incx, queue)
_libmagma.magma_icamin.restype = int
_libmagma.magma_icamin.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_icamin(n, dx, incx, queue):
"""
Index of minimum magnitude element.
"""
return _libmagma.magma_icamin(n, int(dx), incx, queue)
_libmagma.magma_izamin.restype = int
_libmagma.magma_izamin.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_izamin(n, dx, incx, queue):
"""
Index of minimum magnitude element.
"""
return _libmagma.magma_izamin(n, int(dx), incx, queue)
# SASUM, DASUM, SCASUM, DZASUM
_libmagma.magma_sasum.restype = int
_libmagma.magma_sasum.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sasum(n, dx, incx, queue):
"""
Sum of absolute values of vector.
"""
return _libmagma.magma_sasum(n, int(dx), incx, queue)
_libmagma.magma_dasum.restype = int
_libmagma.magma_dasum.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dasum(n, dx, incx, queue):
"""
Sum of absolute values of vector.
"""
return _libmagma.magma_dasum(n, int(dx), incx, queue)
_libmagma.magma_scasum.restype = int
_libmagma.magma_scasum.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_scasum(n, dx, incx, queue):
"""
Sum of absolute values of vector.
"""
return _libmagma.magma_scasum(n, int(dx), incx, queue)
_libmagma.magma_dzasum.restype = int
_libmagma.magma_dzasum.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dzasum(n, dx, incx, queue):
"""
Sum of absolute values of vector.
"""
return _libmagma.magma_dzasum(n, int(dx), incx, queue)
# SAXPY, DAXPY, CAXPY, ZAXPY
_libmagma.magma_saxpy.restype = int
_libmagma.magma_saxpy.argtypes = [c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_saxpy(n, alpha, dx, incx, dy, incy, queue):
"""
Vector addition.
"""
_libmagma.magma_saxpy(n, alpha, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_daxpy.restype = int
_libmagma.magma_daxpy.argtypes = [c_int_type,
ctypes.c_double,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_daxpy(n, alpha, dx, incx, dy, incy, queue):
"""
Vector addition.
"""
_libmagma.magma_daxpy(n, alpha, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_caxpy.restype = int
_libmagma.magma_caxpy.argtypes = [c_int_type,
cuda.cuFloatComplex,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_caxpy(n, alpha, dx, incx, dy, incy, queue):
"""
Vector addition.
"""
_libmagma.magma_caxpy(n, ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(dx), incx, int(dy), incy, queue)
_libmagma.magma_zaxpy.restype = int
_libmagma.magma_zaxpy.argtypes = [c_int_type,
cuda.cuDoubleComplex,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zaxpy(n, alpha, dx, incx, dy, incy, queue):
"""
Vector addition.
"""
_libmagma.magma_zaxpy(n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(dx), incx, int(dy), incy, queue)
# SCOPY, DCOPY, CCOPY, ZCOPY
_libmagma.magma_scopy.restype = int
_libmagma.magma_scopy.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_scopy(n, dx, incx, dy, incy, queue):
"""
Vector copy.
"""
_libmagma.magma_scopy(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_dcopy.restype = int
_libmagma.magma_dcopy.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dcopy(n, dx, incx, dy, incy, queue):
"""
Vector copy.
"""
_libmagma.magma_dcopy(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_ccopy.restype = int
_libmagma.magma_ccopy.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ccopy(n, dx, incx, dy, incy, queue):
"""
Vector copy.
"""
_libmagma.magma_ccopy(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_zcopy.restype = int
_libmagma.magma_zcopy.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zcopy(n, dx, incx, dy, incy, queue):
"""
Vector copy.
"""
_libmagma.magma_zcopy(n, int(dx), incx, int(dy), incy, queue)
# SDOT, DDOT, CDOTU, CDOTC, ZDOTU, ZDOTC
_libmagma.magma_sdot.restype = ctypes.c_float
_libmagma.magma_sdot.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sdot(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
return _libmagma.magma_sdot(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_ddot.restype = ctypes.c_double
_libmagma.magma_ddot.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ddot(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
return _libmagma.magma_ddot(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_cdotc.restype = cuda.cuFloatComplex
_libmagma.magma_cdotc.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cdotc(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
return _libmagma.magma_cdotc(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_cdotu.restype = cuda.cuFloatComplex
_libmagma.magma_cdotu.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cdotu(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
return _libmagma.magma_cdotu(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_zdotc.restype = cuda.cuDoubleComplex
_libmagma.magma_zdotc.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zdotc(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
return _libmagma.magma_zdotc(n, int(dx), incx, int(dy), incy, queue)
_libmagma.magma_zdotu.restype = cuda.cuDoubleComplex
_libmagma.magma_zdotu.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zdotu(n, dx, incx, dy, incy, queue):
"""
Vector dot product.
"""
    return _libmagma.magma_zdotu(n, int(dx), incx, int(dy), incy, queue)
# SNRM2, DNRM2, SCNRM2, DZNRM2
_libmagma.magma_snrm2.restype = ctypes.c_float
_libmagma.magma_snrm2.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_snrm2(n, dx, incx, queue):
"""
Euclidean norm (2-norm) of vector.
"""
return _libmagma.magma_snrm2(n, int(dx), incx, queue)
_libmagma.magma_dnrm2.restype = ctypes.c_double
_libmagma.magma_dnrm2.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dnrm2(n, dx, incx, queue):
"""
Euclidean norm (2-norm) of vector.
"""
return _libmagma.magma_dnrm2(n, int(dx), incx, queue)
_libmagma.magma_scnrm2.restype = ctypes.c_float
_libmagma.magma_scnrm2.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_scnrm2(n, dx, incx, queue):
"""
Euclidean norm (2-norm) of vector.
"""
return _libmagma.magma_scnrm2(n, int(dx), incx, queue)
_libmagma.magma_dznrm2.restype = ctypes.c_double
_libmagma.magma_dznrm2.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dznrm2(n, dx, incx, queue):
"""
Euclidean norm (2-norm) of vector.
"""
return _libmagma.magma_dznrm2(n, int(dx), incx, queue)
# SROT, DROT, CROT, CSROT, ZROT, ZDROT
_libmagma.magma_srot.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_float,
ctypes.c_void_p]
def magma_srot(n, dx, incx, dy, incy, dc, ds, queue):
"""
Apply a rotation to vectors.
"""
_libmagma.magma_srot(n, int(dx), incx, int(dy), incy, dc, ds, queue)
# SROTM, DROTM
_libmagma.magma_srotm.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_srotm(n, dx, incx, dy, incy, param, queue):
"""
Apply a real modified Givens rotation.
"""
_libmagma.magma_srotm(n, int(dx), incx, int(dy), incy, param, queue)
# SROTMG, DROTMG
_libmagma.magma_srotmg.argtypes = [ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_srotmg(d1, d2, x1, y1, param, queue):
"""
Construct a real modified Givens rotation matrix.
"""
_libmagma.magma_srotmg(int(d1), int(d2), int(x1), int(y1), param, queue)
# SSCAL, DSCAL, CSCAL, CSSCAL, ZSCAL, ZDSCAL
_libmagma.magma_sscal.argtypes = [c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_sscal(n, alpha, int(dx), incx, queue)
_libmagma.magma_cscal.argtypes = [c_int_type,
cuda.cuFloatComplex,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_cscal(n, alpha, int(dx), incx, queue)
_libmagma.magma_csscal.argtypes = [c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_csscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_csscal(n, alpha, int(dx), incx, queue)
_libmagma.magma_dscal.argtypes = [c_int_type,
ctypes.c_double,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_dscal(n, alpha, int(dx), incx, queue)
_libmagma.magma_zscal.argtypes = [c_int_type,
cuda.cuDoubleComplex,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_zscal(n, alpha, int(dx), incx, queue)
_libmagma.magma_zdscal.argtypes = [c_int_type,
ctypes.c_double,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zdscal(n, alpha, dx, incx, queue):
"""
Scale a vector by a scalar.
"""
_libmagma.magma_zdscal(n, alpha, int(dx), incx, queue)
# SSWAP, DSWAP, CSWAP, ZSWAP
_libmagma.magma_sswap.argtypes = [c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sswap(n, dA, ldda, dB, lddb, queue):
"""
Swap vectors.
"""
_libmagma.magma_sswap(n, int(dA), ldda, int(dB), lddb, queue)
# SGEMV, DGEMV, CGEMV, ZGEMV
_libmagma.magma_sgemv.argtypes = [ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgemv(trans, m, n, alpha, dA, ldda, dx, incx, beta,
dy, incy, queue):
"""
Matrix-vector product for general matrix.
"""
    _libmagma.magma_sgemv(trans, m, n, alpha, int(dA), ldda, int(dx), incx,
                          beta, int(dy), incy, queue)
# SGER, DGER, CGERU, CGERC, ZGERU, ZGERC
_libmagma.magma_sger.argtypes = [c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sger(m, n, alpha, dx, incx, dy, incy, dA, ldda, queue):
"""
Rank-1 operation on real general matrix.
"""
_libmagma.magma_sger(m, n, alpha, int(dx), incx, int(dy), incy,
int(dA), ldda, queue)
# SSYMV, DSYMV, CSYMV, ZSYMV
_libmagma.magma_ssymv.argtypes = [ctypes.c_char,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssymv(uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue):
    _libmagma.magma_ssymv(uplo, n, alpha, int(dA), ldda, int(dx), incx, beta,
                          int(dy), incy, queue)
# SSYR, DSYR, CSYR, ZSYR
_libmagma.magma_ssyr.argtypes = [ctypes.c_char,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyr(uplo, n, alpha, dx, incx, dA, ldda, queue):
_libmagma.magma_ssyr(uplo, n, alpha, int(dx), incx, int(dA), ldda, queue)
# SSYR2, DSYR2, CSYR2, ZSYR2
_libmagma.magma_ssyr2.argtypes = [ctypes.c_char,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyr2(uplo, n, alpha, dx, incx, dy, incy, dA, ldda, queue):
_libmagma.magma_ssyr2(uplo, n, alpha, int(dx), incx,
int(dy), incy, int(dA), ldda, queue)
# STRMV, DTRMV, CTRMV, ZTRMV
_libmagma.magma_strmv.argtypes = [ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_strmv(uplo, trans, diag, n,
dA, ldda, dx, incx, queue):
_libmagma.magma_strmv(uplo, trans, diag, n,
int(dA), ldda, int(dx), incx, queue)
# STRSV, DTRSV, CTRSV, ZTRSV
_libmagma.magma_strsv.argtypes = [ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_strsv(uplo, trans, diag, n,
dA, ldda, dx, incx, queue):
_libmagma.magma_strsv(uplo, trans, diag, n,
int(dA), ldda, int(dx), incx, queue)
# SGEMM, DGEMM, CGEMM, ZGEMM
_libmagma.magma_sgemm.argtypes = [ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgemm(transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta,
dC, lddc, queue):
_libmagma.magma_sgemm(transA, transB, m, n, k, alpha,
int(dA), ldda, int(dB), lddb,
beta, int(dC), lddc, queue)
_libmagma.magma_zgemm.argtypes = [ctypes.c_char,
                                  ctypes.c_char,
                                  c_int_type,
                                  c_int_type,
                                  c_int_type,
                                  cuda.cuDoubleComplex,
                                  ctypes.c_void_p,
                                  c_int_type,
                                  ctypes.c_void_p,
                                  c_int_type,
                                  cuda.cuDoubleComplex,
                                  ctypes.c_void_p,
                                  c_int_type,
                                  ctypes.c_void_p]
def magma_zgemm(transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta,
dC, lddc, queue):
_libmagma.magma_zgemm(transA, transB, m, n, k, alpha,
int(dA), ldda, int(dB), lddb,
beta, int(dC), lddc, queue)
# SSYMM, DSYMM, CSYMM, ZSYMM
_libmagma.magma_ssymm.argtypes = [ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssymm(side, uplo, m, n, alpha, dA, ldda, dB, lddb, beta,
dC, lddc, queue):
_libmagma.magma_ssymm(side, uplo, m, n, alpha,
int(dA), ldda, int(dB), lddb,
beta, int(dC), lddc, queue)
# SSYRK, DSYRK, CSYRK, ZSYRK
_libmagma.magma_ssyrk.argtypes = [ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyrk(uplo, trans, n, k, alpha, dA, ldda, beta,
dC, lddc, queue):
_libmagma.magma_ssyrk(uplo, trans, n, k, alpha,
int(dA), ldda, beta, int(dC), lddc, queue)
# SSYR2K, DSYR2K, CSYR2K, ZSYR2K
_libmagma.magma_ssyr2k.argtypes = [ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyr2k(uplo, trans, n, k, alpha, dA, ldda,
dB, lddb, beta, dC, lddc, queue):
_libmagma.magma_ssyr2k(uplo, trans, n, k, alpha,
int(dA), ldda, int(dB), lddb,
beta, int(dC), lddc, queue)
# STRMM, DTRMM, CTRMM, ZTRMM
_libmagma.magma_strmm.argtypes = [ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_strmm(side, uplo, trans, diag, m, n, alpha, dA, ldda,
dB, lddb, queue):
    _libmagma.magma_strmm(side, uplo, trans, diag, m, n, alpha,
                          int(dA), ldda, int(dB), lddb, queue)
# STRSM, DTRSM, CTRSM, ZTRSM
_libmagma.magma_strsm.argtypes = [ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
ctypes.c_char,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_strsm(side, uplo, trans, diag, m, n, alpha, dA, ldda,
dB, lddb, queue):
    _libmagma.magma_strsm(side, uplo, trans, diag, m, n, alpha,
                          int(dA), ldda, int(dB), lddb, queue)
# Auxiliary routines:
_libmagma.magma_vec_const.restype = int
_libmagma.magma_vec_const.argtypes = [ctypes.c_char]
def magma_vec_const(job):
return _libmagma.magma_vec_const(job)
_libmagma.magma_get_spotrf_nb.restype = int
_libmagma.magma_get_spotrf_nb.argtypes = [c_int_type]
def magma_get_spotrf_nb(m):
return _libmagma.magma_get_spotrf_nb(m)
_libmagma.magma_get_sgetrf_nb.restype = int
_libmagma.magma_get_sgetrf_nb.argtypes = [c_int_type]
def magma_get_sgetrf_nb(m):
return _libmagma.magma_get_sgetrf_nb(m)
_libmagma.magma_get_sgetri_nb.restype = int
_libmagma.magma_get_sgetri_nb.argtypes = [c_int_type]
def magma_get_sgetri_nb(m):
return _libmagma.magma_get_sgetri_nb(m)
_libmagma.magma_get_sgeqp3_nb.restype = int
_libmagma.magma_get_sgeqp3_nb.argtypes = [c_int_type]
def magma_get_sgeqp3_nb(m):
return _libmagma.magma_get_sgeqp3_nb(m)
_libmagma.magma_get_sgeqrf_nb.restype = int
_libmagma.magma_get_sgeqrf_nb.argtypes = [c_int_type, c_int_type]
def magma_get_sgeqrf_nb(m, n):
return _libmagma.magma_get_sgeqrf_nb(m, n)
_libmagma.magma_get_dgeqrf_nb.restype = int
_libmagma.magma_get_dgeqrf_nb.argtypes = [c_int_type, c_int_type]
def magma_get_dgeqrf_nb(m, n):
return _libmagma.magma_get_dgeqrf_nb(m, n)
_libmagma.magma_get_cgeqrf_nb.restype = int
_libmagma.magma_get_cgeqrf_nb.argtypes = [c_int_type, c_int_type]
def magma_get_cgeqrf_nb(m, n):
return _libmagma.magma_get_cgeqrf_nb(m, n)
_libmagma.magma_get_zgeqrf_nb.restype = int
_libmagma.magma_get_zgeqrf_nb.argtypes = [c_int_type, c_int_type]
def magma_get_zgeqrf_nb(m, n):
return _libmagma.magma_get_zgeqrf_nb(m, n)
_libmagma.magma_get_sgeqlf_nb.restype = int
_libmagma.magma_get_sgeqlf_nb.argtypes = [c_int_type]
def magma_get_sgeqlf_nb(m):
return _libmagma.magma_get_sgeqlf_nb(m)
_libmagma.magma_get_sgehrd_nb.restype = int
_libmagma.magma_get_sgehrd_nb.argtypes = [c_int_type]
def magma_get_sgehrd_nb(m):
return _libmagma.magma_get_sgehrd_nb(m)
_libmagma.magma_get_ssytrd_nb.restype = int
_libmagma.magma_get_ssytrd_nb.argtypes = [c_int_type]
def magma_get_ssytrd_nb(m):
return _libmagma.magma_get_ssytrd_nb(m)
_libmagma.magma_get_sgelqf_nb.restype = int
_libmagma.magma_get_sgelqf_nb.argtypes = [c_int_type]
def magma_get_sgelqf_nb(m):
return _libmagma.magma_get_sgelqf_nb(m)
_libmagma.magma_get_sgebrd_nb.restype = int
_libmagma.magma_get_sgebrd_nb.argtypes = [c_int_type]
def magma_get_sgebrd_nb(m):
return _libmagma.magma_get_sgebrd_nb(m)
_libmagma.magma_get_ssygst_nb.restype = int
_libmagma.magma_get_ssygst_nb.argtypes = [c_int_type]
def magma_get_ssygst_nb(m):
    return _libmagma.magma_get_ssygst_nb(m)
_libmagma.magma_get_sbulge_nb.restype = int
_libmagma.magma_get_sbulge_nb.argtypes = [c_int_type]
def magma_get_sbulge_nb(m):
return _libmagma.magma_get_sbulge_nb(m)
_libmagma.magma_get_dsytrd_nb.restype = int
_libmagma.magma_get_dsytrd_nb.argtypes = [c_int_type]
def magma_get_dsytrd_nb(m):
return _libmagma.magma_get_dsytrd_nb(m)
_libmagma.magma_queue_create_internal.restype = int
_libmagma.magma_queue_create_internal.argtypes = [c_int_type,
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
c_int_type]
def magma_queue_create(device):
queue_ptr = ctypes.c_void_p()
    status = _libmagma.magma_queue_create_internal(device, ctypes.byref(queue_ptr), b'', b'', 0)
magmaCheckStatus(status)
return queue_ptr
_libmagma.magma_queue_destroy_internal.restype = int
_libmagma.magma_queue_destroy_internal.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
c_int_type]
def magma_queue_destroy(queue_ptr):
    status = _libmagma.magma_queue_destroy_internal(queue_ptr, b'', b'', 0)
magmaCheckStatus(status)
_libmagma.magma_queue_sync_internal.restype = int
_libmagma.magma_queue_sync_internal.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
c_int_type]
def magma_queue_sync(queue_ptr):
    status = _libmagma.magma_queue_sync_internal(queue_ptr, b'', b'', 0)
magmaCheckStatus(status)
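# Hedged usage sketch of the init/queue helpers above (the device pointers
# passed to the BLAS wrappers would come from a separate GPU allocator such
# as pycuda, which is not shown here):
#   magma_init()
#   queue = magma_queue_create(magma_getdevice())
#   # ... e.g. magma_saxpy(n, 1.0, dx_gpu_ptr, 1, dy_gpu_ptr, 1, queue) ...
#   magma_queue_destroy(queue)
#   magma_finalize()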
# Buffer size algorithms
def _magma_gesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt,
func, dtype):
work = np.zeros(1, dtype)
func(jobu, jobvt, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work.ctypes.data), -1)
return int(work[0])
def magma_sgesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt):
return _magma_gesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt,
ldvt, magma_sgesvd, np.float32)
def magma_dgesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt):
return _magma_gesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt,
ldvt, magma_dgesvd, np.float64)
def magma_cgesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt):
return _magma_gesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt,
ldvt, magma_cgesvd, np.float32)
def magma_zgesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt):
return _magma_gesvd_buffersize(jobu, jobvt, m, n, a, lda, s, u, ldu, vt,
ldvt, magma_zgesvd, np.float64)
# LAPACK routines
# SGEBRD, DGEBRD, CGEBRD, ZGEBRD
_libmagma.magma_sgebrd.restype = int
_libmagma.magma_sgebrd.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgebrd(m, n, A, lda, d, e, tauq, taup, work, lwork):
"""
Reduce matrix to bidiagonal form.
"""
info = c_int_type()
    status = _libmagma.magma_sgebrd(m, n, int(A), lda,
                                    int(d), int(e),
                                    int(tauq), int(taup),
                                    int(work), int(lwork),
                                    ctypes.byref(info))
magmaCheckStatus(status)
# SGEHRD2, DGEHRD2, CGEHRD2, ZGEHRD2
_libmagma.magma_sgehrd2.restype = int
_libmagma.magma_sgehrd2.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgehrd2(n, ilo, ihi, A, lda, tau,
work, lwork):
"""
Reduce matrix to upper Hessenberg form.
"""
info = c_int_type()
status = _libmagma.magma_sgehrd2(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
# SGEHRD, DGEHRD, CGEHRD, ZGEHRD
_libmagma.magma_sgehrd.restype = int
_libmagma.magma_sgehrd.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgehrd(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_sgehrd(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgehrd.restype = int
_libmagma.magma_dgehrd.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_dgehrd(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_dgehrd(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgehrd.restype = int
_libmagma.magma_cgehrd.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_cgehrd(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_cgehrd(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgehrd.restype = int
_libmagma.magma_zgehrd.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_zgehrd(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_zgehrd(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
# SGEHRD_M, DGEHRD_M, CGEHRD_M, ZGEHRD_M
_libmagma.magma_sgehrd_m.restype = int
_libmagma.magma_sgehrd_m.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_sgehrd_m(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_sgehrd_m(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgehrd_m.restype = int
_libmagma.magma_dgehrd_m.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_dgehrd_m(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_dgehrd_m(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgehrd_m.restype = int
_libmagma.magma_cgehrd_m.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_cgehrd_m(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_cgehrd_m(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgehrd_m.restype = int
_libmagma.magma_zgehrd_m.argtypes = _libmagma.magma_sgehrd.argtypes
def magma_zgehrd_m(n, ilo, ihi, A, lda, tau,
work, lwork, dT):
"""
Reduce matrix to upper Hessenberg form (fast algorithm).
"""
info = c_int_type()
status = _libmagma.magma_zgehrd_m(n, ilo, ihi, int(A), lda,
int(tau), int(work),
lwork, int(dT), ctypes.byref(info))
magmaCheckStatus(status)
# SORGHR_M, DORGHR_M, CUNGHR_M, ZUNGHR_M
_libmagma.magma_sorghr_m.restype = int
_libmagma.magma_sorghr_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sorghr_m(n, ilo, ihi, A, lda, tau, T, nb):
"""
Generates a REAL orthogonal matrix Q which is defined as the product of
IHI-ILO elementary reflectors of order N, as returned by <t>GEHRD
Multi-GPU, data on host
"""
info = c_int_type()
status = _libmagma.magma_sorghr_m(n, ilo, ihi, int(A), lda,
int(tau), int(T), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorghr_m.restype = int
_libmagma.magma_dorghr_m.argtypes = _libmagma.magma_sorghr_m.argtypes
def magma_dorghr_m(n, ilo, ihi, A, lda, tau, T, nb):
"""
Generates a REAL orthogonal matrix Q which is defined as the product of
IHI-ILO elementary reflectors of order N, as returned by <t>GEHRD
Multi-GPU, data on host
"""
info = c_int_type()
status = _libmagma.magma_dorghr_m(n, ilo, ihi, int(A), lda,
int(tau), int(T), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cunghr_m.restype = int
_libmagma.magma_cunghr_m.argtypes = _libmagma.magma_sorghr_m.argtypes
def magma_cunghr_m(n, ilo, ihi, A, lda, tau, T, nb):
"""
Generates a REAL orthogonal matrix Q which is defined as the product of
IHI-ILO elementary reflectors of order N, as returned by <t>GEHRD
Multi-GPU, data on host
"""
info = c_int_type()
status = _libmagma.magma_cunghr_m(n, ilo, ihi, int(A), lda,
int(tau), int(T), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zunghr_m.restype = int
_libmagma.magma_zunghr_m.argtypes = _libmagma.magma_sorghr_m.argtypes
def magma_zunghr_m(n, ilo, ihi, A, lda, tau, T, nb):
"""
    Generates a COMPLEX unitary matrix Q which is defined as the product of
IHI-ILO elementary reflectors of order N, as returned by <t>GEHRD
Multi-GPU, data on host
"""
info = c_int_type()
status = _libmagma.magma_zunghr_m(n, ilo, ihi, int(A), lda,
int(tau), int(T), nb,
ctypes.byref(info))
magmaCheckStatus(status)
# SGELQF, DGELQF, CGELQF, ZGELQF
_libmagma.magma_sgelqf.restype = int
_libmagma.magma_sgelqf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgelqf(m, n, A, lda, tau, work, lwork):
"""
LQ factorization.
"""
info = c_int_type()
status = _libmagma.magma_sgelqf(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
# SGEQRF, DGEQRF, CGEQRF, ZGEQRF
_libmagma.magma_sgeqrf.restype = int
_libmagma.magma_sgeqrf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgeqrf(m, n, A, lda, tau, work, lwork):
"""
QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf.restype = int
_libmagma.magma_dgeqrf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dgeqrf(m, n, A, lda, tau, work, lwork):
"""
QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf.restype = int
_libmagma.magma_cgeqrf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cgeqrf(m, n, A, lda, tau, work, lwork):
"""
QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf.restype = int
_libmagma.magma_zgeqrf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zgeqrf(m, n, A, lda, tau, work, lwork):
"""
QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
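# --- Hedged usage sketch (not part of the original module) -------------------
# The CPU-interface QR wrappers above take raw host pointers, so a NumPy array
# in column-major (Fortran) order can be passed via ``A.ctypes.data``.  This
# sketch assumes that the LAPACK-style workspace query (lwork = -1) is honoured
# by magma_sgeqrf and that the library has already been initialised (e.g. via a
# magma_init()-style call) if your build requires it.
def _example_magma_sgeqrf():
    import numpy as np
    m, n = 6, 4
    A = np.asfortranarray(np.random.rand(m, n).astype(np.float32))
    tau = np.zeros(min(m, n), dtype=np.float32)
    work = np.zeros(1, dtype=np.float32)
    # Workspace query (assumed LAPACK convention): optimal lwork lands in work[0].
    magma_sgeqrf(m, n, A.ctypes.data, m, tau.ctypes.data, work.ctypes.data, -1)
    work = np.zeros(int(work[0]), dtype=np.float32)
    magma_sgeqrf(m, n, A.ctypes.data, m, tau.ctypes.data, work.ctypes.data,
                 int(work.size))
    # A now holds R in its upper triangle and the Householder vectors below it.
    return A, tau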
# SGEQRF, DGEQRF, CGEQRF, ZGEQRF (ooc)
_libmagma.magma_sgeqrf_ooc.restype = int
_libmagma.magma_sgeqrf_ooc.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgeqrf_ooc(m, n, A, lda, tau, work, lwork):
"""
QR factorization (ooc).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf_ooc(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf_ooc.restype = int
_libmagma.magma_dgeqrf_ooc.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dgeqrf_ooc(m, n, A, lda, tau, work, lwork):
"""
QR factorization (ooc).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf_ooc(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf_ooc.restype = int
_libmagma.magma_cgeqrf_ooc.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cgeqrf_ooc(m, n, A, lda, tau, work, lwork):
"""
QR factorization (ooc).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf_ooc(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf_ooc.restype = int
_libmagma.magma_zgeqrf_ooc.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zgeqrf_ooc(m, n, A, lda, tau, work, lwork):
"""
QR factorization (ooc).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf_ooc(m, n, int(A), lda,
int(tau), int(work),
lwork, ctypes.byref(info))
magmaCheckStatus(status)
# SGEQRF_GPU, DGEQRF_GPU, CGEQRF_GPU, ZGEQRF_GPU
_libmagma.magma_sgeqrf_gpu.restype = int
_libmagma.magma_sgeqrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgeqrf_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface, upper triangular R is inverted).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf_gpu.restype = int
_libmagma.magma_dgeqrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dgeqrf_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface, upper triangular R is inverted).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf_gpu.restype = int
_libmagma.magma_cgeqrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cgeqrf_gpu(m, n, A, ldda, tau, dT):
"""
    QR factorization (gpu interface, upper triangular R is inverted).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf_gpu.restype = int
_libmagma.magma_zgeqrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgeqrf_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface, upper triangular R is inverted).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
# SGEQRF2_GPU, DGEQRF2_GPU, CGEQRF2_GPU, ZGEQRF2_GPU
_libmagma.magma_sgeqrf2_gpu.restype = int
_libmagma.magma_sgeqrf2_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgeqrf2_gpu(m, n, A, ldda, tau):
"""
QR factorization (gpu interface,
    LAPACK-compliant arguments).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf2_gpu(m, n, int(A), ldda,
int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf2_gpu.restype = int
_libmagma.magma_dgeqrf2_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dgeqrf2_gpu(m, n, A, ldda, tau):
"""
QR factorization (gpu interface,
    LAPACK-compliant arguments).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf2_gpu(m, n, int(A), ldda,
int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf2_gpu.restype = int
_libmagma.magma_cgeqrf2_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cgeqrf2_gpu(m, n, A, ldda, tau):
"""
QR factorization (gpu interface,
    LAPACK-compliant arguments).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf2_gpu(m, n, int(A), ldda,
int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf2_gpu.restype = int
_libmagma.magma_zgeqrf2_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgeqrf2_gpu(m, n, A, ldda, tau):
"""
    QR factorization (gpu, LAPACK-compliant arguments).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf2_gpu(m, n, int(A), ldda,
int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
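# --- Hedged usage sketch (not part of the original module) -------------------
# magma_sgeqrf2_gpu takes device pointers and LAPACK-compliant arguments.  A
# minimal sketch assuming PyCUDA: the matrix lives on the GPU in column-major
# order and its device pointer is passed as an integer via ``gpudata``; tau is
# assumed to be a host array here.
def _example_magma_sgeqrf2_gpu():
    import numpy as np
    import pycuda.autoinit          # noqa: F401  (creates a CUDA context)
    import pycuda.gpuarray as gpuarray
    m, n = 6, 4
    a = np.asfortranarray(np.random.rand(m, n).astype(np.float32))
    a_gpu = gpuarray.to_gpu(a)
    tau = np.zeros(min(m, n), dtype=np.float32)
    magma_sgeqrf2_gpu(m, n, int(a_gpu.gpudata), m, tau.ctypes.data)
    return a_gpu.get(), tau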
# SGEQRF3_GPU, DGEQRF3_GPU, CGEQRF3_GPU, ZGEQRF3_GPU
_libmagma.magma_sgeqrf3_gpu.restype = int
_libmagma.magma_sgeqrf3_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgeqrf3_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf3_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf3_gpu.restype = int
_libmagma.magma_dgeqrf3_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dgeqrf3_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf3_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf3_gpu.restype = int
_libmagma.magma_cgeqrf3_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cgeqrf3_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf3_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf3_gpu.restype = int
_libmagma.magma_zgeqrf3_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgeqrf3_gpu(m, n, A, ldda, tau, dT):
"""
QR factorization (gpu interface).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf3_gpu(m, n, int(A), ldda,
int(tau), int(dT),
ctypes.byref(info))
magmaCheckStatus(status)
# SGEQRF_M, DGEQRF_M, CGEQRF_M, ZGEQRF_M
_libmagma.magma_sgeqrf_m.restype = int
_libmagma.magma_sgeqrf_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgeqrf_m(ngpu, m, n, A, lda, tau, work, lwork):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf_m(ngpu, m, n, int(A), lda,
int(tau), int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf_m.restype = int
_libmagma.magma_dgeqrf_m.argtypes = _libmagma.magma_sgeqrf_m.argtypes
def magma_dgeqrf_m(ngpu, m, n, A, lda, tau, work, lwork):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf_m(ngpu, m, n, int(A), lda,
int(tau), int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf_m.restype = int
_libmagma.magma_cgeqrf_m.argtypes = _libmagma.magma_sgeqrf_m.argtypes
def magma_cgeqrf_m(ngpu, m, n, A, lda, tau, work, lwork):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf_m(ngpu, m, n, int(A), lda,
int(tau), int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf_m.restype = int
_libmagma.magma_zgeqrf_m.argtypes = _libmagma.magma_sgeqrf_m.argtypes
def magma_zgeqrf_m(ngpu, m, n, A, lda, tau, work, lwork):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf_m(ngpu, m, n, int(A), lda,
int(tau), int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
# SGEQRF2_MGPU, DGEQRF2_MGPU, CGEQRF2_MGPU, ZGEQRF2_MGPU
_libmagma.magma_sgeqrf2_mgpu.restype = int
_libmagma.magma_sgeqrf2_mgpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgeqrf2_mgpu(ngpu, m, n, dlA, ldda, tau):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_sgeqrf2_mgpu(ngpu, m, n, int(dlA),
ldda, int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeqrf2_mgpu.restype = int
_libmagma.magma_dgeqrf2_mgpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dgeqrf2_mgpu(ngpu, m, n, dlA, ldda, tau):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_dgeqrf2_mgpu(ngpu, m, n, int(dlA),
ldda, int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeqrf2_mgpu.restype = int
_libmagma.magma_cgeqrf2_mgpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cgeqrf2_mgpu(ngpu, m, n, dlA, ldda, tau):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_cgeqrf2_mgpu(ngpu, m, n, int(dlA),
ldda, int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeqrf2_mgpu.restype = int
_libmagma.magma_zgeqrf2_mgpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgeqrf2_mgpu(ngpu, m, n, dlA, ldda, tau):
"""
QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
status = _libmagma.magma_zgeqrf2_mgpu(ngpu, m, n, int(dlA),
ldda, int(tau),
ctypes.byref(info))
magmaCheckStatus(status)
# SORMQR_M, DORMQR_M, CUNMQR_M, ZUNMQR_M
_libmagma.magma_sormqr_m.restype = int
_libmagma.magma_sormqr_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sormqr_m(ngpu, side, trans, m, n, k, A, lda,
tau, C, ldc, work, lwork):
"""
    Multiply by Q from QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
status = _libmagma.magma_sormqr_m(ngpu, side, trans, m, n, k,
int(A), lda, int(tau),
int(C), ldc, int(work), lwork,
                                      ctypes.byref(info))
    magmaCheckStatus(status)
_libmagma.magma_dormqr_m.restype = int
_libmagma.magma_dormqr_m.argtypes = _libmagma.magma_sormqr_m.argtypes
def magma_dormqr_m(ngpu, side, trans, m, n, k, A, lda,
tau, C, ldc, work, lwork):
"""
    Multiply by Q from QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
status = _libmagma.magma_dormqr_m(ngpu, side, trans, m, n, k,
int(A), lda, int(tau),
int(C), ldc, int(work), lwork,
                                      ctypes.byref(info))
    magmaCheckStatus(status)
_libmagma.magma_cunmqr_m.restype = int
_libmagma.magma_cunmqr_m.argtypes = _libmagma.magma_sormqr_m.argtypes
def magma_cunmqr_m(ngpu, side, trans, m, n, k, A, lda,
tau, C, ldc, work, lwork):
"""
    Multiply by Q from QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
status = _libmagma.magma_cunmqr_m(ngpu, side, trans, m, n, k,
int(A), lda, int(tau),
int(C), ldc, int(work), lwork,
                                      ctypes.byref(info))
    magmaCheckStatus(status)
_libmagma.magma_zunmqr_m.restype = int
_libmagma.magma_zunmqr_m.argtypes = _libmagma.magma_sormqr_m.argtypes
def magma_zunmqr_m(ngpu, side, trans, m, n, k, A, lda,
tau, C, ldc, work, lwork):
"""
    Multiply by Q from QR factorization (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
    status = _libmagma.magma_zunmqr_m(ngpu, side, trans, m, n, k,
                                      int(A), lda, int(tau),
                                      int(C), ldc, int(work), lwork,
                                      ctypes.byref(info))
    magmaCheckStatus(status)
# STRSM_M, DTRSM_M, CTRSM_M, ZTRSM_M
_libmagma.magma_strsm_m.restype = int
_libmagma.magma_strsm_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_float,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_strsm_m(ngpu, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb):
"""
    Solve triangular linear equations (multiple gpu,
GPU memory is allocated in the routine).
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
uplo = _uplo_conversion[uplo]
diag = _diag_conversion[diag]
status = _libmagma.magma_strsm_m(ngpu, side, uplo, trans,
diag, m, n, alpha, int(A),
lda, int(B), ldb,
                                     ctypes.byref(info))
    magmaCheckStatus(status)
_libmagma.magma_sormqr.restype = int
_libmagma.magma_sormqr.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sormqr(side, trans, m, n, k, A, lda,
tau, C, ldc, work, lwork):
"""
    Multiply by Q from QR factorization.
"""
info = c_int_type()
side = _side_conversion[side]
trans = _trans_conversion[trans]
status = _libmagma.magma_sormqr(side, trans, m, n, k,
int(A), lda, int(tau),
int(C), ldc, int(work), lwork,
                                    ctypes.byref(info))
    magmaCheckStatus(status)
# SORGQR, DORGQR, CUNGQR, ZUNGQR
_libmagma.magma_sorgqr.restype = int
_libmagma.magma_sorgqr.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sorgqr(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_sorgqr(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorgqr.restype = int
_libmagma.magma_dorgqr.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dorgqr(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_dorgqr(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cungqr.restype = int
_libmagma.magma_cungqr.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cungqr(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_cungqr(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zungqr.restype = int
_libmagma.magma_zungqr.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zungqr(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_zungqr(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
# SORGQR2, DORGQR2, CUNGQR2, ZUNGQR2
_libmagma.magma_sorgqr2.restype = int
_libmagma.magma_sorgqr2.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sorgqr2(m, n, k, A, lda, tau):
"""
Generate Q from QR factorization.
(Recompute T matrices on CPU and send them to GPU)
"""
info = c_int_type()
status = _libmagma.magma_sorgqr2(m, n, k, int(A), lda,
int(tau), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorgqr2.restype = int
_libmagma.magma_dorgqr2.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dorgqr2(m, n, k, A, lda, tau):
"""
Generate Q from QR factorization.
(Recompute T matrices on CPU and send them to GPU)
"""
info = c_int_type()
status = _libmagma.magma_dorgqr2(m, n, k, int(A), lda,
int(tau), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cungqr2.restype = int
_libmagma.magma_cungqr2.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cungqr2(m, n, k, A, lda, tau):
"""
Generate Q from QR factorization.
(Recompute T matrices on CPU and send them to GPU)
"""
info = c_int_type()
status = _libmagma.magma_cungqr2(m, n, k, int(A), lda,
int(tau), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zungqr2.restype = int
_libmagma.magma_zungqr2.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zungqr2(m, n, k, A, lda, tau):
"""
Generate Q from QR factorization.
(Recompute T matrices on CPU and send them to GPU)
"""
info = c_int_type()
status = _libmagma.magma_zungqr2(m, n, k, int(A), lda,
int(tau), ctypes.byref(info))
magmaCheckStatus(status)
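# --- Hedged usage sketch (not part of the original module) -------------------
# Forming Q explicitly: factor with the CPU-interface magma_sgeqrf above and
# expand the reflectors with magma_sorgqr2, which recomputes the T matrices
# itself and therefore needs no dT argument.  Column-major NumPy storage and a
# working workspace query for magma_sgeqrf are assumed (see the earlier sketch).
def _example_form_q_with_sorgqr2():
    import numpy as np
    m, n = 6, 4
    A = np.asfortranarray(np.random.rand(m, n).astype(np.float32))
    tau = np.zeros(n, dtype=np.float32)
    work = np.zeros(1, dtype=np.float32)
    magma_sgeqrf(m, n, A.ctypes.data, m, tau.ctypes.data, work.ctypes.data, -1)
    work = np.zeros(int(work[0]), dtype=np.float32)
    magma_sgeqrf(m, n, A.ctypes.data, m, tau.ctypes.data, work.ctypes.data,
                 int(work.size))
    magma_sorgqr2(m, n, n, A.ctypes.data, m, tau.ctypes.data)
    return A                        # A has been overwritten with the m x n Q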
# SORGQR_GPU, DORGQR_GPU, CUNGQR_GPU, ZUNGQR_GPU
_libmagma.magma_sorgqr_gpu.restype = int
_libmagma.magma_sorgqr_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sorgqr_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_sorgqr_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorgqr_gpu.restype = int
_libmagma.magma_dorgqr_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dorgqr_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_dorgqr_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cungqr_gpu.restype = int
_libmagma.magma_cungqr_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cungqr_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_cungqr_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zungqr_gpu.restype = int
_libmagma.magma_zungqr_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zungqr_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_zungqr_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
# SORGQR_2STAGE_GPU, DORGQR_2STAGE_GPU
# CUNGQR_2STAGE_GPU, ZUNGQR_2STAGE_GPU
_libmagma.magma_sorgqr_2stage_gpu.restype = int
_libmagma.magma_sorgqr_2stage_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sorgqr_2stage_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_sorgqr_2stage_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorgqr_2stage_gpu.restype = int
_libmagma.magma_dorgqr_2stage_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dorgqr_2stage_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_dorgqr_2stage_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cungqr_2stage_gpu.restype = int
_libmagma.magma_cungqr_2stage_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cungqr_2stage_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_cungqr_2stage_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zungqr_2stage_gpu.restype = int
_libmagma.magma_zungqr_2stage_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_zungqr_2stage_gpu(m, n, k, A, ldda, tau, dT, nb):
"""
Generate Q from QR factorization.
(GPU interface)
"""
info = c_int_type()
status = _libmagma.magma_zungqr_2stage_gpu(m, n, k, int(A), ldda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
# SORGQR_M, DORGQR_M, CUNGQR_M, ZUNGQR_M
_libmagma.magma_sorgqr_m.restype = int
_libmagma.magma_sorgqr_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sorgqr_m(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
(multi-GPU)
"""
info = c_int_type()
status = _libmagma.magma_sorgqr_m(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dorgqr_m.restype = int
_libmagma.magma_dorgqr_m.argtypes = _libmagma.magma_sorgqr_m.argtypes
def magma_dorgqr_m(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
(multi-GPU)
"""
info = c_int_type()
status = _libmagma.magma_dorgqr_m(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cungqr_m.restype = int
_libmagma.magma_cungqr_m.argtypes = _libmagma.magma_sorgqr_m.argtypes
def magma_cungqr_m(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_cungqr_m(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zungqr_m.restype = int
_libmagma.magma_zungqr_m.argtypes = _libmagma.magma_sorgqr_m.argtypes
def magma_zungqr_m(m, n, k, A, lda, tau, dT, nb):
"""
Generate Q from QR factorization.
"""
info = c_int_type()
status = _libmagma.magma_zungqr_m(m, n, k, int(A), lda,
int(tau), int(dT), nb,
ctypes.byref(info))
magmaCheckStatus(status)
# SGESV, DGESV, CGESV, ZGESV
_libmagma.magma_sgesv.restype = int
_libmagma.magma_sgesv.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgesv(n, nhrs, A, lda, ipiv, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_sgesv(n, nhrs, int(A), lda,
int(ipiv), int(B),
ldb, ctypes.byref(info))
magmaCheckStatus(status)
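# --- Hedged usage sketch (not part of the original module) -------------------
# Solving A x = B with the CPU-interface magma_sgesv.  The wrapper follows the
# LAPACK calling convention: A and B must be column-major, B is overwritten
# with the solution and ipiv receives the pivot indices (32-bit integers are
# assumed here; an ILP64 build of MAGMA would need int64 pivots).
def _example_magma_sgesv():
    import numpy as np
    n, nrhs = 4, 1
    A = np.asfortranarray(np.random.rand(n, n).astype(np.float32))
    B = np.asfortranarray(np.random.rand(n, nrhs).astype(np.float32))
    ipiv = np.zeros(n, dtype=np.int32)
    magma_sgesv(n, nrhs, A.ctypes.data, n, ipiv.ctypes.data, B.ctypes.data, n)
    return B                        # B now holds the solution x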
# SGETRF, DGETRF, CGETRF, ZGETRF
_libmagma.magma_sgetrf.restype = int
_libmagma.magma_sgetrf.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgetrf(m, n, A, lda, ipiv):
"""
LU factorization.
"""
info = c_int_type()
status = _libmagma.magma_sgetrf(m, n, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
# SGETRF_M, DGETRF_M, CGETRF_M, ZGETRF_M
_libmagma.magma_sgetrf_m.restype = int
_libmagma.magma_sgetrf_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgetrf_m(ngpu,m, n, A, lda, ipiv):
"""
LU factorization. Multi-gpu, data on host.
"""
info = c_int_type()
status = _libmagma.magma_sgetrf_m(ngpu,m, n, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgetrf_m.restype = int
_libmagma.magma_dgetrf_m.argtypes = _libmagma.magma_sgetrf_m.argtypes
def magma_dgetrf_m(ngpu,m, n, A, lda, ipiv):
"""
LU factorization. Multi-gpu, data on host.
"""
info = c_int_type()
status = _libmagma.magma_dgetrf_m(ngpu,m, n, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgetrf_m.restype = int
_libmagma.magma_cgetrf_m.argtypes = _libmagma.magma_sgetrf_m.argtypes
def magma_cgetrf_m(ngpu,m, n, A, lda, ipiv):
"""
LU factorization. Multi-gpu, data on host.
"""
info = c_int_type()
status = _libmagma.magma_cgetrf_m(ngpu,m, n, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgetrf_m.restype = int
_libmagma.magma_zgetrf_m.argtypes = _libmagma.magma_sgetrf_m.argtypes
def magma_zgetrf_m(ngpu,m, n, A, lda, ipiv):
"""
LU factorization. Multi-gpu, data on host.
"""
info = c_int_type()
status = _libmagma.magma_zgetrf_m(ngpu,m, n, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
## SGETRF2, DGETRF2, CGETRF2, ZGETRF2
#_libmagma.magma_sgetrf2.restype = int
#_libmagma.magma_sgetrf2.argtypes = [c_int_type,
# c_int_type,
# ctypes.c_void_p,
# c_int_type,
# ctypes.c_void_p,
# ctypes.c_void_p]
#def magma_sgetrf2(m, n, A, lda, ipiv):
#
# """
# LU factorization (multi-GPU).
# """
#
# info = c_int_type()
# status = _libmagma.magma_sgetrf2(m, n, int(A), lda,
# int(ipiv), ctypes.byref(info))
# magmaCheckStatus(status)
# SGEEV, DGEEV, CGEEV, ZGEEV
_libmagma.magma_sgeev.restype = int
_libmagma.magma_sgeev.argtypes = [ctypes.c_char,
ctypes.c_char,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgeev(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_sgeev(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeev.restype = int
_libmagma.magma_dgeev.argtypes = _libmagma.magma_sgeev.argtypes
def magma_dgeev(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_dgeev(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeev.restype = int
_libmagma.magma_cgeev.argtypes = _libmagma.magma_sgeev.argtypes
def magma_cgeev(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_cgeev(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeev.restype = int
_libmagma.magma_zgeev.argtypes = _libmagma.magma_sgeev.argtypes
def magma_zgeev(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_zgeev(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
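# --- Hedged usage sketch (not part of the original module) -------------------
# Eigen-decomposition with the complex single-precision wrapper.  This mirrors
# the argtypes declared above: the job flags are ctypes.c_char, so single
# characters are passed as bytes; depending on the MAGMA build they may instead
# need the magma_vec_t enum values.  The lwork = -1 workspace query is assumed
# to follow the LAPACK convention.
def _example_magma_cgeev():
    import numpy as np
    n = 4
    a = np.asfortranarray((np.random.rand(n, n)
                           + 1j * np.random.rand(n, n)).astype(np.complex64))
    w = np.zeros(n, dtype=np.complex64)
    vr = np.asfortranarray(np.zeros((n, n), dtype=np.complex64))
    rwork = np.zeros(2 * n, dtype=np.float32)
    work = np.zeros(1, dtype=np.complex64)
    magma_cgeev(b'N', b'V', n, a.ctypes.data, n, w.ctypes.data,
                0, 1, vr.ctypes.data, n, work.ctypes.data, -1,
                rwork.ctypes.data)
    work = np.zeros(int(work[0].real), dtype=np.complex64)
    magma_cgeev(b'N', b'V', n, a.ctypes.data, n, w.ctypes.data,
                0, 1, vr.ctypes.data, n, work.ctypes.data, int(work.size),
                rwork.ctypes.data)
    return w, vr                    # eigenvalues and right eigenvectors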
# SGEEV_M, DGEEV_M, CGEEV_M, ZGEEV_M
_libmagma.magma_sgeev_m.restype = int
_libmagma.magma_sgeev_m.argtypes = _libmagma.magma_sgeev.argtypes
def magma_sgeev_m(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
Multi-GPU, data on host
"""
    info = c_int_type()
status = _libmagma.magma_sgeev_m(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgeev_m.restype = int
_libmagma.magma_dgeev_m.argtypes = _libmagma.magma_sgeev.argtypes
def magma_dgeev_m(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_dgeev_m(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgeev_m.restype = int
_libmagma.magma_cgeev_m.argtypes = _libmagma.magma_sgeev.argtypes
def magma_cgeev_m(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_cgeev_m(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgeev_m.restype = int
_libmagma.magma_zgeev_m.argtypes = _libmagma.magma_sgeev.argtypes
def magma_zgeev_m(jobvl, jobvr, n, a, lda,
w, vl, ldvl, vr, ldvr, work, lwork, rwork):
"""
Compute eigenvalues and eigenvectors.
"""
    info = c_int_type()
status = _libmagma.magma_zgeev_m(jobvl, jobvr, n, int(a), lda,
int(w), int(vl), ldvl, int(vr), ldvr,
int(work), lwork, int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
# SGESVD, DGESVD, CGESVD, ZGESVD
_libmagma.magma_sgesvd.restype = int
_libmagma.magma_sgesvd.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork):
"""
SVD decomposition.
"""
jobu = _vec_conversion[jobu]
jobvt = _vec_conversion[jobvt]
info = c_int_type()
status = _libmagma.magma_sgesvd(jobu, jobvt, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgesvd.restype = int
_libmagma.magma_dgesvd.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork):
"""
SVD decomposition.
"""
jobu = _vec_conversion[jobu]
jobvt = _vec_conversion[jobvt]
info = c_int_type()
status = _libmagma.magma_dgesvd(jobu, jobvt, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgesvd.restype = int
_libmagma.magma_cgesvd.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
                                   ctypes.c_void_p,
                                   ctypes.c_void_p]
def magma_cgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
rwork):
"""
SVD decomposition.
"""
jobu = _vec_conversion[jobu]
jobvt = _vec_conversion[jobvt]
info = c_int_type()
status = _libmagma.magma_cgesvd(jobu, jobvt, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgesvd.restype = int
_libmagma.magma_zgesvd.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
rwork):
"""
SVD decomposition.
"""
jobu = _vec_conversion[jobu]
jobvt = _vec_conversion[jobvt]
    info = c_int_type()
status = _libmagma.magma_zgesvd(jobu, jobvt, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(rwork), ctypes.byref(info))
magmaCheckStatus(status)
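# --- Hedged usage sketch (not part of the original module) -------------------
# Singular value decomposition with magma_sgesvd.  The job flags go through
# _vec_conversion inside the wrapper, so plain strings are passed; 'A' (all
# vectors) is assumed to be a valid key of that mapping.  The lwork = -1
# workspace query is assumed to follow the LAPACK convention.
def _example_magma_sgesvd():
    import numpy as np
    m, n = 6, 4
    a = np.asfortranarray(np.random.rand(m, n).astype(np.float32))
    s = np.zeros(min(m, n), dtype=np.float32)
    u = np.asfortranarray(np.zeros((m, m), dtype=np.float32))
    vt = np.asfortranarray(np.zeros((n, n), dtype=np.float32))
    work = np.zeros(1, dtype=np.float32)
    magma_sgesvd('A', 'A', m, n, a.ctypes.data, m, s.ctypes.data,
                 u.ctypes.data, m, vt.ctypes.data, n, work.ctypes.data, -1)
    work = np.zeros(int(work[0]), dtype=np.float32)
    magma_sgesvd('A', 'A', m, n, a.ctypes.data, m, s.ctypes.data,
                 u.ctypes.data, m, vt.ctypes.data, n, work.ctypes.data,
                 int(work.size))
    return u, s, vt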
# SGESDD, DGESDD, CGESDD, ZGESDD
_libmagma.magma_sgesdd.restype = int
_libmagma.magma_sgesdd.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
iwork):
"""
SDD decomposition.
"""
jobz = _vec_conversion[jobz]
info = c_int_type()
status = _libmagma.magma_sgesdd(jobz, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(iwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgesdd.restype = int
_libmagma.magma_dgesdd.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_dgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
iwork):
"""
SDD decomposition.
"""
jobz = _vec_conversion[jobz]
info = c_int_type()
status = _libmagma.magma_dgesdd(jobz, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(iwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgesdd.restype = int
_libmagma.magma_cgesdd.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_cgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
rwork, iwork):
"""
SDD decomposition.
"""
jobz = _vec_conversion[jobz]
info = c_int_type()
status = _libmagma.magma_cgesdd(jobz, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(rwork), int(iwork), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgesdd.restype = int
_libmagma.magma_zgesdd.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_zgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork,
rwork, iwork):
"""
SDD decomposition.
"""
jobz = _vec_conversion[jobz]
info = c_int_type()
status = _libmagma.magma_zgesdd(jobz, m, n,
int(a), lda, int(s), int(u), ldu,
int(vt), ldvt, int(work), lwork,
int(rwork), int(iwork), ctypes.byref(info))
magmaCheckStatus(status)
# SPOSV, DPOSV, CPOSV, ZPOSV
_libmagma.magma_sposv_gpu.restype = int
_libmagma.magma_sposv_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sposv_gpu(uplo, n, nhrs, a_gpu, lda, b_gpu, ldb):
"""
    Solve linear system with positive definite coefficient matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_sposv_gpu(uplo, n, nhrs, int(a_gpu), lda,
int(b_gpu), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dposv_gpu.restype = int
_libmagma.magma_dposv_gpu.argtypes = _libmagma.magma_sposv_gpu.argtypes
def magma_dposv_gpu(uplo, n, nhrs, a_gpu, lda, b_gpu, ldb):
"""
    Solve linear system with positive definite coefficient matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dposv_gpu(uplo, n, nhrs, int(a_gpu), lda,
int(b_gpu), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cposv_gpu.restype = int
_libmagma.magma_cposv_gpu.argtypes = _libmagma.magma_sposv_gpu.argtypes
def magma_cposv_gpu(uplo, n, nhrs, a_gpu, lda, b_gpu, ldb):
"""
    Solve linear system with positive definite coefficient matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_cposv_gpu(uplo, n, nhrs, int(a_gpu), lda,
int(b_gpu), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zposv_gpu.restype = int
_libmagma.magma_zposv_gpu.argtypes = _libmagma.magma_sposv_gpu.argtypes
def magma_zposv_gpu(uplo, n, nhrs, a_gpu, lda, b_gpu, ldb):
"""
    Solve linear system with positive definite coefficient matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_zposv_gpu(uplo, n, nhrs, int(a_gpu), lda,
int(b_gpu), ldb, ctypes.byref(info))
magmaCheckStatus(status)
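# --- Hedged usage sketch (not part of the original module) -------------------
# Solving A x = B for a symmetric positive definite A with the GPU interface.
# Assumes PyCUDA: A and B live on the device in column-major order, uplo is a
# plain 'L'/'U' string converted through _uplo_conversion by the wrapper, and
# B is overwritten with the solution.
def _example_magma_sposv_gpu():
    import numpy as np
    import pycuda.autoinit          # noqa: F401
    import pycuda.gpuarray as gpuarray
    n, nrhs = 4, 1
    c = np.random.rand(n, n).astype(np.float32)
    a = np.asfortranarray(c @ c.T + n * np.eye(n, dtype=np.float32))   # SPD
    b = np.asfortranarray(np.random.rand(n, nrhs).astype(np.float32))
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    magma_sposv_gpu('L', n, nrhs, int(a_gpu.gpudata), n, int(b_gpu.gpudata), n)
    return b_gpu.get()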
# SGESV, DGESV, CGESV, ZGESV
_libmagma.magma_sgesv_gpu.restype = int
_libmagma.magma_sgesv_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgesv_gpu(n, nhrs, A, lda, ipiv, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_sgesv_gpu(n, nhrs, int(A), lda,
int(ipiv), int(B),
ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgesv_gpu.restype = int
_libmagma.magma_dgesv_gpu.argtypes = _libmagma.magma_sgesv_gpu.argtypes
def magma_dgesv_gpu(n, nhrs, A, lda, ipiv, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_dgesv_gpu(n, nhrs, int(A), lda,
int(ipiv), int(B),
ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgesv_gpu.restype = int
_libmagma.magma_cgesv_gpu.argtypes = _libmagma.magma_sgesv_gpu.argtypes
def magma_cgesv_gpu(n, nhrs, A, lda, ipiv, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_cgesv_gpu(n, nhrs, int(A), lda,
int(ipiv), int(B),
ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgesv_gpu.restype = int
_libmagma.magma_zgesv_gpu.argtypes = _libmagma.magma_sgesv_gpu.argtypes
def magma_zgesv_gpu(n, nhrs, A, lda, ipiv, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_zgesv_gpu(n, nhrs, int(A), lda,
int(ipiv), int(B),
ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_sgesv_nopiv_gpu.restype = int
_libmagma.magma_sgesv_nopiv_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_sgesv_nopiv_gpu(n, nhrs, A, lda, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_sgesv_nopiv_gpu(n, nhrs, int(A), lda,
int(B), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgesv_nopiv_gpu.restype = int
_libmagma.magma_dgesv_nopiv_gpu.argtypes = _libmagma.magma_sgesv_nopiv_gpu.argtypes
def magma_dgesv_nopiv_gpu(n, nhrs, A, lda, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_dgesv_nopiv_gpu(n, nhrs, int(A), lda,
int(B), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgesv_nopiv_gpu.restype = int
_libmagma.magma_cgesv_nopiv_gpu.argtypes = _libmagma.magma_sgesv_nopiv_gpu.argtypes
def magma_cgesv_nopiv_gpu(n, nhrs, A, lda, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_cgesv_nopiv_gpu(n, nhrs, int(A), lda,
int(B), ldb, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgesv_nopiv_gpu.restype = int
_libmagma.magma_zgesv_nopiv_gpu.argtypes = _libmagma.magma_sgesv_nopiv_gpu.argtypes
def magma_zgesv_nopiv_gpu(n, nhrs, A, lda, B, ldb):
"""
Solve system of linear equations.
"""
info = c_int_type()
status = _libmagma.magma_zgesv_nopiv_gpu(n, nhrs, int(A), lda,
int(B), ldb, ctypes.byref(info))
magmaCheckStatus(status)
# SPOTRF, DPOTRF, CPOTRF, ZPOTRF
_libmagma.magma_spotrf_gpu.restype = int
_libmagma.magma_spotrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_spotrf_gpu(uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_spotrf_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dpotrf_gpu.restype = int
_libmagma.magma_dpotrf_gpu.argtypes = _libmagma.magma_spotrf_gpu.argtypes
def magma_dpotrf_gpu(uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dpotrf_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cpotrf_gpu.restype = int
_libmagma.magma_cpotrf_gpu.argtypes = _libmagma.magma_spotrf_gpu.argtypes
def magma_cpotrf_gpu(uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_cpotrf_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zpotrf_gpu.restype = int
_libmagma.magma_zpotrf_gpu.argtypes = _libmagma.magma_spotrf_gpu.argtypes
def magma_zpotrf_gpu(uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_zpotrf_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_spotrf_m.restype = int
_libmagma.magma_spotrf_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_spotrf_m(ngpu, uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
Multi-gpu, data on host
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
    status = _libmagma.magma_spotrf_m(ngpu, uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dpotrf_m.restype = int
_libmagma.magma_dpotrf_m.argtypes = _libmagma.magma_spotrf_m.argtypes
def magma_dpotrf_m(ngpu, uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
Multi-gpu, data on host
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dpotrf_m(ngpu, uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cpotrf_m.restype = int
_libmagma.magma_cpotrf_m.argtypes = _libmagma.magma_spotrf_m.argtypes
def magma_cpotrf_m(ngpu, uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
Multi-gpu, data on host
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_cpotrf_m(ngpu, uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zpotrf_m.restype = int
_libmagma.magma_zpotrf_m.argtypes = _libmagma.magma_spotrf_m.argtypes
def magma_zpotrf_m(ngpu, uplo, n, A, lda):
"""
Cholesky factorization of positive symmetric matrix.
Multi-gpu, data on host
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_zpotrf_m(ngpu, uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
# SPOTRI, DPOTRI, CPOTRI, ZPOTRI
_libmagma.magma_spotri_gpu.restype = int
_libmagma.magma_spotri_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_spotri_gpu(uplo, n, A, lda):
"""
Inverse using the Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_spotri_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dpotri_gpu.restype = int
_libmagma.magma_dpotri_gpu.argtypes = _libmagma.magma_spotri_gpu.argtypes
def magma_dpotri_gpu(uplo, n, A, lda):
"""
Inverse using the Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dpotri_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cpotri_gpu.restype = int
_libmagma.magma_cpotri_gpu.argtypes = _libmagma.magma_spotri_gpu.argtypes
def magma_cpotri_gpu(uplo, n, A, lda):
"""
Inverse using the Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_cpotri_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zpotri_gpu.restype = int
_libmagma.magma_zpotri_gpu.argtypes = _libmagma.magma_spotri_gpu.argtypes
def magma_zpotri_gpu(uplo, n, A, lda):
"""
Inverse using the Cholesky factorization of positive symmetric matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_zpotri_gpu(uplo, n, int(A), lda, ctypes.byref(info))
magmaCheckStatus(status)
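# --- Hedged usage sketch (not part of the original module) -------------------
# Inverting a symmetric positive definite matrix on the GPU: factor in place
# with magma_spotrf_gpu, then call magma_spotri_gpu on the same device buffer.
# Only the chosen triangle ('L' here) of the result is meaningful.  Assumes
# PyCUDA and column-major device storage.
def _example_magma_spotri_gpu():
    import numpy as np
    import pycuda.autoinit          # noqa: F401
    import pycuda.gpuarray as gpuarray
    n = 4
    c = np.random.rand(n, n).astype(np.float32)
    a = np.asfortranarray(c @ c.T + n * np.eye(n, dtype=np.float32))
    a_gpu = gpuarray.to_gpu(a)
    magma_spotrf_gpu('L', n, int(a_gpu.gpudata), n)   # Cholesky factor in place
    magma_spotri_gpu('L', n, int(a_gpu.gpudata), n)   # lower triangle of A^{-1}
    return a_gpu.get()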
# SGETRF, DGETRF, CGETRF, ZGETRF
_libmagma.magma_sgetrf_gpu.restype = int
_libmagma.magma_sgetrf_gpu.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p]
def magma_sgetrf_gpu(n, m, A, lda, ipiv):
"""
LU factorization.
"""
info = c_int_type()
status = _libmagma.magma_sgetrf_gpu(n, m, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dgetrf_gpu.restype = int
_libmagma.magma_dgetrf_gpu.argtypes = _libmagma.magma_sgetrf_gpu.argtypes
def magma_dgetrf_gpu(n, m, A, lda, ipiv):
"""
LU factorization.
"""
info = c_int_type()
status = _libmagma.magma_dgetrf_gpu(n, m, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cgetrf_gpu.restype = int
_libmagma.magma_cgetrf_gpu.argtypes = _libmagma.magma_sgetrf_gpu.argtypes
def magma_cgetrf_gpu(n, m, A, lda, ipiv):
"""
LU factorization.
"""
info = c_int_type()
status = _libmagma.magma_cgetrf_gpu(n, m, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zgetrf_gpu.restype = int
_libmagma.magma_zgetrf_gpu.argtypes = _libmagma.magma_sgetrf_gpu.argtypes
def magma_zgetrf_gpu(n, m, A, lda, ipiv):
"""
LU factorization.
"""
info = c_int_type()
status = _libmagma.magma_zgetrf_gpu(n, m, int(A), lda,
int(ipiv), ctypes.byref(info))
magmaCheckStatus(status)
# SSYEVD, DSYEVD
_libmagma.magma_ssyevd_gpu.restype = int
_libmagma.magma_ssyevd_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork,
liwork):
"""
Compute eigenvalues of real symmetric matrix.
"""
    jobz = _vec_conversion[jobz]
    uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_ssyevd_gpu(jobz, uplo, n, int(dA), ldda,
int(w), int(wA), ldwa, int(work),
lwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dsyevd_gpu.restype = int
_libmagma.magma_dsyevd_gpu.argtypes = [c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork,
liwork):
"""
Compute eigenvalues of real symmetric matrix.
"""
jobz = _vec_conversion[jobz]
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dsyevd_gpu(jobz, uplo, n, int(dA), ldda,
int(w), int(wA), ldwa, int(work),
lwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
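# --- Hedged usage sketch (not part of the original module) -------------------
# Symmetric eigensolve on the GPU with magma_dsyevd_gpu.  dA lives on the
# device while w, wA, work and iwork are host buffers.  A joint workspace query
# (lwork = liwork = -1) is assumed to follow the LAPACK convention; jobz and
# uplo are plain 'V'/'L' strings converted by the wrapper, and 32-bit iwork is
# assumed (use int64 for an ILP64 MAGMA build).
def _example_magma_dsyevd_gpu():
    import numpy as np
    import pycuda.autoinit          # noqa: F401
    import pycuda.gpuarray as gpuarray
    n = 4
    c = np.random.rand(n, n)
    a = np.asfortranarray(c + c.T)                    # symmetric test matrix
    da = gpuarray.to_gpu(a)
    w = np.zeros(n, dtype=np.float64)
    wa = np.asfortranarray(np.zeros((n, n), dtype=np.float64))
    work = np.zeros(1, dtype=np.float64)
    iwork = np.zeros(1, dtype=np.int32)
    magma_dsyevd_gpu('V', 'L', n, int(da.gpudata), n, w.ctypes.data,
                     wa.ctypes.data, n, work.ctypes.data, -1,
                     iwork.ctypes.data, -1)
    work = np.zeros(int(work[0]), dtype=np.float64)
    iwork = np.zeros(int(iwork[0]), dtype=np.int32)
    magma_dsyevd_gpu('V', 'L', n, int(da.gpudata), n, w.ctypes.data,
                     wa.ctypes.data, n, work.ctypes.data, int(work.size),
                     iwork.ctypes.data, int(iwork.size))
    return w, da.get()                                # eigenvalues, eigenvectors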
# SSYEVD_M, DSYEVD_M, CHEEVD_M, ZHEEVD_M
_libmagma.magma_ssyevd_m.restype = int
_libmagma.magma_ssyevd_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyevd_m(ngpu, jobz, uplo, n, A, lda, w, work, lwork, iwork, liwork):
"""
Compute eigenvalues of real symmetric matrix.
Multi-GPU, data on host
"""
    jobz = _vec_conversion[jobz]
    uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_ssyevd_m(ngpu, jobz, uplo, n, int(A), lda,
int(w), int(work),
lwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_dsyevd_m.restype = int
_libmagma.magma_dsyevd_m.argtypes = _libmagma.magma_ssyevd_m.argtypes
def magma_dsyevd_m(ngpu, jobz, uplo, n, A, lda, w, work, lwork, iwork, liwork):
"""
Compute eigenvalues of real symmetric matrix.
Multi-GPU, data on host
"""
    jobz = _vec_conversion[jobz]
    uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dsyevd_m(ngpu, jobz, uplo, n, int(A), lda,
int(w), int(work),
lwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_cheevd_m.restype = int
_libmagma.magma_cheevd_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_cheevd_m(ngpu, jobz, uplo, n, A, lda, w, work, lwork,
rwork, lrwork, iwork, liwork):
"""
Compute eigenvalues of complex hermitian matrix.
Multi-GPU, data on host
"""
    jobz = _vec_conversion[jobz]
    uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_cheevd_m(ngpu, jobz, uplo, n, int(A), lda,
int(w), int(work), lwork, int(rwork),
lrwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
_libmagma.magma_zheevd_m.restype = int
_libmagma.magma_zheevd_m.argtypes = _libmagma.magma_cheevd_m.argtypes
def magma_zheevd_m(ngpu, jobz, uplo, n, A, lda, w, work, lwork,
rwork, lrwork, iwork, liwork):
"""
Compute eigenvalues of complex hermitian matrix.
Multi-GPU, data on host
"""
    jobz = _vec_conversion[jobz]
    uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_zheevd_m(ngpu, jobz, uplo, n, int(A), lda,
int(w), int(work), lwork, int(rwork),
lrwork, int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
# SSYEVDX_M, DSYEVDX_M, CHEEVDX_M, ZHEEVDX_M
_libmagma.magma_ssyevdx_m.restype = int
_libmagma.magma_ssyevdx_m.argtypes = [c_int_type,
c_int_type,
c_int_type,
c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_float,
ctypes.c_float,
c_int_type,
c_int_type,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p,
c_int_type,
ctypes.c_void_p]
def magma_ssyevdx_m(ngpu, jobz, rnge, uplo, n, A, lda,
vl, vu, il, iu, m,
w, work, lwork, iwork, liwork):
"""
Compute eigenvalues of real symmetric matrix.
Multi-GPU, data on host
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
    status = _libmagma.magma_ssyevdx_m(ngpu, jobz, rnge, uplo, n, int(A), lda,
                                       vl, vu, il, iu, int(m),
                                       int(w), int(work), lwork,
                                       int(iwork), liwork, ctypes.byref(info))
magmaCheckStatus(status)
# SYMMETRIZE
_libmagma.magmablas_ssymmetrize.restype = int
_libmagma.magmablas_ssymmetrize.argtypes = [c_int_type,
c_int_type,
ctypes.c_void_p,
c_int_type]
def magmablas_ssymmetrize(uplo, n, A, lda):
"""
Symmetrize a triangular matrix.
"""
uplo = _uplo_conversion[uplo]
status = _libmagma.magmablas_ssymmetrize(uplo, n, int(A), lda)
magmaCheckStatus(status)
_libmagma.magmablas_dsymmetrize.restype = int
_libmagma.magmablas_dsymmetrize.argtypes = _libmagma.magmablas_ssymmetrize.argtypes
def magmablas_dsymmetrize(uplo, n, A, lda):
"""
Symmetrize a triangular matrix.
"""
uplo = _uplo_conversion[uplo]
status = _libmagma.magmablas_dsymmetrize(uplo, n, int(A), lda)
magmaCheckStatus(status)
_libmagma.magmablas_csymmetrize.restype = int
_libmagma.magmablas_csymmetrize.argtypes = _libmagma.magmablas_ssymmetrize.argtypes
def magmablas_csymmetrize(uplo, n, A, lda):
"""
Symmetrize a triangular matrix.
"""
uplo = _uplo_conversion[uplo]
status = _libmagma.magmablas_csymmetrize(uplo, n, int(A), lda)
magmaCheckStatus(status)
_libmagma.magmablas_zsymmetrize.restype = int
_libmagma.magmablas_zsymmetrize.argtypes = _libmagma.magmablas_ssymmetrize.argtypes
def magmablas_zsymmetrize(uplo, n, A, lda):
"""
Symmetrize a triangular matrix.
"""
uplo = _uplo_conversion[uplo]
status = _libmagma.magmablas_zsymmetrize(uplo, n, int(A), lda)
magmaCheckStatus(status)
|
# Generated by Django 3.2.5 on 2021-08-20 18:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='tGuild_Bank',
fields=[
('id_bank_guild', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('bank_money', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='tGuild_Discord',
fields=[
('id_guild', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('bank_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tguild_bank')),
],
),
migrations.CreateModel(
name='tMember_Bank',
fields=[
('id_bank', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('cash_money', models.IntegerField(null=True)),
('bank_money', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='tMember_Condition',
fields=[
('id_condition', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('condition_value', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='tMember_Discord',
fields=[
('id_discord', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=100)),
('avatar', models.CharField(max_length=255)),
('user_join', models.CharField(max_length=100)),
('user_create', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('bank_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tmember_bank')),
('condition_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tmember_condition')),
('guild_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tguild_discord')),
],
),
migrations.CreateModel(
name='tShop_Discord',
fields=[
('id_shop', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=100)),
('category', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='tShop_Item',
fields=[
('id_item', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=255)),
('category', models.CharField(choices=[('food', 'Food'), ('drink', 'Drink'), ('weapon', 'Weapon'), ('armor', 'Armor'), ('item', 'Item'), ('other', 'Other')], max_length=50)),
('price', models.IntegerField(null=True)),
('value_misc', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='tMember_Inventory',
fields=[
('id_inventory', models.AutoField(primary_key=True, serialize=False, unique=True)),
('item_count', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('item_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tshop_item')),
('member_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tmember_discord')),
],
),
migrations.CreateModel(
name='tGuild_Shop_Item',
fields=[
('id_shop_item', models.AutoField(primary_key=True, serialize=False, unique=True)),
('stock', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('item_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tshop_item')),
('shop_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tshop_discord')),
],
),
migrations.AddField(
model_name='tguild_discord',
name='shop_id',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tshop_discord'),
),
migrations.CreateModel(
name='tGuild_Channel',
fields=[
('id_channel', models.CharField(max_length=100, primary_key=True, serialize=False, unique=True)),
('status', models.IntegerField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('guild_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='kacang.tguild_discord')),
],
),
]
|
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from .models import Question, Essay
from .forms import AnswerForm
import joblib
import numpy as np
import pandas as pd
from .utils.helpers import *
import os
current_path = os.path.abspath(os.path.dirname(__file__))
# Create your views here.
def index(request):
questions_list = Question.objects.order_by('set')
context = {
'questions_list': questions_list,
}
return render(request, 'grader/index.html', context)
def essay(request, question_id, essay_id):
essay = get_object_or_404(Essay, pk=essay_id)
context = {
"essay": essay,
}
return render(request, 'grader/essay.html', context)
def question(request, question_id):
question = get_object_or_404(Question, pk=question_id)
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = AnswerForm(request.POST)
if form.is_valid():
content = form.cleaned_data.get('answer')
if len(content.split()) > 50:
essay_set = question.set
model_path = 'models/model_set' + str(essay_set) + '.pkl'
model = joblib.load(os.path.join(current_path, model_path))
essay_prompt_df = pd.read_pickle(os.path.join(current_path, 'prompt_source_files/essay_prompt_df'))
essay_source_df = pd.read_pickle(os.path.join(current_path, 'prompt_source_files/essay_source_df'))
vectorizer_path = 'vectorizer/tfidf_set' + str(essay_set) + '.pkl'
vectorizer = joblib.load(os.path.join(current_path, vectorizer_path))
content_feature_df = create_data(content,essay_set,essay_prompt_df,essay_source_df,vectorizer)
preds = model.predict(content_feature_df)
                preds = int(np.rint(preds[0]))  # round the single prediction to an integer score
if preds < 0:
preds = 0
if preds > question.max_score:
preds = question.max_score
else:
preds = 0
essay = Essay.objects.create(
content=content,
question=question,
score=preds
)
return redirect('essay', question_id=question.set, essay_id=essay.id)
else:
form = AnswerForm()
context = {
"question": question,
"form": form,
}
return render(request, 'grader/question.html', context)
|
from django.contrib import admin
from .models import *
admin.site.register(Board)
admin.site.register(Player)
admin.site.register(State)
admin.site.register(Piece)
admin.site.register(Message)
|
"""Validation of Yaml configuration files against json schema files.
Typical usage example:
validator = Validator(json_schema_file_object)
validator.validate(yaml_file_object)
"""
import io
import json
import jsonschema
import ruamel.yaml
from ostorlab import exceptions
class Error(exceptions.OstorlabError):
"""Base Exception
"""
class ValidationError(Error):
"""Wrapper Exception for the ValidationError produced by jsonschema's validate method."""
class SchemaError(Error):
"""Wrapper Exception for the SchemaError produced by jsonschema's validate method."""
class Validator:
"""Creates validator that checks yaml files with a json schema."""
def __init__(self, json_schema_file_object: io.FileIO):
"""Inits Validator class.
Args:
json_schema_file_object: A file object of the Json schema file.
Raises:
SchemaError: When the Json schema file in itself is not valid.
"""
self._json_schema = json.load(json_schema_file_object)
try:
jsonschema.Draft202012Validator.check_schema(self._json_schema)
except jsonschema.exceptions.SchemaError as e:
raise SchemaError('Schema is invalid.') from e
def validate(self, yaml_file_object):
""" Validates a yaml file against a json schema .
Args:
yaml_file_object: A file object of the yaml configuration file we want to validate.
Raises:
ValidationError if the validation did not pass well.
SchemaError if the Schema is not valid.
"""
yaml = ruamel.yaml.YAML(typ='safe')
yaml_data = yaml.load(yaml_file_object)
try:
jsonschema.validate(instance=yaml_data, schema=self._json_schema)
except jsonschema.exceptions.ValidationError as e:
            raise ValidationError('Validation did not pass.') from e
except jsonschema.exceptions.SchemaError as e:
raise SchemaError('Schema is invalid.') from e
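# A minimal, self-contained usage sketch of the flow described in the module
# docstring, using in-memory file objects instead of real files. The schema and
# yaml content below are illustrative assumptions, not files shipped with the project.
if __name__ == '__main__':
    schema_file = io.StringIO('{"type": "object", "required": ["name"]}')
    yaml_file = io.StringIO('name: demo-agent\n')
    validator = Validator(schema_file)
    # Raises ValidationError if the yaml data does not conform to the schema.
    validator.validate(yaml_file)
    print('yaml configuration is valid')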
|
import fun
import numpy
#-*- coding: utf-8 -*-
"""
Algorithm description: the ISODATA clustering algorithm.
"""
### Predefined data
#------------------------------------------------
# Define the data set
X = {'X1':(0,0),'X2':(1,1),'X3':(2,2),'X4':(4,3),'X5':(5,3),'X6':(4,4),'X7':(5,4),'X8':(6,5)}
# Expected number of cluster centers
K = 2
# Minimum number of samples in each cluster
Qn = 1
# Standard deviation of the sample distance distribution within a cluster
Qs = 4
# Minimum distance between two cluster centers; centers closer than this are merged
Qc = 4
# Maximum number of cluster-center pairs that may be merged in one iteration
L = 1
# Maximum number of iterations
I = 4
# Initial cluster centers Nc = {Nc1, Nc2}
Nc1, Nc2 = (1,1), (5,4)
Nc = 2
if __name__ == "__main__":
Z = fun.getCluster(X,Nc1,Nc2)
    # Discard a cluster center when some cluster holds fewer than Qn samples.
    if [(1 if len(n) >= Qn else 0) for n in Z].count(0): Nc = Nc - 1
|
#!/usr/bin/env python3
# Copyright 2020 John Alamina
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import numpy as np
from kaldiio import WriteHelper
from scipy.fftpack import fft
import sys
import torch
import json
import kaldiio
from kymatio.numpy import Scattering1D
import pickle
J = 6
Q = 16
if len(sys.argv) != 3:
print("Usage: python json2json.py [target] [outfile]")
sys.exit(1)
in_target = sys.argv[1]
outfile = sys.argv[2]
infile = 'dump/%s/deltafalse/data.json' % in_target
ark = 'data/wavs/%s.ark' % in_target
d = {}
with open("data/%s/wav.scp" % in_target, "r") as f:
for l in f:
ar = l.split(' ')
d[ar[0]] = ' '.join(ar[1:len(ar) - 1])
truncated = {}
with open(infile, "r") as f:
jso = json.load(f)
js_items = list(jso['utts'].items())
for i, utt in enumerate(js_items):
if i % 10 == 0:
print(".", end='', flush=True)
if i % 100 == 0:
print("total processed = %d of %d " % (i, len(js_items)))
key, info = utt
wav = "/home/john/src/python/espnet/egs/an4/asr1s/data" \
"/wavs/%s.wav" % key
sz, mat = kaldiio.load_mat(wav)
wav = wav.replace('.wav', '.mat')
T = mat.shape[-1]
sx = Scattering1D(J, T, Q)
meta = sx.meta()
order1 = np.where(meta['order'] == 1)
Sx = sx(mat)
mat = Sx[order1].transpose()
jso['utts'][key]["input"][0]["shape"] = mat.shape
jso['utts'][key]["input"][0]["feat"] = wav
truncated[key]=jso['utts'][key]
pickle.dump(mat, open(wav, "wb"))
jso['utts'] = truncated
with open(outfile, "w") as f:
json.dump(jso, f)
# key, info = list(jso.items())[10]
#
# # plot the speech feature
# fbank = kaldiio.load_mat(info["input"][0]["feat"])
# plt.matshow(fbank.T[::-1])
# plt.title(key + ": " + info["output"][0]["text"])
#
# # print the key-value pair
# key, info
def pad(a, reference, offset):
"""
    a: Array to be padded
    reference: Reference array with the desired shape
    offset: list of offsets (number of elements must be equal to the dimension of the array)
"""
# Create an array of zeros with the reference shape
result = np.zeros(reference.shape)
# Create a list of slices from offset to offset + shape in each dimension
    # Create a tuple of slices from offset to offset + shape in each dimension
    insertHere = tuple(slice(offset[dim], offset[dim] + a.shape[dim]) for dim in range(a.ndim))
    # Insert the array in the result at the specified offsets
    result[insertHere] = a
return result
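# Illustrative use of pad() (a sketch, not executed as part of this script):
# place a 2x2 block of ones into the top-left corner of a zero array shaped
# like a 4x4 reference.
#   small = np.ones((2, 2))
#   reference = np.zeros((4, 4))
#   padded = pad(small, reference, offset=(0, 0))  # ones end up in padded[0:2, 0:2]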
|
import scrapy
class TmoocSpider(scrapy.Spider):
name = 'tmooc'
allowed_domains = ['tts.tmooc.cn','c.it211.com.cn']
start_urls = ['https://c.it211.com.cn/aid20101106am/aid20101106am.m3u8?_time=1618722481205&sign=87BFA989C7B3E007811E80B4C35C62E4']
def parse(self, response):
pass
|
#coding:utf-8
'''
Creating and running a UDP server
#coding:utf-8
import socket
# Create the socket and bind it to the given ip and port.
# SOCK_DGRAM makes this a UDP socket; binding the port works the same way as for TCP.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('127.0.0.1', 9999))
print('Bind UDP on 9999...')
while True:
    # Send and receive data directly, no connection needed
    data, addr = s.recvfrom(1024)
    print('Received from %s:%s.' % addr)
    s.sendto(b'Hello, %s!' % data, addr)
'''
'''
Creating and running a UDP client
#coding:utf-8
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'Hello', b'World']:
    # Send data:
    s.sendto(data, ('127.0.0.1', 9999))
    # Receive data:
    print(s.recv(1024).decode('utf-8'))
s.close()
'''
|
import panflute as pf
from panflute import ( # pylint: disable=unused-import
Null, HorizontalRule, Space, SoftBreak, LineBreak, Str,
Code, BlockQuote, Note, Div, Plain, Para, Emph, Strong, Strikeout,
Superscript, Subscript, SmallCaps, Span, RawBlock, RawInline, Math,
CodeBlock, Link, Image, BulletList, OrderedList, DefinitionList,
LineBlock, Header, Quoted, Cite, Table, ListContainer, TableCell, Block,
convert_text, Element, run_filter)
from .constants import (HTML, LATEX, EPUB, MD_PANDOC_STYLES_MD,
FIL_ALL, FIL_OTHER, FIL_CHECK, LATEX_FORMATS)
from .utils import make_list, yaml_load, yaml_dump
class PandocStylesFilter():
'''
    Base class for filters. Defines methods that help with writing filters and
    with running them.
'''
def __init__(self, func, filter_types=None, tags=None):
self._add_method(func, "func")
self.filter_types = make_list(filter_types or [])
self.tags = make_list(tags or [])
self._text = None
def run(self):
run_filter(self._pandoc_filter)
def _pandoc_filter(self, elem, doc):
self._init_filter(elem, doc)
if not self.check():
return
self.new_text = self.func() # pylint: disable=assignment-from-none
return self._return_filter()
@property
def text(self):
if self._text:
return self._text
return self.get_text()
@text.setter
def text(self, value):
self._text = value
def _init_filter(self, elem, doc):
self.elem = elem
self.doc = doc
self.cfg = dict()
self._get_format()
self.classes = elem.classes if hasattr(elem, "classes") else None
self.attributes = elem.attributes if hasattr(elem, "attributes") else None
self.identifier = elem.identifier if hasattr(elem, "identifier") else None
self._text = elem.text if hasattr(elem, "text") else None
self.content = elem.content if hasattr(elem, "content") else None
def _return_filter(self):
if self.new_text is None:
return
elif self.new_text == []:
return []
elif isinstance(self.new_text, list):
new = []
for x in self.new_text: # pylint: disable=not-an-iterable
if isinstance(x, str):
x = convert_text(x)
if isinstance(x, ListContainer):
new.extend(x)
elif isinstance(x, list):
new.extend(x)
else:
new.append(x)
return new
return convert_text(self.new_text)
def _get_format(self):
self.fmt = self.doc.format
self.real_fmt = self.fmt
if self.fmt in LATEX_FORMATS:
self.fmt = LATEX
elif self.fmt == EPUB:
self.fmt = HTML
def check(self):
return (not self.filter_types
or any(isinstance(self.elem, x) for x in self.filter_types))\
and (not self.tags or any(x in self.tags for x in self.classes))
def func(self):
return
def _add_method(self, var, name):
if var is not None:
if callable(var):
setattr(self, name, var.__get__(self))
else:
raise TypeError("Only functions are allowed in filter generation!")
def get_metadata(self, key, default=None):
'''Gets metadata'''
return self.doc.get_metadata(key, default)
def get_pandoc_styles_metadata(self):
'''Return the pandoc_styles cfg as a dictionary'''
try:
self.cfg = yaml_load(self.get_metadata(MD_PANDOC_STYLES_MD))
except FileNotFoundError:
self.cfg = {}
return self.cfg
def save_pandoc_styles_metadata(self):
'''Save the given cfg in the cfg-file'''
yaml_dump(self.cfg, self.get_metadata(MD_PANDOC_STYLES_MD))
def stringify(self, elem=None):
'''Stringify an element'''
return stringify(elem or self.elem)
def transform(self, elem_type):
'''Transforms content of this element to elem_type. Return the new Element'''
if isinstance(self.content, ListContainer):
return elem_type(*self.content)
return elem_type(self.content)
def raw_block(self, text):
'''Return a RawBlock pandoc element in self.fmt. Accepts strings, tuples
and lists as arguments.
'''
return raw(self.fmt, text)
def raw_inline(self, text):
'''Return a RawInline pandoc element in self.fmt. Accepts strings, tuples
and lists as arguments.
'''
return raw(self.fmt, text, element_type=RawInline)
def convert_text(self, text=None, input_fmt='markdown', output_fmt='panflute',
extra_args=None):
'''Wrapper for panflutes convert_text'''
text = text or self.text
return convert_text(text, input_fmt, output_fmt, False, extra_args)
def get_text(self, elem=None, output_fmt='markdown', extra_args=None):
"""
Converts the content of the given Element to the output format. Use instead
of stringify to retain inline formatting.
"""
elem = self if elem is None else elem
if isinstance(elem, ListContainer):
elem = Plain(*elem)
else:
elem = getattr(elem, 'content')
return convert_text(elem, 'panflute', output_fmt, False, extra_args)
def run_pandoc_styles_filter(func, filter_types=None, tags=None):
"""
Run a filter with the given func. The function is now a method to a filter object
and you can access its contents through self.
Your filter can return:
> None: do nothing
> string: convert the string from markdown to panflute elements
> list: The list can contain Panflute Elements or strings. Strings are converted
like above.
"""
PandocStylesFilter(func, filter_types, tags).run()
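# Usage sketch (an illustrative assumption, not part of this module): a filter
# script run by pandoc that prefixes every div tagged with the class "note".
# The function becomes a method of the filter object, so element data is read
# through self, and the returned markdown string replaces the original div.
#   def note(self):
#       return f"**Note:** {self.get_text()}"
#   run_pandoc_styles_filter(note, filter_types=[Div], tags=["note"])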
class TransformFilter(PandocStylesFilter):
    '''
    Filter that dispatches to format-specific methods (latex, html, ...) supplied
    as keyword arguments and wraps their string results in raw blocks of the
    output format.
    '''
# pylint: disable=super-init-not-called
def __init__(self, tags=None, all_formats=None, other=None, filter_types=None,
check=None, **kwargs):
self.tags = make_list(tags or [])
self.filter_types = filter_types if filter_types is not None else [Div]
self.filter_types = make_list(self.filter_types)
self._add_method(all_formats, FIL_ALL)
self._add_method(other, FIL_OTHER)
self._add_method(check, FIL_CHECK)
self.funcs = kwargs
def _pandoc_filter(self, elem, doc):
self._init_filter(elem, doc)
if not self.check():
return
for key, func in self.funcs.items():
self._add_method(func, key)
self.all_formats()
self._call_filter()
return self._return_filter()
def _call_filter(self):
try:
self.new_text = getattr(self, self.fmt)()
except AttributeError:
# pylint: disable=assignment-from-none
self.new_text = self.other()
def _return_filter(self):
if self.new_text is None:
return
elif self.new_text == []:
return []
elif isinstance(self.new_text, list):
new = []
for x in self.new_text:
if isinstance(x, str):
x = raw(self.fmt, x)
if isinstance(x, ListContainer):
new.extend(x)
elif isinstance(x, list):
new.extend(x)
else:
new.append(x)
return new
return self.raw_block(self.new_text)
def all_formats(self):
return
def other(self):
return
def _add_method(self, var, name):
if var is not None:
if isinstance(var, str):
setattr(self, name, lambda: var.format(text=self.convert_to_fmt()))
elif isinstance(var, list):
setattr(self, name, lambda: [self.content if x == "text" else x
for x in var])
elif callable(var):
setattr(self, name, var.__get__(self))
else:
raise TypeError("Only strings and functions are allowed in filter generation!")
def convert_to_fmt(self, text=None, input_fmt='markdown', extra_args=None):
'''Converts text in input_fmt to self.fmt'''
text = text or self.text
return convert_text(text, input_fmt, self.fmt, False, extra_args)
def get_text(self, elem=None, output_fmt=None, extra_args=None):
"""
Converts the content of the given Element to the format. Use instead
of stringify to retain inline formatting.
"""
elem = self if elem is None else elem
if isinstance(elem, ListContainer):
elem = Plain(*elem)
else:
elem = getattr(elem, 'content')
return convert_text(elem, 'panflute', output_fmt or self.fmt, False, extra_args)
def run_transform_filter(tags=None, all_formats=None, other=None, filter_types=None,
check=None, **kwargs):
'''
Creates and runs a pandoc filter.
tags: The default check method checks, if these tags are in the classes of
the element the filter searches for. If it is [], check only checks for
the element type
kwargs: The name of the format and a value of: a function, string or a list.
Frequently used formats: latex (for latex and pdf), html (for html and epubs),
markdown.
    > Function: These functions are registered as a method and are executed,
        if the format of the output matches the name. These methods have to
        either return a string/list of strings or a pandoc element/list of elements.
    > String: The string is returned as the output. The string can contain
        the formatting placeholder {text}, which gets replaced by the converted text
        of the element.
> List: The list is returned as below. You can insert the string "text" inside the
list. It is replaced with the content of the element.
all_formats: This method is executed before the format specific methods
and is used to execute shared code.
filter_types: If the filter searches for an element other than a div. Can be
a list of types
check: Replace the default check method with your own.
Your filter can return:
> None: do nothing
> string: convert the string to a rawblock in the current format or
from markdown to panflute elements if the format doesn't support rawblocks
> list: The list can contain Panflute Elements or strings. Strings are converted
like above.
'''
pandoc_filter = TransformFilter(tags, all_formats, other, filter_types, check,
**kwargs)
pandoc_filter.run()
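# Usage sketch (illustrative assumption): render a div tagged "box" as a centered
# block in latex output and as a styled <div> in html output. Literal braces in
# the latex string are doubled so that only {text} acts as the placeholder.
#   run_transform_filter(
#       tags=["box"],
#       latex="\\begin{{center}}\n{text}\n\\end{{center}}",
#       html='<div class="box">{text}</div>',
#   )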
def is_pandoc_element(ele):
if isinstance(ele, Element):
return True
elif isinstance(ele, (list, tuple)):
return is_pandoc_element(ele[0])
return False
def raw(fmt, text, element_type=RawBlock):
'''Return a Raw pandoc element in the given format.'''
if fmt not in ['tex', 'latex', 'html', 'context']:
return convert_text(text)
return element_type(text, fmt)
def stringify(elem, newlines=True):
"""
Return the raw text version of an element (and its children elements).
Example:
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
"""
if isinstance(elem, ListContainer):
elem = Plain(*elem)
return pf.stringify(elem, newlines)
def strip_html_tag(text, tag="p"):
text = text.replace(f"<{tag}>", "")
return text.replace(f"</{tag}>", "")
|
from lxml import etree
from jinja2 import Environment, FileSystemLoader
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time
def fetch_object(url):
"""
Fetch single BrAPI object by path
"""
print(' GET ' + url)
session = requests.Session()
retry = Retry(connect=3, backoff_factor=15)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
r = session.get(url)
# Covering internal server errors by retrying one more time
if r.status_code == 500:
time.sleep(5)
r = requests.get(url, allow_redirects=True)
elif r.status_code != 200:
print(f"Problem with request: {str(r)}")
raise RuntimeError("Non-200 status code")
return r
def fetching_checklists():
# Gathering all checklist ID's
session = requests.Session()
session.trust_env = False
response = fetch_object('https://www.ebi.ac.uk/ena/browser/api/summary/ERC000001-ERC999999')
return response.json()['summaries']
def main():
for response_object in fetching_checklists():
checklist = response_object['accession']
print(f"Parsing {checklist}")
# Getting the xml checklist from ENA
url = f"https://www.ebi.ac.uk/ena/browser/api/xml/{checklist}?download=true"
response = fetch_object(url)
# Dictionary that will contain all attributes needed
xml_tree = {}
# Loading templates directory
file_loader = FileSystemLoader('ena_upload/templates/jinja_templates')
env = Environment(loader=file_loader)
# Parsing XML
root = etree.fromstring(response.content)
# Looping over all fields and storing their name and cardinality
for attribute in root.iter('FIELD'):
name = ''
cardinality = ''
for sub_attr in attribute:
if sub_attr.tag == 'NAME':
name = sub_attr.text
elif sub_attr.tag == 'MANDATORY':
cardinality = sub_attr.text
xml_tree[name] = cardinality
# Loading the xml jinja2 template for samples
t = env.get_template('ENA_template_samples.xml')
# Render template with values from the ENA xml
output_from_parsed_template = t.render(attributes=xml_tree)
# Saving new xml template file
with open(f"ena_upload/templates/ENA_template_samples_{checklist}.xml", "w") as fh:
fh.write(output_from_parsed_template)
if __name__ == "__main__":
main()
|
# -------------------------------------------
# import
# -------------------------------------------
import os
import sys
import re
import codecs
import random
from PIL import Image
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from list_util import *
import transforms
from transforms import label_to_img
# -------------------------------------------
# defines
# -------------------------------------------
CUR_PATH = os.path.join(os.path.dirname(__file__))
# -------------------------------------------
# private functions
# -------------------------------------------
# -------------------------------------------
# public functions
# -------------------------------------------
class Dataset:
def __init__(self, classes, input_size, img_dir, label_dir=None, trans=False):
self.classes = classes
self.input_size = input_size # WH
self.img_paths = list_from_dir(img_dir, ('.jpg', '.png'))
if label_dir:
self.label_paths = list_from_dir(label_dir, ('.jpg', '.png'))
else:
self.label_paths = None
if trans:
self.transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomCrop(self.input_size)
])
else:
self.transforms = transforms.Compose([
transforms.CenterCrop(self.input_size)
])
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
img_pil = Image.open(self.img_paths[idx])
seed = random.randint(0, 2**32)
img_pil = self.transforms(img_pil, seed=seed)
img = self.format_img(img_pil)
if self.label_paths:
label_pil = Image.open(self.label_paths[idx])
label_pil = self.transforms(label_pil, seed=seed)
label = self.to_label(label_pil)
else:
label = None
return img, label
def format_img(self, img_pil):
img_array = np.asarray(img_pil, dtype='float32')
# img_array = img_array / 255 # for vgg16
img_array = img_array / 127.5 - 1 # for vgg16
img_array = np.expand_dims(img_array, axis=0)
# img_array = preprocess_input(img_array, mode='tf') # for vgg16
return img_array
def to_one_hot(self, label_array):
w, h = self.input_size
x = np.zeros((h, w, self.classes))
for i in range(h):
for j in range(w):
x[i, j, label_array[i][j]] = 1
return x
def to_label(self, label_pil):
        label_array = np.array(label_pil, dtype=np.int32)  # copy so the array is writable
        label_array[label_array == 255] = 0  # map boundary pixels to the background class
label_array = self.to_one_hot(label_array)
label_array = np.expand_dims(label_array, axis=0)
return label_array
class DataLoader:
def __init__(self, dataset, batch_size=1, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.x_data_list = []
self.y_data_list = []
def __len__(self):
return len(self.dataset)
def flow(self):
while True:
data_num = len(self.dataset)
indices = np.arange(data_num)
if self.shuffle:
np.random.shuffle(indices)
for i in indices:
img, label = self.dataset[i]
self.x_data_list.append(img[0])
self.y_data_list.append(label[0])
if self.batch_size <= len(self.x_data_list):
x_data_list = np.asarray(
self.x_data_list, dtype='float32')
y_data_list = np.asarray(
self.y_data_list, dtype='float32')
self.x_data_list = []
self.y_data_list = []
yield x_data_list, y_data_list
# -------------------------------------------
# main
# -------------------------------------------
if __name__ == '__main__':
print("start")
train_img_dir = os.path.join(CUR_PATH, "data", "train", "img")
train_gt_dir = os.path.join(CUR_PATH, "data", "train", "gt")
#train_img_paths = list_from_dir(train_img_dir, ('.jpg', '.png'))
#train_gt_paths = list_from_dir(train_gt_dir, ('.jpg', '.png'))
dataset = Dataset(classes=21, input_size=(224, 224),
img_dir=train_img_dir, label_dir=train_gt_dir,
trans=True)
cnt = 0
loader = DataLoader(dataset, batch_size=4, shuffle=True)
for train, target in loader.flow():
print(train.shape, train[0].dtype)
print(target.shape, target[0].dtype)
imgs = train
labels = target
img = (imgs[0] + 1) * 127.5
img = img.astype(np.uint8)
plt.figure()
plt.imshow(img)
# label = labels[0, :, :, 0] # background class
label = label_to_img(labels[0])
plt.figure()
plt.imshow(label)
plt.show()
cnt += 1
if cnt >= 4:
break
print("end")
|
import os
import unittest
from unittest import mock
from core.e2e.validators import masterfile_validator
from core.e2e.handlers.exceptions import MasterFileError
class TestMasterFileValidator(unittest.TestCase):
@mock.patch('os.path')
def test_check_master_file_path(self, mock_os):
mock_os.exists.return_value = False
with self.assertRaises(Exception) as context:
masterfile_validator._validate_check_master_file_path()
self.assertEqual(
context.exception.args[0],
'Master File YML not found (src/views/master.yml)'
)
@mock.patch('yaml.safe_load')
def test_validate_master_file_format(self, safe_load):
safe_load.side_effect = Exception
with self.assertRaises(Exception) as context:
masterfile_validator._validate_master_file_format()
self.assertEqual(
context.exception.args[0],
'Invalid master file format.'
)
@mock.patch('yaml.safe_load')
def test_validate_master_file_config(self, safe_load):
safe_load.return_value = {}
with self.assertRaises(Exception) as context:
masterfile_validator._validate_master_file_config_values()
self.assertEqual(
context.exception.args[0],
'Invalid master file format. Missing key botname'
)
@mock.patch('yaml.safe_load')
def test_validate_master_file_states(self, safe_load):
safe_load.return_value = {
'botname' : 'testBotName',
'description' : 'testDescription',
'init_state' : 'main',
'states' : {
'welcome' : {}
}
}
with self.assertRaises(Exception) as context:
masterfile_validator._validate_master_file_config_values()
self.assertEqual(
context.exception.args[0],
"Invalid master file format. Invalid main in init_state."
)
safe_load.return_value = {
'botname' : 'testBotName',
'description' : 'testDescription',
'init_state' : 'welcome',
'states' : {
'welcome' : {}
}
}
with self.assertRaises(Exception) as context:
masterfile_validator._validate_master_file_config_values()
self.assertEqual(
context.exception.args[0],
"Invalid master file format. Next state missing in 'welcome' state."
)
safe_load.return_value = {
'botname' : 'testBotName',
'description' : 'testDescription',
'init_state' : 'welcome',
'states' : {
'welcome' : {
'next' : 'main'
}
}
}
with self.assertRaises(Exception) as context:
masterfile_validator._validate_master_file_config_values()
self.assertEqual(
context.exception.args[0],
"Invalid master file format. Value 'main' in state welcome."
)
|
# -*- encoding:utf-8 -*-
from commands import SelectorMixin, Commands
from ui import component_ui, component_commands
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import random
import re
import socket
import time
import requests
from kuryr.lib._i18n import _
from openstack import exceptions as os_exc
from os_vif import objects
from oslo_cache import core as cache
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes import os_vif_util
CONF = cfg.CONF
LOG = log.getLogger(__name__)
VALID_MULTI_POD_POOLS_OPTS = {'noop': ['neutron-vif',
'nested-vlan',
'nested-macvlan',
'sriov',
'nested-dpdk'],
'neutron': ['neutron-vif'],
'nested': ['nested-vlan'],
}
DEFAULT_TIMEOUT = 500
DEFAULT_INTERVAL = 1
DEFAULT_JITTER = 3
MAX_BACKOFF = 60
MAX_ATTEMPTS = 10
subnet_caching_opts = [
cfg.BoolOpt('caching', default=True,
help=_('Enable caching of subnets.')),
cfg.IntOpt('cache_time', default=3600,
help=_('TTL, in seconds, for cached subnets')),
]
nodes_caching_opts = [
cfg.BoolOpt('caching', default=True,
help=_('Enable caching of nodes.')),
cfg.IntOpt('cache_time', default=3600,
help=_('TTL, in seconds, for cached nodes')),
]
CONF.register_opts(subnet_caching_opts, "subnet_caching")
CONF.register_opts(nodes_caching_opts, "nodes_caching")
cache.configure(CONF)
subnet_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(
CONF, subnet_cache_region, "subnet_caching")
cache.configure_cache_region(CONF, subnet_cache_region)
nodes_cache_region = cache.create_region()
MEMOIZE_NODE = cache.get_memoization_decorator(
CONF, nodes_cache_region, "nodes_caching")
cache.configure_cache_region(CONF, nodes_cache_region)
RESOURCE_MAP = {'Endpoints': 'endpoints',
'KuryrLoadBalancer': 'kuryrloadbalancers',
'KuryrNet': 'kuryrnets',
'KuryrNetPolicy': 'kuryrnetpolicies',
'KuryrNetwork': 'kuryrnetworks',
'KuryrNetworkPolicy': 'kuryrnetworkpolicies',
'KuryrPort': 'kuryrports',
'Namespace': 'namespaces',
'NetworkPolicy': 'networkpolicies',
'Node': 'nodes',
'Pod': 'pods',
'Service': 'services',
'Machine': 'machines'}
API_RE = re.compile(r'v\d+')
def get_klb_crd_path(obj):
"""Return klb crd path from provided resource"""
namespace = obj['metadata']['namespace']
lb_name = obj['metadata']['name']
return (f"{constants.K8S_API_CRD_NAMESPACES}/"
f"{namespace}/"
f"kuryrloadbalancers/"
f"{lb_name}")
def get_res_link(obj):
"""Return selfLink equivalent for provided resource"""
# First try, if we still have it
try:
return obj['metadata']['selfLink']
except KeyError:
pass
# If not, let's proceed with the path assembling.
try:
res_type = RESOURCE_MAP[obj['kind']]
except KeyError:
LOG.error('Unknown resource kind: %s', obj.get('kind'))
raise
namespace = ''
if obj['metadata'].get('namespace'):
namespace = f"/namespaces/{obj['metadata']['namespace']}"
try:
api = f"/apis/{obj['apiVersion']}"
if API_RE.match(obj['apiVersion']):
api = f"/api/{obj['apiVersion']}"
except KeyError:
LOG.error("Object doesn't have an apiVersion available: %s", obj)
raise
return f"{api}{namespace}/{res_type}/{obj['metadata']['name']}"
def get_api_ver(path):
"""Get apiVersion out of resource path.
Path usually is something simillar to:
/api/v1/namespaces/default/pods/pod-5bb648d658-55n76
in case of core resources, and:
/apis/openstack.org/v1/namespaces/default/kuryrloadbalancers/lb-324
in case of custom resoures.
"""
if path.startswith('/api/'):
return path.split('/')[2]
if path.startswith('/apis/'):
return '/'.join(path.split('/')[2:4])
raise ValueError('Provided path is not Kubernetes api path: %s', path)
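# Illustrative examples of the mapping (not executed here):
#   get_api_ver('/api/v1/namespaces/default/pods/pod-x')  -> 'v1'
#   get_api_ver('/apis/openstack.org/v1/namespaces/ns/kuryrloadbalancers/lb-1')  -> 'openstack.org/v1'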
def utf8_json_decoder(byte_data):
"""Deserializes the bytes into UTF-8 encoded JSON.
:param byte_data: The bytes to be converted into the UTF-8 encoded JSON.
:returns: The UTF-8 encoded JSON represented by Python dictionary format.
"""
return jsonutils.loads(byte_data.decode('utf8'))
def convert_netns(netns):
"""Convert /proc based netns path to Docker-friendly path.
When CONF.docker_mode is set this method will change /proc to
/CONF.netns_proc_dir. This allows netns manipulations to work when running
in Docker container on Kubernetes host.
:param netns: netns path to convert.
:return: Converted netns path.
"""
if CONF.cni_daemon.docker_mode:
return netns.replace('/proc', CONF.cni_daemon.netns_proc_dir)
else:
return netns
def get_res_unique_name(resource):
"""Returns a unique name for the resource like pod or CRD.
It returns a unique name for the resource composed of its name and the
namespace it is created in or just name for cluster-scoped resources.
:returns: String with <namespace/>name of the resource
"""
try:
return "%(namespace)s/%(name)s" % resource['metadata']
except KeyError:
return "%(name)s" % resource['metadata']
def check_suitable_multi_pool_driver_opt(pool_driver, pod_driver):
return pod_driver in VALID_MULTI_POD_POOLS_OPTS.get(pool_driver, [])
def exponential_sleep(deadline, attempt, interval=DEFAULT_INTERVAL,
max_backoff=MAX_BACKOFF, jitter=DEFAULT_JITTER):
"""Sleep for exponential duration.
:param deadline: sleep timeout duration in seconds.
:param attempt: attempt count of sleep function.
:param interval: minimal time interval to sleep
:param max_backoff: maximum time to sleep
:param jitter: max value of jitter added to the sleep time
:return: the actual time that we've slept
"""
now = time.time()
seconds_left = deadline - now
if seconds_left <= 0:
return 0
to_sleep = exponential_backoff(attempt, interval, max_backoff=max_backoff,
jitter=jitter)
if to_sleep > seconds_left:
to_sleep = seconds_left
if to_sleep < interval:
to_sleep = interval
time.sleep(to_sleep)
return to_sleep
def exponential_backoff(attempt, interval=DEFAULT_INTERVAL,
max_backoff=MAX_BACKOFF, jitter=DEFAULT_JITTER):
"""Return exponential backoff duration with jitter.
This implements a variation of exponential backoff algorithm [1] (expected
backoff E(c) = interval * 2 ** attempt / 2).
[1] https://en.wikipedia.org/wiki/Exponential_backoff
"""
if attempt >= MAX_ATTEMPTS:
# No need to calculate very long intervals
attempt = MAX_ATTEMPTS
backoff = 2 ** attempt * interval
if max_backoff is not None and backoff > max_backoff:
backoff = max_backoff
if jitter:
backoff += random.randint(0, jitter)
return backoff
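# Sketch of the retry pattern these two helpers support (illustrative only;
# do_work() is a hypothetical operation, not part of this module): keep retrying
# until the deadline, sleeping with exponential backoff plus jitter in between.
#   deadline = time.time() + DEFAULT_TIMEOUT
#   for attempt in range(MAX_ATTEMPTS):
#       if do_work():
#           break
#       if not exponential_sleep(deadline, attempt):
#           break  # deadline exceeded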
def get_node_name():
# leader-elector container based on K8s way of doing leader election is
# assuming that hostname it sees is the node id. Containers within a pod
# are sharing the hostname, so this will match what leader-elector returns.
return socket.gethostname()
def get_leader_name():
url = 'http://localhost:%d' % CONF.kubernetes.controller_ha_elector_port
try:
return requests.get(url).json()['name']
except Exception:
LOG.exception('Error when fetching current leader pod name.')
# NOTE(dulek): Assuming there's no leader when we can't contact leader
# elector container.
return None
@MEMOIZE_NODE
def get_nodes_ips(node_subnets):
"""Get the IPs of the trunk ports associated to the deployment."""
trunk_ips = []
os_net = clients.get_network_client()
tags = CONF.neutron_defaults.resource_tags
if tags:
ports = os_net.ports(status='ACTIVE', tags=tags)
else:
            # NOTE(ltomasbo): if tags are not used, assume all the trunk ports are
            # part of the kuryr deployment
ports = os_net.ports(status='ACTIVE')
for port in ports:
if (port.trunk_details and port.fixed_ips and
port.fixed_ips[0]['subnet_id'] in node_subnets):
trunk_ips.append(port.fixed_ips[0]['ip_address'])
return trunk_ips
@MEMOIZE
def get_subnet(subnet_id):
os_net = clients.get_network_client()
n_subnet = os_net.get_subnet(subnet_id)
n_network = os_net.get_network(n_subnet.network_id)
subnet = os_vif_util.neutron_to_osvif_subnet(n_subnet)
network = os_vif_util.neutron_to_osvif_network(n_network)
network.subnets.objects.append(subnet)
return network
@MEMOIZE
def get_subnet_cidr(subnet_id):
os_net = clients.get_network_client()
try:
subnet_obj = os_net.get_subnet(subnet_id)
except os_exc.ResourceNotFound:
LOG.exception("Subnet %s CIDR not found!", subnet_id)
raise
return subnet_obj.cidr
def get_subnet_id(**filters):
os_net = clients.get_network_client()
subnets = os_net.subnets(**filters)
try:
return next(subnets).id
except StopIteration:
return None
@MEMOIZE
def get_subnets_id_cidrs(subnet_ids):
os_net = clients.get_network_client()
subnets = os_net.subnets()
cidrs = [(subnet.id, subnet.cidr) for subnet in subnets
if subnet.id in subnet_ids]
if len(cidrs) != len(subnet_ids):
existing = {subnet.id for subnet in subnets}
missing = set(subnet_ids) - existing
LOG.exception("CIDRs of subnets %s not found!", missing)
raise os_exc.ResourceNotFound()
return cidrs
def get_subnets_cidrs(subnet_ids):
return [x[1] for x in get_subnets_id_cidrs(subnet_ids)]
@MEMOIZE
def _get_subnetpool(subnetpool_id):
os_net = clients.get_network_client()
try:
subnetpool_obj = os_net.get_subnet_pool(subnetpool_id)
except os_exc.ResourceNotFound:
LOG.exception("Subnetpool %s not found!", subnetpool_id)
raise
return subnetpool_obj
def get_subnetpool_version(subnetpool_id):
subnetpool_obj = _get_subnetpool(subnetpool_id)
return subnetpool_obj.ip_version
def get_subnetpool_cidrs(subnetpool_id):
subnetpool_obj = _get_subnetpool(subnetpool_id)
return subnetpool_obj.prefixes
def extract_pod_annotation(annotation):
obj = objects.base.VersionedObject.obj_from_primitive(annotation)
# FIXME(dulek): This is code to maintain compatibility with Queens. We can
# remove it once we stop supporting upgrading from Queens,
# most likely in Stein. Note that this requires being sure
# that *all* the pod annotations are in new format.
if obj.obj_name() != vif.PodState.obj_name():
# This is old format of annotations - single VIF object. We need to
# pack it in PodState object.
obj = vif.PodState(default_vif=obj)
return obj
def has_limit(quota):
NO_LIMIT = -1
return quota['limit'] != NO_LIMIT
def is_available(resource, resource_quota):
availability = resource_quota['limit'] - resource_quota['used']
if availability <= 0:
LOG.error("Neutron quota exceeded for %s. Used %d out of %d limit.",
resource, resource_quota['used'], resource_quota['limit'])
return False
elif availability <= 3:
LOG.warning("Neutron quota low for %s. Used %d out of %d limit.",
resource, resource_quota['used'], resource_quota['limit'])
return True
def has_kuryr_crd(crd_url):
k8s = clients.get_kubernetes_client()
try:
k8s.get(crd_url, json=False, headers={'Connection': 'close'})
    except exceptions.K8sResourceNotFound:
        LOG.error('CRD %s does not exist.', crd_url)
        return False
except exceptions.K8sClientException:
LOG.exception('Error fetching CRD %s, assuming it does not exist.',
crd_url)
return False
return True
def get_lbaas_spec(k8s_object):
# k8s_object can be service or endpoint
try:
annotations = k8s_object['metadata']['annotations']
annotation = annotations[constants.K8S_ANNOTATION_LBAAS_SPEC]
except KeyError:
return None
obj_dict = jsonutils.loads(annotation)
obj = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(obj_dict)
LOG.debug("Got LBaaSServiceSpec from annotation: %r", obj)
return obj
def set_lbaas_spec(service, lbaas_spec):
# TODO(ivc): extract annotation interactions
if lbaas_spec is None:
LOG.debug("Removing LBaaSServiceSpec annotation: %r", lbaas_spec)
annotation = None
else:
lbaas_spec.obj_reset_changes(recursive=True)
LOG.debug("Setting LBaaSServiceSpec annotation: %r", lbaas_spec)
annotation = jsonutils.dumps(lbaas_spec.obj_to_primitive(),
sort_keys=True)
svc_link = get_res_link(service)
ep_link = get_endpoints_link(service)
k8s = clients.get_kubernetes_client()
try:
k8s.annotate(ep_link,
{constants.K8S_ANNOTATION_LBAAS_SPEC: annotation})
except exceptions.K8sResourceNotFound as ex:
LOG.debug("Failed to annotate svc: %s", ex)
raise exceptions.ResourceNotReady(ep_link)
except exceptions.K8sClientException:
LOG.debug("Failed to annotate endpoint %r", ep_link)
raise
try:
k8s.annotate(svc_link,
{constants.K8S_ANNOTATION_LBAAS_SPEC: annotation},
resource_version=service['metadata']['resourceVersion'])
except exceptions.K8sResourceNotFound as ex:
LOG.debug("Failed to annotate svc: %s", ex)
raise exceptions.ResourceNotReady(svc_link)
except exceptions.K8sClientException:
LOG.exception("Failed to annotate svc: %r", svc_link)
raise
def get_lbaas_state(endpoint):
try:
annotations = endpoint['metadata']['annotations']
annotation = annotations[constants.K8S_ANNOTATION_LBAAS_STATE]
except KeyError:
return None
obj_dict = jsonutils.loads(annotation)
obj = obj_lbaas.LBaaSState.obj_from_primitive(obj_dict)
LOG.debug("Got LBaaSState from annotation: %r", obj)
return obj
def set_lbaas_state(endpoints, lbaas_state):
# TODO(ivc): extract annotation interactions
if lbaas_state is None:
LOG.debug("Removing LBaaSState annotation: %r", lbaas_state)
annotation = None
else:
lbaas_state.obj_reset_changes(recursive=True)
LOG.debug("Setting LBaaSState annotation: %r", lbaas_state)
annotation = jsonutils.dumps(lbaas_state.obj_to_primitive(),
sort_keys=True)
k8s = clients.get_kubernetes_client()
k8s.annotate(get_res_link(endpoints),
{constants.K8S_ANNOTATION_LBAAS_STATE: annotation},
resource_version=endpoints['metadata']['resourceVersion'])
def get_endpoints_link(service):
svc_link = get_res_link(service)
link_parts = svc_link.split('/')
if link_parts[-2] != 'services':
raise exceptions.IntegrityError(
f"Unsupported service link: {svc_link}")
link_parts[-2] = 'endpoints'
return "/".join(link_parts)
def get_service_link(endpoints):
endpoints_link = get_res_link(endpoints)
link_parts = endpoints_link.split('/')
if link_parts[-2] != 'endpoints':
raise exceptions.IntegrityError(
f"Unsupported endpoints link: {endpoints_link}")
link_parts[-2] = 'services'
return "/".join(link_parts)
def has_port_changes(service, loadbalancer_crd):
if not loadbalancer_crd:
return False
link = get_res_link(service)
svc_port_set = service['spec'].get('ports')
for port in svc_port_set:
port['targetPort'] = str(port['targetPort'])
spec_port_set = loadbalancer_crd['spec'].get('ports', [])
if spec_port_set:
if len(svc_port_set) != len(spec_port_set):
return True
pairs = zip(svc_port_set, spec_port_set)
diff = any(x != y for x, y in pairs)
if diff:
LOG.debug("LBaaS spec ports %(spec_ports)s != %(svc_ports)s "
"for %(link)s" % {'spec_ports': spec_port_set,
'svc_ports': svc_port_set,
'link': link})
return diff
return False
def get_service_ports(service):
return [{'name': port.get('name'),
'protocol': port.get('protocol', 'TCP'),
'port': port['port'],
'targetPort': str(port['targetPort'])}
for port in service['spec']['ports']]
@MEMOIZE
def get_service_subnet_version():
os_net = clients.get_network_client()
svc_subnet_id = CONF.neutron_defaults.service_subnet
try:
svc_subnet = os_net.get_subnet(svc_subnet_id)
except os_exc.ResourceNotFound:
LOG.exception("Service subnet %s not found", svc_subnet_id)
raise
return svc_subnet.ip_version
def clean_lb_crd_status(loadbalancer_name):
namespace, name = loadbalancer_name.split('/')
k8s = clients.get_kubernetes_client()
try:
k8s.patch_crd('status', f'{constants.K8S_API_CRD_NAMESPACES}'
f'/{namespace}/kuryrloadbalancers/{name}', {})
except exceptions.K8sResourceNotFound:
LOG.debug('KuryrLoadbalancer CRD not found %s',
name)
except exceptions.K8sClientException:
LOG.exception('Error updating KuryrLoadbalancer CRD %s',
name)
raise
def get_pod_by_ip(pod_ip, namespace=None):
k8s = clients.get_kubernetes_client()
pod = {}
try:
if namespace:
pods = k8s.get(f'{constants.K8S_API_BASE}/namespaces/{namespace}/'
f'pods?fieldSelector=status.phase=Running,'
f'status.podIP={pod_ip}')
else:
pods = k8s.get(f'{constants.K8S_API_BASE}/'
f'pods?fieldSelector=status.phase=Running,'
f'status.podIP={pod_ip}')
except exceptions.K8sClientException:
LOG.exception('Error retrieving Pod with IP %s', pod_ip)
raise
if pods.get('items'):
# Only one Pod should have the IP
return pods['items'][0]
return pod
def get_current_endpoints_target(ep, port, spec_ports, ep_name):
"""Retrieve details about one specific Endpoint target
Defines the details about the Endpoint target, such as the
target address, name, port value and the Pool ID. In case,
the Endpoints has no targetRef defined, the name of the
target will be the same as the Endpoint.
:param ep: Endpoint on the Endpoints object
:param port: Endpoint port
:param spec_ports: dict of port name associated to pool ID
:param ep_name: Name of the Endpoints object
:returns: Tuple with target address, target name, port number
and pool ID.
"""
target_ref = ep.get('targetRef', {})
pod_name = ep_name
# NOTE(maysams): As we don't support dual-stack, we assume
# only one address is possible on the addresses field.
address = ep['addresses'][0]
if target_ref:
pod_name = target_ref.get('name', '')
return (address, pod_name, port['port'],
spec_ports.get(port.get('name')))
def get_subnet_by_ip(nodes_subnets, target_ip):
ip = ipaddress.ip_address(target_ip)
for nodes_subnet in nodes_subnets:
if ip in ipaddress.ip_network(nodes_subnet[1]):
return nodes_subnet
return None
def get_kuryrloadbalancer(name, namespace):
k8s = clients.get_kubernetes_client()
try:
return k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/'
f'{namespace}/kuryrloadbalancers/'
f'{name}')
except exceptions.K8sResourceNotFound:
return {}
def is_pod_completed(pod):
try:
return (pod['status']['phase'] in
(constants.K8S_POD_STATUS_SUCCEEDED,
constants.K8S_POD_STATUS_FAILED))
except KeyError:
return False
def is_host_network(pod):
return pod['spec'].get('hostNetwork', False)
|
# -*- coding: utf-8 -*-
"""
@author: Zheng Fang
This is a unit test. If you would like to further develop pahmc_ode_gpu, you
should visit here frequently.
"""
import os
from pathlib import Path
from numba import cuda, jit
import numpy as np
import torch as th
os.chdir(Path.cwd().parent)
from pahmc_ode_gpu import cuda_lib_dynamics
os.chdir(Path.cwd()/'unit_tests')
"""Prepare data, as well as variables to be compared to."""
name = 'nakl'
D = 4
M = 100000
X = np.concatenate((np.random.uniform(-100.0, 50.0, (1,M)),
np.random.uniform(0.0, 1.0, (D-1,M))))
par = np.array([120.0, 50.0, 20.0, -77.0, 0.3, -54.4, -40.0, 15,
0.1, 0.4, -60.0, -15, 1.0, 7.0, -55.0, 30, 1.0, 5.0])
stimulus \
= np.concatenate((np.random.uniform(-30, 30, (1,M)), np.zeros((D-1,M))))
# this function has been tested in pahmc_ode_cpu
@jit(nopython=True)
def cpu_field(X, par, stimulus):
(D, M) = np.shape(X)
vecfield = np.zeros((D,M))
vecfield[0, :] \
= stimulus[0, :] \
+ par[0] * (X[1, :] ** 3) * X[2, :] * (par[1] - X[0, :]) \
+ par[2] * (X[3, :] ** 4) * (par[3] - X[0, :]) \
+ par[4] * (par[5] - X[0, :])
tanh_m = np.tanh((X[0, :]-par[6])/par[7])
eta_m = 1 / 2 * (1 + tanh_m)
tau_m = par[8] + par[9] * (1 - tanh_m * tanh_m)
vecfield[1, :] = (eta_m - X[1, :]) / tau_m
tanh_h = np.tanh((X[0, :]-par[10])/par[11])
eta_h = 1 / 2 * (1 + tanh_h)
tau_h = par[12] + par[13] * (1 - tanh_h * tanh_h)
vecfield[2, :] = (eta_h - X[2, :]) / tau_h
tanh_n = np.tanh((X[0, :]-par[14])/par[15])
eta_n = 1 / 2 * (1 + tanh_n)
tau_n = par[16] + par[17] * (1 - tanh_n * tanh_n)
vecfield[3, :] = (eta_n - X[3, :]) / tau_n
return vecfield
print('\nTesting... ', end='')
field_compared = cpu_field(X, par, stimulus)
# let's tell PyTorch about our model in order to test jacobian and dfield_dpar
X = th.from_numpy(X)
par = th.from_numpy(par)
stimulus = th.from_numpy(stimulus)
X.requires_grad = True
par.requires_grad = True
vecfield = th.zeros(D, M)
vecfield[0, :] \
= stimulus[0, :] \
+ par[0] * (X[1, :] ** 3) * X[2, :] * (par[1] - X[0, :]) \
+ par[2] * (X[3, :] ** 4) * (par[3] - X[0, :]) \
+ par[4] * (par[5] - X[0, :])
tanh_m = th.tanh((X[0, :]-par[6])/par[7])
eta_m = 1 / 2 * (1 + tanh_m)
tau_m = par[8] + par[9] * (1 - tanh_m * tanh_m)
vecfield[1, :] = (eta_m - X[1, :]) / tau_m
tanh_h = th.tanh((X[0, :]-par[10])/par[11])
eta_h = 1 / 2 * (1 + tanh_h)
tau_h = par[12] + par[13] * (1 - tanh_h * tanh_h)
vecfield[2, :] = (eta_h - X[2, :]) / tau_h
tanh_n = th.tanh((X[0, :]-par[14])/par[15])
eta_n = 1 / 2 * (1 + tanh_n)
tau_n = par[16] + par[17] * (1 - tanh_n * tanh_n)
vecfield[3, :] = (eta_n - X[3, :]) / tau_n
# fetch the variables to be compared to
scalarfield = th.sum(vecfield)
scalarfield.backward()
jacobian_compared = X.grad.numpy()
dfield_dpar_compared = par.grad.numpy()
X = X.detach().numpy()
par = par.detach().numpy()
stimulus = stimulus.numpy()
"""Fetch the kernels, transfer data, and specify grid dimensions."""
k__field = getattr(cuda_lib_dynamics, f'k__{name}_field')
k__jacobian = getattr(cuda_lib_dynamics, f'k__{name}_jacobian')
k__dfield_dpar = getattr(cuda_lib_dynamics, f'k__{name}_dfield_dpar')
d_X = cuda.to_device(X)
d_par = cuda.to_device(par)
d_stimulus = cuda.to_device(stimulus)
d_field = cuda.to_device(np.zeros((D,M)))
d_jacobian = cuda.to_device(np.zeros((D,D,M)))
d_dfield_dpar = cuda.to_device(np.zeros((D,len(par),M)))
"""Define convenience functions."""
def gtimer1():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
cuda.synchronize()
def gtimer2():
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
cuda.synchronize()
def gtimer3():
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
def gtimer4():
gtimer1(); gtimer2(); gtimer3()
def gtimer5():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
"""Make sure everything is correct."""
gtimer5()
field = d_field.copy_to_host()
jacobian = np.sum(d_jacobian.copy_to_host(), axis=0)
dfield_dpar = np.sum(d_dfield_dpar.copy_to_host(), axis=(0,2))
np.testing.assert_almost_equal(field, field_compared, decimal=6)
np.testing.assert_almost_equal(jacobian, jacobian_compared, decimal=6)
np.testing.assert_almost_equal(dfield_dpar, dfield_dpar_compared, decimal=6)
print('ok.')
#======================================================================
# for profiling only
@jit(nopython=True)
def cpu_jacobian(X, par):
(D, M) = np.shape(X)
jacob = np.zeros((D,D,M))
jacob[0, 0, :] = - par[0] * (X[1, :] ** 3) * X[2, :] \
- par[2] * (X[3, :] ** 4) - par[4]
jacob[0, 1, :] \
= 3 * par[0] * (X[1, :] ** 2) * X[2, :] * (par[1] - X[0, :])
jacob[0, 2, :] = par[0] * (X[1, :] ** 3) * (par[1] - X[0, :])
jacob[0, 3, :] = 4 * par[2] * (X[3, :] ** 3) * (par[3] - X[0, :])
tanh_m = np.tanh((X[0, :]-par[6])/par[7])
kernel_m = (1 - tanh_m * tanh_m)
eta_m = 1 / 2 * (1 + tanh_m)
tau_m = par[8] + par[9] * kernel_m
eta_der_m = 1 / (2 * par[7]) * kernel_m
tau_der_m = - 2 * par[9] / par[7] * tanh_m * kernel_m
jacob[1, 0, :] \
= eta_der_m / tau_m + tau_der_m * (X[1, :] - eta_m) / (tau_m * tau_m)
tanh_h = np.tanh((X[0, :]-par[10])/par[11])
kernel_h = (1 - tanh_h * tanh_h)
eta_h = 1 / 2 * (1 + tanh_h)
tau_h = par[12] + par[13] * kernel_h
eta_der_h = 1 / (2 * par[11]) * kernel_h
tau_der_h = - 2 * par[13] / par[11] * tanh_h * kernel_h
jacob[2, 0, :] \
= eta_der_h / tau_h + tau_der_h * (X[2, :] - eta_h) / (tau_h * tau_h)
tanh_n = np.tanh((X[0, :]-par[14])/par[15])
kernel_n = (1 - tanh_n * tanh_n)
eta_n = 1 / 2 * (1 + tanh_n)
tau_n = par[16] + par[17] * kernel_n
eta_der_n = 1 / (2 * par[15]) * kernel_n
tau_der_n = - 2 * par[17] / par[15] * tanh_n * kernel_n
jacob[3, 0, :] \
= eta_der_n / tau_n + tau_der_n * (X[3, :] - eta_n) / (tau_n * tau_n)
jacob[1, 1, :] = - 1 / tau_m
jacob[2, 2, :] = - 1 / tau_h
jacob[3, 3, :] = - 1 / tau_n
return jacob
@jit(nopython=True)
def cpu_dfield_dpar(X, par):
(D, M) = np.shape(X)
deriv_par = np.zeros((D,M,len(par)))
deriv_par[0, :, 0] = (X[1, :] ** 3) * X[2, :] * (par[1] - X[0, :])
deriv_par[0, :, 1] = par[0] * (X[1, :] ** 3) * X[2, :]
deriv_par[0, :, 2] = (X[3, :] ** 4) * (par[3] - X[0, :])
deriv_par[0, :, 3] = par[2] * (X[3, :] ** 4)
deriv_par[0, :, 4] = par[5] - X[0, :]
deriv_par[0, :, 5] = par[4]
tanh_m = np.tanh((X[0, :]-par[6])/par[7])
kernel_m = (1 - tanh_m * tanh_m)
eta_m = 1 / 2 * (1 + tanh_m)
tau_m = par[8] + par[9] * kernel_m
common_m = (X[1, :] - eta_m) / (tau_m * tau_m)
eta_der_m = - 1 / (2 * par[7]) * kernel_m
tau_der_m = 2 * par[9] / par[7] * tanh_m * kernel_m
deriv_par[1, :, 6] = eta_der_m / tau_m + tau_der_m * common_m
eta_der_m = - (X[0, :] - par[6]) / (2 * (par[7] ** 2)) * kernel_m
tau_der_m = 2 * par[9] * (X[0, :] - par[6]) / (par[7] ** 2) \
* tanh_m * kernel_m
deriv_par[1, :, 7] = eta_der_m / tau_m + tau_der_m * common_m
deriv_par[1, :, 8] = common_m
deriv_par[1, :, 9] = kernel_m * common_m
tanh_h = np.tanh((X[0, :]-par[10])/par[11])
kernel_h = (1 - tanh_h * tanh_h)
eta_h = 1 / 2 * (1 + tanh_h)
tau_h = par[12] + par[13] * kernel_h
common_h = (X[2, :] - eta_h) / (tau_h * tau_h)
eta_der_h = - 1 / (2 * par[11]) * kernel_h
tau_der_h = 2 * par[13] / par[11] * tanh_h * kernel_h
deriv_par[2, :, 10] = eta_der_h / tau_h + tau_der_h * common_h
eta_der_h = - (X[0, :] - par[10]) / (2 * (par[11] ** 2)) * kernel_h
tau_der_h = 2 * par[13] * (X[0, :] - par[10]) / (par[11] ** 2) \
* tanh_h * kernel_h
deriv_par[2, :, 11] = eta_der_h / tau_h + tau_der_h * common_h
deriv_par[2, :, 12] = common_h
deriv_par[2, :, 13] = kernel_h * common_h
tanh_n = np.tanh((X[0, :]-par[14])/par[15])
kernel_n = (1 - tanh_n * tanh_n)
eta_n = 1 / 2 * (1 + tanh_n)
tau_n = par[16] + par[17] * kernel_n
common_n = (X[3, :] - eta_n) / (tau_n * tau_n)
eta_der_n = - 1 / (2 * par[15]) * kernel_n
tau_der_n = 2 * par[17] / par[15] * tanh_n * kernel_n
deriv_par[3, :, 14] = eta_der_n / tau_n + tau_der_n * common_n
eta_der_n = - (X[0, :] - par[14]) / (2 * (par[15] ** 2)) * kernel_n
tau_der_n = 2 * par[17] * (X[0, :] - par[14]) / (par[15] ** 2) \
* tanh_n * kernel_n
deriv_par[3, :, 15] = eta_der_n / tau_n + tau_der_n * common_n
deriv_par[3, :, 16] = common_n
deriv_par[3, :, 17] = kernel_n * common_n
return deriv_par
for _ in range(5):
gtimer5()
temp = cpu_field(X, par, stimulus)
temp = cpu_jacobian(X, par)
temp = cpu_dfield_dpar(X, par)
"""
%timeit -r 50 -n 10 temp = cpu_field(X, par, stimulus)
%timeit -r 50 -n 10 gtimer1()
%timeit -r 50 -n 10 temp = cpu_jacobian(X, par)
%timeit -r 50 -n 10 gtimer2()
%timeit -r 50 -n 10 temp = cpu_dfield_dpar(X, par)
%timeit -r 50 -n 10 gtimer3()
%timeit -r 50 -n 10 gtimer4()
%timeit -r 50 -n 10 gtimer5()
"""
|
from django import forms
from database.models import Configuration
from mysite.forms import *
class EmailsForm(forms.ModelForm):
sender_email = forms.CharField(required=False, label="Email para pedidos")
password = forms.CharField(widget=forms.PasswordInput(), required=False, label="Password del email para pedidos")
quotations_email = forms.CharField(required=False, label="Email para cotizaciones")
quotations_password = forms.CharField(widget=forms.PasswordInput(), required=False, label="Password del email para cotizaciones")
receiver_email = forms.CharField(required=False, label="Email para notificaciones")
action = HiddenField(initial="emails")
class Meta:
model = Configuration
fields = (
'sender_email',
'password',
'quotations_email',
'quotations_password',
'receiver_email'
)
class ReportsForm(forms.ModelForm):
week_cut = forms.ChoiceField(label="Corte Semanal", choices=Configuration.WEEK_DAYS)
action = HiddenField(initial="reports")
class Meta:
model = Configuration
fields = (
'week_cut',
)
|
from pyzabbix import ZabbixMetric, ZabbixSender
from pyzabbix.api import ZabbixAPI
import os
import json
# --- access config
def config():
configfile_name = "conf/config.json"
    # Check whether a configuration file already exists
    if not os.path.isfile(configfile_name):
        print("config.json file not found, see README for a template.")
exit()
else:
with open('conf/config.json') as json_data_file:
data = json.load(json_data_file)
# print(data['zabbix']['host'])
return data
# --- Hostname
def hostname(zapi):
    # Get all monitored hosts
    result1 = zapi.host.get(monitored_hosts=1, output='extend')
    # Get all disabled hosts
result2 = zapi.do_request('host.get',
{
'filter': {'status': 1},
'output': 'extend'
})
    # Filter the results
hostnames1 = [host['host'] for host in result1]
hostnames2 = [host['host'] for host in result2['result']]
    # Log out of Zabbix
zapi.user.logout()
return hostnames1, hostnames2
# --- sendMetrica
def sendMetrica():
# Send metrics to zabbix trapper
packet = [
ZabbixMetric('hostname1', 'test[cpu_usage]', 2),
ZabbixMetric('hostname1', 'test[system_status]', "OK"),
ZabbixMetric('hostname1', 'test[disk_io]', '0.1'),
ZabbixMetric('hostname1', 'test[cpu_usage]', 20, 1411598020),
]
result = ZabbixSender(use_config=True).send(packet)
return result
# --- main
def main():
    # access configuration
    data = config()
    # Create a ZabbixAPI class instance
    zabbixAPI = ZabbixAPI(url=data['zabbix']['host'], user=data['zabbix']['user'], password=data['zabbix']['pwd'])
    host1, host2 = hostname(zabbixAPI)
print(host1)
print("\n")
print(host2)
# --- entry point
if __name__ == "__main__":
main()
|
# Generated by Django 3.2.2 on 2021-05-14 04:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_urlaction'),
]
operations = [
migrations.AddField(
model_name='coresettings',
name='clear_faults_days',
field=models.IntegerField(default=0),
),
]
|
from django import forms
from django.utils.translation import pgettext_lazy
from ....widget.models import Benefit
from ...product.widgets import ImagePreviewWidget
from ....widget.thumbnails import create_benefit_thumbnails
class BenefitForm(forms.ModelForm):
class Meta:
model = Benefit
fields = [
'name',
'text',
'image',
'is_active',
]
labels = {
'name': pgettext_lazy(
'Benefit field name',
'Nombre',
),
'text': pgettext_lazy(
'Benefit field name',
'Texto',
),
'image': pgettext_lazy(
'Benefit field name',
'Imagen',
),
'is_active': pgettext_lazy(
'Slider active state',
'Activo',
),
}
def save(self, commit=True):
print("*** *** ***")
print("save benefit from form")
print("*** *** ***")
benefit = super().save(commit=commit)
create_benefit_thumbnails.delay(benefit.pk)
return benefit
|
from django.contrib.auth.models import User
from django.db import models
from products.models import Product
class CartManager(models.Manager):
def new_or_get(self, request):
cart_id = request.session.get("cart_id", None)
qs = Cart.objects.filter(id=cart_id)
if qs.count()==1:
new_obj = False
cart_obj = qs.first()
            if request.user.is_authenticated and cart_obj.user is None:
cart_obj.user = request.user
cart_obj.save()
else:
cart_obj = Cart.objects.new(user=request.user)
request.session["cart_id"] = cart_obj.id
return cart_obj, new_obj
def new(self, user=None):
user_obj = None
if user is not None:
try:
                if user.is_authenticated:
user_obj = user
except Exception as Error:
pass
return self.model.objects.create(user=user_obj)
class Cart(models.Model):
user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
products = models.ManyToManyField(Product, blank=True)
total = models.DecimalField(default=0.00, max_digits=100, decimal_places=2)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = CartManager()
def __str__(self):
return f"{self.id}"
|
from filter_tokens import filter_tokens
def test_filter_tokens():
filtd = filter_tokens(['\\int', '\\text', '{', 'x', '}', '\\text', '{', 'h', 'i', 't', 'h', 'e', 'r', 'e', '}', 'x', '+', 'y'])
assert filtd == ['\\int', '\\text', '{', 'x', '}', 'x', '+', 'y']
s = "b_1(u) = \\frac{1 - u q^{1/2} }{1 - u q^{-1/2}} \mbox{ and }b_2(u) = \\frac{ -uq^{-1/2}+ q^{-1}}{ 1 - uq^{-1/2}}."
|
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write variant calling report"""
import ast
from sequana.lazy import pandas as pd
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils.datatables_js import DataTable
class VariantCallingModule(SequanaBaseModule):
""" Write HTML report of variant calling. This class takes a csv file
generated by sequana_variant_filter.
"""
def __init__(self, data):
""".. rubric:: constructor
:param data: it can be a csv filename created by
sequana.freebayes_vcf_filter or a
:class:`freebayes_vcf_filter.Filtered_freebayes` object.
"""
super().__init__()
self.title = "Variant Calling Report"
try:
with open(data, "r") as fp:
self.filename = data
line = fp.readline()
if line.startswith("# sequana_variant_calling"):
string_dict = line.split(";")[-1].strip()
try:
self.filter_dict = ast.literal_eval(string_dict)
except SyntaxError:
self.filter_dict = None
self.df = pd.read_csv(fp)
except FileNotFoundError:
msg = ("The csv file is not present. Please, check if your"
" file is present.")
raise FileNotFoundError(msg)
except TypeError:
self.df = data.df
self.filter_dict = data.vcf.filters_params
self.create_report_content()
self.create_html("variant_calling.html")
def create_report_content(self):
self.sections = list()
if self.filter_dict:
self.filters_information()
self.variant_calling()
def filters_information(self):
""" Add information of filter.
"""
self.sections.append({
'name': "Filter Options",
'anchor': 'filters_option',
'content':
"<p>All filters parameters used is presented in this list:</p>"
"\n<ul><li>freebayes_score: {freebayes_score}</li>\n"
"<li>frequency: {frequency}</li>\n"
"<li>min_depth: {min_depth}</li>\n"
"<li>forward_depth: {forward_depth}</li>\n"
"<li>reverse_depth: {reverse_depth}</li>\n"
"<li>strand_ratio: {strand_ratio}</li></ul>\n"
"Note:<ul><li>frequency: alternate allele / depth</li>\n"
"<li>min_depth: minimum alternate allele present</li>\n"
"<li>forward_depth: minimum alternate allele present on "
"forward strand</li>\n"
"<li>reverse_depth: minimum alternate allele present on "
"reverse strand</li>\n"
"<li>strand_ratio: alternate allele forward / (alternate "
"allele forward + alternate allele reverse)</li>"
"</ul>".format(**self.filter_dict)
})
def variant_calling(self):
""" Variants detected section.
"""
datatable = DataTable(self.df, 'vc')
# set options
datatable.datatable.datatable_options = {
'scrollX': 'true',
'pageLength': 30,
'scrollCollapse': 'true',
'dom': 'Bfrtip',
'buttons': ['copy', 'csv']
}
js = datatable.create_javascript_function()
html_tab = datatable.create_datatable(float_format='%.3f')
self.sections.append({
'name': "Variants Detected",
'anchor': 'basic_stats',
'content':
"<p>This table present variant detected by freebayes after "
"filtering.</p>\n{0}\n{1}\n<p>Note: the freebayes score can be"
" understood as 1 - P(locus is homozygous given the data)</p>"
.format(js, html_tab)
})
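if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): assumes
    # "variants.filter.csv" was produced by sequana_variant_filter. Constructing
    # the module renders "variant_calling.html"; where it is written depends on
    # the Sequana report configuration.
    VariantCallingModule("variants.filter.csv")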
|
#!/bin/python
import numpy as np
import MDAnalysis
import time
import math
import sys
#Timing
start = time.time()
#Input values
topo = "ADI.prmtop"
traj=[]
for i in range(30):
traj.append("belly/ADI.belly{:02d}.xyz.nc".format(i))
rMax=12.
binSize=0.1
d0=0.09
#Read in atom type
fcrd=open("ADI.crd","r")
l=fcrd.readline()
l2=l.split()
natom=int(l2[0])
ntyp=int(l2[1])
ityp=np.zeros(natom,dtype=int)
l=fcrd.readline()
i=0
while l:
l2=l.split()
    ityp[i]=int(l2[0])-1
l=fcrd.readline()
i+=1
fcrd.close()
#Read in LJ params
rminu=np.zeros(natom)
epsu=np.zeros(natom)
rminv=np.zeros(5)
epsv=np.zeros(5)
flj=open("LJparm.dat","r")
l=flj.readline()
l=flj.readline()
for i in range(natom):
l=flj.readline()
l2=l.split()
    rminu[i]=float(l2[0])
    epsu[i]=float(l2[1])
l=flj.readline()
for i in range(5):
l=flj.readline()
l2=l.split()
    rminv[i]=float(l2[0])
    epsv[i]=float(l2[1])
flj.close()
A=12.*np.sqrt(epsu[:,None]*epsv[None,:])*((rminu[:,None]+rminv[None,:])/2.)**12
B=12.*np.sqrt(epsu[:,None]*epsv[None,:])*((rminu[:,None]+rminv[None,:])/2.)**6
#Open trajectories
coord = MDAnalysis.Universe(topo,traj)
H2OCoord = coord.select_atoms("resname CL3")
ionsCoord = coord.select_atoms("not resname CL3 and not resname Cl- and not resname Na+")
#Iterate
rMax2=rMax**2
rMax2p=(rMax+d0)**2
hrMax=0.5*rMax
nbins=int(rMax/binSize+0.01)
nH2O=np.zeros((ntyp,nbins),dtype=int)
fH2O=np.zeros((ntyp,nbins))
nWat=len(H2OCoord.atoms)
ilist=np.array([0,1,2,3,4])
for ts in coord.trajectory:
dims = coord.dimensions[:3]
hdims = dims/2.
# sys.stdout.write("Progress: {0:.2f}% Complete\r".format((float(ts.frame) / float(len(coord.trajectory))) * 100))
# sys.stdout.flush()
for ia,a in enumerate(ionsCoord.atoms):
it=ityp[ia]
rhat=H2OCoord.atoms[np.arange(1,nWat,5)].positions-a.position
r2Wat=np.einsum("ij,ij->i",rhat,rhat)
ir=np.where(r2Wat<rMax2p)[0]
rhat=rhat[ir]
phat=H2OCoord.atoms[ir*5+1].positions-H2OCoord.atoms[ir*5].positions
phat/=np.sqrt(np.einsum("ij,ij->i",phat,phat))[:,None]
rhat+=d0*phat
r2Wat=np.einsum("ij,ij->i",rhat,rhat)
ir2=np.where(r2Wat<rMax2)[0]
ir=ir[ir2]
rhat=rhat[ir2]
r2Wat=r2Wat[ir2]
rWat=np.sqrt(r2Wat)
rhat/=rWat[:,None]
        ix=(rWat[:]/binSize).astype(int)
fWat=np.zeros_like(r2Wat)
for ib in ilist:
sWat=H2OCoord.atoms[ir*5+ib].positions-a.position
s2Wat=np.einsum("ij,ij->i",sWat,sWat)
s6Wat=s2Wat**-3
sWat=np.einsum("ij,ij->i",sWat,rhat)
fWat+=sWat*s6Wat*(A[ia,ib]*s6Wat-B[ia,ib])/s2Wat
np.add.at(nH2O[it],ix,1)
np.add.at(fH2O[it],ix,fWat)
fout=open("sample.frc.dat","w")
for i in range(ntyp):
for j in range(nbins):
r0=(j+0.5)*binSize
if nH2O[i,j] == 0:
fout.write("{:7.3f} {:19.12e} {:9d} {:3d}\n".format(r0,0.,-1,i+1))
else:
fout.write("{:7.3f} {:19.12e} {:9d} {:3d}\n".format(r0,fH2O[i,j]/nH2O[i,j],nH2O[i,j],i+1))
fout.close()
#Timing
end = time.time()
t = end - start
print("\nTotal running time: {:.2f} sec".format(t))
|
#!/usr/bin/env python
#
# wamplite is free software; you can redistribute it and/or modify
# it under the terms of the MIT license. See LICENSE for details.
#
import wamplite
import logging
import sys
import threading
import time
import websocket
# Connection & authentication details. Use ws:// for websocket, or wss:// for
# secure websocket.
url = "ws://localhost:55555"
realm = "default_realm"
authid = "peter"
password = "secret2"
event = threading.Event()
rpc_completed_ok=False
def wampclient_on_error(wampclient):
# Unsophisticated error handling, just close the session
logging.info("wampclient_on_error ... calling close")
wampclient.close()
def wampclient_on_close(wampclient):
logging.info("*** session closed ***")
event.set()
def wampclient_on_open(wampclient):
logging.info("*** session open ***")
wampclient.call("hello", rpc_response,
"listarg1", "listarg2", dictarg1="v1", dictarg2="v2")
def rpc_response(wampclient, *args, **kwargs):
logging.info("rpc_response, args: {}, kwargs: {}".format(args, kwargs))
global rpc_completed_ok
rpc_completed_ok = True
event.set()
def init_logging(use_debug):
websocket.enableTrace(use_debug)
logger = logging.getLogger()
if use_debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
init_logging(use_debug=False)
# attempts to make before giving up
attempts = 5
while True:
event.clear()
# create the wamp client
client = wamplite.WampClient(url, realm, authid, password,
on_error_cb = wampclient_on_error,
on_close_cb = wampclient_on_close,
on_open_cb = wampclient_on_open)
client.start()
    # wait for the IO thread to respond
event.wait(timeout=30)
if rpc_completed_ok:
break
else:
# if rpc failed, clean up and retry
client.close()
del client
attempts -= 1
if attempts == 0:
logging.error("failed to call RPC")
exit(1)
else:
logging.info("remaining attempts: %d" % attempts)
time.sleep(3)
|
import argparse
import os
import sys
import torch
from torch import nn, optim
from torch.optim import optimizer
from torchvision import datasets, models, transforms
parser = argparse.ArgumentParser(description="Trains a neural network")
parser.add_argument('data_dir', metavar='dir', type=str,
help="Directory to the dataset to be trained on")
parser.add_argument("--save_dir", dest="save_dir", default=".",
type=str, help="Directory to save checkpoints")
parser.add_argument("--arg", dest="arch", default="vgg16",
type=str, help="Pretrained architecture to use")
parser.add_argument("--learning_rate", dest="learning_rate",
default=0.001, type=float, help="Learning rate to use")
parser.add_argument("--hidden_units", dest="hidden_units",
type=int, default=512, help="Number of hidden units to use")
parser.add_argument("--epochs", dest="epochs", type=int,
default=5, help="Number of epochs to train model")
parser.add_argument("--gpu", dest="gpu", action="store_true", help="Use GPU?")
args = parser.parse_args()
data_dir = args.data_dir
save_dir = args.save_dir
arch = args.arch
learning_rate = args.learning_rate
hidden_units = args.hidden_units
epochs = args.epochs
gpu = args.gpu
device = torch.device("cuda" if gpu and torch.cuda.is_available() else "cpu")
def build_model(arch, hidden_units):
if arch == "alexnet":
model = models.alexnet(pretrained=True)
in_features = model.classifier[1].in_features
classifier_name = "classifier"
elif arch == "vgg11":
model = models.vgg11(pretrained=True)
in_features = model.classifier[0].in_features
classifier_name = "classifier"
elif arch == "vgg13":
model = models.vgg13(pretrained=True)
in_features = model.classifier[0].in_features
classifier_name = "classifier"
elif arch == "vgg16":
model = models.vgg16(pretrained=True)
in_features = model.classifier[0].in_features
classifier_name = "classifier"
elif arch == "vgg19":
model = models.vgg19(pretrained=True)
in_features = model.classifier[0].in_features
classifier_name = "classifier"
elif arch == "resnet18":
model = models.resnet18(pretrained=True)
in_features = model.fc.in_features
classifier_name = "fc"
elif arch == "resnet34":
model = models.resnet34(pretrained=True)
in_features = model.fc.in_features
classifier_name = "fc"
elif arch == "resnet50":
model = models.resnet50(pretrained=True)
in_features = model.fc.in_features
classifier_name = "fc"
elif arch == "resnet101":
model = models.resnet101(pretrained=True)
in_features = model.fc.in_features
classifier_name = "fc"
elif arch == "resnet152":
model = models.resnet152(pretrained=True)
in_features = model.fc.in_features
classifier_name = "fc"
elif arch == "densenet121":
model = models.densenet121(pretrained=True)
in_features = model.classifier.in_features
classifier_name = "classifier"
elif arch == "densenet169":
model = models.densenet169(pretrained=True)
in_features = model.classifier.in_features
classifier_name = "classifier"
elif arch == "densenet201":
model = models.densenet201(pretrained=True)
in_features = model.classifier.in_features
classifier_name = "classifier"
elif arch == "densenet161":
model = models.densenet161(pretrained=True)
in_features = model.classifier.in_features
classifier_name = "classifier"
else:
print(f"Error: Unknown architecture: {arch}")
sys.exit()
# Freeze parameters
for param in model.parameters():
param.requires_grad = False
# Define classifier
classifier = nn.Sequential(
nn.Linear(in_features, hidden_units),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Linear(hidden_units, hidden_units),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Linear(hidden_units, 102),
nn.LogSoftmax(dim=1)
)
if classifier_name == "classifier":
model.classifier = classifier
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
elif classifier_name == "fc":
model.fc = classifier
optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)
return model, optimizer
def train_model(model, epochs, dataloaders, optimizer, criterion):
steps = 0
running_loss = 0
print_every = 20
model.to(device)
for epoch in range(epochs):
for images, labels in dataloaders["train"]:
steps += 1
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
logps = model(images)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
test_loss = 0
accuracy = 0
for images, labels in dataloaders["valid"]:
images, labels = images.to(device), labels.to(device)
logps = model(images)
loss = criterion(logps, labels)
test_loss += loss.item()
ps = torch.exp(logps)
_, top_class = ps.topk(1, dim=1)
equality = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equality.type(torch.FloatTensor)).item()
print(f"Epoch: {epoch+1}/{epochs} ",
f"Training Loss: {running_loss/print_every:.3f} ",
f"Validation Loss: {test_loss/len(dataloaders['valid']):.3f} ",
f"Validation Accuracy: {accuracy/len(dataloaders['valid']):.3f}")
running_loss = 0
model.train()
def generate_data(dir):
train_dir = os.path.join(dir, "train")
valid_dir = os.path.join(dir, "valid")
test_dir = os.path.join(dir, "test")
data_transforms = {
"train": transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
"valid": transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
"test": transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
}
# Load the datasets with ImageFolder
image_datasets = {
"train": datasets.ImageFolder(train_dir, transform=data_transforms["train"]),
"valid": datasets.ImageFolder(valid_dir, transform=data_transforms["valid"]),
"test": datasets.ImageFolder(test_dir, transform=data_transforms["test"])
}
    # Using the image datasets and the transforms, define the dataloaders
dataloaders = {
"train": torch.utils.data.DataLoader(image_datasets["train"], batch_size=64, shuffle=True),
"valid": torch.utils.data.DataLoader(image_datasets["valid"], batch_size=64, shuffle=True),
"test": torch.utils.data.DataLoader(image_datasets["test"], batch_size=64, shuffle=True)
}
return data_transforms, image_datasets, dataloaders
def save_model(save_dir, model, image_datasets):
model.class_to_idx = image_datasets["train"].class_to_idx
checkpoint = {
"input_size": 25088,
"output_size": 102,
"classifier": model.classifier,
"state_dict": model.state_dict(),
"class_to_idx": model.class_to_idx,
"arch": arch
# "optimizer": optimizer.state_dict(),
# "epochs": epochs,
}
torch.save(checkpoint, os.path.join(save_dir, "checkpoint.pth"))
if __name__ == "__main__":
print("--------LOADING DATA--------")
_, image_datasets, dataloaders = generate_data(data_dir)
print("Data loaded successfully")
print("--------BUILDIING MODEL--------")
model, optimizer = build_model(arch, hidden_units)
print("Model successfully built")
criterion = nn.NLLLoss()
print("--------TRAINING MODEL--------")
print(f"Training model with {epochs} epochs")
train_model(model, epochs, dataloaders, optimizer, criterion)
print("Model successfully trained")
print("--------SAVING MODEL--------")
save_model(save_dir, model, image_datasets)
print(f"Model saved to {os.path.join(save_dir, 'checkpoint.pth')}")
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from brainstorm.tests.test_layers import (
spec_list, test_deltas_calculation_of_layer, test_layer_add_to_deltas,
test_layer_backward_pass_insensitive_to_internal_state_init,
test_layer_forward_pass_insensitive_to_internal_state_init,
test_gradients_for_layer)
def get_test_configurations():
for spec in spec_list:
time_steps, batch_size, activation = spec
yield {
'time_steps': time_steps,
'batch_size': batch_size,
'activation': activation
}
def run_layer_tests(layer, spec):
spec_str = "time_steps={time_steps}, batch_size={batch_size}," \
" activation={activation}".format(**spec)
print('======= Testing {} for {} ====='.format(layer.name, spec_str))
print('Testing Delta Calculations ...')
test_deltas_calculation_of_layer((layer, spec))
print('Testing Gradient Calculations ...')
test_gradients_for_layer((layer, spec))
print('Verifying that layer ADDS to deltas ...')
test_layer_add_to_deltas((layer, spec))
print('Verifying that the forward pass is insensitive to initialization of'
' internals ...')
test_layer_forward_pass_insensitive_to_internal_state_init((layer, spec))
print('Verifying that the backward pass is insensitive to initialization'
' of internals ...')
test_layer_backward_pass_insensitive_to_internal_state_init((layer, spec))
print("")
|
def roboVet(modShift):
"""Run the Model-Shift test
Inputs:
-------------
modshift
The dictionary returned by ModShift.runModshift
Returns:
-------------
A dictionary containing the following keys:
disp
The disposition of the DOI --- either "candidate" or "false positive"
not_tran_like
A 1/0 flag indicating whether or not the DOI is transit-like. 0 means it is transit-like
sig_sec
A 1/0 flag indicating whether or not the DOI has a significant secondary.
comments
A string containing comments on individual tests
Output:
----------
None
"""
# By default, it is a candidate unless it fails a test.
disp = 'candidate'
comments = ''
# Run the not transit-like tests
out = not_trans_like(modShift)
not_trans_like_flag = out['not_trans_like_flag']
comments += out['comments']
# Run the significant secondary tests
out = sig_sec(modShift)
sig_sec_flag = out['sig_sec_flag']
comments += out['comments']
# Set false positive if any flag is not 0
if not_trans_like_flag > 0 or sig_sec_flag > 0:
disp = 'false positive'
return {'disp':disp, 'not_trans_like':not_trans_like_flag, 'sig_sec':sig_sec_flag, 'comments':comments}
def not_trans_like(modshift):
not_trans_like_flag = 0
comments = ''
    # Check if primary is significant compared to the red noise level
if modshift['mod_sig_pri']/modshift['mod_Fred'] < modshift['mod_sig_fa1'] and modshift['mod_sig_pri'] > 0:
if comments != '':
comments += '---'
comments += 'SIG_PRI_OVER_FRED_TOO_LOW'
not_trans_like_flag = 1
# Check if primary is significant compared to tertiary
if modshift['mod_sig_pri']-modshift['mod_sig_ter'] < modshift['mod_sig_fa2'] and modshift['mod_sig_pri'] > 0 and modshift['mod_sig_ter'] > 0:
if comments != '':
comments += '---'
comments += 'SIG_PRI_MINUS_SIG_TER_TOO_LOW'
not_trans_like_flag = 1
# Check if primary is significant compared to positive
if modshift['mod_sig_pri']-modshift['mod_sig_pos'] < modshift['mod_sig_fa2'] and modshift['mod_sig_pri'] > 0 and modshift['mod_sig_pos'] > 0:
if comments != '':
comments += '---'
comments += 'SIG_PRI_MINUS_SIG_POS_TOO_LOW'
not_trans_like_flag = 1
    # Check if a single event (or a few) dominates the signal. Likely an SPSD if so. Using 1.5 as an educated-guess threshold for now.
if modshift['mod_dmm'] > 1.5:
if comments != '':
comments += '---'
comments += 'INDIV_DEPTHS_NOT_CONSISTENT'
not_trans_like_flag = 1
    # Check if the signal fails the modshift shape metric. Likely sinusoidal if shape > 0.3 (using 0.3 as an educated-guess threshold for now).
if modshift['mod_shape'] > 0.3:
if comments != '':
comments += '---'
comments += 'SINUSOIDAL_VIA_MODSHIFT'
not_trans_like_flag = 1
return {'not_trans_like_flag':not_trans_like_flag,'comments':comments}
def sig_sec(modshift):
sig_sec_flag = 0
comments = ''
# Check if a significant secondary exists in phased light curve from model-shift
if modshift['mod_sig_sec'] / modshift['mod_Fred'] > modshift['mod_sig_fa1'] and modshift['mod_sig_sec'] > 0 and \
(modshift['mod_sig_sec'] - modshift['mod_sig_ter'] > modshift['mod_sig_fa2'] or modshift['mod_sig_ter'] > 0) and \
(modshift['mod_sig_sec'] - modshift['mod_sig_pri'] > modshift['mod_sig_fa2'] or modshift['mod_sig_pri'] > 0):
if comments != '':
comments += '---'
comments += 'SIG_SEC_IN_MODEL_SHIFT'
sig_sec_flag = 1
    # Add something here for an eclipse from planet reflection one day
# Next line is to check if it could be detected at twice the orbital period and thus should be a PC.
# HAVE TO CHECK WITH FERGAL ON VALUES FROM TRAP FIT
#if abs(0.5 - modshift['mod_ph_sec'])*clip['trapFit.period_days'] < 0.25*clip['trapFit.duration_hrs']/24.0 and abs(modshift['mod_sig_pri'] - modshift['mod_sig_sec']) < modshift['mod_sig_fa2']
# Check Odd/Even from model-shift
if modshift['mod_sig_oe'] > modshift['mod_sig_fa1']:
if comments != '':
comments += '---'
comments += 'ODD_EVEN_DIFF'
sig_sec_flag = 1
return {'sig_sec_flag':sig_sec_flag,'comments':comments}
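if __name__ == "__main__":
    # Minimal usage sketch with hypothetical model-shift numbers (not real results);
    # a real run would pass the dictionary returned by ModShift.runModshift.
    example = {
        'mod_sig_pri': 12.0, 'mod_sig_sec': 1.0, 'mod_sig_ter': 0.5,
        'mod_sig_pos': 0.4, 'mod_sig_oe': 0.2, 'mod_sig_fa1': 4.0,
        'mod_sig_fa2': 3.0, 'mod_Fred': 1.2, 'mod_dmm': 1.0,
        'mod_shape': 0.1, 'mod_ph_sec': 0.5,
    }
    print(roboVet(example))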
|
import logging
from datetime import datetime, timedelta
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from passlib.context import CryptContext
from pydantic import BaseModel
from jose import JWTError, jwt
from pydantic import Field
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SECRET_KEY = "a9646c30a5a8a493c375ff549bbc603660e03c9d057d72f249f264fb4a07c832"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60
# Note: In actual production, we would store the root user in a database
ROOT_USERS_DB = {
"root": {
"username": "root",
"hashed_password": '$2b$12$/eLylqVxNCF0YkuQhAs5eeGJGonPi2FJpSCj.dq4C1zG8IJKSmyCW',
}
}
class Token(BaseModel):
access_token: str
token_type: str
class User(BaseModel):
username: str = Field(
...,
description="Username",
example="Peter"
)
class UserInDB(User):
hashed_password: str
class UserGen(User):
expire_days: float = Field(
...,
description="Number of days till token expire",
example=360.0,
)
router = APIRouter(
prefix="/token",
tags=["token"],
responses={401: {"description": "Unauthorized"}},
)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def decode_token(token: str) -> dict:
payload = jwt.decode(token, SECRET_KEY, algorithms=ALGORITHM)
return payload
def verify_password(plain_password: str, hashed_password: str) -> bool:
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password: str) -> str:
"""Generate password hash
"""
return pwd_context.hash(password)
def get_user(db, username: str) -> Optional[UserInDB]:
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(user_db, username: str, password: str) -> Optional[UserInDB]:
user = get_user(user_db, username)
if not user:
return None
if not verify_password(password, user.hashed_password):
return None
return user
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
@router.post(
"/",
response_model=Token,
description=f"Log in as root user to get root token (valid for {ACCESS_TOKEN_EXPIRE_MINUTES} minutes)"
)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
user = authenticate_user(
ROOT_USERS_DB, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
async def get_root_user(token: str = Depends(oauth2_scheme)) -> UserInDB:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = decode_token(token)
username: str = payload.get("sub")
if username is None:
raise credentials_exception
except JWTError as e:
logger.info(f"JWT error. Error: {e}")
raise credentials_exception
user = get_user(ROOT_USERS_DB, username)
if user is None:
raise credentials_exception
return user
async def get_regular_user(token: str = Depends(oauth2_scheme)) -> User:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = decode_token(token)
username: str = payload.get("sub")
if username is None:
raise credentials_exception
except JWTError as e:
logger.info(f"JWT error. Error: {e}")
raise credentials_exception
return User(username=username)
@router.put(
"/generate",
response_model=Token,
description="Generate user token. Require root JWT authorization token."
)
async def generate_token(
user_gen: UserGen,
user: UserInDB = Depends(get_root_user)
):
"""Generaate user token
Note:
    - In a real application, once we generate a token for a user, we should
save it in a database.
- However, here we just return the token. This is the case where you
    don't need to perform user management (such as revoking user access).
"""
logger.info(
f"{user.username} creates a {user_gen.expire_days} days token for {user_gen.username}"
)
access_token_expires = timedelta(days=user_gen.expire_days)
access_token = create_access_token(
data={"sub": user_gen.username}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
@router.get(
"/root-test",
response_model=User,
description="Test root user access. Require root JWT authorization token."
)
async def test_root_api(user: UserInDB = Depends(get_root_user)):
return user
@router.get(
"/reg-test",
response_model=User,
description="Test regular user access. Require user JWT authorization token."
)
async def test_regular_user_api(user: User = Depends(get_regular_user)):
return user
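if __name__ == "__main__":
    # Minimal offline sketch (no server needed): create a short-lived token for a
    # hypothetical user and decode it back with the module's own helpers.
    demo_token = create_access_token({"sub": "alice"}, expires_delta=timedelta(minutes=5))
    print(decode_token(demo_token)["sub"])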
|
try:
from . import generic as g
except BaseException:
import generic as g
class RasterTest(g.unittest.TestCase):
def test_rasterize(self):
p = g.get_mesh('2D/wrench.dxf')
origin = p.bounds[0]
pitch = p.extents.max() / 600
resolution = g.np.ceil(p.extents / pitch).astype(int)
# rasterize with filled
filled = p.rasterize(origin=origin,
pitch=pitch,
resolution=resolution,
fill=True,
width=None)
# rasterize just the outline
outline = p.rasterize(origin=origin,
pitch=pitch,
resolution=resolution,
fill=False,
width=2.0)
# rasterize both
both = p.rasterize(origin=origin,
pitch=pitch,
resolution=resolution,
fill=True,
width=2.0)
# rasterize with two-dimensional pitch
pitch = p.extents / 600
filled_2dpitch = p.rasterize(origin=origin,
pitch=pitch,
resolution=resolution,
fill=True,
width=None)
# count the number of filled pixels
fill_cnt = g.np.array(filled).sum()
fill_2dpitch_cnt = g.np.array(filled_2dpitch).sum()
both_cnt = g.np.array(both).sum()
outl_cnt = g.np.array(outline).sum()
# filled should have more than an outline
assert fill_cnt > outl_cnt
# filled+outline should have more than outline
assert both_cnt > outl_cnt
# filled+outline should have more than filled
assert both_cnt > fill_cnt
# A different pitch results in a different image
assert fill_2dpitch_cnt != fill_cnt
def test_nested(self):
# make a test path with nested circles
theta = g.np.linspace(0, g.np.pi * 2, 100)
unit = g.np.column_stack((g.np.cos(theta), g.np.sin(theta)))
radii = g.np.linspace(1.0, 10.0, 10)
g.np.random.shuffle(radii)
paths = []
for R in radii:
paths.append(g.trimesh.load_path(R * unit))
path = g.trimesh.path.util.concatenate(paths)
# split and extrude should both show 5 regions
assert len(path.split()) == 5
assert len(path.extrude(1.0)) == 5
pitch = path.extents.max() / 1000
origin = path.bounds[0] - pitch
resolution = (g.np.ceil(
path.extents / pitch) + 2).astype(int)
# rasterize using the settings
r = path.rasterize(
pitch=pitch, origin=origin, resolution=resolution)
# it's a boolean image so filled cells times
# pitch area should be about the same as the area
filled = g.np.array(r).sum() * pitch ** 2
assert g.np.isclose(filled, path.area, rtol=0.01)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gl, gloo, data
from glumpy.geometry import primitives
from glumpy.transforms import PanZoom
vertex = """
attribute vec3 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = <transform(vec4(position.xy,0,1.0))>;
v_texcoord = texcoord;
}
"""
fragment = """
#include "misc/spatial-filters.frag"
#include "colormaps/colormaps.glsl"
uniform sampler2D data;
uniform vec2 data_shape;
varying vec2 v_texcoord;
void main()
{
// Extract data value
float value = Bicubic(data, data_shape, v_texcoord).r;
// Map value to rgb color
vec4 bg_color = vec4(colormap_hot(value),1.0);
vec4 fg_color = vec4(0,0,0,1);
// Trace contour
float levels = 32.0;
float antialias = 1.0;
float linewidth = 1.0 + antialias;
if(length(value-0.5) < 0.5/levels)
linewidth = 3.0 + antialias;
float v = levels*value - 0.5;
float dv = linewidth/2.0 * fwidth(v);
float f = abs(fract(v) - 0.5);
float d = smoothstep(-dv,+dv,f);
float t = linewidth/2.0 - antialias;
d = abs(d)*linewidth/2.0 - t;
if( d < 0.0 ) {
gl_FragColor = bg_color;
} else {
d /= antialias;
gl_FragColor = mix(fg_color,bg_color,d);
}
} """
window = app.Window(800, 800, color = (1,1,1,1))
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_TRIANGLES, I)
@window.event
def on_key_press(key, modifiers):
if key == app.window.key.SPACE:
transform.reset()
program = gloo.Program(vertex, fragment)
V,I = primitives.plane(2.0, n=64)
program.bind(V)
lena = data.get("lena.png")/256.0
program['data'] = lena[::-1,:,0]
program['data'].interpolation = gl.GL_NEAREST
program['data_shape'] = lena.shape[1], lena.shape[0]
program['u_kernel'] = data.get("spatial-filters.npy")
program['u_kernel'].interpolation = gl.GL_LINEAR
transform = PanZoom(aspect=1)
program['transform'] = transform
window.attach(transform)
app.run()
|
import os
import weakref
__version__ = "0.0.2"
def formatted_value(value, array=True):
"""Format a given input value to be compliant for USD
Args:
array (bool): If provided, will treat iterables as an array rather than a tuple
"""
if isinstance(value, str):
value = '"{}"'.format(value.replace('"', '\\"'))
elif isinstance(value, (list, tuple)):
temp = []
for val in value:
if isinstance(val, str):
val = formatted_value(val, array=False)
temp.append(str(val))
value = '{}{}{}'.format(
'[' if array else '(',
', '.join(temp),
']' if array else ')'
)
return str(value)
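# For example (based on the function above): formatted_value("hi") -> '"hi"',
# formatted_value([1, 2]) -> '[1, 2]', and formatted_value((1, 2), array=False) -> '(1, 2)'.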
def indentation(level):
"""Return the indentation string for a given level of indentation"""
return level * 4 * ' '
class AbstractData(object):
"""Base class for all our primitives"""
as_type = None
def __init__(self, *args, **kwargs):
super(AbstractData, self).__init__()
self.level = 0
self.children = []
self.properties = []
self.attributes = []
self.was_set = False
self.name = None
self.value = None
self.parent = None
self.keyframes = {}
def indent(self, level=None):
"""Returns the indentation string"""
if level is None:
level = self.level
return indentation(level)
def lines(self, indent=0):
"""Create the lines required to write out"""
self.level = indent
return []
def formatted_name(self):
name = self.name
if self.keyframes:
name = '{}.timeSamples'.format(name)
return name
def formatted_value(self, frame=None):
if frame is None:
value = self.value
elif frame in self.keyframes:
value = self.keyframes[frame]
else:
raise KeyError("No keyframe value for frame: {}".format(frame))
return formatted_value(value, array=self.as_type and '[' in self.as_type)
def set_value(self, value):
self.was_set = True
self.value = value
def unset_value(self):
self.was_set = False
self.value = None
def set_parent(self, parent):
self.parent = weakref.proxy(parent)
def unset_parent(self):
self.parent = None
def path(self):
tokens = []
node = self
while node:
tokens.append(node.name)
node = node.parent
if not node:
tokens.append('/')
tokens = [t for t in tokens[::-1] if t]
path = os.path.join(*tokens)
return path
def set_property(self, prop, value):
prop_obj = self.get_property(prop, defaults=True)
if not prop_obj:
prop_obj = self.add_property(prop)
elif prop_obj not in self.properties:
prop_obj = self.add_property(prop_obj)
prop_obj.set_value(value)
return prop_obj
def set_attribute(self, attr, value, as_type=None, is_uniform=None):
attr_obj = self.get_attribute(attr, defaults=True)
if not attr_obj:
attr_obj = self.add_attribute(attr, as_type=as_type, is_uniform=is_uniform)
elif attr_obj not in self.attributes:
attr_obj = self.add_attribute(attr_obj)
attr_obj.set_value(value)
if as_type is not None:
attr_obj.as_type = as_type
if is_uniform is not None:
attr_obj.is_uniform = is_uniform
return attr_obj
def get_property(self, name, defaults=False):
for prop in self.properties:
if name == prop.name:
return prop
if not defaults:
return
for cls in self._class_inheritence():
for k, v in cls.__dict__.items():
if not isinstance(v, Property):
continue
if name == v.name:
prop = Property(
name=v.name,
value=v.value,
values=v.values
)
return prop
def get_attribute(self, name, defaults=False):
for attr in self.attributes:
if name == attr.name:
return attr
if not defaults:
return
for cls in self._class_inheritence():
for k, v in cls.__dict__.items():
if not isinstance(v, Attribute):
continue
if name == v.name:
attr = Attribute(
name=v.name,
value=v.value,
is_uniform=v.is_uniform,
as_type=v.as_type,
allowedTokens=v.allowed_tokens
)
return attr
def add_property(self, prop):
if not isinstance(prop, Property):
prop = Property(prop)
for pr in self.properties:
if pr.name == prop.name:
return pr
self.properties.append(prop)
prop.set_parent(self)
return prop
def add_attribute(self, attr, value=None, as_type=None, is_uniform=False):
if not isinstance(attr, Attribute):
attr = Attribute(name=attr, value=value, as_type=as_type, is_uniform=is_uniform)
for at in self.attributes:
if at.name == attr.name:
return at
self.attributes.append(attr)
attr.set_parent(self)
return attr
def remove_property(self, prop):
if isinstance(prop, Property):
for i, pr in enumerate(self.properties):
if pr.name == prop.name:
return self.properties.pop(i)
else:
for i, pr in enumerate(self.properties):
if pr.name == prop.name:
return self.properties.pop(i)
def add_child(self, prim):
assert isinstance(prim, Prim), "Child must be a primitive"
self.children.append(prim)
prim.set_parent(self)
return prim
def _class_inheritence(self):
classes = list(self.__class__.__bases__)
classes.append(self.__class__)
return classes
def set_keyframe(self, frame, value):
self.value = None
self.keyframes[frame] = value
class Stage(AbstractData):
"""Represents the main USD stage that contains all the elements"""
def save(self, location):
# Always force a usda extension
base, ext = os.path.splitext(location)
location = '{}.usda'.format(base)
with open(location, 'w') as f:
f.writelines(self.lines())
def lines(self, indent=0):
lines = super(Stage, self).lines(indent)
lines.append('#usda 1.0\n')
lines.append('(\n')
self.level += 1
lines.append('{}"Written by AUD v{}"\n'.format(self.indent(), __version__))
for property in self.properties:
lines.extend(property.lines(self.level))
self.level -= 1
lines.append(')\n\n')
for child in self.children:
lines.extend(child.lines(self.level))
return lines
def set_frame_range(self, start, end):
self.set_property('startTimeCode', start)
self.set_property('endTimeCode', end)
def set_framerate(self, fps):
self.set_property('timeCodesPerSecond', fps)
def set_up_axis(self, axis="Y"):
self.set_property('upAxis', axis.upper())
class Prim(AbstractData):
"""The basic USD object in the hierarchy"""
as_type = None
def __init__(self, name, is_class=False, is_over=False, as_type=None):
super(Prim, self).__init__()
self.name = name
self.is_class = is_class
self.is_over = is_over
self.as_type = as_type or self.as_type
self.references = []
self.inherits = []
self.variants = []
def lines(self, indent=0):
lines = super(Prim, self).lines(indent=indent)
tokens = []
if self.is_class:
tokens.append('class')
elif self.is_over:
tokens.append('over')
else:
tokens.append('def')
if self.as_type:
tokens.append(self.as_type)
tokens.append('"{}"'.format(self.name))
lines.append('\n{}{} (\n'.format(self.indent(), ' '.join(tokens)))
self.level += 1
for prop in (self.variants + self.references + self.inherits + self.properties):
lines.extend(prop.lines(indent=self.level))
lines.append("\n")
self.level -= 1
lines.append('{0})\n{0}{{\n'.format(self.indent()))
self.level += 1
for attr in self.attributes:
lines.extend(attr.lines(indent=self.level))
lines.append("\n")
for child in self.children:
lines.extend(child.lines(indent=self.level))
self.level -= 1
lines.append('\n{}}}\n'.format(self.indent()))
return lines
def add_reference(self, ref, mode=None):
if mode not in Iterable.modes:
raise ValueError("{} is not a valid mode".format(mode))
for iterable in self.references:
if iterable.mode == mode:
break
else:
iterable = Iterable(name='reference', mode=mode)
self.references.append(iterable)
if not isinstance(ref, IterObject):
ref = IterObject(ref, is_file=True)
iterable.append(ref)
def add_inherit(self, inherits, mode=None):
if mode not in Iterable.modes:
raise ValueError("{} is not a valid mode".format(mode))
for iterable in self.inherits:
if iterable.mode == mode:
break
else:
iterable = Iterable(name='inherits', mode=mode)
            self.inherits.append(iterable)
if not isinstance(inherits, IterObject):
inherits = IterObject(inherits, is_file=False)
iterable.append(inherits)
def add_variant(self, variant, mode=None):
if mode not in Iterable.modes:
raise ValueError("{} is not a valid mode".format(mode))
        for iterable in self.variants:
if iterable.mode == mode:
break
else:
iterable = Iterable(name='variantSets', mode=mode)
self.variants.append(iterable)
if not isinstance(variant, IterObject):
variant = IterObject(variant)
iterable.append(variant)
def set_xform_order(self, order='trs'):
ops = []
for o in order:
if o == 't':
ops.append('xformOp:translate')
elif o == 'r':
ops.append('xformOp:rotateXYZ')
elif o == 's':
ops.append('xformOp:scale')
else:
raise ValueError('Cannot understand xform order: {}'.format(o))
self.set_attribute('xformOpOrder', ops, as_type='token[]', is_uniform=True)
class Property(AbstractData):
"""A property for a USD Prim that appears between the parenthesis"""
def __init__(self, name, value=None, values=None):
super(Property, self).__init__()
self.name = name
self.values = values
self.value = value
def lines(self, indent=0):
lines = super(Property, self).lines(indent=indent)
if not self.was_set:
return lines
tokens = []
if self.as_type:
tokens.append(self.as_type)
tokens.extend([self.formatted_name(), '='])
if self.keyframes:
tokens.append('{')
else:
tokens.append(self.formatted_value())
tokens = [str(t) for t in tokens]
tokens = ' '.join([t for t in tokens if t])
lines.append("{}{}\n".format(self.indent(), tokens))
if not self.keyframes:
return lines
self.level += 1
        for frame in self.keyframes:
            lines.append("{}{}:{},\n".format(
                self.indent(), frame, self.formatted_value(frame=frame)
            ))
        self.level -= 1
        lines.append("{}}}\n".format(self.indent()))
        return lines
class Attribute(AbstractData):
"""Data attributes that provide actual information about a prim between its {}"""
def __init__(self, name, value=None, is_uniform=False,
as_type=None, docstring=None,
allowedTokens=None, **kwargs):
super(Attribute, self).__init__()
self.__doc__ = self.docstring = docstring
self.is_uniform = is_uniform
self.value = value
self.name = name
self.as_type = as_type
self.allowed_tokens = self.allowedTokens = allowedTokens
def lines(self, indent=0):
lines = super(Attribute, self).lines(indent=indent)
if not self.was_set:
return lines
tokens = []
if self.is_uniform:
tokens.append('uniform')
if self.as_type:
tokens.append(self.as_type)
tokens.append(self.formatted_name())
tokens.append('=')
if self.keyframes:
tokens.append('{\n')
else:
tokens.append(self.formatted_value())
tokens = [str(t) for t in tokens]
lines.append('{}{}'.format(self.indent(), ' '.join(tokens)))
if self.keyframes:
self.level += 1
for frame in self.keyframes:
lines.append('{}{}: {},\n'.format(
self.indent(),
frame,
self.formatted_value(frame=frame)
))
self.level -= 1
lines.append('{}}}\n'.format(self.indent()))
if not self.properties:
return lines
lines.append(" (\n")
self.level += 1
for prop in self.properties:
lines.extend(prop.lines(indent=self.level))
self.level -= 1
lines.append('{})'.format(self.indent()))
return lines
class Iterable(list):
"""Represents a USD list"""
mode = None
modes = (None, 'add', 'prepend', 'del')
def __init__(self, name, mode=None):
super(Iterable, self).__init__()
self.name = name
if mode not in self.modes:
raise ValueError("{} is not a valid mode for an iterable".format(mode))
self.mode = mode
self.level = 0
def indent(self, level=None):
"""Returns the indentation string"""
if level is None:
level = self.level
return indentation(level)
def lines(self, indent=0):
self.level = indent
lines = []
tokens = []
if self.mode:
tokens.append(self.mode)
tokens.extend([self.name, '='])
lines.append('{0}{1} [\n{2}{3}\n{0}]'.format(
self.indent(),
' '.join(tokens),
indentation(self.level + 1),
'{},\n'.format(indentation(self.level + 1).join(str(i) for i in self if i))
))
return lines
class IterObject(object):
"""Represents an object inside a USD list and can format to various formats"""
reference = False
location = None
object_name = None
is_file = False
def __init__(self, value=None, object_name=None, is_file=False):
super(IterObject, self).__init__()
if isinstance(value, AbstractData):
object_name = value.path()
value = None
is_file = False
self.value = value
self.object_name = object_name
self.is_file = is_file
def formatted(self):
val = self.value or ''
if not val and not self.object_name:
return
if not self.object_name and not self.is_file:
return formatted_value(val)
if self.is_file:
val = '@{}@'.format(val)
if self.object_name:
val += '<{}>'.format(self.object_name)
return str(val) or None
def __str__(self):
return self.formatted()
class VariantSet(Prim):
"""Create a Variant Set inside a prim"""
def lines(self, indent=0):
lines = AbstractData.lines(self, indent=indent)
lines.append('{}variantSet "{}" = {{\n'.format(self.indent(), self.name))
self.level += 1
for child in self.children:
lines.extend(child.lines(indent=self.level))
lines.append('\n')
self.level -= 1
lines.append('{0}}}\n'.format(self.indent()))
return lines
class Variant(Prim):
"""Create a Variant inside a Variant Set"""
def lines(self, indent=0):
lines = AbstractData.lines(self, indent=indent)
lines.append('{}"{}" {{\n'.format(self.indent(), self.name))
self.level += 1
for child in self.children:
lines.extend(child.lines(indent=self.level))
lines.append('\n')
self.level -= 1
lines.append('{0}}}\n'.format(self.indent()))
return lines
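if __name__ == "__main__":
    # Minimal usage sketch built only from the classes above; prints a small usda
    # document to stdout instead of writing a file.
    stage = Stage()
    stage.set_up_axis("Y")
    stage.set_frame_range(1, 24)
    root = stage.add_child(Prim("root", as_type="Xform"))
    ball = root.add_child(Prim("ball", as_type="Sphere"))
    ball.set_attribute("radius", 2.0, as_type="double")
    print(''.join(stage.lines()))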
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.login_index, name='login_index'),
path('register', views.register, name='register'),
path('logout', views.user_logout, name='user_logout'),
]
|
import numpy as np
from collections import OrderedDict
from os import path
def load(filename, max_rank=None, vocabulary=None):
"""Load word vector data.
Args:
filename: name of file to load.
max_rank: maximum number of embeddings to load (None for no limit)
vocabulary: words to load embeddings for (None for all)
Returns:
Tuple (word_to_idx, embedding) where word_to_idx is
an OrderedDict mapping words to integer indices and embedding is
a numpy array of shape (word-count, vector-size).
"""
# TODO support for other formats
return load_w2v_binary(filename, max_rank, vocabulary)
def load_w2v_binary(filename, max_rank=None, vocabulary=None):
"""Load word2vec binary format.
Args:
filename: name of file to load.
max_rank: maximum number of embeddings to load (None for no limit)
vocabulary: words to load embeddings for (None for all)
Returns:
Tuple (word_to_idx, embedding) where word_to_idx is an
OrderedDict mapping words to integer indices and embedding is
a numpy array of shape (word-count, vector-size).
"""
word_to_idx, vectors = OrderedDict(), []
with open(filename, 'rb') as f:
# header has vocab and vector sizes as strings
word_count, vec_size = map(int, f.readline().split())
for i in range(word_count):
            if max_rank and i >= max_rank:
break
word_to_idx[read_w2v_word(f)] = len(word_to_idx)
vectors.append(np.fromfile(f, np.float32, vec_size))
vectors = np.array(vectors)
if vocabulary is not None:
word_to_idx, vectors = filter_words(word_to_idx, vectors, vocabulary)
return word_to_idx, vectors
def read_w2v_word(flo):
    """Return word from file-like object, break on space."""
    # http://docs.python.org/2/library/functions.html#iter
    word = b''.join(iter(lambda: flo.read(1), b' '))
    return word.decode('utf-8').lstrip('\n')    # harmonize w2v format variants
def filter_words(word_to_idx, vectors, vocabulary):
"""Filter word vector data to vocabulary."""
filtered_to_idx, filtered_indices = OrderedDict(), []
for word, idx in word_to_idx.items():
if word in vocabulary:
filtered_to_idx[word] = len(filtered_to_idx)
filtered_indices.append(idx)
return filtered_to_idx, vectors[filtered_indices]
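if __name__ == "__main__":
    # Minimal usage sketch; "vectors.bin" is a hypothetical word2vec binary file.
    words, vecs = load("vectors.bin", max_rank=10000)
    print(len(words), vecs.shape)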
|
import reversion
from django.db import models
from django.core.exceptions import ValidationError
from .tracked_model import TrackedModel
from .derived_sample import DerivedSample
from ._utils import add_error as _add_error
__all__ = ["DerivedBySample"]
@reversion.register()
class DerivedBySample(TrackedModel):
derived_sample = models.ForeignKey("DerivedSample", on_delete=models.PROTECT, related_name="derived_by_samples")
sample = models.ForeignKey("Sample", on_delete=models.PROTECT, related_name="derived_by_samples")
volume_ratio = models.DecimalField(max_digits=4, decimal_places=3, help_text="Volume ratio in pools.")
def clean(self):
super().clean()
errors = {}
def add_error(field: str, error: str):
_add_error(errors, field, ValidationError(error))
# Check concentration fields given sample_kind (moved from sample because information unavailable until relation created)
if self.sample.concentration is None and self.derived_sample.sample_kind.name in DerivedSample.BIOSPECIMEN_TYPES_CONC_REQUIRED:
add_error("concentration", "Concentration must be specified for a pool or if the sample_kind is DNA")
if errors:
raise ValidationError(errors)
def save(self, *args, **kwargs):
self.full_clean()
super().save(*args, **kwargs) # Save the object
|
import os
from mai.adsorbate_constructor import adsorbate_constructor
from ase.io import read, write
starting_mof_path = os.path.join('example_MOFs','Ni-BTP.cif') #path to CIF of MOF
#Get all Ni indices in ASE Atoms object of MOF
start_mof = read(starting_mof_path)
Ni_idx = [atom.index for atom in start_mof if atom.symbol == 'Ni']
#add H2O adsorbate
atoms = start_mof
for i, site_idx in enumerate(Ni_idx):
ads = adsorbate_constructor(ads='HOH',d_MX1=2.0,d_X1X2=0.96,d_X2X3=0.96,
ang_MX1X2=120,ang_triads=104.5,connect=2)
atoms = ads.get_adsorbate(atoms=atoms,site_idx=site_idx,write_file=False)
#Write out final CIF with all H2O molecules added
if not os.path.isdir('new_mofs'):
os.makedirs('new_mofs')
write(os.path.join('new_mofs','Ni-BTP_allH2O.cif'),atoms)
|
import pandas as pd
df_input_food = pd.read_csv("input_food.csv")
print(df_input_food.head())
df_food = pd.read_csv("food.csv")
print(df_food.describe())
print(df_food.head())
|
# -*- coding: utf-8 -*-
"""Wavefront Python SDK.
This library provides support for sending metrics, histograms and opentracing
spans to Wavefront via proxy or direct ingestion.
@author Hao Song (songhao@vmware.com)
"""
import pkg_resources
from .direct import WavefrontDirectClient
from .proxy import WavefrontProxyClient
__all__ = ['WavefrontDirectClient', 'WavefrontProxyClient']
__version__ = None
try:
__version__ = pkg_resources.get_distribution(
'wavefront-sdk-python'
).version
except pkg_resources.DistributionNotFound:
# __version__ is only available when distribution is installed.
pass
|
from functools import wraps
def ints_only(func):
@wraps(func):
def wrapper(*args, **kawrgs):
args = [int(x) for x in args]
kwargs = {n: int(v)
for n, v
in kwargs.items()}
return func(*args, **kwargs)
return wrapper
@ints_only
def add(left, right):
return left + right
add('57', 99.5)
|
from rest_framework import serializers
from clientes.models import Cliente
from .validators import *
class ClienteSerializer(serializers.ModelSerializer):
class Meta:
model = Cliente
fields = '__all__'
# primeiro modo de fazer validações
def validate(self, data):
if not cpf_valido(data['cpf']):
raise serializers.ValidationError({'cpf': 'Número de CPF inválido'})
if not celular_valido(data['celular']):
raise serializers.ValidationError({'celular': 'Celular deve conter 11 caracteres'})
return data
# segundo modo de fazer validações
def validate_nome(self, nome):
if not nome.isalpha():
raise serializers.ValidationError('Não inclua números no nome')
return nome
def validate_rg(self, rg):
if len(rg) != 9:
raise serializers.ValidationError('RG deve conter 9 caracteres')
return rg
|
"""Fetch weather information
Usage:
weather (-h | --help)
weather [--country=COUNTRY] <city>
Options:
-h, --help Show a brief usage summary.
--country=COUNTRY Restrict cities to an ISO 3166 country code.
An OpenWeatherMap API key MUST be provided via the OPENWEATHERMAP_KEY environment variable.
"""
import os
import sys
import requests
from docopt import docopt
# environment key containing OpenWeatherMap Key
envkey = 'OPENWEATHERMAP_KEY'
if __name__ == '__main__':
args = docopt(__doc__)
# check if key set in environment
if envkey not in os.environ:
print('Error: missing', envkey, 'environment variable')
exit(1)
# get key
apikey = os.environ[envkey]
# get city argument
city = args['<city>']
# get country argument
country = args['--country']
# append country to city (with comma) if set
if (country is not None) and (country != ''):
city += ','+country
try:
r = requests.get('http://api.openweathermap.org/data/2.5/weather?APPID=' + apikey + '&units=metric&q=' + city)
r.raise_for_status()
data = r.json()
    except requests.RequestException as err:
        print('Error: failed to get response from openweathermap for', city, '['+str(err)+']')
        exit(1)
print('Temperature for {0}, {1}: {2:.1f}\u2103'.format(data['name'], data['sys']['country'], data['main']['temp']))
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Define a common data structure to represent external packages and a
function to update packages.yaml given a list of detected packages.
Ideally, each detection method should be placed in a specific subpackage
and implement at least a function that returns a list of DetectedPackage
objects. The update in packages.yaml can then be done using the function
provided here.
The module also contains other functions that might be useful across different
detection mechanisms.
"""
import collections
import itertools
import os
import os.path
import re
import sys
import six
import llnl.util.tty
import spack.config
import spack.spec
import spack.util.spack_yaml
is_windows = sys.platform == 'win32'
#: Information on a package that has been detected
DetectedPackage = collections.namedtuple(
'DetectedPackage', ['spec', 'prefix']
)
def _externals_in_packages_yaml():
"""Return all the specs mentioned as externals in packages.yaml"""
packages_yaml = spack.config.get('packages')
already_defined_specs = set()
for pkg_name, package_configuration in packages_yaml.items():
for item in package_configuration.get('externals', []):
already_defined_specs.add(spack.spec.Spec(item['spec']))
return already_defined_specs
def _pkg_config_dict(external_pkg_entries):
"""Generate a package specific config dict according to the packages.yaml schema.
This does not generate the entire packages.yaml. For example, given some
external entries for the CMake package, this could return::
{
'externals': [{
'spec': 'cmake@3.17.1',
'prefix': '/opt/cmake-3.17.1/'
}, {
'spec': 'cmake@3.16.5',
'prefix': '/opt/cmake-3.16.5/'
}]
}
"""
pkg_dict = spack.util.spack_yaml.syaml_dict()
pkg_dict['externals'] = []
for e in external_pkg_entries:
if not _spec_is_valid(e.spec):
continue
external_items = [('spec', str(e.spec)), ('prefix', e.prefix)]
if e.spec.external_modules:
external_items.append(('modules', e.spec.external_modules))
if e.spec.extra_attributes:
external_items.append(
('extra_attributes',
spack.util.spack_yaml.syaml_dict(e.spec.extra_attributes.items()))
)
# external_items.extend(e.spec.extra_attributes.items())
pkg_dict['externals'].append(
spack.util.spack_yaml.syaml_dict(external_items)
)
return pkg_dict
def _spec_is_valid(spec):
try:
str(spec)
except spack.error.SpackError:
# It is assumed here that we can at least extract the package name from
# the spec so we can look up the implementation of
# determine_spec_details
msg = 'Constructed spec for {0} does not have a string representation'
llnl.util.tty.warn(msg.format(spec.name))
return False
try:
spack.spec.Spec(str(spec))
except spack.error.SpackError:
llnl.util.tty.warn(
'Constructed spec has a string representation but the string'
' representation does not evaluate to a valid spec: {0}'
.format(str(spec))
)
return False
return True
def is_executable(file_path):
"""Return True if the path passed as argument is that of an executable"""
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
def _convert_to_iterable(single_val_or_multiple):
x = single_val_or_multiple
if x is None:
return []
elif isinstance(x, six.string_types):
return [x]
elif isinstance(x, spack.spec.Spec):
# Specs are iterable, but a single spec should be converted to a list
return [x]
try:
iter(x)
return x
except TypeError:
return [x]
def executable_prefix(executable_dir):
"""Given a directory where an executable is found, guess the prefix
(i.e. the "root" directory of that installation) and return it.
Args:
executable_dir: directory where an executable is found
"""
# Given a prefix where an executable is found, assuming that prefix
# contains /bin/, strip off the 'bin' directory to get a Spack-compatible
# prefix
assert os.path.isdir(executable_dir)
components = executable_dir.split(os.sep)
if 'bin' not in components:
return executable_dir
idx = components.index('bin')
return os.sep.join(components[:idx])
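# Illustrative sketch of the prefix logic above (hypothetical path):
#   executable_prefix('/opt/cmake-3.17.1/bin') would return '/opt/cmake-3.17.1'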
def library_prefix(library_dir):
"""Given a directory where an library is found, guess the prefix
(i.e. the "root" directory of that installation) and return it.
Args:
library_dir: directory where an library is found
"""
# Given a prefix where an library is found, assuming that prefix
# contains /lib/ or /lib64/, strip off the 'lib' or 'lib64' directory
# to get a Spack-compatible prefix
assert os.path.isdir(library_dir)
components = library_dir.split(os.sep)
if 'lib64' in components:
idx = components.index('lib64')
return os.sep.join(components[:idx])
elif 'lib' in components:
idx = components.index('lib')
return os.sep.join(components[:idx])
else:
return library_dir
def update_configuration(detected_packages, scope=None, buildable=True):
"""Add the packages passed as arguments to packages.yaml
Args:
detected_packages (list): list of DetectedPackage objects to be added
scope (str): configuration scope where to add the detected packages
buildable (bool): whether the detected packages are buildable or not
"""
predefined_external_specs = _externals_in_packages_yaml()
pkg_to_cfg, all_new_specs = {}, []
for package_name, entries in detected_packages.items():
new_entries = [
e for e in entries if (e.spec not in predefined_external_specs)
]
pkg_config = _pkg_config_dict(new_entries)
all_new_specs.extend([
spack.spec.Spec(x['spec']) for x in pkg_config.get('externals', [])
])
if buildable is False:
pkg_config['buildable'] = False
pkg_to_cfg[package_name] = pkg_config
pkgs_cfg = spack.config.get('packages', scope=scope)
pkgs_cfg = spack.config.merge_yaml(pkgs_cfg, pkg_to_cfg)
spack.config.set('packages', pkgs_cfg, scope=scope)
return all_new_specs
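# Hedged usage sketch: a detection mechanism could build DetectedPackage tuples
# and register them via update_configuration (the spec string, prefix, and scope
# below are hypothetical):
#   entries = {'cmake': [DetectedPackage(spack.spec.Spec('cmake@3.17.1'), '/opt/cmake-3.17.1')]}
#   update_configuration(entries, scope='site', buildable=False)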
def find_win32_additional_install_paths():
"""Not all programs on Windows live on the PATH
Return a list of other potential install locations.
"""
windows_search_ext = []
cuda_re = r'CUDA_PATH[a-zA-Z1-9_]*'
    # The list below should be expanded with other
    # common Windows install locations as necessary
path_ext_keys = ['I_MPI_ONEAPI_ROOT',
'MSMPI_BIN',
'MLAB_ROOT',
'NUGET_PACKAGES']
user = os.environ["USERPROFILE"]
add_path = lambda key: re.search(cuda_re, key) or key in path_ext_keys
windows_search_ext.extend([os.environ[key] for key
in os.environ.keys() if
add_path(key)])
# note windows paths are fine here as this method should only ever be invoked
# to interact with Windows
# Add search path for default Chocolatey (https://github.com/chocolatey/choco)
# install directory
windows_search_ext.append("C:\\ProgramData\\chocolatey\\bin")
# Add search path for NuGet package manager default install location
windows_search_ext.append(os.path.join(user, ".nuget", "packages"))
windows_search_ext.extend(
spack.config.get("config:additional_external_search_paths", default=[])
)
windows_search_ext.extend(spack.util.environment.get_path('PATH'))
return windows_search_ext
def compute_windows_program_path_for_package(pkg):
"""Given a package, attempt to compute its Windows
program files location, return list of best guesses
Args:
pkg (spack.package_base.Package): package for which
Program Files location is to be computed
"""
if not is_windows:
return []
# note windows paths are fine here as this method should only ever be invoked
# to interact with Windows
program_files = 'C:\\Program Files{}\\{}'
    return [program_files.format(arch, name) for
            arch, name in itertools.product(("", " (x86)"),
                                            (pkg.name, pkg.name.capitalize()))]
|
import os
import torch
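# Seed PyTorch's global RNG from the SPLINE_MANUAL_SEED environment variable
# (default 7) so results are reproducible across runs.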
torch.manual_seed(int(os.environ.get('SPLINE_MANUAL_SEED', 7)))
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(UserRole)
admin.site.register(RoleType)
admin.site.register(UserDetail)
admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(School)
admin.site.register(School_post)
admin.site.register(Car_brand_model)
admin.site.register(Car_brand)
|
import json, datetime, time
import jwt
import hashlib
import os, sys, base64
import uuid
from .sql import sql
from reportlab.pdfgen import canvas
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from reportlab.lib import colors
from reportlab.lib.units import mm
from reportlab.graphics.barcode import qr
from reportlab.graphics.shapes import Drawing
from reportlab.graphics import renderPDF
from datetime import datetime
class pdf_doc:
def __init__(self, userId = -1, filename = None):
self.userId = str(userId)
self.filename = filename
def report(self, floteur, id_points, date_start, date_end):
data = floteur.pdf_report(id_points, date_start * 1000, date_end * 1000)
if id_points[0] not in data[1]["data"]:
# return [False, "no data", 400]
size_data = 0
else:
size_data = len(data[1]["data"][id_points[0]])
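        # Pagination heuristic: the first page appears to hold roughly 29 data rows
        # and each following page roughly 40, hence the page-count formula below.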
total_page = round((size_data - 29) / 40 + 1)
total_page = total_page if total_page > 0 else 1
id_doc = str(uuid.uuid4())
documentTitle = 'Wellcheck - Report'
image = '/home/api/Source/Base/Report_base.png'
pdf = canvas.Canvas(sys.stdout)
pdf.setPageSize((210 * mm, 272 * mm))
pdf.setTitle(documentTitle)
pdf.drawImage(image, 0, 0, 210 * mm, 272 * mm)
pdf.setFillColorRGB(0, 0, 0)
page = 1
now = int(round(time.time()))
save = True
qr_link = 'https://doc.wellcheck.fr/?doc=' + id_doc
if int(date_end) > int(now - 200):
qr_link = "https://doc.wellcheck.fr/src.php?id=" + str(id_points[0]) + "&from=" + str(int(date_start)) + "&to=" + str(int(date_end))
save = False
qr_code = qr.QrCodeWidget(qr_link)
bounds = qr_code.getBounds()
width = bounds[2] - bounds[0]
height = bounds[3] - bounds[1]
dr = Drawing(80, 80, transform=[80./width,0,0,80./height,0,0])
dr.add(qr_code)
pdf.setFont("Helvetica-Bold", 14)
pdf.drawString(90 * mm, (267 - 40) * mm, "Informations:")
pdf.setFont("Helvetica-Bold", 12)
pdf.drawString(64 * mm, (267 - 46) * mm, "ID: " + id_points[0])
pdf.setFont("Helvetica", 12)
pdf.drawString(100 * mm, 13 * mm, str(page) + " / " + str(total_page))
pdf.drawString(64.5 * mm, 5 * mm, " unfinished period | temporary document" if not save else id_doc)
renderPDF.draw(dr, pdf, 210 * mm - 82, 2)
pdf.drawString(40 * mm, (267 - 54) * mm, "Sigfox_id")
pdf.drawString(40 * mm, (267 - 58) * mm, "Name")
pdf.drawString(40 * mm, (267 - 62) * mm, "Owned_by")
pdf.drawString(111 * mm, (267 - 54) * mm, "Time_start")
pdf.drawString(111 * mm, (267 - 58) * mm, "Time_end")
pdf.drawString(111 * mm, (267 - 62) * mm, "Time_total")
pdf.line(105 * mm, (267 - 50) * mm, 105 * mm, (267 - 63) * mm)
pdf.setFont("Helvetica-Oblique", 12)
diff = round((int(date_end) - int(date_start)) / 3600 , 1)
pdf.drawString(65 * mm, (267 - 54) * mm, str(data[1]["detail"][id_points[0]]["sigfox_id"]))
pdf.drawString(65 * mm, (267 - 58) * mm, str(data[1]["detail"][id_points[0]]["name"]))
pdf.drawString(65 * mm, (267 - 62) * mm, str(data[1]["detail"][id_points[0]]["user_id"]))
pdf.drawString(136 * mm, (267 - 54) * mm, str(datetime.fromtimestamp(int(date_start))))
pdf.drawString(136 * mm, (267 - 58) * mm, str(datetime.fromtimestamp(int(date_end))))
pdf.drawString(136 * mm, (267 - 62) * mm, str(diff) + (" hours" if diff > 0 else " hour"))
pdf.setFont("Helvetica-Bold", 12)
pdf.drawString(33 * mm, (267 - 80) * mm, "Date")
pdf.drawString(69 * mm, (267 - 80) * mm, "Note")
pdf.drawString(89 * mm, (267 - 80) * mm, "Ph")
pdf.drawString(109 * mm, (267 - 80) * mm, "Temp")
pdf.drawString(138 * mm, (267 - 80) * mm, "Redox")
pdf.drawString(168 * mm, (267 - 80) * mm, "Turbidity")
pdf.setFont("Helvetica", 12)
n = 0
t = 0
d = 11
start = 88
while n < size_data:
pdf.drawString(18 * mm, (267 - start - t * 5) * mm, str(datetime.fromtimestamp(int(int(data[1]["data"][id_points[0]][n]["date"]) / 1000))))
pdf.drawString(68 * mm, (267 - start - t * 5) * mm, str(data[1]["data"][id_points[0]][n]["data"]["data"]["note"]).rjust(4, '0'))
pdf.drawString(88 * mm, (267 - start - t * 5) * mm, str(data[1]["data"][id_points[0]][n]["data"]["data"]["ph"]).rjust(4, '0'))
pdf.drawString(110 * mm, (267 - start - t * 5) * mm, str(data[1]["data"][id_points[0]][n]["data"]["data"]["temp"]).rjust(4, '0'))
pdf.drawString(138 * mm, (267 - start - t * 5) * mm, str(data[1]["data"][id_points[0]][n]["data"]["data"]["redox"]).rjust(6, '0'))
pdf.drawString(172 * mm, (267 - start - t * 5) * mm, str(data[1]["data"][id_points[0]][n]["data"]["data"]["turbidity"]).rjust(4, '0'))
n += 1
d += 1
t += 1
if d > 40:
pdf.showPage()
start = 35
d = 0
t = 0
page += 1
pdf.drawImage(image, 0, 0, 210 * mm, 272 * mm)
pdf.setFont("Helvetica-Bold", 12)
pdf.drawString(33 * mm, (267 - start + 8) * mm, "Date")
pdf.drawString(69 * mm, (267 - start + 8) * mm, "Note")
pdf.drawString(89 * mm, (267 - start + 8) * mm, "Ph")
pdf.drawString(109 * mm, (267 - start + 8) * mm, "Temp")
pdf.drawString(138 * mm, (267 - start + 8) * mm, "Redox")
pdf.drawString(168 * mm, (267 - start + 8) * mm, "Turbidity")
pdf.setFont("Helvetica", 12)
pdf.drawString(100 * mm - ( 0 if page < 10 else 2 * mm if page < 100 else 4 * mm), 13 * mm, str(page) + " / " + str(total_page))
pdf.drawString(64.5 * mm, 5 * mm, " unfinished period | temporary document " if not save else id_doc)
renderPDF.draw(dr, pdf, 210 * mm - 82, 2)
return [True, {"doc_id": id_doc, "Content": str(base64.b64encode(pdf.getpdfdata().decode('utf8', 'ignore').encode('ascii')))[2:-1], "Type": "pdf", "Save": save}, None]
def __getsecret(self):
return str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
|
from decimal import Decimal
import pytest
from bitcart.utils import bitcoins, convert_amount_type, satoshis
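# satoshis() and bitcoins() are inverse unit conversions: 1 BTC corresponds to
# 100_000_000 satoshis, which is what the parametrized cases below exercise.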
@pytest.mark.parametrize("btc,expected", [(0.1, 10000000), (1, 100000000), (0.00000001, 1), (5, 500000000)])
def test_satoshis(btc, expected):
result = satoshis(btc)
assert isinstance(result, int)
assert result == expected
@pytest.mark.parametrize("sats,expected", [(10000000, "0.1"), (100000000, "1"), (1, "0.00000001"), (500000000, "5")])
def test_bitcoins(sats, expected):
result = bitcoins(sats)
assert isinstance(result, Decimal)
assert result == Decimal(expected)
def test_convertability():
assert bitcoins(satoshis(1)) == 1
def test_convert_amount_type():
assert convert_amount_type("1") == Decimal("1")
@pytest.mark.asyncio
async def test_decimal_sending(btc_wallet):
amount = Decimal("0.5")
req = await btc_wallet.add_request(amount) # ensures that it is possible to pass decimal
assert req[btc_wallet.amount_field] == amount
|
import tnetwork as dn
import os
import subprocess
import time
###############################
######For this class, it is necessary to have Matlab installed
######And to set up the matlab for python engine, see how to there
###### https://fr.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
###### (you can find the value of matlabroot by tapping matlabroot in your matlab console)
################################
def launchCommandWaitAnswer(acommand, printOutput=True):
process = subprocess.Popen(acommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if printOutput:
while (True):
retcode = process.poll() # returns None while subprocess is running
line = process.stdout.readline()
print(line)
# yield line
            if retcode is not None:
                if retcode != 0:
                    print("FAILURE WITH : " + acommand)
                break
process.wait()
def iLCD(dynNet,par1=None,par2=None,runningTime=False):
#initialisation inspired by http://netwiki.amath.unc.edu/GenLouvain/GenLouvain
dir = os.path.dirname(__file__)
jarLocation = os.path.join(dir, "iLCD2016.jar")
sandBox = os.path.join(dir, "sandBox")
networkLocation = sandBox+"/network.ctnf"
communitiesLocation = sandBox+"/snapshot_affiliations"
dn.write_ordered_changes(dynNet, networkLocation, edgeIdentifier="")
commandToLaunch = "java -jar "+jarLocation+" -i "+networkLocation+" -o "+communitiesLocation
start_time = time.time()
launchCommandWaitAnswer(commandToLaunch,printOutput=False)
duration = (time.time() - start_time)
#print("algorithm Running time: %s seconds ---" % runningTime)
dynComs = dn.readListOfModifCOM(communitiesLocation+".ctnf")
if runningTime:
return duration
return dynComs
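# Hedged usage sketch (assumes `dyn_net` is a tnetwork dynamic graph accepted by
# dn.write_ordered_changes):
#   coms = iLCD(dyn_net)                      # detected dynamic communities
#   elapsed = iLCD(dyn_net, runningTime=True) # wall-clock duration instead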
#preprocessMatrixForm(0.5)
#muchaOriginal("bla")
|
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
bbox = tf.convert_to_tensor(bbox)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = LayoutLMConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFLayoutLMModel(config=config)
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFLayoutLMForMaskedLM(config=config)
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFLayoutLMForSequenceClassification(config=config)
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFLayoutLMForTokenClassification(config=config)
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification)
if is_tf_available()
else ()
)
test_head_masking = False
test_onnx = True
onnx_min_opset = 10
def setUp(self):
self.model_tester = TFLayoutLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFLayoutLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# these are sequence labels (i.e. at the token level)
labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]]) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
def test_forward_pass_no_head(self):
model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
# test the sequence output on [0, :3, :3]
expected_slice = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
)
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
# test the pooled output on [1, :3]
expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
@slow
def test_forward_pass_sequence_classification(self):
# initialize model with randomly initialized sequence classification head
model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=tf.convert_to_tensor([1, 1]),
)
# test whether we get a loss as a scalar
loss = outputs.loss
expected_shape = (2,)
self.assertEqual(loss.shape, expected_shape)
# test the shape of the logits
logits = outputs.logits
expected_shape = (2, 2)
self.assertEqual(logits.shape, expected_shape)
@slow
def test_forward_pass_token_classification(self):
# initialize model with randomly initialized token classification head
model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
# test the shape of the logits
logits = outputs.logits
expected_shape = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape, expected_shape)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import time
import threading
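# Classic dining-philosophers demonstration: each philosopher alternates between
# thinking and eating, and must hold both the left and right fork (modelled as
# reentrant locks) to eat. Because every philosopher grabs the left fork first,
# this naive strategy can deadlock if all five acquire their left fork at once.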
class Philosopher(threading.Thread):
def __init__(self, name, leftFork, rightFork):
print("{} Has Sat Down At the Table".format(name))
threading.Thread.__init__(self, name=name)
self.leftFork = leftFork
self.rightFork = rightFork
def run(self):
print("{} has started thinking".format(threading.currentThread().getName()))
while True:
time.sleep(random.randint(1, 5))
print("{} has finished thinking".format(threading.currentThread().getName()))
self.leftFork.acquire()
time.sleep(random.randint(1, 5))
try:
print("{} has acquired the left fork".format(threading.currentThread().getName()))
self.rightFork.acquire()
try:
print("{} has attained both forks, currently eating".format(threading.currentThread().getName()))
finally:
self.rightFork.release()
print("{} has released the right fork".format(threading.currentThread().getName()))
finally:
self.leftFork.release()
print("{} has released the left fork".format(threading.currentThread().getName()))
def main():
fork1 = threading.RLock()
fork2 = threading.RLock()
fork3 = threading.RLock()
fork4 = threading.RLock()
fork5 = threading.RLock()
philosopher1 = Philosopher("Kant", fork1, fork2)
philosopher2 = Philosopher("Aristotle", fork2, fork3)
philosopher3 = Philosopher("Spinoza", fork3, fork4)
philosopher4 = Philosopher("Marx", fork4, fork5)
philosopher5 = Philosopher("Russell", fork5, fork1)
philosopher1.start()
philosopher2.start()
philosopher3.start()
philosopher4.start()
philosopher5.start()
philosopher1.join()
philosopher2.join()
philosopher3.join()
philosopher4.join()
philosopher5.join()
if __name__ == '__main__':
main()
|
"""
Minimum Description Length Principle (MDLP) binning
- Original paper: http://sci2s.ugr.es/keel/pdf/algorithm/congreso/fayyad1993.pdf
- Implementation inspiration: https://www.ibm.com/support/knowledgecenter/it/SSLVMB_21.0.0/com.ibm.spss.statistics.help/alg_optimal-binning.htm
"""
import collections
import math
import numpy as np
from scipy import stats
from sklearn.utils import check_X_y
from .base import BaseSupervisedBinner
class MDLPBinner(BaseSupervisedBinner):
def fit(self, X, y, **fit_params):
"""Determine which are the best cut points for each column in X based on y."""
X, y = check_X_y(X, y, y_numeric=True)
self.cut_points_ = [mdlp_cut(x, y, []) for x in X.T]
return self
@property
def cut_points(self):
return self.cut_points_
def calc_class_entropy(y):
class_counts = np.unique(y, return_counts=True)[1]
return stats.entropy(class_counts, base=2)
def calc_class_information_entropy(x, y, cut_point):
partition = x <= cut_point
y_1 = y[partition]
y_2 = y[~partition]
ent_1 = calc_class_entropy(y_1)
ent_2 = calc_class_entropy(y_2)
return (y_1.size * ent_1 + y_2.size * ent_2) / (y_1.size + y_2.size)
def mdlp_cut(x, y, cut_points):
# No cut is necessary if there is only one class
if len(np.unique(y)) == 1:
        return sorted(cut_points)
# Calculate the current entropy
y_ent = calc_class_entropy(y)
# Sort x and y according to x
sorted_indexes = x.argsort()
x = x[sorted_indexes]
y = y[sorted_indexes]
# Find the potential cut points
potential_cut_points = []
for i in range(x.size - 1):
potential_cut_points.append((x[i] + x[i+1]) / 2)
    # Remove duplicate candidate cut points
potential_cut_points = list(set(potential_cut_points))
    # Find the cut point which gives the lowest class information entropy
cut_point = min(
potential_cut_points,
key=lambda cut_point: calc_class_information_entropy(x, y, cut_point)
)
# Calculate the information gain obtained with the obtained cut point
new_ent = calc_class_information_entropy(x, y, cut_point)
gain = y_ent - new_ent
# Partition the data
partition = x <= cut_point
x_1 = x[partition]
y_1 = y[partition]
x_2 = x[~partition]
y_2 = y[~partition]
# Get the number of unique classes in each group
k = len(np.unique(y))
k_1 = len(np.unique(y_1))
k_2 = len(np.unique(y_2))
# Calculate the entropy of each group
y_1_ent = calc_class_entropy(y_1)
y_2_ent = calc_class_entropy(y_2)
    # Calculate the MDLP acceptance criterion (Fayyad & Irani, 1993)
    delta = math.log2(3 ** k - 2) - (k * y_ent - k_1 * y_1_ent - k_2 * y_2_ent)
n = y.size
acceptance_criterion = (math.log2(n - 1) + delta) / n
# Add the cut point if the gain is higher than the acceptance criterion
if gain > acceptance_criterion:
cut_points.append(cut_point)
# Recursively check if further cuts are possible
mdlp_cut(x_1, y_1, cut_points)
mdlp_cut(x_2, y_2, cut_points)
return sorted(cut_points)
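# Minimal usage sketch (synthetic data; assumes the scikit-learn-style fit API
# defined above):
#   import numpy as np
#   X = np.array([[1.0], [1.2], [3.5], [3.7]])
#   y = np.array([0, 0, 1, 1])
#   binner = MDLPBinner().fit(X, y)
#   binner.cut_points  # one list of cut points per column of X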
|
import os
import numpy as np
import random
import argparse
import json
def check_info_all(annotations):
print("Element 0: ", annotations[0])
print("Keys of 0 -> end: ", annotations[1].keys())
print("The whole of annotations: ", annotations )
# for annotation in annotations:
# print(type(annotation))
# print(json.dumps(annotation, indent=4))
def check_info_element(annotations):
random_annotation = random.choice(annotations)
print('keys: ', random_annotation.keys())
print("Num key: ", len(random_annotation.keys()))
for key in random_annotation.keys():
if key != "ocr_info":
print("{}: {}".format(key, random_annotation[key]))
else:
print("ocr_info")
for e in random_annotation[key]:
print("\t", e)
def check_question_equal_images(annotations):
question_id = []
image_id = []
for annotation in annotations[1:]:
question_id.append(annotation["question_id"])
image_id.append(annotation['image_id'])
question_id = set(question_id)
image_id = set(image_id)
print("number question id: ", len(question_id))
print("number image id: ", len(image_id))
if len(question_id) == len(image_id):
return 1
return 0
def find_question(annotations):
questions_id = []
images_id = []
for annotation in annotations[1:]:
question_id = annotation["question_id"]
image_id = annotation['image_id']
if image_id in images_id:
print("image_id: {} question_id: {}".format(image_id, question_id))
questions_id.append(question_id)
images_id.append(image_id)
def show_question_require(annotations):
for annotation in annotations[1:]:
if annotation["question_id"] == 2 or annotation["question_id"] == 1:
print(annotation)
def show_all_questions(annotations):
print(len(annotations))
for annotation in annotations[1:]:
print(annotation["question_id"])
def show_all_ocr_tokens(annotations):
for annotation in annotations[1:]:
print(annotation["ocr_tokens"])
def find_img(annotations, img_name):
for annotation in annotations[1:]:
if annotation["image_name"]==img_name:
print("Yes")
return 1
print("NO")
def find_num_max_answer(annotations):
max_num = 0
for annotation in annotations[1:]:
if len(annotation["answers"]) > max_num:
max_num = len(annotation["answers"])
return max_num
def count_number_answer(annotations):
for annotation in annotations[1:]:
print(len(annotation["answers"]))
def show_all_answers(annotations):
for annotation in annotations[1:]:
print(annotation["answers"])
def check_number_answer(annotations, number_answer):
for annotation in annotations[1:]:
if len(annotation["answers"]) != number_answer:
print(len(annotation["answers"]))
print(annotation["answers"])
def sumarize_number_answer(annotations):
sum = []
for annotation in annotations[1:]:
sum.append(len(annotation["answers"]))
sum = set(sum)
return sum
def check_question_id(annotations, question_id):
for annotation in annotations[1:]:
if annotation["question_id"] == question_id:
return True
return False
def count_type_answer(annotations):
result = {1: 0}
for annotation in annotations[1:]:
num_answer = len(annotation["answers"])
if num_answer not in result.keys():
result[num_answer] = 1
else:
result[num_answer] += 1
return result
def check_ocr_bbox(annotations):
size_list = []
for annotation in annotations[1:]:
size_list.append(np.array(annotation['ocr_normalized_boxes']).shape)
if np.array(annotation['ocr_normalized_boxes']).shape[0] == 0:
print(annotation['image_name'])
print(annotation['ocr_normalized_boxes'])
size_list = set(size_list)
print('size list: ', size_list)
def main(args):
annotations = np.load(args.annot, allow_pickle=True)
print('len: ', len(annotations))
if args.option == 0:
        print('Show all info of annotations')
check_info_all(annotations)
elif args.option == 1:
print("Show info random element")
check_info_element(annotations)
elif args.option == 2:
print("Is number question equal images ?")
if check_question_equal_images(annotations) == 1:
print("Yes")
else:
print("No")
elif args.option == 3:
print("Show image have one more question")
find_question(annotations)
elif args.option == 4:
print("Show required question")
show_question_require(annotations)
elif args.option == 5:
print("Show all question")
show_all_questions(annotations)
elif args.option == 6:
print("Show all ocr tokens")
show_all_ocr_tokens(annotations)
elif args.option == 7:
print("Find image")
img_name="20467.jpeg"
find_img(annotations, img_name)
elif args.option == 8 :
max_num_ans = find_num_max_answer(annotations)
print("Max number answer: ", max_num_ans)
elif args.option == 9:
print("Show number answers")
count_number_answer(annotations)
elif args.option == 10:
print("Show all answers")
show_all_answers(annotations)
elif args.option == 11:
print("Check number answer")
num_ans = 10
check_number_answer(annotations, num_ans)
elif args.option == 12:
print("number answer")
lst = sumarize_number_answer(annotations)
print(lst)
elif args.option == 13:
print("No type answer")
result = count_type_answer(annotations)
print(result)
elif args.option == 14:
question_id = 92270
print("Check question id ", question_id)
if check_question_id(annotations, question_id) == True:
print("Yes")
else:
print("No")
elif args.option == 15:
check_ocr_bbox(annotations)
else:
print("Please choose suitable option")
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--annot",
default="env_variable/data/datasets/textvqa/defaults/annotations/imdb_train_ocr_en.npy",
type=str,
help="The folder obtain annotation of TextVQA"
)
parser.add_argument(
"--option",
default=0,
type=int,
help="Choose suitable option: "
+ "0: show all info of annnotation"
+ "1: show info random element"
+ "2: check question equals image"
+ "3: find image have one more question"
+ "4: Show required question"
+ "5: show all question"
+ "6: Show all ocr tokens",
)
return parser.parse_args()
if __name__ == "__main__":
args = get_parser()
main(args)
'''
python utils/analysis_annotation.py \
--annot="env_variable/data/datasets/textvqa/defaults/annotations/imdb_train_ocr_en.npy" \
--option=1
python utils/analysis_annotation_textvqa.py \
--annot="/mlcv/WorkingSpace/NCKH/tiennv/vqa_thesis/docvqa/libs/mmf/new_annotations/lmdb_val_en.npy" \
--option=1
python utils/analysis_annotation.py \
--annot="env_variable/data/datasets/inforgraphicvqa/defaults/annotations/infoVQA_train_en.npy" \
--option=0
python utils/analysis_annotation.py \
--annot="env_variable/data/datasets/inforgraphicvqa/defaults/annotations/infoVQA_val_en.npy" \
--option=0
python utils/analysis_annotation.py \
--annot="env_variable/data/datasets/vi_infographicvqa/defaults/annotations/infoVQA_train_vi.npy" \
--option=0
python utils/analysis_annotation.py \
--annot="env_variable/data/datasets/vi_infographicvqa/20_object/annotations/infoVQA_val_vi.npy" \
--option=0
'''
|
from .Adding_Simple import AddSimple
from .Criteria import ArgsCriteria
|