import pytest
from pathlib import Path
import os
examples_path = os.path.join(os.path.dirname(__file__), 'examples')
@pytest.fixture
def example():
    return lambda p: open(os.path.join(examples_path, p)).read()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np
import datetime
# parameters for loading data and images
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'
# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised","neutral"]
#EMOTIONS = ["生气", "反感", "害怕", "开心", "悲伤", "惊讶", "平静"]  # Chinese labels for the same emotions
feelings_faces = []
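# Load one emoji image per emotion; cv2.imread with flag -1 (IMREAD_UNCHANGED) keeps the PNG alpha channel, which is used for blending later.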
for index, emotion in enumerate(EMOTIONS):
feelings_faces.append(imutils.resize(cv2.imread('emojis/' + emotion + '.png', -1),height=60,width=60))
# starting video streaming
#cv2.namedWindow('test')
imagepath = "./test_pic/happy.jpg"
image = cv2.imread(imagepath)
image = imutils.resize(image,width=300)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_detection.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
print(faces)
canvas = np.zeros((250, 300, 3), dtype="uint8")
frameClone = image.copy()
if len(faces) > 0:
faces = sorted(faces, reverse=True,
key=lambda x: x[2] * x[3])[0]  # pick the largest detection by area (w * h)
(fX, fY, fW, fH) = faces
# Extract the ROI of the face from the grayscale image, resize it to a fixed 64x64 pixels, and then prepare
# the ROI for classification via the CNN
roi = gray[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (64, 64))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
starttime = datetime.datetime.now()
preds = emotion_classifier.predict(roi)[0]
endtime = datetime.datetime.now()
delta = (endtime - starttime).total_seconds() * 1000.0  # elapsed prediction time in milliseconds
emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
print(preds)
for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
# construct the label text
text = "{}: {:.2f}%".format(emotion, prob * 100)
emoji_face = feelings_faces[np.argmax(preds)]
w = int(prob * 300)
cv2.rectangle(canvas, (7, (i * 35) + 5),
(w, (i * 35) + 35), (0, 0, 255), -1)
cv2.putText(canvas, text, (10, (i * 35) + 23),
cv2.FONT_HERSHEY_SIMPLEX, 0.45,
(255, 255, 255), 2)
cv2.putText(frameClone, label, (fX, fY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
(0, 0, 255), 2)
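# Alpha-blend the 60x60 emoji (BGRA) into the top-right corner of the frame, channel by channel, using its alpha channel as the blend weight.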
for c in range(0, 3):
frameClone[10:70, 240:300, c] = emoji_face[:, :, c] * \
(emoji_face[:, :, 3] / 255.0) + frameClone[10:70,
240:300, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
cv2.imshow("The result box. cost time: %s ms" % delta, frameClone)
print "The emotion is %s, and prob is %s" % (label, emotion_probability)
print "The processing cost time: %s ms" % delta
cv2.imshow("Probabilities", canvas)
cv2.waitKey(0)
|
import pandas
from classification.Classification import Classification
from experiment.configuration import Parameters
from loader.PreprocessedDataLoader import PreprocessedDataLoader
from plot.Ploter import plot_pie, plot_box
from vectorization.BagOfWordsModel import BagOfWordsModel
from vectorization.Doc2VecModel import Doc2VecModel
from vectorization.TdIdfModel import TfIdfModel
all_data = {
"arline": PreprocessedDataLoader('processed/arline.csv'),
"review": PreprocessedDataLoader('processed/review.csv'),
"amazon": PreprocessedDataLoader('processed/amazon.csv')
}
model = 'Naive Bayes'
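# Classifier name handed to Classification.fit for every dataset/vectorizer pair; it also determines the name of the results CSV written at the end.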
def load_data(data_loader, plot=False):
data_loader.load()
data = data_loader.get_data()
data['sentiment'] = data['sentiment'].apply(lambda x: 0 if x == 'negative' else 1)
if plot:
plot_pie(data, data_loader.labels)
plot_box(data)
return data
def predict(vectorizer):
result_dict = {}
for name, loader in all_data.items():
print("Start processing for {} and {} ...".format(name, vectorizer.model_name()))
loaded_data = load_data(loader)
print("Data length: {}".format(len(loaded_data)))
print("Class number:")
print(loaded_data['sentiment'].value_counts())
classification = Classification(folds=5, score='f1')
train = loaded_data.text
train_labels = loaded_data.sentiment
train_data = vectorizer.fit_transform(train)
print("Train size {}".format(train_data.shape))
result = classification.fit(model, train_data, train_labels, Parameters.classification)
result_dict['model'] = vectorizer.model_name()
result_dict["{} precision".format(name)] = result.precision
result_dict["{} recall".format(name)] = result.recall
result_dict["{} f1".format(name)] = result.f1
result_dict["{} accuracy".format(name)] = result.accuracy
result_dict['execution_time'] = result.execution_time
params = {'{} {}'.format(name, k): v for k, v in result.best_param.items()}
result_dict.update(params)
vectorizer.clean()
print("Train time {} s".format(round(result.execution_time, 2)))
print("Finish processing for {} and {} ... \n".format(name, vectorizer.model_name()))
return pandas.DataFrame(data=result_dict, index=[0])
uni_gram_bow = predict(BagOfWordsModel(n=1))
bi_gram_bow = predict(BagOfWordsModel(n=2, min_frequent=1))
tri_gram_bow = predict(BagOfWordsModel(n=3, min_frequent=1))
uni_gram_td_idf = predict(TfIdfModel(n=1))
bi_gram_td_idf = predict(TfIdfModel(n=2, min_frequent=1))
tri_gram_td_idf = predict(TfIdfModel(n=3, min_frequent=1))
doc_2_vec_dm = predict(Doc2VecModel(dm=1, size=300))
doc_2_vec_dbow = predict(Doc2VecModel(dm=0, size=300))
frames = [
uni_gram_bow,
bi_gram_bow,
tri_gram_bow,
uni_gram_td_idf,
bi_gram_td_idf,
tri_gram_td_idf,
doc_2_vec_dm,
doc_2_vec_dbow
]
file_name = model.lower().replace(" ", "_")
final_frame = pandas.concat(frames)
final_frame.to_csv('results/{}.csv'.format(file_name), encoding='utf-8')
print("Final execution time for all models {} s\n".format(final_frame['execution_time'].sum()))
|
from datetime import datetime
from app import db, create_uuid
from app.dao.dao_utils import transactional, version_class
from app.models import ServiceCallbackApi
from app.models import DELIVERY_STATUS_CALLBACK_TYPE, COMPLAINT_CALLBACK_TYPE
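# Note: @transactional and @version_class come from dao_utils; presumably the former wraps the call in a session commit/rollback and the latter records a version/history row for ServiceCallbackApi.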
@transactional
@version_class(ServiceCallbackApi)
def save_service_callback_api(service_callback_api):
service_callback_api.id = create_uuid()
service_callback_api.created_at = datetime.utcnow()
db.session.add(service_callback_api)
@transactional
@version_class(ServiceCallbackApi)
def reset_service_callback_api(service_callback_api, updated_by_id, url=None, bearer_token=None):
if url:
service_callback_api.url = url
if bearer_token:
service_callback_api.bearer_token = bearer_token
service_callback_api.updated_by_id = updated_by_id
service_callback_api.updated_at = datetime.utcnow()
db.session.add(service_callback_api)
def get_service_callback_api(service_callback_api_id, service_id):
return ServiceCallbackApi.query.filter_by(id=service_callback_api_id, service_id=service_id).first()
def get_service_delivery_status_callback_api_for_service(service_id):
return ServiceCallbackApi.query.filter_by(
service_id=service_id,
callback_type=DELIVERY_STATUS_CALLBACK_TYPE
).first()
def get_service_complaint_callback_api_for_service(service_id):
return ServiceCallbackApi.query.filter_by(
service_id=service_id,
callback_type=COMPLAINT_CALLBACK_TYPE
).first()
@transactional
def delete_service_callback_api(service_callback_api):
db.session.delete(service_callback_api)
|
import os
import shutil
import string
import random
import sqlite3
from functools import partial
from threading import Thread
from kivy.utils import platform
from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty
from kivy.animation import Animation
from kivy.core.clipboard import Clipboard
from kivymd.uix.list import (
OneLineIconListItem,
OneLineListItem,
TwoLineListItem,
OneLineAvatarIconListItem,
ILeftBodyTouch,
IRightBodyTouch,
ContainerSupport,
)
from kivymd.uix.selectioncontrol import MDCheckbox, MDSwitch
from kivymd.uix.dialog import MDDialog
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.button import MDFlatButton, MDRaisedButton
from kivymd.uix.filemanager import MDFileManager
from kivymd.toast import toast
from kivymd.theming import ThemeManager
class CustomOneLineIconListItem(OneLineIconListItem):
icon = StringProperty()
class OptionsScreen(Screen):
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.initUI()
def initUI(self):
data = [
("Appearance", "format-paint"),
("Database", "database"),
("Security", "security"),
("Password Suggestion", "key"),
("About", "information-outline"),
]
for text, icon in data:
self.ids.container.add_widget(
CustomOneLineIconListItem(text=text, icon=icon, on_press=self.optionBtn)
)
def optionBtn(self, button):
text = button.text
if text == "Appearance":
self.manager.setAppearanceOptionsScreen()
elif text == "Database":
self.manager.setDatabaseOptionsScreen()
elif text == "Security":
self.manager.setSecurityOptionsScreen()
elif text == "Password Suggestion":
self.manager.setPasswordSuggestionOptionsScreen()
elif text == "About":
toast("Created by Ibrahim Cetin")
def goBackBtn(self):
self.manager.setMainScreen()
class SortSelectionItem(OneLineAvatarIconListItem):
divider = None
screen = None
def __init__(self, **kwargs):
super().__init__()
self.text = kwargs.get("text")
self.ids.check.active = kwargs.get("active")
class LeftCheckbox(MDCheckbox, ILeftBodyTouch):
pass
class SubtitlesSelectionItem(OneLineAvatarIconListItem):
def __init__(self, **kwargs):
super().__init__()
self.text = kwargs.get("text")
self.ids.check.active = kwargs.get("active")
class AppearanceOptionsScreen(Screen):
sort_options = {
"a_to_z": "Alphabetically (A to Z)",
"z_to_a": "Alphabetically (Z to A)",
"first_to_last": "Date (First to Last)",
"last_to_first": "Date (Last to First)",
}
dialog = None
sort_by = None
list_subtitles_options = None
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.con = kwargs.get("con")
self.cursor = kwargs.get("cursor")
self.theme_cls = ThemeManager()
self.getOptions()
self.setOptions()
def getOptions(self):
self.cursor.execute(
"SELECT sort_by, list_subtitles, animation_options FROM options"
)
options = self.cursor.fetchall()[0]
self.sort_by = options[0]
self.list_subtitles_options = [bool(int(o)) for o in options[1].split(",")]
self.animation_options = [bool(int(o)) for o in options[2].split(",")]
def setOptions(self):
self.ids.sort_by_item.secondary_text = self.sort_options.get(self.sort_by)
self.setListSubtitlesText()
self.ids.transition_animation_switch.active = self.animation_options[0]
self.ids.bottomsheet_animation_switch.active = self.animation_options[1]
def sortByButton(self):
def is_current(code):
return code == self.sort_by
items = []
SortSelectionItem.screen = self
for code, text in self.sort_options.items():
items.append(SortSelectionItem(text=text, active=is_current(code)))
self.dialog = MDDialog(
title="Sort By",
type="confirmation",
items=items,
buttons=[
MDFlatButton(
text="Cancel",
text_color=self.theme_cls.primary_color,
on_press=self.closeDialog,
),
],
)
self.dialog.open()
def setSortByOption(self, text):
self.sort_by = list(self.sort_options.keys())[
list(self.sort_options.values()).index(text)
]
self.cursor.execute("UPDATE options SET sort_by = ?", (self.sort_by,))
self.con.commit()
self.ids.sort_by_item.secondary_text = text
self.closeDialog()
def listSubtitlesButton(self):
self.dialog = MDDialog(
title="List Subtitles",
type="confirmation",
items=[
SubtitlesSelectionItem(
text="EMail", active=self.list_subtitles_options[0]
),
SubtitlesSelectionItem(
text="Username", active=self.list_subtitles_options[1]
),
],
buttons=[
MDFlatButton(
text="Cancel",
text_color=self.theme_cls.primary_color,
on_press=self.closeDialog,
),
MDRaisedButton(text="Okay", on_press=self.getChecked),
],
)
self.dialog.open()
def getChecked(self, button):
self.list_subtitles_options = [
item.ids.check.active for item in self.dialog.items
]
new_status = ",".join([str(int(b)) for b in self.list_subtitles_options])
self.cursor.execute("UPDATE options SET list_subtitles = ?", (new_status,))
self.con.commit()
self.setListSubtitlesText()
self.closeDialog()
def setListSubtitlesText(self):
if all(self.list_subtitles_options):
text = "EMail and Username"
elif self.list_subtitles_options[0]:
text = "EMail"
elif self.list_subtitles_options[1]:
text = "Username"
else:
text = "No"
self.ids.list_subtitles_item.secondary_text = text
def animationFunctions(self):
options = []
options.append("1" if self.ids.transition_animation_switch.active else "0")
options.append("1" if self.ids.bottomsheet_animation_switch.active else "0")
o = ",".join(options)
self.cursor.execute("UPDATE options SET animation_options = ?", (o,))
self.con.commit()
def closeDialog(self, button=None):
self.dialog.dismiss()
def goBackBtn(self):
self.manager.setOptionsScreen()
class TwoLineListItemWithContainer(ContainerSupport, TwoLineListItem):
def start_ripple(self): # disable ripple behavior
pass
class RightSwitch(MDSwitch, IRightBodyTouch):
pass
class RemoteDatabaseDialogContent(MDBoxLayout):
pass
class DatabasePasswordDialogContent(MDBoxLayout):
hint_text = StringProperty()
def showPasswordButton(self, button, field):
if button.icon == "eye-outline":
field.password = False
button.icon = "eye-off-outline"
elif button.icon == "eye-off-outline":
field.password = True
button.icon = "eye-outline"
class DatabaseOptionsScreen(Screen):
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.con = kwargs.get("con")
self.cursor = kwargs.get("cursor")
self.getOptions()
self.setOptions()
self.initUI()
def getOptions(self):
self.cursor.execute(
"SELECT auto_backup, auto_backup_location, remote_database, db_pass, db_user, db_name, db_port, db_host FROM options"
)
options = self.cursor.fetchall()[0]
self.auto_backup = bool(options[0])
self.auto_backup_location = options[1]
self.remote_database = bool(options[2])
self.pg_info = options[3:]
def setOptions(self):
self.ids.switch.active = self.auto_backup
self.ids.location_list_item.secondary_text = self.auto_backup_location
self.file_manager_start_path = self.auto_backup_location
self.ids.remote_database_switch.active = self.remote_database
if all(self.pg_info):
for list_item, description in zip(
self.ids.remote_database_list.children, self.pg_info
):
list_item.secondary_text = (
description if list_item.text != "Password" else "**********"
)
def initUI(self):
data = [
("Backup Database", "Backup encrypted database"),
("Restore Database", "Restore encrypted database"),
]
for text, description in data:
self.ids.database_container.add_widget(
TwoLineListItem(
text=text, secondary_text=description, on_press=self.checkPlatform
)
)
def checkPlatform(self, button):
if platform == "android":
from android.permissions import (
check_permission,
request_permissions,
Permission,
)
if check_permission("android.permission.WRITE_EXTERNAL_STORAGE"):
if isinstance(button, MDSwitch):
self.autoBackupFunction(active=button.active)
else:
self.databaseFunctions(text=button.text)
else:
if isinstance(button, MDSwitch):
if button.active:  # request_permissions is only run when the switch is active
request_permissions(
[
Permission.READ_EXTERNAL_STORAGE,
Permission.WRITE_EXTERNAL_STORAGE,
],
partial(self.autoBackupFunction, active=button.active),
)
else:
request_permissions(
[
Permission.READ_EXTERNAL_STORAGE,
Permission.WRITE_EXTERNAL_STORAGE,
],
partial(self.databaseFunctions, text=button.text),
)
else:
if isinstance(button, MDSwitch):
self.autoBackupFunction(active=button.active)
else:
self.databaseFunctions(text=button.text)
def autoBackupFunction(
self, permissions=None, grant_result=[True, True], active=False
):
if not grant_result == [True, True]:
self.auto_backup = 0
self.ids.switch.active = False  # this line triggers the switch's on_active callback;
# that is why request_permissions is only called while the switch is active,
# otherwise request_permissions would run twice
self.cursor.execute("UPDATE options SET auto_backup = 0")
self.con.commit()
toast("Please, Allow Storage Permission")
return
self.auto_backup = 1 if active else 0
self.cursor.execute("UPDATE options SET auto_backup = ?", (self.auto_backup,))
self.con.commit()
if self.auto_backup == 1:
shutil.copy2("pass.db", self.auto_backup_location)
def autoBackupLocationFunction(self):
self.file_manager = MDFileManager(
exit_manager=self.exit_manager,
select_path=self.auto_backup_location_select_path,
search="dirs",
)
self.file_manager.show(self.file_manager_start_path)
self.manager.file_manager_open = True
def auto_backup_location_select_path(self, path):
if os.path.isdir(path):
self.exit_manager()
shutil.copy2("pass.db", path)
self.cursor.execute("UPDATE options SET auto_backup_location = ?", (path,))
self.con.commit()
self.getOptions()
self.setOptions()
else:
toast("Please Select a Directory")
def databaseFunctions(self, permissions=None, grant_result=[True, True], text=None):
if not grant_result == [True, True]:
toast("Please, Allow Storage Permission")
return
if text == "Backup Database":
self.file_manager = MDFileManager(
exit_manager=self.exit_manager,
select_path=self.backup_select_path,
search="dirs",
)
self.file_manager.show(self.file_manager_start_path)
self.manager.file_manager_open = True
elif text == "Restore Database":
self.file_manager = MDFileManager(
exit_manager=self.exit_manager,
select_path=self.restore_select_path,
ext=[".db"],
)
self.file_manager.show(self.file_manager_start_path)
self.manager.file_manager_open = True
def backup_select_path(self, path):
self.exit_manager()
shutil.copy2("pass.db", path)
toast("Database Backup was Successfully Created")
def restore_select_path(self, path):
self.openRestoreDatabaseDialog(path)
def openRestoreDatabaseDialog(self, path):
self.dialog = MDDialog(
title="Password of the Database Backup",
type="custom",
content_cls=DatabasePasswordDialogContent(
hint_text="Password of the Database Backup"
),
buttons=[
MDFlatButton(text="Cancel", on_press=self.dismiss_dialog),
MDFlatButton(
text="Okay",
on_press=lambda x: self.checkBackupPassword(
self.dialog.content_cls.ids.password_field.text, path
),
),
],
)
self.dialog.open()
def checkBackupPassword(self, password, path):
backup_con = sqlite3.connect(path)
backup_cursor = backup_con.cursor()
backup_cursor.execute("SELECT master_password, salt FROM options")
encrypted, salt = map(bytes.fromhex, backup_cursor.fetchone())
cipher = self.manager.createCipher(password, salt)
restore = False  # default if decryption fails or the password does not match
try:
result = cipher.decrypt(encrypted[:16], encrypted[16:], None)
if result.decode() == password:
restore = True
except Exception:
restore = False
if restore:
self.exit_manager()
self.dismiss_dialog()
shutil.copy2(path, "pass.db")
self.manager.cipher = cipher
self.getOptions()
self.setOptions()
toast("Database Successfully Restored")
else:
toast("Wrong Password")
def remoteDatabaseSwitch(self, switch):
self.cursor.execute(
"UPDATE options SET remote_database = ?", (int(switch.active),)
)
self.con.commit()
def remoteDatabaseDialog(self, list_item):
content = RemoteDatabaseDialogContent()
content.ids.text_field.hint_text = list_item.text
self.dialog = MDDialog(
title=f"{list_item.text}:",
type="custom",
content_cls=content,
buttons=[
MDFlatButton(text="Cancel", on_press=self.dismiss_dialog),
MDRaisedButton(
text="Okay",
on_press=lambda btn: self.updateRemoteDatabaseOption(
list_item, content.ids.text_field.text
),
),
],
)
self.dialog.open()
def updateRemoteDatabaseOption(self, list_item, value):
if value.isspace() or value == "":
pass
else:
list_item.secondary_text = (
value if list_item.text != "Password" else "**********"
)
text = list_item.text
if text == "Database Name":
variable_name = "db_name"
elif text == "Password":
variable_name = "db_pass"
else:
variable_name = f"db_{text.lower()}"
query = f"UPDATE options SET {variable_name} = '{value}'"
self.cursor.execute(query)
self.con.commit()
self.dialog.dismiss()
def syncDatabaseButton(self):
if self.manager.pg_con is None:
self.cursor.execute(
"SELECT remote_database, db_name, db_user, db_pass, db_host, db_port FROM options"
)
pg_data = self.cursor.fetchone()
self.manager.connectRemoteDatabase(pg_data)
pg_con = self.manager.pg_con
pg_cursor = self.manager.pg_cursor
if pg_con is None:
toast("Something went wrong")
return
pg_cursor.execute("SELECT * FROM accounts")
remote_data = pg_cursor.fetchall()
self.cursor.execute("SELECT * FROM accounts")
local_data = self.cursor.fetchall()
pg_cursor.execute("SELECT master_password, salt FROM options")
remote_options = pg_cursor.fetchone()
self.cursor.execute("SELECT master_password, salt FROM options")
local_options = self.cursor.fetchone()
if remote_data and local_data:
toast("Please, Delete 'accounts' table in the PostgreSQL database")
# TODO user can select remote or local database for sync
elif local_data:
def insert_data_to_remote_database():
pg_cursor.execute("INSERT INTO options VALUES(%s, %s)", local_options)
for account in local_data:
pg_cursor.execute(
"INSERT INTO accounts VALUES(%s, %s, %s, %s, %s)", account
)
pg_con.commit()
toast("Sync Completed")
toast("Please wait until Sync is Complete")
Thread(target=insert_data_to_remote_database).start()
elif remote_data:
def syncWithRemoteDatabase(password):
encrypted, salt = map(bytes.fromhex, remote_options)
cipher = self.manager.createCipher(password, salt)
sync = False  # default if decryption fails or the password does not match
try:
result = cipher.decrypt(encrypted[:16], encrypted[16:], None)
if result.decode() == password:
sync = True
except Exception:
sync = False
if sync:
dialog.dismiss()
toast("Please wait until Sync is Complete")
self.cursor.execute(
"UPDATE options SET master_password = ?, salt = ? WHERE master_password = ? AND salt = ?",
(*remote_options, *local_options),
)
for account in remote_data:
self.cursor.execute(
"INSERT INTO accounts VALUES(?,?,?,?,?)", account
)
self.con.commit()
self.manager.cipher = cipher
toast("Sync Completed")
else:
toast("Wrong Password")
dialog = MDDialog(
title="Password of the Remote Backup",
type="custom",
content_cls=DatabasePasswordDialogContent(
hint_text="Password of the Remote Backup"
),
buttons=[
MDFlatButton(text="Cancel", on_press=lambda x: dialog.dismiss()),
MDFlatButton(
text="Okay",
on_press=lambda x: syncWithRemoteDatabase(
dialog.content_cls.ids.password_field.text
),
),
],
)
dialog.open()
def exit_manager(self, *args):
self.file_manager.close()
self.manager.file_manager_open = False
def dismiss_dialog(self, btn=None):
self.dialog.dismiss()
def goBackBtn(self):
self.manager.setOptionsScreen()
class OneLineListItemWithContainer(ContainerSupport, OneLineListItem):
def start_ripple(self): # disable ripple behavior
pass
class SecurityOptionsScreen(Screen):
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.con = kwargs.get("con")
self.cursor = kwargs.get("cursor")
self.theme_cls = ThemeManager()
self.getOptions()
self.setOptions()
def getOptions(self):
self.cursor.execute("SELECT fast_login, auto_exit FROM options")
options = self.cursor.fetchone()
self.fast_login = bool(options[0])
self.auto_exit = bool(options[1])
def setOptions(self):
self.ids.fast_login_switch.active = self.fast_login
self.ids.auto_exit_switch.active = self.auto_exit
def fastLoginFunction(self, active):
status = 1 if active else 0
self.cursor.execute("UPDATE options SET fast_login = ?", (status,))
self.con.commit()
def autoExitFunction(self, active):
status = 1 if active else 0
self.cursor.execute("UPDATE options SET auto_exit = ?", (status,))
self.con.commit()
def goBackBtn(self):
self.manager.setOptionsScreen()
class ChangeMasterPasswordScreen(Screen):
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.con = kwargs.get("con")
self.cursor = kwargs.get("cursor")
self.cipher = kwargs.get("cipher")
self.getOptions()
def getOptions(self):
self.cursor.execute("SELECT master_password, remote_database FROM options")
data = self.cursor.fetchone()
encrypted = bytes.fromhex(data[0])
self.password = self.cipher.decrypt(
encrypted[:16], encrypted[16:], None
).decode()
self.remote_database = data[1]
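# Encrypted values are stored hex-encoded as nonce (first 16 bytes) + ciphertext; that is why decrypt calls slice encrypted[:16] and encrypted[16:].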
def initCipher(self, password):
salt = os.urandom(32)
cipher = self.manager.createCipher(password, salt)
nonce = os.urandom(16)
encrypted = nonce + cipher.encrypt(nonce, password.encode(), None)
return encrypted, salt
def recryptPasswords(self):
old_cipher = self.cipher
new_cipher = self.manager.cipher
self.cursor.execute("SELECT id, password FROM accounts")
accounts = self.cursor.fetchall()
for account in accounts:
old_encrypted = bytes.fromhex(account[1])
password = old_cipher.decrypt(
old_encrypted[:16], old_encrypted[16:], None
).decode()
nonce = os.urandom(16)
new_encrypted = nonce + new_cipher.encrypt(nonce, password.encode(), None)
self.cursor.execute(
"UPDATE accounts SET password = ? WHERE id = ?",
(new_encrypted.hex(), account[0]),
)
self.con.commit()
self.cipher = new_cipher
def updateButton(self, current_password, new_password, confirm_new_password):
if current_password == self.password:
if self.password == new_password == confirm_new_password:
toast("Current password and new password cannot be same!")
elif new_password == confirm_new_password:
encrypted, salt = self.initCipher(new_password)
self.cursor.execute(
"UPDATE options SET master_password = ?, salt = ?",
(encrypted.hex(), salt.hex()),
)
self.con.commit()
self.manager.createCipher(new_password, salt)
self.recryptPasswords()
self.manager.setSecurityOptionsScreen()
toast("Master Password Successfully Changed")
if self.remote_database:
query = "UPDATE options SET master_password = {}, salt = {}".format(
repr(encrypted.hex()), repr(salt.hex())
)
self.manager.runRemoteDatabaseQuery(query)
else:
instance = self.ids.confirm_new_password_field
self.initFieldError(instance)
else:
instance = self.ids.current_password_field
self.initFieldError(instance)
def showPasswordBtn(self):
button = self.ids.show_password_button
field_1 = self.ids.current_password_field
field_2 = self.ids.new_password_field
field_3 = self.ids.confirm_new_password_field
if button.icon == "eye-outline":
field_1.password = False
field_2.password = False
field_3.password = False
button.icon = "eye-off-outline"
elif button.icon == "eye-off-outline":
field_1.password = True
field_2.password = True
field_3.password = True
button.icon = "eye-outline"
def checkField(self, instance, text):
if not text:
return
else:
self.closeFieldError(instance)
def checkConfirmField(self, instance, text):
if not text:
return
if text != self.ids.new_password_field.text:
self.initFieldError(instance)
else:
self.closeFieldError(instance)
def goBackBtn(self):
self.manager.setSecurityOptionsScreen()
def initFieldError(self, instance):
instance.error = True
Animation(duration=0.2, _current_error_color=instance.error_color).start(
instance
)
Animation(
_current_right_lbl_color=instance.error_color,
_current_hint_text_color=instance.error_color,
_current_line_color=instance.error_color,
_line_width=instance.width,
duration=0.2,
t="out_quad",
).start(instance)
def closeFieldError(self, instance):
Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(instance)
Animation(
duration=0.2,
_current_line_color=instance.line_color_focus,
_current_hint_text_color=instance.line_color_focus,
_current_right_lbl_color=instance.line_color_focus,
).start(instance)
instance.error = False
class RightCheckbox(IRightBodyTouch, MDCheckbox):
pass
class PasswordSuggestionOptionsScreen(Screen):
def __init__(self, **kwargs):
super().__init__(name=kwargs.get("name"))
self.con = kwargs.get("con")
self.cursor = kwargs.get("cursor")
self.getOptions()
self.setOptions()
def getOptions(self):
self.cursor.execute(
"SELECT password_length, password_suggestion_options FROM options"
)
options = self.cursor.fetchone()
self.password_length = options[0]
self.password_suggestion_options = [bool(int(o)) for o in options[1].split(",")]
def setOptions(self):
self.ids.slider.value = self.password_length
self.ids.lowercase_checkbox.active = self.password_suggestion_options[0]
self.ids.uppercase_checkbox.active = self.password_suggestion_options[1]
self.ids.digits_checkbox.active = self.password_suggestion_options[2]
self.ids.symbols_checkbox.active = self.password_suggestion_options[3]
def sliderFunction(self, slider):
value = int(slider.value)
if value != self.password_length:
self.password_length = value
self.cursor.execute("UPDATE options SET password_length = ?", (value,))
self.con.commit()
def checkboxFunction(self, checkbox):
checkbox_status = []
for widget in self.ids.values():
if isinstance(widget, RightCheckbox):
checkbox_status.append(widget.active)
if any(checkbox_status):
options = ",".join([str(int(i)) for i in checkbox_status])
self.cursor.execute(
"UPDATE options SET password_suggestion_options = ?", (options,)
)
self.con.commit()
else:
checkbox.active = True
toast("You must choose at least one!")
def suggestPasswordButton(self):
self.getOptions()
options = self.password_suggestion_options
chars = ""
if options[0]:
chars += string.ascii_lowercase
if options[1]:
chars += string.ascii_uppercase
if options[2]:
chars += string.digits
if options[3]:
chars += string.punctuation
password = "".join(random.choices(chars, k=self.password_length))
Clipboard.copy(password)
toast(f"{password} Copied")
def goBackBtn(self):
self.manager.setOptionsScreen()
|
""" Process all the SdA homogeneity test .npy files, combine into a dataframe """
import sys, re, os
import numpy as np
import pandas as pd
# Extract the model name from each filename.
def extract_model(regex,filename):
match = regex.match(filename)
if match is not None:
return match.groups()[0]
input_dir = '/data/sda_output_data/homogeneity'
# compile a regex to extract the model from a given filename
model_and_param = re.compile(r"^([\d_]+)")
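# e.g. a hypothetical filename such as '1000_100_50.npy' would yield the model string '1000_100_50' (the leading run of digits and underscores).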
# Store the contents of each file as a one-row DataFrame and add it to the data_files list.
data_files = []
print "...Processing files"
currdir = os.getcwd()
# for each file:
for layers in ["3_layers","4_layers"]:
for dimension in ["10","20","30","40","50"]:
# read a list of all files in the directory that match model output files
os.chdir(os.path.join(input_dir,layers,dimension))
model_files = os.listdir(".")
for f in model_files:
if not f.endswith(".npy"):
continue
f_model = extract_model(model_and_param, f)
homog_results = np.load(f)  # np.load takes the path directly; no separate file handle needed
# build the one line df, store in list
f_dict = {"Model": [f_model], "Layers": [layers], "Dimension": [dimension], "Homogeneity": [homog_results.mean()]}
data_files.append(pd.DataFrame(data=f_dict))
print "...Done"
print "...rbinding DataFrames"
master_df = data_files[0]
for i in xrange(1,len(data_files)):
master_df = master_df.append(data_files[i])
print "...Done"
os.chdir(input_dir)
master_df.to_csv(path_or_buf="all_sda_models.csv",index=False)
|
import torch
import flowlib
def test_invertible_conv_forward() -> None:
model = flowlib.InvertibleConv(in_channels=3)
x = torch.randn(4, 3, 8, 8)
z, logdet = model(x)
assert z.size() == x.size()
assert not torch.isnan(z).any()
assert logdet.size() == (1,)
def test_invertible_conv_inverse() -> None:
model = flowlib.InvertibleConv(in_channels=3)
z = torch.randn(4, 3, 8, 8)
x = model.inverse(z)
assert x.size() == z.size()
assert not torch.isnan(x).any()
def test_invertible_convlu_forward() -> None:
model = flowlib.InvertibleConvLU(in_channels=3)
x = torch.randn(4, 3, 8, 8)
z, logdet = model(x)
assert z.size() == x.size()
assert not torch.isnan(z).any()
assert logdet.size() == (1,)
def test_invertible_convlu_inverse() -> None:
model = flowlib.InvertibleConvLU(in_channels=3)
z = torch.randn(4, 3, 8, 8)
x = model.inverse(z)
assert x.size() == z.size()
assert not torch.isnan(x).any()
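# A minimal round-trip sketch (not part of the original suite): assuming the
# forward/inverse pair above is numerically consistent, inverse(model(x))
# should reconstruct x up to a small tolerance (the tolerance is an assumption).
def test_invertible_conv_roundtrip() -> None:
    model = flowlib.InvertibleConv(in_channels=3)
    x = torch.randn(4, 3, 8, 8)
    z, _ = model(x)
    x_rec = model.inverse(z)
    assert torch.allclose(x, x_rec, atol=1e-4)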
|
""" tool to run fastqc on FASTQ/SAM/BAM files from HTS experiments """
import subprocess
from bipy.utils import flatten, remove_suffix, is_pair
from bcbio.utils import safe_makedir, file_exists
import os
import logging
import abc
from mako.template import Template
from bipy.toolbox.reporting import LatexReport, safe_latex
import sh
import zipfile
from bipy.pipeline.stages import AbstractStage
from bcbio.log import logger, setup_local_logging
from bcbio.provenance import do
_FASTQ_RANGES = {"sanger": [33, 73],
"solexa": [59, 104],
"illumina_1.3+": [64, 104],
"illumina_1.5+": [66, 104],
"illumina_1.8+": [33, 74]}
def detect_fastq_format(in_file, MAX_RECORDS=1000000):
"""
detects the format of a fastq file
will return multiple formats if it could be more than one
"""
logger.info("Detecting FASTQ format on %s." % (in_file))
kept = list(_FASTQ_RANGES.keys())
with open(in_file) as in_handle:
records_read = 0
for i, line in enumerate(in_handle):
# get the quality line
if records_read >= MAX_RECORDS:
break
if i % 4 == 3:
records_read += 1
for c in line:
formats = kept
if len(formats) == 1:
return formats
for form in list(formats):  # iterate over a copy, since kept (aliased by formats) is mutated below
if (_FASTQ_RANGES[form][0] > ord(c) or
_FASTQ_RANGES[form][1] < ord(c)):
kept.remove(form)
return formats
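# Illustrative check: the quality character '#' has ord 35, inside the sanger [33, 73] and illumina_1.8+ [33, 74] ranges but outside solexa/illumina_1.3+/illumina_1.5+, so only those two encodings would remain.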
# list of module names for parsing the output files from fastqc
MODULE_NAMES = ["Basic Statistics", "Per base sequence quality",
"Per sequence quality scores",
"Per base sequence content",
"Per base GC content",
"Per sequence GC content",
"Per base N content",
"Sequence Length Distribution",
"Overrepresented sequences"]
def _make_outdir(config):
""" make the output directory "fastqc" where the data files live """
outdir = os.path.join(config["dir"]["results"], "fastqc")
safe_makedir(outdir)
return outdir
def _make_outfile(input_file, config):
outdir = _make_outdir(config)
#outfile = "".join([os.path.basename(input_file), "_fastqc.zip"])
base, ext = os.path.splitext(os.path.basename(input_file))
# fastqc does not handle the .fq extension correctly
if ext == ".fq":
outfile = os.path.join(outdir, base + ext + "_fastqc.zip")
else:
outfile = os.path.join(outdir, base + "_fastqc.zip")
return outfile
def _build_command(input_file, fastqc_config, config):
program = fastqc_config["program"]
options = map(str, list(flatten(fastqc_config["options"])))
outdir = _make_outdir(config)
options += ["--outdir", outdir, "--kmers", "6"]
cmd = list(flatten([program, options, input_file]))
return cmd
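# The resulting command is a flat list, e.g. [<program>, *config options, '--outdir', <results>/fastqc, '--kmers', '6', <input_file>], which do.run executes below.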
def run(input_file, fastqc_config, config):
outfile = _make_outfile(input_file, config)
# if it is already done skip it
if os.path.exists(outfile):
return outfile
cmd = _build_command(input_file, fastqc_config, config)
do.run(cmd, "Running FastQC on %s" % (input_file), None)
return outfile
class FastQCParser(object):
"""
Parses the directory of FastQC output to prepare a report for
output. Mostly lifted from Brad Chapman (bcbio).
"""
GRAPHS = (("per_base_quality.png", "", 1.0),
("per_base_sequence_content.png", "", 0.85),
("per_sequence_gc_content.png", "", 0.85),
("kmer_profiles.png", "", 0.85),
("duplication_levels.png", "", 0.85),
("per_bases_n_content.png", "", 0.85),
("per_sequence_quality.png", "", 1.0),
("sequence_length_distribution.png", "", 1.0))
def __init__(self, base_dir):
(base, ext) = os.path.splitext(base_dir)
if ext == ".zip":
with zipfile.ZipFile(base_dir) as zip_handle:
zip_handle.extractall(os.path.dirname(base))
base_dir = base
self._dir = base_dir
self._max_seq_size = 45
self._max_overrep = 20
def get_fastqc_graphs(self):
final_graphs = []
for f, caption, size in self.GRAPHS:
full_f = os.path.join(self._dir, "Images", f)
if os.path.exists(full_f):
final_graphs.append((full_f, caption, size))
return final_graphs
def get_fastqc_summary(self):
stats = {}
for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
k, v = [safe_latex(x) for x in stat_line.split("\t")[:2]]
stats[k] = v
over_rep = []
for line in self._fastqc_data_section("Overrepresented sequences")[1:]:
parts = [safe_latex(x) for x in line.split("\t")]
over_rep.append(parts)
over_rep[-1][0] = self._splitseq(over_rep[-1][0])
return stats, over_rep[:self._max_overrep]
def _splitseq(self, seq):
# wrap long overrepresented sequences into space-separated chunks so they fit in the LaTeX table
pieces = []
cur_piece = []
for s in seq:
if len(cur_piece) >= self._max_seq_size:
pieces.append("".join(cur_piece))
cur_piece = []
cur_piece.append(s)
pieces.append("".join(cur_piece))
return " ".join(pieces)
def _fastqc_data_section(self, section_name):
out = []
in_section = False
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file):
with open(data_file) as in_handle:
for line in in_handle:
if line.startswith(">>%s" % section_name):
in_section = True
elif in_section:
if line.startswith(">>END"):
break
out.append(line.rstrip("\r\n"))
return out
def report(base_dir, report_type=None):
REPORT_LOOKUP = {"rnaseq": RNASeqFastQCReport}
parser = FastQCParser(base_dir)
graphs = parser.get_fastqc_graphs()
(stats, overrep) = parser.get_fastqc_summary()
name = os.path.basename(base_dir)
report_handler = REPORT_LOOKUP.get(report_type, FastQCReport)
return report_handler.generate_report(name, summary=stats,
figures=graphs, overrep=overrep)
class FastQCReport(LatexReport):
CAPTIONS = {"per_base_quality.png": "",
"per_base_sequence_content.png": "",
"per_sequence_gc_content.png": "",
"kmer_profiles.png": "",
"duplication_levels.png": "",
"per_bases_n_content.png": "",
"per_sequence_quality.png": "",
"sequence_length_distribution.png": ""}
def template(self):
return self._template
def _add_captions(self, figures):
new_figures = []
for figure in figures:
filename = os.path.basename(figure[0])
caption = self.CAPTIONS.get(filename, "")
new_figures.append((figure[0], caption, figure[2]))
return new_figures
@classmethod
def generate_report(cls, name=None, summary=None, figures=None,
overrep=None):
template = Template(cls._template)
safe_name = safe_latex(name)
section = template.render(name=safe_name, summary=summary,
summary_table=summary, figures=figures,
overrep=overrep)
return section
_template = r"""
\subsection*{FastQC report for ${name}}
% if summary:
\begin{table}[h]
\centering
\begin{tabular}{|l|r|}
\hline
% for k, v in summary.items():
${k} & ${v} \\
% endfor
\hline
\end{tabular}
\caption{Summary of lane results}
\end{table}
% endif
% if figures:
% for i, (figure, caption, size) in enumerate(figures):
\begin{figure}[htbp]
\centering
\includegraphics[width=${size}\linewidth] {${figure}}
\caption{${caption}}
\end{figure}
% endfor
% endif
% if overrep:
% if len(overrep) > 0:
\begin{table}[htbp]
\centering
\begin{tabular}{|p{8cm}rrp{4cm}|}
\hline
Sequence & Count & Percent & Match \\
\hline
% for seq, count, percent, match in overrep:
\texttt{${seq}} & ${count} & ${"%.2f" % float(percent)} & ${match} \\
% endfor
\hline
\end{tabular}
\caption{Overrepresented read sequences}
\end{table}
% endif
% endif
"""
class RNASeqFastQCReport(FastQCReport):
"""FastQCreport class for outputting information from RNASeq experiments"""
CAPTIONS = {"per_base_quality.png": "",
"per_base_sequence_content.png": "",
"per_sequence_gc_content.png": "",
"kmer_profiles.png": "",
"duplication_levels.png": "",
"per_bases_n_content.png": "",
"per_sequence_quality.png": "",
"sequence_length_distribution.png": ""}
class FastQC(AbstractStage):
stage = "fastqc"
def __init__(self, config):
self.config = config
super(FastQC, self).__init__(self.config)
self.stage_config = config["stage"][self.stage]
def _start_message(self, in_file):
logger.info("Starting %s on %s" % (self.stage, in_file))
def _end_message(self, in_file, out_file):
logger.info("%s complete on %s and stored as %s."
% (self.stage, in_file, out_file))
def _memoized_message(self, in_file, out_file):
logger.info("%s already run on %s and stored as %s, skipping."
% (self.stage, in_file, out_file))
def _check_run(self, in_file):
if not file_exists(in_file):
raise IOError('%s not found.' % (in_file))
def __call__(self, in_file):
self._start_message(in_file)
if is_pair(in_file):
out_file = [run(x, self.stage_config, self.config) for x in in_file]
else:
out_file = run(in_file, self.stage_config, self.config)
self._end_message(in_file, out_file)
return out_file
|
#! /usr/bin/env python
import glob
import numpy
from setuptools import setup, find_packages, Extension
from Cython.Build import cythonize
setup(
name="simtrie",
version="0.8.0",
description="An efficient data structure for fast string similarity searches",
author='Bernhard Liebl',
author_email='poke1024@gmx.de',
url='https://github.com/poke1024/minitrie/',
packages=find_packages(),
ext_modules=cythonize([
Extension(
name="simtrie",
sources=['simtrie/simtrie.pyx', 'mman/mman.c'],
extra_compile_args=["-O3", "-std=c++14"],
include_dirs=['lib', numpy.get_include()],
language="c++",
)
]),
install_requires=[
"numpy>=1.15.0",
"msgpack>=0.6.1",
"tqdm>=4.31.0"
],
python_requires=">=3.7",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic',
],
)
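# Typical local build (standard setuptools/Cython workflow, not project-specific): python setup.py build_ext --inplace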
|
from celery import Celery
from demo import predict
import urllib.request
import shutil
import os
import requests
import json
app = Celery('gaitlab', broker='redis://redis:6379/0')
@app.task(name='gaitlab.cp')
def cp(args):
path = "/gaitlab/input/input.mp4"
# remove the old file if it is present
if os.path.exists(path):
    os.remove(path)
# save the new file
url = args["video_url"]
with urllib.request.urlopen(url) as response, open(path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
# run predictions
result, video = predict(path)
files = {'file': video}
# store results
r = requests.post("http://www/annotation/{}/".format(args["annotation_id"]),
files=files,
data={"result": json.dumps(result)})
return None
|
class AuthError(Exception):
pass
class DatabaseError(Exception):
pass
class GitHubError(Exception):
pass
class NCBIError(Exception):
pass
class ProxyError(Exception):
pass
|
#!/usr/bin/env python
# coding: utf8
class Warrior(Ant):  # Ant is expected to be defined or imported elsewhere in the project
"""A warrior ant: stores the constructor argument and delegates the rest to Ant."""
def __init__(self, arg):
super().__init__(arg)
self.arg = arg
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import matplotlib
import os
import argparse
from SyntheticDataGeneration import MultiviewDataGeneration
from algorithm.DDFA.utils.ddfa import str2bool
matplotlib.use('Agg')
__all__ = ['torch']
if __name__ == '__main__':
print("\n\n**********************************\nTEST Multiview Data Generation Learner\n"
"**********************************")
parser = argparse.ArgumentParser()
parser.add_argument("-device", default="cuda", type=str, help="choose between cuda or cpu ")
parser.add_argument("-path_in", default=os.path.join("opendr_internal", "projects",
"data_generation",
"",
"demos", "imgs_input"),
type=str, help='Give the path of image folder')
parser.add_argument('-path_3ddfa', default=os.path.join("opendr_internal", "projects",
"data_generation",
"",
"algorithm", "DDFA"),
type=str, help='Give the path of DDFA folder')
parser.add_argument('-save_path', default=os.path.join("opendr_internal", "projects",
"data_generation",
"",
"results"),
type=str, help='Give the path of results folder')
parser.add_argument('-val_yaw', default="10 20", nargs='+', type=str, help='yaw poses list between [-90,90] ')
parser.add_argument('-val_pitch', default="30 40", nargs='+', type=str,
help='pitch poses list between [-90,90] ')
parser.add_argument('-f', '--files', nargs='+',
help='image files paths fed into network, single or multiple images')
parser.add_argument('--show_flg', default='false', type=str2bool, help='whether to show the visualization result')
parser.add_argument('--dump_res', default='true', type=str2bool,
help='whether write out the visualization image')
parser.add_argument('--dump_vertex', default='false', type=str2bool,
help='whether write out the dense face vertices to mat')
parser.add_argument('--dump_ply', default='true', type=str2bool)
parser.add_argument('--dump_pts', default='true', type=str2bool)
parser.add_argument('--dump_roi_box', default='false', type=str2bool)
parser.add_argument('--dump_pose', default='true', type=str2bool)
parser.add_argument('--dump_depth', default='true', type=str2bool)
parser.add_argument('--dump_pncc', default='true', type=str2bool)
parser.add_argument('--dump_paf', default='true', type=str2bool)
parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size')
parser.add_argument('--dump_obj', default='true', type=str2bool)
parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether to use dlib to predict bbox')
parser.add_argument('--dlib_landmark', default='true', type=str2bool,
help='whether to use dlib landmark to crop image')
parser.add_argument('-m', '--mode', default='gpu', type=str, help='gpu or cpu mode')
parser.add_argument('--bbox_init', default='two', type=str, help='one|two: one-step bbox initialization or two-step')
parser.add_argument('--dump_2d_img', default='true', type=str2bool, help='whether to save 3d rendered image')
parser.add_argument('--dump_param', default='true', type=str2bool, help='whether to save param')
parser.add_argument('--dump_lmk', default='true', type=str2bool, help='whether to save landmarks')
parser.add_argument('--save_dir', default='./algorithm/DDFA/results', type=str, help='dir to save result')
parser.add_argument('--save_lmk_dir', default='./example', type=str, help='dir to save landmark result')
parser.add_argument('--img_list', default='./txt_name_batch.txt', type=str, help='test image list file')
parser.add_argument('--rank', default=0, type=int, help='used when parallel run')
parser.add_argument('--world_size', default=1, type=int, help='used when parallel run')
parser.add_argument('--resume_idx', default=0, type=int)
args = parser.parse_args()
synthetic = MultiviewDataGeneration(args)
synthetic.eval()
|
from click.testing import CliRunner
from pathlib import Path
from tag.cli import cli
import os
import unittest
class CommandTest(unittest.TestCase):
def run_command_test(self,
command,
touch = [],
assert_exist = [],
assert_not_exist = [],
output = '',
debug = False
):
runner = CliRunner()
with runner.isolated_filesystem():
for path in touch:
parent = os.path.split(path)[0]
if parent:
os.makedirs(parent, exist_ok=True)
Path(path).touch()
if debug:
print('\nCommand: ' + command)
print('Before:')
print(os.system('tree -a'))
result = runner.invoke(cli, command)
if debug:
print('After:')
print(os.system('tree -a'))
self.assertEqual(result.exit_code, 0, result.output)
cwd = os.getcwd()
self.assertEqual(result.output, output.replace('@cwd', cwd))
for path in assert_exist:
self.assertTrue(Path(path).is_file(), f'file should exist: {path}')
for path in assert_not_exist:
self.assertFalse(Path(path).is_file(), f'file should not exist: {path}')
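# Hypothetical usage sketch (the command string and file names below are
# illustrative assumptions, not taken from the tag CLI itself):
#
# class AddTagTest(CommandTest):
#     def test_add(self):
#         self.run_command_test(
#             'add mytag photo.jpg',
#             touch=['photo.jpg'],
#             assert_exist=['photo.jpg'],
#             output='',
#         )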
|
from urllib.error import HTTPError
import pytest
from tests import api
from .constants import (
VALID_STORE,
VALID_PRODUCT,
INVALID_PRODUCT_ID,
INVALID_STORE_ID
)
def test_inventories_without_args():
resp = api.inventories()
assert resp['status'] == 200
assert 'result' in resp
res = resp['result']
assert isinstance(res, list)
assert len(res) == resp['pager']['records_per_page']
def test_inventories_with_store_id():
resp = api.inventories(VALID_STORE['id'])
assert resp['status'] == 200
assert 'pager' in resp
assert 'result' in resp
assert 'store' in resp
store = resp['store']
assert store['id'] == VALID_STORE['id']
assert store['name'] == VALID_STORE['name']
res = resp['result']
assert len(res) == resp['pager']['records_per_page']
def test_inventories_with_store_id_and_product_id():
resp = api.inventories(VALID_STORE['id'], VALID_PRODUCT['id'])
assert resp['status'] == 200
assert 'pager' not in resp
assert 'result' in resp
def test_inventories_with_params():
per_page = 100
resp = api.inventories(per_page=per_page)
assert resp['status'] == 200
assert 'pager' in resp
assert resp['pager']['records_per_page'] == per_page
assert 'result' in resp
assert len(resp['result']) == per_page
def test_inventories_with_invalid_store_id():
with pytest.raises(HTTPError):
api.inventories(store_id=INVALID_STORE_ID)
def test_inventories_with_invalid_product_id():
with pytest.raises(HTTPError):
api.inventories(product_id=INVALID_PRODUCT_ID)
def test_inventories_with_store_id_and_invalid_product_id():
with pytest.raises(HTTPError):
api.inventories(VALID_STORE['id'], INVALID_PRODUCT_ID)
def test_inventories_with_invalid_store_id_and_valid_product_id():
with pytest.raises(HTTPError):
api.inventories(INVALID_STORE_ID, VALID_PRODUCT['id'])
|
import pytest
from unittest import TestCase
from pyflamegpu import *
from random import randint
import time
# Global vars needed in several classes
sleepDurationMilliseconds = 500
tracked_err_ct = 0
tracked_runs_ct = 0
class simulateInit(pyflamegpu.HostFunctionCallback):
# Init should always be 0th iteration/step
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
# Generate a basic pop
POPULATION_TO_GENERATE = FLAMEGPU.environment.getPropertyUInt("POPULATION_TO_GENERATE")
agent = FLAMEGPU.agent("Agent")
for i in range(POPULATION_TO_GENERATE):
agent.newAgent().setVariableUInt("counter", 0)
class simulateExit(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
totalCounters = FLAMEGPU.agent("Agent").sumUInt("counter")
# Add to the file scoped atomic sum of sums. @todo
# testSimulateSumOfSums += totalCounters
class elapsedInit(pyflamegpu.HostFunctionCallback):
# Init should always be 0th iteration/step
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
# Generate a basic pop
POPULATION_TO_GENERATE = FLAMEGPU.environment.getPropertyUInt("POPULATION_TO_GENERATE")
agent = FLAMEGPU.agent("Agent")
for i in range(POPULATION_TO_GENERATE):
agent.newAgent().setVariableUInt("counter", 0)
class elapsedStep(pyflamegpu.HostFunctionCallback):
def __init__(self):
super().__init__()
def run(self, FLAMEGPU):
# Sleep each thread for a duration of time.
seconds = sleepDurationMilliseconds / 1000.0
time.sleep(seconds)
class throwException(pyflamegpu.HostFunctionCallback):
i = 0
def __init__(self):
super().__init__()
self.i = 0
def run(self, FLAMEGPU):
global tracked_runs_ct
global tracked_err_ct
tracked_runs_ct += 1
self.i += 1
if self.i % 2 == 0:
tracked_err_ct += 1
FLAMEGPU.agent("does not exist")  # Just cause a failure
class TestCUDAEnsemble(TestCase):
def test_constructor(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Declare a pointer
ensemble = None
# Use the ctor
# explicit CUDAEnsemble(const ModelDescription& model, int argc = 0, const char** argv = None)
ensemble = pyflamegpu.CUDAEnsemble(model, [])
assert ensemble != None
# Check a property
assert ensemble.Config().timing == False
# Run the destructor ~CUDAEnsemble
ensemble = None
# Check with simple argparsing.
argv = ["ensemble.exe", "--timing"]
ensemble = pyflamegpu.CUDAEnsemble(model, argv)
assert ensemble.Config().timing == True
ensemble = None
def test_EnsembleConfig(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Verify that the getConfig method doesn't exist, as it is ignored.
with pytest.raises(AttributeError):
ensemble.getConfig()
# Get a config object.
# EnsembleConfig &Config()
ensemble.Config()
mutableConfig = ensemble.Config()
# Check the default values are correct.
assert mutableConfig.out_directory == ""
assert mutableConfig.out_format == "json"
assert mutableConfig.concurrent_runs == 4
# assert mutableConfig.devices == std::set<int>() # @todo - this will need to change
assert mutableConfig.quiet == False
assert mutableConfig.timing == False
# Mutate the configuration
mutableConfig.out_directory = "test"
mutableConfig.out_format = "xml"
mutableConfig.concurrent_runs = 1
# mutableConfig.devices = std::set<int>({0}) # @todo - this will need to change.
mutableConfig.quiet = True
mutableConfig.timing = True
# Check via the const ref, this should show the same value as config was a reference, not a copy.
assert mutableConfig.out_directory == "test"
assert mutableConfig.out_format == "xml"
assert mutableConfig.concurrent_runs == 1
# assert mutableConfig.devices == std::set<int>({0}) # @todo - this will need to change
assert mutableConfig.quiet == True
assert mutableConfig.timing == True
@pytest.mark.skip(reason="--help cannot be tested due to exit()")
def test_initialise_help(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
argv = ["ensemble.exe", "--help"]
ensemble.initialise(argv)
def test_initialise_out(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().out_directory == ""
assert ensemble.Config().out_format == "json"
argv = ["ensemble.exe", "--out", "test", "xml"]
ensemble.initialise(argv)
assert ensemble.Config().out_directory == "test"
assert ensemble.Config().out_format == "xml"
def test_initialise_concurrent_runs(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().concurrent_runs == 4
argv = ["ensemble.exe", "--concurrent", "2"]
ensemble.initialise(argv)
assert ensemble.Config().concurrent_runs == 2
@pytest.mark.skip(reason="EnsembleConfig::devices is not currently swig-usable")
def test_initialise_devices(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().devices == () # @todo
argv = ["ensemble.exe", "--devices", "0"]
ensemble.initialise(argv)
assert ensemble.Config().devices == (0) # @todo
def test_initialise_quiet(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().quiet == False
argv = ["ensemble.exe", "--quiet"]
ensemble.initialise(argv)
assert ensemble.Config().quiet == True
def test_initialise_timing(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().timing == False
argv = ["ensemble.exe", "--timing"]
ensemble.initialise(argv)
assert ensemble.Config().timing == True
def test_initialise_error_level(self):
# Create a model
model = pyflamegpu.ModelDescription("test")
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Call initialise with different cli arguments, which will mutate values. Check they have the new value.
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Slow
argv = ["ensemble.exe", "-e", "0"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Off
argv = ["ensemble.exe", "--error", "1"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Slow
argv = ["ensemble.exe", "-e", "2"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Fast
argv = ["ensemble.exe", "--error", "Off"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Off
argv = ["ensemble.exe", "-e", "SLOW"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Slow
argv = ["ensemble.exe", "--error", "fast"]
ensemble.initialise(argv)
assert ensemble.Config().error_level == pyflamegpu.CUDAEnsembleConfig.Fast
# Agent function used to check the ensemble runs.
simulateAgentFn = """
FLAMEGPU_AGENT_FUNCTION(simulateAgentFn, flamegpu::MessageNone, flamegpu::MessageNone) {
// Increment agent's counter by 1.
FLAMEGPU->setVariable<int>("counter", FLAMEGPU->getVariable<int>("counter") + 1);
return flamegpu::ALIVE;
}
"""
def test_simulate(self):
# Number of simulations to run.
planCount = 2
populationSize = 32
        # Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyUInt("POPULATION_TO_GENERATE", populationSize, True)
# Agent(s)
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
afn = agent.newRTCFunction("simulateAgentFn", self.simulateAgentFn)
# Control flow
model.newLayer().addAgentFunction(afn)
init = simulateInit()
model.addInitFunctionCallback(init)
exitfn = simulateExit()
model.addExitFunctionCallback(exitfn)
        # Create a small run plan, using a different number of steps per sim.
expectedResult = 0
plans = pyflamegpu.RunPlanVector(model, planCount)
for idx in range(plans.size()):
plan = plans[idx]
plan.setSteps(idx + 1) # Can't have 0 steps without exit condition
# Increment the expected result based on the number of steps.
expectedResult += (idx + 1) * populationSize
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
# Simulate the ensemble,
ensemble.simulate(plans)
# @todo - actually check the simulations did execute. Can't abuse atomics like in c++.
# An exception should be thrown if the Plan and Ensemble are for different models.
modelTwo = pyflamegpu.ModelDescription("two")
modelTwoPlans = pyflamegpu.RunPlanVector(modelTwo, 1)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
ensemble.simulate(modelTwoPlans)
assert e.value.type() == "InvalidArgument"
# Exceptions can also be thrown if output_directory cannot be created, but I'm unsure how to reliably test this cross platform.
# Logging is more thoroughly tested in Logging. Here just make sure the methods work
def test_setStepLog(self):
        # Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyFloat("f", 0)
        # Add an agent so that the simulation can be run, to check for presence of logs
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
        # Define the logging configuration.
lcfg = pyflamegpu.LoggingConfig(model)
lcfg.logEnvironment("f")
slcfg = pyflamegpu.StepLoggingConfig(lcfg)
slcfg.setFrequency(1)
# Create a single run.
plans = pyflamegpu.RunPlanVector(model, 1)
plans[0].setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
# Set the StepLog config.
ensemble.setStepLog(slcfg)
# Run the ensemble, generating logs
ensemble.simulate(plans)
# Get the logs, checking the correct number are present.
runLogs = ensemble.getLogs()
assert runLogs.size() == plans.size()
for log in runLogs:
stepLogs = log.getStepLog()
assert stepLogs.size() == 1 + 1 # This is 1 + 1 due to the always present init log.
expectedStepCount = 0
for stepLog in stepLogs:
assert stepLog.getStepCount() == expectedStepCount
expectedStepCount += 1
# An exception will be thrown if the step log config is for a different model.
modelTwo = pyflamegpu.ModelDescription("two")
lcfgTwo = pyflamegpu.LoggingConfig(modelTwo)
slcfgTwo = pyflamegpu.StepLoggingConfig(lcfgTwo)
slcfgTwo.setFrequency(1)
modelTwoPlans = pyflamegpu.RunPlanVector(modelTwo, 1)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
ensemble.setStepLog(slcfgTwo)
assert e.value.type() == "InvalidArgument"
def test_setExitLog(self):
# Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyFloat("f", 0)
        # Add an agent so that the simulation can be run, to check for presence of logs
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
# Define the logging configuration.
lcfg = pyflamegpu.LoggingConfig(model)
lcfg.logEnvironment("f")
# Create a single run.
plans = pyflamegpu.RunPlanVector(model, 1)
plans[0].setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
        # Set the ExitLog config.
ensemble.setExitLog(lcfg)
# Run the ensemble, generating logs
ensemble.simulate(plans)
# Get the logs, checking the correct number are present.
runLogs = ensemble.getLogs()
assert runLogs.size() == plans.size()
for log in runLogs:
exitLog = log.getExitLog()
assert exitLog.getStepCount() == 1
        # An exception will be thrown if the exit log config is for a different model.
modelTwo = pyflamegpu.ModelDescription("two")
lcfgTwo = pyflamegpu.LoggingConfig(modelTwo)
modelTwoPlans = pyflamegpu.RunPlanVector(modelTwo, 1)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
ensemble.setExitLog(lcfgTwo)
assert e.value.type() == "InvalidArgument"
def test_getLogs(self):
# Create an ensemble with no logging enabled, but call getLogs
        # Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
plans = pyflamegpu.RunPlanVector(model, 1)
plans[0].setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
ensemble.getLogs()
runLogs = ensemble.getLogs()
assert runLogs.size() == 0
# Agent function used to check the ensemble runs.
elapsedAgentFn = """
FLAMEGPU_AGENT_FUNCTION(elapsedAgentFn, flamegpu::MessageNone, flamegpu::MessageNone) {
// Increment agent's counter by 1.
FLAMEGPU->setVariable<int>("counter", FLAMEGPU->getVariable<int>("counter") + 1);
return flamegpu::ALIVE;
}
"""
def test_getEnsembleElapsedTime(self):
# Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyUInt("POPULATION_TO_GENERATE", 1, True)
# Agent(s)
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
afn = agent.newRTCFunction("elapsedAgentFn", self.elapsedAgentFn)
# Control flow
model.newLayer().addAgentFunction(afn)
init = elapsedInit()
model.addInitFunctionCallback(init)
step = elapsedStep()
model.addStepFunctionCallback(step)
# Create a single run.
plans = pyflamegpu.RunPlanVector(model, 1)
plans[0].setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
# Get the elapsed seconds before the sim has been executed
ensemble.getEnsembleElapsedTime()
# Assert that it is LE zero.
assert ensemble.getEnsembleElapsedTime() <= 0.
# Simulate the ensemble,
ensemble.simulate(plans)
        # Get the elapsed seconds after the sim has been executed
elapsedSeconds = ensemble.getEnsembleElapsedTime()
        # Ensure the elapsed seconds value is larger than zero.
# Sleep accuracy via callback seems very poor.
assert elapsedSeconds > 0.0
def test_ErrorOff(self):
global tracked_runs_ct
global tracked_err_ct
tracked_runs_ct = 0
tracked_err_ct = 0
# Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyUInt("POPULATION_TO_GENERATE", 1, True)
# Agent(s)
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
init = elapsedInit()
model.addInitFunctionCallback(init)
step = throwException()
model.addStepFunctionCallback(step)
# Create a set of 10 Run plans
ENSEMBLE_COUNT = 10
plans = pyflamegpu.RunPlanVector(model, ENSEMBLE_COUNT)
plans.setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
ensemble.Config().error_level = pyflamegpu.CUDAEnsembleConfig.Off
ensemble.Config().concurrent_runs = 1 # Single device/no concurrency to ensure we get consistent data
ensemble.Config().devices = pyflamegpu.IntSet([0])
        reported_err_ct = 0
# Simulate the ensemble,
reported_err_ct = ensemble.simulate(plans)
# Check correct number of fails is reported
assert reported_err_ct == ENSEMBLE_COUNT / 2
assert tracked_err_ct == ENSEMBLE_COUNT / 2
assert tracked_runs_ct == ENSEMBLE_COUNT
def test_ErrorSlow(self):
global tracked_runs_ct
global tracked_err_ct
tracked_runs_ct = 0
tracked_err_ct = 0
# Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyUInt("POPULATION_TO_GENERATE", 1, True)
# Agent(s)
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
init = elapsedInit()
model.addInitFunctionCallback(init)
step = throwException()
model.addStepFunctionCallback(step)
# Create a set of 10 Run plans
ENSEMBLE_COUNT = 10
plans = pyflamegpu.RunPlanVector(model, ENSEMBLE_COUNT)
plans.setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
ensemble.Config().error_level = pyflamegpu.CUDAEnsembleConfig.Slow
ensemble.Config().concurrent_runs = 1 # Single device/no concurrency to ensure we get consistent data
ensemble.Config().devices = pyflamegpu.IntSet([0])
        reported_err_ct = 0
# Simulate the ensemble,
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
ensemble.simulate(plans)
assert e.value.type() == "EnsembleError"
# Check correct number of fails is reported
assert tracked_err_ct == ENSEMBLE_COUNT / 2
assert tracked_runs_ct == ENSEMBLE_COUNT
    def test_ErrorFast(self):
global tracked_runs_ct
global tracked_err_ct
tracked_runs_ct = 0
tracked_err_ct = 0
# Create a model containing at least one agent type and function.
model = pyflamegpu.ModelDescription("test")
# Environmental constant for initial population
model.Environment().newPropertyUInt("POPULATION_TO_GENERATE", 1, True)
# Agent(s)
agent = model.newAgent("Agent")
agent.newVariableUInt("counter", 0)
init = elapsedInit()
model.addInitFunctionCallback(init)
step = throwException()
model.addStepFunctionCallback(step)
# Create a set of 10 Run plans
ENSEMBLE_COUNT = 10
plans = pyflamegpu.RunPlanVector(model, ENSEMBLE_COUNT)
plans.setSteps(1)
# Create an ensemble
ensemble = pyflamegpu.CUDAEnsemble(model)
# Make it quiet to avoid outputting during the test suite
ensemble.Config().quiet = True
ensemble.Config().out_format = "" # Suppress warning
ensemble.Config().error_level = pyflamegpu.CUDAEnsembleConfig.Fast
ensemble.Config().concurrent_runs = 1 # Single device/no concurrency to ensure we get consistent data
ensemble.Config().devices = pyflamegpu.IntSet([0])
        reported_err_ct = 0
# Simulate the ensemble,
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
ensemble.simulate(plans)
assert e.value.type() == "EnsembleError"
# Check correct number of fails is reported
assert tracked_err_ct == 1
# The first run does not throw
assert tracked_runs_ct == 2
|
from flask import Flask, request,send_from_directory
from flask_cors import CORS
import os
from Models.ArimaModel import ArimaModel
from Models.ExponentialSmootheningModel import ExponentialSmootheningModel
from Models.ProphetModel import ProphetModel
from Models.LstmModel import LstmModel
import tensorflow as tf
import pandas as pd
import dataset as datasetMaker
import sys
physical_devices = tf.config.list_physical_devices('GPU')
# Only enable memory growth when a GPU is actually present, to avoid an IndexError on CPU-only hosts.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
app = Flask(__name__)
cors = CORS(app, resources={r"*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
dir = os.path.dirname(__file__)
@app.route('/predictions', methods = ['GET'])
def country_csv_prediction():
countries = request.args.get('country').split(",")
time = request.args.get('days')
field = request.args.get('field')
data = datasetMaker.create_dataset(countries)
dataset_to_use = data[ [field+x for x in countries] ]
dataset_to_send = pd.DataFrame()
model1 = ArimaModel()
model2 = ExponentialSmootheningModel()
model3 = ProphetModel()
model4 = LstmModel(lag=5)
for i in countries:
new_data1 = model1.predict_with_arima(dataset_to_use,i,field,int(time))
new_data2 = model2.predict_with_exp(dataset_to_use,i,field,int(time))
new_data3 = model3.predict_with_prophet(dataset_to_use,i,field,int(time))
new_data4 = model4.predict_with_lstm(dataset_to_use,i,field,int(time))
dataset_to_send = pd.concat([dataset_to_send, new_data1, new_data2, new_data3, new_data4], axis=1, sort=False)
datasetMaker.write_to_csv(dataset_to_send, "tmp")
return send_from_directory(".","tmp.csv", as_attachment=True)
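# Illustrative request for the route above; the parameter values are hypothetical
# and must correspond to countries/columns produced by dataset.create_dataset:
#   GET /predictions?country=Italy,Spain&days=30&field=Confirmed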
@app.route('/predictions_based_on', methods = ['GET'])
def country_based_on():
base = request.args.get('base')
target = request.args.get('target')
field = request.args.get('field')
train_data = datasetMaker.create_dataset([base])
train_data = train_data[[field+base]]
predi_data = datasetMaker.create_dataset([target])
predi_data = predi_data[[field+target]]
lstm = LstmModel(lag=5)
fake_data = lstm.predict_dataset(train_data,predi_data, field, target)
datasetMaker.write_to_csv(fake_data, "tmpOn")
return send_from_directory(".","tmpOn.csv", as_attachment=True)
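# Illustrative request for the route above; the parameter values are hypothetical:
#   GET /predictions_based_on?base=Italy&target=Spain&field=Confirmed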
if __name__ == '__main__':
    app.run(debug=True, port=5000)
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect, itertools, re, time
from conary import trove, versions
from conary.build import defaultrecipes, use
from conary.build import lookaside
from conary.build.errors import CookError
from conary.build.grouprecipe import _BaseGroupRecipe, _SingleGroup
from conary.conaryclient.cmdline import parseTroveSpec
from conary.conaryclient import cml, modelgraph, troveset
from conary.conaryclient.resolve import PythonDependencyChecker
from conary.lib import log
from conary.local import deptable
from conary.repository import errors, netclient, searchsource
from conary.deps import deps
def findRecipeLineNumber():
line = None
for frame in inspect.stack():
if frame[1].endswith('.recipe'):
line = frame[2]
break
return line
class GroupSetTroveCache(object):
def __init__(self, groupRecipe, cache):
self.cache = cache
self.groupRecipe = groupRecipe
self.depCache = {}
def __getattr__(self, name):
return getattr(self.cache, name)
def _cached(self, troveTupList, troveList):
# this avoids the call to recursively get children
# GroupSet.TroveCache needs
pass
def getRepos(self):
return self.cache.troveSource
def cacheTroves(self, troveList):
return self.cache.cacheTroves(troveList, _cached = self._cached)
def getTrove(self, n, v, f, withFiles = False):
raise NotImplementedError
def getTroves(self, troveList, withFiles = False):
return self.cache.getTroves(troveList, _cached = self._cached,
withFiles = withFiles)
def iterTroveList(self, troveTup, strongRefs=False, weakRefs=False):
raise NotImplementedError
def iterTroveListInfo(self, troveTup):
if isinstance(troveTup[1], versions.NewVersion):
sg = self.groupRecipe._getGroup(troveTup[0])
for x in sg.iterTroveListInfo():
yield (x[0], x[2], x[1])
for name, byDefault, explicit in sg.iterNewGroupList():
yield (name, versions.NewVersion(),
self.groupRecipe.flavor), byDefault, explicit
else:
for x in self.cache.iterTroveListInfo(troveTup):
yield x
def troveReferencesTrove(self, troveTup, troveRef):
if isinstance(troveTup[1], versions.NewVersion):
sg = self.groupRecipe._getGroup(troveTup[0])
return sg.hasTrove(*troveRef)
return self.cache.troveReferencesTrove(troveTup, troveRef)
class GroupActionData(troveset.ActionData):
def __init__(self, troveCache, groupRecipe):
troveset.ActionData.__init__(self, troveCache, groupRecipe.flavor)
self.groupRecipe = groupRecipe
class GroupTupleSetMethods(object):
# used mainly in a TroveSet context, so document it there from user POV
'''
NAME
====
B{C{TroveSet}} - collection of trove references
DESCRIPTION
===========
A B{TroveSet} is an immutable collection of references to
specific troves from a Conary repository, and set operations
on those collections. Each trove reference in a TroveSet is a
three-tuple of B{name}, B{version}, B{flavor}, along with an
attribute, C{isInstalled}, that describes whether the trove
is considered B{installed} or B{optional}. Each TroveSet is
immutable. TroveSet operations return new TroveSets; they do
not modify existing TroveSets.
METHODS
=======
The following methods are available in C{TroveSet} objects:
- L{components} : Recursively search for components
- L{createGroup} : Create a binary group
- L{depsNeeded} : Get troves satisfying dependencies
- L{difference} : Subtract one TroveSet from another (C{-})
- L{dump} : Debugging: print the contents of the TroveSet
- L{find} : Search the TroveSet for specified troves
- L{findByName} : Find troves by regular expression
- L{findBySourceName} : Find troves by the name of the source
package from which they were built
- L{flatten} : Resolve trove references recursively
- L{getInstall} : Get only install troves from set
- L{getOptional} : Get only optional troves from set
- L{isEmpty} : Assert that the TroveSet is entirely empty
- L{isNotEmpty} : Assert that the TroveSet contains something
- L{makeInstall} : Make all troves install, or add all provided
troves as install troves
- L{makeOptional} : Make all troves optional, or add all provided
troves as optional troves
- L{members} : Resolve exactly one level of trove references,
return only those resolved references
- L{packages} : Resolve trove references recursively, return packages
- L{patch} : Replace troves in the TroveSet with matching-named
troves from the replacement set
- L{union} : Get the union of all provided TroveSets (C{|}, C{+})
- L{update} : Replace troves in the TroveSet with all troves from
the replacement set
Except for C{dump}, which prints debugging information, each of these
methods returns a new TroveSet.
'''
_explainObjectName = 'TroveSet'
def depsNeeded(self, resolveSource = None, failOnUnresolved = True):
"""
NAME
====
B{C{TroveSet.depsNeeded}} - Get troves satisfying dependencies
SYNOPSIS
========
C{troveset.depsNeeded(resolveSource=None, failOnUnresolved=True)}
DESCRIPTION
===========
Looks for unresolved dependencies in the trove set. Those unmet
dependencies (and their dependencies, recursively) are sought in
the C{resolveSource}, which must be a C{TroveSet}, C{Repository}, or
C{SearchPath}. If there are unresolvable dependencies, it raises
an error unless C{failOnUnresolved=False}. Returns a troveset
containing the troves that were used to resolve the dependencies.
This is not a union operation; the contents of the returned
troveset do not include the contents of the original troveset.
If no C{resolveSource} is provided, then depsNeeded asserts that
there are no unresolved dependencies.
PARAMETERS
==========
- L{resolveSource} : Source against which to resolve dependencies,
or None to assert that all dependencies are met.
- L{failOnUnresolved} (C{True}) : Whether to fail if not all
dependencies can be resolved.
EXAMPLES
========
There are several significant use cases for C{depsNeeded}.
The first use case is perhaps the most obvious; creating a group
that is dependency-complete:
mygrp = repos['group-standard'] + repos['other-package']
mygrp += mygrp.depsNeeded(repos)
groupStandard = mygrp.createGroup('group-standard')
A second use case is to enforce that dependencies can be
resolved. If C{failOnUnresolved} is left to the default C{True}
and the resulting troveset is not used, this becomes an assertion
that all the dependencies for the original troveset not provided
within the original troveset can be found within the specified
search path.
std = repos['group-standard']
mygrp = std + myrepos['mypackage']
# mypackage has added only dependencies resolved in group-packages
mygrp.depsNeeded(repos['group-packages'])
groupStandard = mygrp.createGroup('group-standard')
        A third use case is partial dependency closure. The
        C{failOnUnresolved} option can be set to C{False} if you want
        to resolve all the dependencies possible, with the understanding that
other dependencies will be resolved in another context.
This is normally useful only when that other context is
outside of the current group cook.
"""
fetched = self._action(ActionClass = troveset.FetchAction)
if resolveSource:
if isinstance(resolveSource, troveset.SearchPathTroveSet):
resolveSource = resolveSource._action(
ActionClass = GroupSearchPathFetch)
elif isinstance(resolveSource, troveset.DelayedTupleSet):
resolveSource = resolveSource._action(
ActionClass = troveset.FetchAction)
return fetched._action(resolveSource,
failOnUnresolved = failOnUnresolved,
ActionClass = DepsNeededAction)
def difference(self, other):
"""
NAME
====
B{C{TroveSet.difference}} - Subtract one TroveSet from another (C{-})
SYNOPSIS
========
C{troveset.difference(other)}
C{troveset - other}
DESCRIPTION
===========
Returns a new troveset which includes the members of the
original set which are not in the troveset C{other}. The
isInstall values of the troves in troveset C{other} are
ignored when deciding if those troves should be included in
the result.
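        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} is a Repository object
        and C{group-os} exists on its default label):
            allTroves = repos['group-os'].flatten()
            allGroups = allTroves.findByName('^group-')
            allOtherTroves = allTroves - allGroups
        This leaves only the non-group members of C{group-os} in
        C{allOtherTroves}.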
"""
if type(other) == str:
findSet = self.find(other)
return self._action(findSet, ActionClass = GroupDifferenceAction,
edgeList = [ None, '-' ] )
return self._action(other, ActionClass = GroupDifferenceAction)
__sub__ = difference
remove = difference
def find(self, *troveSpecs):
"""
NAME
====
B{C{TroveSet.find}} - Search the TroveSet for specified troves
SYNOPSIS
========
C{troveset.find('troveSpec1', 'troveSpec2', ..., 'troveSpecN')}
C{troveset['troveSpec']}
DESCRIPTION
===========
Returns a new C{troveset} containing all troves from the original
troveset which match the given C{troveSpec}(s). The original
troveset's isInstall settings are preserved for each returned
trove. The contents of the TroveSet are sought recursively.
EXAMPLES
========
C{groupOS = repos['group-os']}
C{allGlibcVersions = groupOS.find('glibc')}
C{compatGlibc = groupOS['glibc=@rpl:1-compat']}
This sets C{groupOS} to be a TroveSet containing the version of
C{group-os} found on the default label for Repository C{repos}.
It then finds all versions/flavors of glibc referenced (there
could be more than one) and creates an C{allGlibcVersions}
TroveSet that contains references to all of them, and another
        C{compatGlibc} that contains references to all flavors of glibc
that are on a label matching C{@rpl:1-compat}.
"""
return self._action(ActionClass = GroupTroveSetFindAction, *troveSpecs)
def findByName(self, namePattern, emptyOkay = False):
"""
NAME
====
B{C{TroveSet.findByName}} - Find troves by regular expression
SYNOPSIS
========
C{troveset.findByName(nameRegularExpression, emptyOkay = False)}
DESCRIPTION
===========
The original troveset is searched for troves whose names match
C{nameRegularExpression}, and matching troves are returned in
a new troveset. The isInstall value is preserved from the
original troveset being searched. Unlike C{find}, the original
troveset is not searched recursively; use C{troveset.flatten()}
explicitly if you need to search recursively.
PARAMETERS
==========
- L{emptyOkay} : Unless set to C{True}, raise an exception if
no troves are found.
EXAMPLES
========
C{allGnomePackages = allPackages.findByName('^gnome-')}
Returns a troveset containing all troves in the troveset
C{allPackages} with a name starting with C{gnome-}
C{allTroves = repos['group-os'].flatten()}
C{allGroups = allTroves.findByName('^group-')}
C{allOtherTroves = allTroves - allGroups}
"""
return self._action(namePattern, emptyOkay = emptyOkay,
ActionClass = FindByNameAction)
def findBySourceName(self, sourceName):
"""
NAME
====
B{C{TroveSet.findBySourceName}} - Find troves by the name of the source
package from which they were built
SYNOPSIS
========
C{troveset.findBySourceName(sourceName)}
DESCRIPTION
===========
The original troveset is searched for troves which were built
        from the source trove called C{sourceName}, and all matching
troves are returned in a new troveset. The isInstall value is
preserved from the original troveset being searched. Unlike
C{find}, the original troveset is not searched recursively;
use C{troveset.flatten()} explicitly if you need to search
recursively.
EXAMPLES
========
C{allTroves = repos['group-os'].flatten()}
C{allPGPackages = allTroves.findBySourceName('postgresql')}
Returns a troveset containing all troves in the troveset
C{allTroves} that were built from C{postgresql:source}
"""
return self._action(sourceName,
ActionClass = FindBySourceNameAction)
__getitem__ = find
def components(self, *componentList):
"""
NAME
====
B{C{TroveSet.components}} - Returns named components included in
all members of the troveset.
SYNOPSIS
========
C{troveset.components(I{componentName1}, I{componentName2}, ...)}
DESCRIPTION
===========
        Returns components included in all members of the troveset,
        where the component name (C{runtime}, C{lib}, C{data},
etc.) matches one of the component names provided. The C{isInstalled}
setting for each component in the returned troveset is determined
only by whether the component is installed or optional in the
package that contains it. This does not implicitly recurse, so
to find components of packages in a group, use C{flatten()}
EXAMPLES
========
C{groupOs = repos['group-os'].flatten()}
C{allDebugInfo = groupOs.components('debuginfo')}
Returns a TroveSet referencing all the C{debuginfo} components of
all packages referenced in C{group-os} as found in the C{repos}
object.
C{groupDist = repos['group-dist'].flatten()}
C{docTroves = groupDist.components('doc', 'supdoc')}
Returns a TroveSet referencing all the C{doc} and C{supdoc}
components of all packages referenced in C{group-dist} as found
in the C{repos} object.
"""
return self._action(ActionClass = ComponentsAction, *componentList)
def flatten(self):
"""
NAME
====
B{C{TroveSet.flatten}} - Returns all troves, recursively
SYNOPSIS
========
C{troveset.flatten()}
DESCRIPTION
===========
The troveset returned consists of any existing trove referenced
by the original troveset, directly or indirectly via groups.
        The C{isInstall} setting for each trove is inherited from
the original troveset, not from the troves referenced. (The
only troves that will not be returned are references to binary
groups being built out of the recipe, as returned by the
C{TroveSet.createGroup()} method.)
This is useful for creating flattened groups (removing group
structure present in upstream groups but not desired in the
groups being built) and for creating trovesets to use to look
up specific troves (for example, C{find} and C{findByName}).
EXAMPLES
========
C{platGrp = repos['group-appliance-platform'].flatten()}
Returns all the non-group troves included directly in
group-appliance-platform, as well as those included only within
group-core (included in group-appliance-platform), and those
included only within group-bootable, included only because it
is included within group-core. Does not include any of those
groups; only the members of the groups.
"""
return self._action(ActionClass = FlattenAction)
def getInstall(self):
"""
NAME
====
B{C{TroveSet.getInstall}} - Returns only install members
SYNOPSIS
========
C{troveset.getInstall()}
DESCRIPTION
===========
Returns a new troveset which includes only the members of
this troveset which are marked as install; optional members are
omitted. All members of the returned set are marked as install.
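        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} is a Repository object):
            everything = repos['group-os'].flatten()
            installedOnly = everything.getInstall()
        C{installedOnly} references only the troves marked install in
        the flattened contents of C{group-os}.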
"""
return self._action(ActionClass = GetInstalledAction)
def getOptional(self):
"""
NAME
====
B{C{TroveSet.getOptional}} - Returns only optional members
SYNOPSIS
========
C{troveset.getOptional()}
DESCRIPTION
===========
Returns a new troveset which includes only the members of
this troveset which are marked as optional; install members are
omitted. All members of the returned set are marked as optional.
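        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} is a Repository object):
            everything = repos['group-os'].flatten()
            optionalOnly = everything.getOptional()
        C{optionalOnly} references only the troves marked optional in
        the flattened contents of C{group-os}.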
"""
return self._action(ActionClass = GetOptionalAction)
def isEmpty(self):
"""
NAME
====
B{C{TroveSet.isEmpty}} - Assert that troveset is empty
SYNOPSIS
========
C{troveset.isEmpty()}
DESCRIPTION
===========
        An exception is raised if the troveset contains any members.
Otherwise, returns an identical (empty) troveset that may be ignored.
"""
return self._action(ActionClass = IsEmptyAction)
def isNotEmpty(self):
"""
NAME
====
B{C{TroveSet.isNotEmpty}} - Assert that troveset is not empty
SYNOPSIS
========
C{troveset.isNotEmpty()}
DESCRIPTION
===========
        An exception is raised if the troveset contains no members.
Otherwise, returns an identical troveset that may be ignored.
"""
return self._action(ActionClass = IsNotEmptyAction)
def makeInstall(self, installTroveSet = None):
"""
NAME
====
B{C{TroveSet.makeInstall}} - Make all troves install, or add all
provided troves as install troves
SYNOPSIS
========
C{troveset.makeInstall(installTroveSet = None)}
DESCRIPTION
===========
If C{installTroveSet} troveset is provided as an argument, all
members of that other troveset are included in the result as
install members. Any members of the original troveset which
are optional, and are not in C{installTroveSet}, are also
optional in the result.
If C{installTroveSet} is not provided, the troveset returned
includes all members of the original troveset as install members.
PARAMETERS
==========
- L{installTroveSet} : TroveSet providing all its members as install
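        EXAMPLES
        ========
        A hypothetical sketch (the trove names are placeholders and
        C{repos} is assumed to be a Repository object):
            base = repos['group-standard']
            docs = base.flatten().components('doc')
            withDocs = base.makeInstall(docs)
        The C{:doc} components are added as install members; the
        original members of C{base} keep their existing isInstall
        settings unless they appear in C{docs}.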
"""
return self._action(ActionClass = MakeInstallAction,
installTroveSet = installTroveSet)
def makeOptional(self, optionalTroveSet = None):
"""
NAME
====
B{C{TroveSet.makeOptional}} - Make all troves optional, or add all
provided troves as optional troves
SYNOPSIS
========
C{troveset.makeOptional(optionalTroveSet = None)}
DESCRIPTION
===========
If C{optionalTroveSet} troveset is provided as an argument, all
members of that other troveset are included in the result as
optional members. Any members of the original troveset which
are install troves, and are not in C{optionalTroveSet}, are also
install troves in the returned troveset.
If C{optionalTroveSet} is not provided, the troveset returned
includes all members of the original troveset as optional members.
PARAMETERS
==========
- L{optionalTroveSet} : TroveSet providing all its members as optional
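        EXAMPLES
        ========
        A hypothetical sketch (the name pattern is a placeholder and
        C{repos} is assumed to be a Repository object):
            base = repos['group-os'].flatten()
            trimmed = base.makeOptional(base.findByName('^gnome-'))
        Every trove whose name starts with C{gnome-} is optional in
        C{trimmed}; the remaining members keep their original setting.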
"""
return self._action(ActionClass = MakeOptionalAction,
optionalTroveSet = optionalTroveSet)
def members(self):
"""
NAME
====
B{C{TroveSet.members}} - Returns all members of the troveset
SYNOPSIS
========
C{troveset.members()}
DESCRIPTION
===========
All troves directly included by the troves in this troveset
are returned as a new troveset. They are optional in the result
only if they are optional in every member of this troveset which
includes them.
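        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} is a Repository object):
            topLevel = repos['group-os'].members()
        C{topLevel} references the troves directly included in
        C{group-os}, but nothing they include in turn; compare
        C{flatten()}, which recurses.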
"""
return self._action(ActionClass = MembersAction)
def packages(self, *packageList):
"""
NAME
====
        B{C{TroveSet.packages}} - Return package references, found recursively
SYNOPSIS
========
C{troveset.packages()}
DESCRIPTION
===========
Return all packages and filesets referenced directly or indirectly
by this troveset. They are optional in the result only if they
are optional in every member of this troveset which includes them.
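        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} is a Repository object):
            allPackages = repos['group-os'].packages()
            allGnomePackages = allPackages.findByName('^gnome-')
        C{allPackages} references every package and fileset reachable
        from C{group-os}, without the intermediate group structure.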
"""
return self._action(ActionClass = PackagesAction, *packageList)
def scripts(self):
"""
NAME
====
B{C{TroveSet.scripts}} - Return scripts for a trove
SYNOPSIS
========
C{troveset.scripts()}
DESCRIPTION
===========
Returns a Scripts object which includes all of the scripts for the
trove included in this TroveSet. If this TroveSet is empty or contains
multiple troves, an exception is raised.
EXAMPLES
========
This creates a new group which includes the scripts from a group
which is already in the repository.
existingGrp = repos['group-standard']
thisGrpContents = repos['pkg']
            r.Group(thisGrpContents, scripts = existingGrp.scripts())
"""
stubTroveSet = self._action(ActionClass = ScriptsAction)
return stubTroveSet.groupScripts
def union(self, *troveSetList):
"""
NAME
====
B{C{TroveSet.union}} - Get the union of all provided TroveSets (C{|}, C{+})
SYNOPSIS
========
C{troveset.union(other1, other2, ..., otherN)}
C{troveset + other1 + other2}
C{troveset | other1 | other2}
DESCRIPTION
===========
        Return a troveset which includes all of the members of this troveset
as well as all of the members of the arguments. Troves are optional
only if they are optional in all the trovesets they are part of.
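        EXAMPLES
        ========
        A minimal sketch (assuming C{repos} is a Repository object and
        both troves exist on its default label):
            mygrp = repos['group-standard'] + repos['other-package']
        C{mygrp} references everything in C{group-standard} plus
        C{other-package}; C{+}, C{|}, and C{union()} are equivalent.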
"""
return self._action(ActionClass = GroupUnionAction, *troveSetList)
def patch(self, patchSet):
"""
NAME
====
B{C{TroveSet.patch}} - Replace troves with matching-name troves
SYNOPSIS
========
C{troveset.patch(patchSet)}
DESCRIPTION
===========
Look (recursively) for items in this troveset which can
reasonably be replaced by members found in the patchSet.
The isInstall values are inherited from the original troveset.
Any items in patchSet which do not appear to replace
members of this troveset are included as optional in the
result. Members of the original troveset which are outdated
by members of the patchSet are also included as optional
in the returned troveset, to prevent them from inadvertently
showing up as install troves due to other operations.
This is a recursive union operation in which only troves
which are installed in the original set are installed in
the resulting set, and all other troves are available.
The difference between C{TroveSet.update} and C{TroveSet.patch} is
how new troves introduced in C{patchSet} but not present in the
original set are handled. With C{TroveSet.patch}, the new
troves from C{patchSet} are not installed in the result; with
C{TroveSet.update}, the new troves are installed in the result if
they are installed in the C{updateSet}.
PARAMETERS
==========
- L{patchSet} : TroveSet containing potential replacements
EXAMPLES
========
This operation is intended to implement the appropriate
behavior for applying a group specifying a set of updated
packages. For example, if only the postgresql client is
in the current install set, and group-CVE-2015-1234 contains
both the postgresql client and server in different packages,
then the patch operation will mark the existing postgresql
client as optional, add the new postgresql client as install,
and add the new postgresql server as optional in the returned
troveSet.
base = repos['group-standard']
update = base.patch(repos['group-CVE-2015-1234'])
groupStandard = update.createGroup('group-standard')
"""
return self._action(patchSet, ActionClass = GroupPatchAction)
def update(self, updateSet):
"""
NAME
====
B{C{TroveSet.update}} - Replace troves in the TroveSet with
all troves from the replacement set
SYNOPSIS
========
C{troveset.update(updateSet)}
DESCRIPTION
===========
        Returns a troveset that is a recursive union of the original
        troveset and C{updateSet}, except that, where the names of
        troves overlap, the versions from C{updateSet} are used, though
the choice of isInstall is honored from the original set.
The difference between C{TroveSet.update} and C{TroveSet.patch} is
how new troves introduced in C{updateSet} but not present in the
original set are handled. With C{TroveSet.patch}, the new
troves from C{patchSet} are not installed in the result; with
C{TroveSet.update}, the new troves are installed in the result if
they are installed in the C{updateSet}.
PARAMETERS
==========
- L{updateSet} : TroveSet providing all its contents
EXAMPLES
========
This is commonly used to update to new package versions while
preserving the semantics of a source group. This might be used
to apply a "hotfix". So if you are building a group based on
a specific version of a platform, and do not wish to move to
        a new version of the platform, except that you want to include
a specific new package that implements a necessary fix, this
is most likely the correct operation.
base = repos['group-standard']
# Use latest conary to resolve CNY-98765 until resolved
update = base.update(repos['conary=centos.rpath.com@rpath:centos-5'])
groupStandard = update.createGroup('group-standard')
"""
return self._action(updateSet, ActionClass = GroupUpdateAction)
def createGroup(self, name, checkPathConflicts = True, scripts = None,
imageGroup = False):
"""
NAME
====
B{C{TroveSet.createGroup}} - Create a binary group
SYNOPSIS
========
C{troveset.createGroup(name, checkPathConflicts=True, scripts=None)}
DESCRIPTION
===========
Create a new group whose members are defined by this
troveset, and call it C{name} (which must begin with
"C{group-}").
Returns a troveset which references this newly created group,
which allows it to be included in other trovesets, and hence,
other groups.
PARAMETERS
==========
- C{checkPathConflicts} : Raise an error if any paths overlap (C{True})
        - C{imageGroup} : (False) Designate that this group is an image group.
Image Group policies will be executed separately on this group.
- C{scripts} : Attach one or more scripts specified by a C{Scripts}
object (C{None})
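        EXAMPLES
        ========
        A minimal sketch (assuming C{repos} is a Repository object and
        the trove names exist there):
            mygrp = repos['group-standard'] + repos['other-package']
            mygrp += mygrp.depsNeeded(repos)
            groupStandard = mygrp.createGroup('group-standard')
        The returned C{groupStandard} TroveSet can itself be added to
        other TroveSets, nesting the new group inside further groups.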
"""
return self._action(name, checkPathConflicts = checkPathConflicts,
ActionClass = CreateNewGroupAction,
imageGroup = imageGroup,
scripts = scripts)
def _createGroup(self, name, checkPathConflicts = True, scripts = None,
imageGroup = False):
return self._action(name, ActionClass = CreateGroupAction,
checkPathConflicts = checkPathConflicts,
imageGroup = imageGroup,
scripts = scripts)
__add__ = union
__or__ = union
class GroupDelayedTroveTupleSet(GroupTupleSetMethods,
troveset.DelayedTupleSet):
def __init__(self, *args, **kwargs):
self._dump = False
self._lineNumStr = ''
index = findRecipeLineNumber()
if index is not None:
kwargs['index'] = index
self._lineNumStr = ':' + str(index)
troveset.DelayedTupleSet.__init__(self, *args, **kwargs)
def beenRealized(self, data):
def display(tupleSet):
if not tupleSet:
log.info("\t\t(empty)")
return
for (name, version, flavor) in sorted(tupleSet):
if isinstance(version, versions.NewVersion):
log.info("\t\t%s (newly created)" % name)
else:
log.info("\t\t%s=%s/%s[%s]"
% (name, version.trailingLabel(),
version.trailingRevision(), flavor))
troveset.DelayedTupleSet.beenRealized(self, data)
if self._dump or data.groupRecipe._dumpAll:
log.info("TroveSet contents for action %s" % str(self.action) +
self._lineNumStr)
log.info("\tInstall")
display(self._getInstallSet())
log.info("\tOptional")
display(self._getOptionalSet())
if data.groupRecipe._trackDict:
matches = []
foundMatch = False
try:
matches = self._findTroves(data.groupRecipe._trackDict.keys())
except errors.TroveNotFound:
matches = {}
if matches:
log.info("Tracking matches found in results for action %s"
% str(self.action) + self._lineNumStr)
for (parsedSpec, matchList) in matches.iteritems():
log.info("\tMatches for %s"
% data.groupRecipe._trackDict[parsedSpec])
display(matchList)
def dump(self):
self._dump = True
return self
class GroupLoggingDelayedTroveTupleSet(GroupDelayedTroveTupleSet):
def realize(self, *args):
mark = time.time()
if isinstance(self.action, GroupIncludeAction):
log.info("Including %s" % " ".join(
"%s=%s[%s]" % nvf for nvf in
self.action.includeSet._getInstallSet()))
else:
log.info("Running action %s" % str(self.action) + self._lineNumStr)
GroupDelayedTroveTupleSet.realize(self, *args)
runtime = time.time() - mark
if runtime > 0.1:
if isinstance(self.action, GroupIncludeAction):
log.info("\tinclude processing took %.1fs" % runtime)
else:
log.info("\ttook %.1fs" % runtime)
class GroupSearchPathTroveSet(troveset.SearchPathTroveSet):
'''
NAME
====
B{C{SearchPath}} - Collection of troves in which to search
SYNOPSIS
========
C{sp = r.SearchPath(TroveSet | Repository, ...)}
DESCRIPTION
===========
An object which searches multiple C{TroveSet} or C{Repository} objects
in the order specified. Troves can be looked up in that C{SearchPath}
object with the C{find} method, and the C{SearchPath} object can also
be used for resolving dependencies.
METHODS
=======
- L{find} : Search the SearchPath for specified troves
'''
_explainObjectName = 'SearchPath'
def find(self, *troveSpecs):
'''
NAME
====
B{C{SearchPath.find}} - Search the SearchPath for specified troves
SYNOPSIS
========
C{searchpath.find('troveSpec1', 'troveSpec2', ..., 'troveSpecN')}
C{searchpath['troveSpec']}
DESCRIPTION
===========
The B{SearchPath} is searched for troves which match the given
troveSpecs. All matches are included as installed in the
returned C{TroveSet}.
Each C{troveSpec} has the same format as a trove referenced on
the command line: C{name=version[flavor]}
- L{name} : Required: the full name of the trove
- L{version} : Optional: Any legal full or partial version,
with or without a full or partial label.
- L{flavor} : Optional: The flavor to match, composed with
the Repository flavor and the build configuration flavor.
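        EXAMPLES
        ========
        An illustrative sketch (assuming C{repos} and C{myrepos} are
        Repository objects defined elsewhere in the recipe):
            sp = r.SearchPath(repos['group-os'].flatten(), myrepos)
            glibcTroves = sp['glibc']
        C{sp} searches the flattened contents of C{group-os} before
        consulting C{myrepos}, in the order the sources were given.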
'''
return self._action(ActionClass = GroupFindAction, *troveSpecs)
__getitem__ = find
class GroupSearchSourceTroveSet(troveset.SearchSourceTroveSet):
# This is really GroupSetRecipe.Repository, documented here
# for the benefit of Repository.find and Repository.latestPackages
# Specifically, the synopsis describes GroupSetRecipe.Repository,
# not this underlying object.
'''
NAME
====
B{C{Repository}} - Source of trove references
SYNOPSIS
========
C{r.Repository(defaultLabelList, baseFlavor)}
DESCRIPTION
===========
A B{Repository} object is used to look troves up in a repository,
and provide references to those troves as B{TroveSet} objects.
It has a list of default labels (or a single default label) and
a default flavor; these are used when no label or flavor is provided
to the B{find} method.
METHODS
=======
- L{find} : Search the repository for specified troves
- L{latestPackages} : All the latest normal packages on the
default label(s)
'''
_explainObjectName = 'Repository'
def find(self, *troveSpecs):
'''
NAME
====
B{C{Repository.find}} - Search the Repository for specified troves
SYNOPSIS
========
C{repos.find('troveSpec1', 'troveSpec2', ..., 'troveSpecN')}
C{repos['troveSpec']}
DESCRIPTION
===========
The B{Repository} is searched for troves which match the given
troveSpecs. All matches are included as installed in the
returned C{TroveSet}.
Each C{troveSpec} has the same format as a trove referenced on
the command line: C{name=version[flavor]}
- L{name} : Required: the full name of the trove
- L{version} : Optional: Any legal full or partial version,
with or without a full or partial label.
- L{flavor} : Optional: The flavor to match, composed with
the Repository flavor and the build configuration flavor.
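        EXAMPLES
        ========
        A minimal sketch (the trove names and label are placeholders):
            groupOS = repos['group-os']
            compatGlibc = repos['glibc=@rpl:1-compat']
        The first form uses the Repository's default label(s) and
        flavor; the second restricts the match to the given label.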
'''
return self._action(ActionClass = GroupFindAction, *troveSpecs)
__getitem__ = find
def latestPackages(self):
'''
NAME
====
B{C{Repository.latestPackages}} - Get latest normal packages of the
default flavor on the default label
SYNOPSIS
========
C{repos.latestPackages()}
DESCRIPTION
===========
Returns a B{TroveSet} consisting of the latest packages and
filesets on the default search label. The troves returned are
those which best match the default flavor. Any troves which
have a redirect as their latest version are not included in
the returned TroveSet, nor are groups or components.
A package is considered latest only if it is built from the
latest source from which some binaries have been built. So
if the C{foo:source} package previously built both the C{foo}
and C{bar} packages, but the most recent binary version of
the C{bar} package is built from a C{foo:source} that did not
build a C{bar} package, the C{bar} package previously built
from C{foo:source} will not be considered latest. (Thus, a
redirect from C{bar} to nothing is not required here.)
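        EXAMPLES
        ========
        A minimal sketch (assuming C{repos} is a Repository object):
            latest = repos.latestPackages()
            latestGnome = latest.findByName('^gnome-')
        C{latest} references the newest packages and filesets on the
        default label(s); it can then be filtered like any TroveSet.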
'''
return self._action(ActionClass = LatestPackagesFromSearchSourceAction)
class GroupFindAction(troveset.FindAction):
resultClass = GroupDelayedTroveTupleSet
class GroupDelayedTupleSetAction(troveset.DelayedTupleSetAction):
resultClass = GroupDelayedTroveTupleSet
class GroupDifferenceAction(troveset.DifferenceAction):
resultClass = GroupDelayedTroveTupleSet
class GroupUnionAction(troveset.UnionAction):
resultClass = GroupDelayedTroveTupleSet
class GroupPatchAction(troveset.PatchAction):
resultClass = GroupDelayedTroveTupleSet
class GroupUpdateAction(troveset.UpdateAction):
resultClass = GroupDelayedTroveTupleSet
class ComponentsAction(GroupDelayedTupleSetAction):
def __init__(self, primaryTroveSet, *componentNames):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet)
self.componentNames = set(componentNames)
def componentsAction(self, data):
installSet = set()
optionalSet = set()
for (troveTup), inInstall, explicit in \
self.primaryTroveSet._walk(data.troveCache):
if not trove.troveIsComponent(troveTup[0]):
continue
componentName = troveTup[0].split(':')[1]
if componentName in self.componentNames:
if inInstall:
installSet.add(troveTup)
else:
optionalSet.add(troveTup)
self.outSet._setInstall(installSet)
self.outSet._setOptional(optionalSet)
return True
__call__ = componentsAction
class CopyAction(GroupDelayedTupleSetAction):
def copyAction(self, data):
self.outSet._setInstall(self.primaryTroveSet._getInstallSet())
self.outSet._setOptional(self.primaryTroveSet._getOptionalSet())
return True
__call__ = copyAction
class CreateGroupAction(GroupDelayedTupleSetAction):
prefilter = troveset.FetchAction
def __init__(self, primaryTroveSet, name, checkPathConflicts = True,
imageGroup = False, scripts = None):
if hasattr(scripts, "ts"):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet,
scripts.ts)
else:
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet)
self.name = name
self.checkPathConflicts = checkPathConflicts
self.imageGroup = imageGroup
self.scripts = scripts
def createGroupAction(self, data):
grp = SG(data.groupRecipe.name,
checkPathConflicts = self.checkPathConflicts,
imageGroup = self.imageGroup)
data.groupRecipe._addGroup(self.name, grp)
data.groupRecipe._setDefaultGroup(grp)
self._create(data.groupRecipe.defaultGroup,
self.primaryTroveSet, self.outSet, data)
return True
__call__ = createGroupAction
def _create(self, sg, ts, outSet, data):
if self.scripts is not None:
for script, scriptName in self.scripts.iterScripts():
sg.addScript(scriptName, script.contents, script.fromClass)
sg.populate(ts, data.troveCache)
outSet._setInstall([ (sg.name, versions.NewVersion(),
data.groupRecipe.flavor) ])
outSet.realized = True
def __str__(self):
return self.name
class CreateNewGroupAction(CreateGroupAction):
resultClass = GroupLoggingDelayedTroveTupleSet
def __init__(self, primaryTroveSet, name, checkPathConflicts = True,
scripts = None, imageGroup = False):
CreateGroupAction.__init__(self, primaryTroveSet, name,
checkPathConflicts = checkPathConflicts,
imageGroup = imageGroup,
scripts = scripts)
def createNewGroupAction(self, data):
newGroup = SG(self.name, checkPathConflicts = self.checkPathConflicts,
imageGroup = self.imageGroup)
data.groupRecipe._addGroup(self.name, newGroup)
self._create(newGroup, self.primaryTroveSet, self.outSet, data)
return True
__call__ = createNewGroupAction
class DelayedSearchPathTroveSet(GroupSearchPathTroveSet):
def __init__(self, troveSetList = None, graph = None, index = None,
action = None):
troveset.SearchPathTroveSet.__init__(self, troveSetList = troveSetList,
graph = graph, index = index)
self.action = action
assert(not self.troveSetList)
def realize(self, data):
result = self.action(data)
self.realized = True
return True
class GroupSearchPathFetch(troveset.DelayedTupleSetAction):
resultClass = DelayedSearchPathTroveSet
def __init__(self, *args, **kwargs):
troveset.DelayedTupleSetAction.__init__(self, *args, **kwargs)
def groupSearchPathFetch(self, data):
actionList = []
needed = self.primaryTroveSet.troveSetList[:]
while needed:
ts = needed.pop(0)
if isinstance(ts, troveset.TroveTupleSet):
f = troveset.FetchAction(ts)
actionList.append(f)
elif isinstance(ts, troveset.SearchPathTroveSet):
needed += ts.troveSetList
troveset.FetchAction._fetch(actionList, data)
self.outSet.setTroveSetList(self.primaryTroveSet.troveSetList)
return True
__call__ = groupSearchPathFetch
class DepsNeededAction(GroupDelayedTupleSetAction):
resultClass = GroupLoggingDelayedTroveTupleSet
def __init__(self, primaryTroveSet, resolveTroveSet,
failOnUnresolved = True):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet,
resolveTroveSet)
self.failOnUnresolved = failOnUnresolved
self.resolveTroveSet = resolveTroveSet
def depsNeededAction(self, data):
checker = PythonDependencyChecker(
data.troveCache,
ignoreDepClasses = [ deps.AbiDependency,
deps.RpmLibDependencies ])
troveList = []
for (troveTuple, isInstall, isExplicit) in \
self.primaryTroveSet._walk(data.troveCache,
newGroups = False, recurse = True):
if isInstall:
troveList.append(troveTuple)
jobSet = [ (n, (None, None), (v, f), False) for (n,v,f) in troveList ]
checker.addJobs(jobSet)
if self.resolveTroveSet:
# might be nice to share a single depDb across all instances
# of this class?
resolveMethod = (self.resolveTroveSet._getResolveSource(
depDb = deptable.DependencyDatabase()).
getResolveMethod())
else:
resolveMethod = None
failedDeps, suggMap = checker.resolve(resolveMethod)
if self.failOnUnresolved and failedDeps:
raise CookError("Unresolved Deps:\n" +
"\n".join(
[ "\t%s=%s[%s] requires %s" % (name, version, flavor, dep)
for ((name, version, flavor), dep) in failedDeps ]))
installSet = set()
for requiredBy, requiredSet in suggMap.iteritems():
installSet.update(requiredSet)
self.outSet._setInstall(installSet)
return True
__call__ = depsNeededAction
class GetInstalledAction(GroupDelayedTupleSetAction):
def getInstalledAction(self, data):
self.outSet._setInstall(self.primaryTroveSet._getInstallSet())
return True
__call__= getInstalledAction
class GetOptionalAction(GroupDelayedTupleSetAction):
def getOptionalAction(self, data):
self.outSet._setOptional(self.primaryTroveSet._getOptionalSet())
return True
__call__= getOptionalAction
class FindByNameAction(GroupDelayedTupleSetAction):
def __init__(self, primaryTroveSet, namePattern, emptyOkay = False):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet)
self.namePattern = namePattern
self.emptyOkay = emptyOkay
def findByNameAction(self, data):
def _gather(troveTupleSet, nameRegex):
s = set()
for troveTup in troveTupleSet:
if nameRegex.match(troveTup[0]):
s.add(troveTup)
return s
r = re.compile(self.namePattern + '\\Z')
install = _gather(self.primaryTroveSet._getInstallSet(), r)
self.outSet._setInstall(install)
optional = _gather(self.primaryTroveSet._getOptionalSet(), r)
self.outSet._setOptional(optional)
if (not self.emptyOkay and not install and not optional):
raise CookError("findByName() matched no trove names")
return True
__call__= findByNameAction
class FindBySourceNameAction(GroupDelayedTupleSetAction):
def __init__(self, primaryTroveSet, sourceName):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet)
self.sourceName = sourceName
def findBySourceNameAction(self, data):
troveTuples = (
list(itertools.izip(itertools.repeat(True),
self.primaryTroveSet._getInstallSet())) +
list(itertools.izip(itertools.repeat(False),
self.primaryTroveSet._getOptionalSet())) )
sourceNames = data.troveCache.getTroveInfo(
trove._TROVEINFO_TAG_SOURCENAME,
[ x[1] for x in troveTuples ])
installs = []
optional = []
for (isInstallSet, troveTup), sourceName in \
itertools.izip(troveTuples, sourceNames):
if sourceName() != self.sourceName:
continue
if isInstallSet:
installs.append(troveTup)
else:
optional.append(troveTup)
self.outSet._setInstall(installs)
self.outSet._setOptional(optional)
if (not installs and not optional):
raise CookError("findBySourceName() matched no trove names")
return True
__call__ = findBySourceNameAction
class IsEmptyAction(GroupDelayedTupleSetAction):
def isEmptyAction(self, data):
if (self.primaryTroveSet._getInstallSet() or
self.primaryTroveSet._getOptionalSet()):
raise CookError("Trove set is not empty")
# self.outSet is already empty
return True
__call__ = isEmptyAction
class IsNotEmptyAction(GroupDelayedTupleSetAction):
def isNotEmptyAction(self, data):
if (not self.primaryTroveSet._getInstallSet() and
not self.primaryTroveSet._getOptionalSet()):
raise CookError("Trove set is empty")
self.outSet._setInstall(self.primaryTroveSet._getInstallSet())
self.outSet._setOptional(self.primaryTroveSet._getOptionalSet())
return True
__call__ = isNotEmptyAction
class GroupIncludeAction(troveset.IncludeAction):
resultClass = GroupLoggingDelayedTroveTupleSet
class LatestPackagesFromSearchSourceAction(GroupDelayedTupleSetAction):
resultClass = GroupLoggingDelayedTroveTupleSet
def latestPackageFromSearchSourceAction(self, data):
troveSource = self.primaryTroveSet.searchSource.getTroveSource()
# data hiding? what's that
flavor = self.primaryTroveSet.searchSource.flavor
labelList = self.primaryTroveSet.searchSource.installLabelPath
d = { None : {} }
for label in labelList:
d[None][label] = [ flavor ]
matches = troveSource.getTroveLatestByLabel(d,
troveTypes=netclient.TROVE_QUERY_PRESENT, bestFlavor=True)
fullTupList = []
for name in matches:
if not (trove.troveIsPackage(name) or trove.troveIsFileSet(name)):
continue
for version in matches[name]:
for flavor in matches[name][version]:
fullTupList.append( (name, version, flavor) )
sourceNames = data.troveCache.getTroveInfo(
trove._TROVEINFO_TAG_SOURCENAME, fullTupList)
bySource = {}
for sourceName, troveTup in itertools.izip(sourceNames, fullTupList):
bySource.setdefault(sourceName(), []).append(troveTup)
resultTupList = []
for sourceName, tupList in bySource.iteritems():
            if len(tupList) > 1:
mostRecent = sorted([ x[1] for x in tupList ])[-1]
resultTupList += [ x for x in tupList if x[1] == mostRecent ]
else:
resultTupList += tupList
self.outSet._setInstall(resultTupList)
return True
__call__ = latestPackageFromSearchSourceAction
class MakeInstallAction(GroupDelayedTupleSetAction):
def __init__(self, primaryTroveSet, installTroveSet = None):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet,
installTroveSet)
self.installTroveSet = installTroveSet
def makeInstallAction(self, data):
if self.installTroveSet:
self.outSet._setOptional(self.primaryTroveSet._getOptionalSet())
self.outSet._setInstall(
(self.installTroveSet._getInstallSet() |
self.installTroveSet._getOptionalSet()))
else:
self.outSet._setInstall(self.primaryTroveSet._getInstallSet() |
self.primaryTroveSet._getOptionalSet())
return True
__call__ = makeInstallAction
class MakeOptionalAction(GroupDelayedTupleSetAction):
def __init__(self, primaryTroveSet, optionalTroveSet = None):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet,
optionalTroveSet)
self.optionalTroveSet = optionalTroveSet
def makeOptionalAction(self, data):
if self.optionalTroveSet:
self.outSet._setInstall(self.primaryTroveSet._getInstallSet())
self.outSet._setOptional(
(self.optionalTroveSet._getInstallSet() |
self.optionalTroveSet._getOptionalSet() |
self.primaryTroveSet._getOptionalSet()))
else:
self.outSet._setOptional(self.primaryTroveSet._getInstallSet() |
self.primaryTroveSet._getOptionalSet())
return True
__call__ = makeOptionalAction
class MembersAction(GroupDelayedTupleSetAction):
prefilter = troveset.FetchAction
justStrong = True
includeTop = False
def membersAction(self, data):
for (troveTuple, installSet) in itertools.chain(
itertools.izip(self.primaryTroveSet._getInstallSet(),
itertools.repeat(True)),
itertools.izip(self.primaryTroveSet._getOptionalSet(),
itertools.repeat(False))):
installs = []
available = []
if self.includeTop:
if installSet:
installs.append(troveTuple)
else:
available.append(troveTuple)
for (refTrove, byDefault, isStrong) in \
data.troveCache.iterTroveListInfo(troveTuple):
if self.justStrong and not isStrong:
continue
if byDefault:
installs.append(refTrove)
else:
available.append(refTrove)
self.outSet._setInstall(installs)
self.outSet._setOptional(available)
return True
__call__ = membersAction
class FlattenAction(MembersAction):
justStrong = False
includeTop = True
@classmethod
def Create(klass, primaryTroveSet):
if hasattr(primaryTroveSet, "_flattened"):
return primaryTroveSet._flattened
resultSet = super(FlattenAction, klass).Create(primaryTroveSet)
primaryTroveSet._flattened = resultSet
return resultSet
class GroupTroveSetFindAction(troveset.FindAction):
prefilter = FlattenAction
resultClass = GroupDelayedTroveTupleSet
def _applyFilters(self, l):
assert(len(l) == 1)
if hasattr(l[0], "_flattened"):
return [ l[0]._flattened ]
result = troveset.FindAction._applyFilters(self, l)
l[0]._flattened = result[0]
return result
class PackagesAction(GroupDelayedTupleSetAction):
prefilter = troveset.FetchAction
def __init__(self, primaryTroveSet):
GroupDelayedTupleSetAction.__init__(self, primaryTroveSet)
def packagesAction(self, data):
installSet = set()
optionalSet = set()
for (troveTup), inInstall, explicit in \
self.primaryTroveSet._walk(data.troveCache,
newGroups = False,
recurse = True):
if (not trove.troveIsPackage(troveTup[0]) and
not trove.troveIsFileSet(troveTup[0])):
continue
if inInstall:
installSet.add(troveTup)
else:
optionalSet.add(troveTup)
self.outSet._setInstall(installSet)
self.outSet._setOptional(optionalSet)
return True
__call__ = packagesAction
class ScriptsAction(GroupDelayedTupleSetAction):
prefilter = troveset.FetchAction
def __init__(self, *args, **kwargs):
GroupDelayedTupleSetAction.__init__(self, *args, **kwargs)
def getResultTupleSet(self, *args, **kwargs):
ts = GroupDelayedTupleSetAction.getResultTupleSet(self, *args, **kwargs)
ts.groupScripts = GroupScripts()
        # this back-reference is gross, but we use it to get the right
        # dependencies on things which use the scripts
ts.groupScripts.ts = ts
return ts
def scriptsAction(self, data):
totalSet = (self.primaryTroveSet._getInstallSet() |
self.primaryTroveSet._getOptionalSet())
if not totalSet:
raise CookError("Empty trove set for scripts()")
elif len(totalSet) > 1:
raise CookError("Multiple troves in trove set for scripts()")
troveTup = list(totalSet)[0]
trv = data.troveCache.getTroves([ troveTup ])[0]
groupScripts = self.outSet.groupScripts
for scriptName in GroupScripts._scriptNames:
trvScript = getattr(trv.troveInfo.scripts, scriptName[:-7])
if not trvScript.script():
continue
selfScript = getattr(groupScripts, scriptName[:-7])
selfScript.set(trvScript.script())
return True
__call__ = scriptsAction
class SG(_SingleGroup):
def __init__(self, *args, **kwargs):
_SingleGroup.__init__(self, *args, **kwargs)
self.autoResolve = False
self.depCheck = False
def populate(self, troveSet, troveCache):
seen = set()
for troveTup, byDefault, explicit in troveSet._walk(troveCache):
if not explicit:
continue
seen.add(troveTup)
if isinstance(troveTup[1], versions.NewVersion):
self.addNewGroup(troveTup[0], byDefault = byDefault,
explicit = True)
else:
self.addTrove(troveTup, explicit = True, byDefault = byDefault,
components = [])
for troveTup, byDefault, explicit in troveSet._walk(troveCache,
recurse = True):
if troveTup in seen:
# if it's explicit, it's already been seen
continue
seen.add(troveTup)
if isinstance(troveTup[1], versions.NewVersion):
self.addNewGroup(troveTup[0], byDefault = byDefault,
explicit = False)
else:
self.addTrove(troveTup, explicit = False, byDefault = byDefault,
components = [])
def iterAddSpecs(self):
return []
def iterAddAllSpecs(self):
return []
def iterReplaceSpecs(self):
return []
def iterDifferenceSpecs(self):
return []
def iterNewGroupDifferenceList(self):
return []
def iterCopiedFrom(self):
return []
def getComponentsToMove(self):
return []
def getRequires(self):
return deps.DependencySet()
class ModelCompiler(modelgraph.AbstractModelCompiler):
SearchPathTroveSet = GroupSearchPathTroveSet
FlattenAction = FlattenAction
IncludeAction = GroupIncludeAction
class GroupScript(object):
'''
NAME
====
B{C{Script}} - Specify script contents and compatibility class
SYNOPSIS
========
C{scriptObj = r.Script('#!/bin/sh...'I{, [fromClass = 1]})}
DESCRIPTION
===========
A B{C{Script}} object holds the contents, and optionally the
compatibility class, of a script that can then be attached to
one or more groups. The C{Scripts} object associates the
script with the type, and C{Group} and C{TroveSet.createGroup}
each take an optional C{scripts=} parameter to associate a
C{Scripts} object with a group being created.
EXAMPLE
=======
Create a script that attaches to multiple groups as multiple types::
myTroves = repos.find(...)
fixup = r.Script("""#!/bin/sh
[ -x /opt/me/fixme ] && /opt/me/fixme""")
fixscripts = r.Scripts(preUpdate=fixup, preRollback=fixup)
r.Group(myTroves, scripts=fixscripts)
'''
_explainObjectName = 'Script'
def __init__(self, contents, fromClass = None):
self.contents = contents
self.fromClass = fromClass
def set(self, contents, fromClass = None):
self.contents = contents
self.fromClass = fromClass
class GroupScripts(object):
'''
NAME
====
B{C{Scripts}} - Associate scripts with types
SYNOPSIS
========
C{scripts = r.Scripts(postInstall = script, preRollback = script, ...)}
DESCRIPTION
===========
A C{Script} object holds the contents, and optionally the
compatibility class, of a script that can then be attached to
one or more groups. The B{C{Scripts}} object associates the
script with the type, and C{Group} and C{TroveSet.createGroup}
each take an optional C{scripts=} parameter to associate a
C{Scripts} object with a group being created.
PARAMETERS
==========
Each of the parameters specifies a script type and takes a C{Script}
to associate with that script type.
- C{postInstall} : Specifies a script to run after the installation
of any group to which this script is attached.
- C{preRollback} : Specifies a script to run before the rollback
of any group to which this script is attached.
- C{postRollback} : Specifies a script to run after the rollback
of any group to which this script is attached.
- C{preUpdate} : Specifies a script to run before the update
of any group to which this script is attached.
- C{postUpdate} : Specifies a script to run after the update
of any group to which this script is attached.
EXAMPLE
=======
Create a script that attaches to multiple groups as multiple types::
innerTroves = repos.find(...)
myTroves = repos.find(...)
fixup = r.Script("""#!/bin/sh
[ -x /opt/me/fixme ] && /opt/me/fixme""")
fixscripts = r.Scripts(preUpdate=fixup, preRollback=fixup)
innerGroup = innerTroves.createGroup('group-inner', scripts=fixscripts)
r.Group(myTroves + innerGroup, scripts=fixscripts)
In general, you will not want to attach the same script to multiple
groups that will be updated at the same time. Conary will not
"de-duplicate" the scripts, and they will be run more than once
if you do so.
'''
_explainObjectName = 'Scripts'
_scriptNames = ('postInstallScripts', 'preRollbackScripts',
'postRollbackScripts', 'preUpdateScripts',
'postUpdateScripts')
def __init__(self, **kwargs):
for scriptName in self._scriptNames:
contents = kwargs.pop(scriptName[:-7], None)
if contents is None:
contents = GroupScript(None)
setattr(self, scriptName[:-7], contents)
if kwargs:
raise TypeError("GroupScripts() got an unexpected keyword "
"argument '%s'" % kwargs.keys()[0])
def iterScripts(self):
for scriptName in self._scriptNames:
script = getattr(self, scriptName[:-7])
if script is not None and script.contents is not None:
yield script, scriptName
class _GroupSetRecipe(_BaseGroupRecipe):
Flags = use.LocalFlags
internalAbstractBaseClass = 1
def __init__(self, repos, cfg, label, flavor, laReposCache, srcdirs=None,
extraMacros={}, lightInstance = False):
klass = self._getParentClass('_BaseGroupRecipe')
klass.__init__(self, laReposCache = laReposCache,
srcdirs = srcdirs,
lightInstance = lightInstance,
cfg = cfg)
self.troveSource = repos
self.repos = repos
self.Script = GroupScript
self.Scripts = GroupScripts
self.labelPath = [ label ]
self.buildLabel = label
self.flavor = flavor
self.searchSource = searchsource.NetworkSearchSource(
repos, self.labelPath, flavor)
self.g = troveset.OperationGraph()
self.world = GroupSearchSourceTroveSet(self.searchSource,
graph = self.g)
self.fileFinder = lookaside.FileFinder(self.name, self.laReposCache,
localDirs=self.srcdirs,
multiurlMap=self.multiurlMap,
mirrorDirs=cfg.mirrorDirs,
cfg=cfg)
self._dumpAll = False
self._trackDict = {}
for key in cfg.macros:
self.macros._override(key, cfg['macros'][key])
self.macros.name = self.name
self.macros.version = self.version
if '.' in self.version:
self.macros.major_version = '.'.join(self.version.split('.')[0:2])
else:
self.macros.major_version = self.version
if extraMacros:
self.macros.update(extraMacros)
def _findSources(self, *args, **kwargs):
# GroupSetRecipe does not implement recursive builds, so just
# return an empty list -- this allows rmake builds of
# GroupSetRecipe groups to work.
return []
def _realizeGraph(self, cache, callback):
data = GroupActionData(troveCache = GroupSetTroveCache(self, cache),
groupRecipe = self)
self.g.realize(data)
ordering = self.g.getTotalOrdering()
nv = versions.NewVersion()
allTroveTups = set()
for node in ordering:
if not isinstance(node, troveset.TroveTupleSet):
continue
allTroveTups.update(node._getInstallSet())
allTroveTups.update(node._getOptionalSet())
for outerName in self.getGroupNames():
allTroveTups.remove( (outerName, nv, self.flavor) )
for outerName in self.getPrimaryGroupNames():
grp = self._getGroup(outerName)
grp.setBuildRefs(allTroveTups
- set(grp.iterTroveList(strongRefs = True,
weakRefs = True))
- set((x, nv, self.flavor)
for x in grp.iterNewGroupList()))
def dumpAll(self):
'''
NAME
====
B{C{dumpAll}} - Display copious output describing each action.
        SYNOPSIS
========
C{r.dumpAll()}
DESCRIPTION
===========
Causes a GroupSetRecipe to print a textual listing of the
entire contents of each TroveSet as it is populated.
C{dumpAll} is a debugging tool and does not return a TroveSet.
'''
self._dumpAll = True
def getLabelPath(self):
return self.labelPath
def getSearchFlavor(self):
return self.flavor
def iterReplaceSpecs(self):
return []
def getResolveTroveSpecs(self):
return []
def getChildGroups(self, groupName = None):
return []
def getGroupMap(self, groupName = None):
return {}
def _getSearchSource(self):
return self.troveSource
def getSearchPath(self):
return [ ]
def writeDotGraph(self, path):
'''
NAME
====
B{C{GroupSetRecipe.writeDotGraph}} - write "dot" graph for recipe
SYNOPSIS
========
C{r.writeDotGraph('path')}
DESCRIPTION
===========
        Writes a description of the internal graph representation of
the elements of the GroupSetRecipe in C{dot} format. This
graph can be converted to SVG format using the dot command:
C{dot -Tsvg outputfile > outputfile.svg}
The resulting SVG file may be viewed in any tool capable of
displaying SVG files, including many Web browsers.
C{writeDotGraph} is a debugging tool and does not return a TroveSet.
'''
self.g.generateDotFile(path, edgeFormatFn = lambda a,b,c: c)
def Group(self, ts, checkPathConflicts = True, scripts = None,
imageGroup = False):
'''
NAME
====
B{C{GroupSetRecipe.Group}} - Create primary group object
SYNOPSIS
========
C{r.Group(troveSet, checkPathConflicts=True, scripts=None)}
DESCRIPTION
===========
Set the passed B{TroveSet} as the contents of the primary
group being built; the group that has the same name as
the source component. The return value is a troveset which
references the newly-created primary group. This returned
TroveSet can be used to create other groups which reference
the primary group.
PARAMETERS
==========
- C{checkPathConflicts} : Raise an error if any paths
overlap (C{True})
        - C{imageGroup} : (False) Designate that this group is an image group.
Image Group policies will be executed separately on this group.
- C{scripts} : Attach one or more scripts specified by a C{Scripts}
object (C{None})
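        EXAMPLE
        =======
        A minimal sketch; the repository label and trove name below are
        placeholders rather than part of any real recipe::

            world = r.Repository('conary.example.com@ex:1', r.flavor)
            pkgs = world.find('group-packages')
            r.Group(pkgs, checkPathConflicts=False)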
'''
return ts._createGroup(self.name,
checkPathConflicts = checkPathConflicts,
scripts = scripts, imageGroup = imageGroup)
def Repository(self, labelList, flavor):
# Documented in GroupSearchSourceTroveSet as "Repository" so that
# Repository.find and Repository.latestPackages documentation
# shows up in cvc explain
'''
See Repository.
'''
if type(labelList) == tuple:
            labelList = list(labelList)
elif type(labelList) != list:
labelList = [ labelList ]
for i, label in enumerate(labelList):
if type(label) == str:
labelList[i] = versions.Label(label)
elif not isinstance(label, versions.Label):
raise CookError('String label or Label object expected, got %r'%
label)
if type(flavor) == str:
flavor = deps.parseFlavor(flavor)
searchSource = searchsource.NetworkSearchSource(
self.repos, labelList, flavor)
return GroupSearchSourceTroveSet(searchSource, graph = self.g)
def SearchPath(self, *troveSets):
# Documented in GroupSearchPathTroveSet as "SearchPath" so that
# SearchPath.find documentation shows up in cvc explain
'''
See SearchPath.
'''
notTroveSets = [repr(x) for x in troveSets
if not isinstance(x, troveset.TroveSet)]
if notTroveSets:
raise CookError('Invalid arguments %s: SearchPath arguments must be'
' Repository or TroveSet' %', '.join(notTroveSets))
return GroupSearchPathTroveSet(troveSets, graph = self.g)
def CML(self, modelText, searchPath = None):
"""
NAME
====
B{C{GroupSetRecipe.CML}} - Build TroveSet from CML specification
SYNOPSIS
========
C{r.CML(modelText, searchPath=None)}
DESCRIPTION
===========
Builds a TroveSet from a specification in Conary Modelling
        Language (CML). The optional C{searchPath} initializes the
search path; search lines from the system model are prepended
to any provided C{searchPath}.
Returns a standard troveset with an extra attribute called
C{searchPath}, which is a TroveSet representing the final
SearchPath from the model. This search path is normally used
for dependency resolution.
PARAMETERS
==========
- C{modelText} (Required) : the text of the model in CML
- C{searchPath} (Optional) : an initial search path, a fallback
sought after any items provided in the model.
EXAMPLE
=======
To build a group from a system defined in CML, provide
the contents of the /etc/conary/system-model file as the
C{modelText}. This may be completely literal (leading white
space is ignored in CML)::
ts = r.CML('''
search group-os=conary.rpath.com@rpl:2/2.0.1-0.9-30
install group-appliance-platform
install httpd
install mod_ssl
''')
needed = ts.depsNeeded(ts.searchPath)
finalSet = ts + needed
If you are using a product definition and want to use the
search path it provides as the context for the model, it
might look like this::
repo = r.Repository('conary.rpath.com@rpl:2', r.flavor)
# Fetch latest packages on build label first
searchPathList = [ r.Repository(r.macros.buildlabel, r.flavor) ]
if 'productDefinitionSearchPath' in r.macros:
# proper build with product definition
searchPathList.extend([repo[x] for x in
r.macros.productDefinitionSearchPath.split('\\\\n')])
else:
# local test build against specific version
searchPathList.extend(
repo['group-os=conary.rpath.com@rpl:2/2.0.1-0.9-30'])
searchPath = r.SearchPath(*searchPathList)
ts = r.CML('''
install group-appliance-platform
install httpd
install mod_ssl
''', searchPath=searchPath)
needed = ts.depsNeeded(ts.searchPath)
finalSet = ts + needed
"""
if searchPath is None:
searchSource = searchsource.NetworkSearchSource(
self.repos, [], self.flavor)
searchPath = GroupSearchSourceTroveSet(searchSource,
graph = self.g)
model = cml.CML(None)
lineNum = findRecipeLineNumber()
if isinstance(modelText, str):
modelText = modelText.split('\n')
model.parse(modelText, context = '(recipe):%d' % lineNum)
comp = ModelCompiler(self.flavor, self.repos, self.g, searchPath, None)
sysModelSet = comp.build(model)
result = sysModelSet._action(ActionClass = CopyAction)
result.searchPath = sysModelSet.searchPath
return result
def track(self, troveSpec):
'''
NAME
====
B{C{GroupSetRecipe.track}}
SYNOPSIS
========
C{r.track('troveSpec')}
DESCRIPTION
===========
Prints out actions that match the provided C{troveSpec}. Usually
used when a trove is unexpectedly present or missing in one or
more TroveSets (or their resulting groups), in order to learn why
the trove is present or missing.
C{track} is a debugging tool and does not return a TroveSet.
'''
self._trackDict[parseTroveSpec(troveSpec)] = troveSpec
from conary.build.packagerecipe import BaseRequiresRecipe
exec defaultrecipes.GroupSetRecipe
|
# -*- coding: utf-8 -*-
from numpy import insert
from SciDataTool import DataFreq
from pyleecan.Functions.Electrical.dqh_transformation import dqh2n_DataTime
def store(self, out_dict, out_dict_harm):
"""Store the standard outputs of Electrical that are temporarily in out_dict as arrays into OutElec as Data object
Parameters
----------
self : OutElec
the OutElec object to update
out_dict : dict
Dict containing all electrical quantities that have been calculated in EEC
out_dict_harm : dict
Dict containing harmonic quantities that have been calculated in EEC
"""
# Store Id, Iq, Ud, Uq
self.OP.Id_ref = out_dict["Id"]
self.OP.Iq_ref = out_dict["Iq"]
self.OP.Ud_ref = out_dict["Ud"]
self.OP.Uq_ref = out_dict["Uq"]
# Compute currents
self.Is = None
self.Is = self.get_Is()
# Compute voltage
self.Us = None
self.Us = self.get_Us()
self.Pj_losses = out_dict["Pj_losses"]
self.Tem_av_ref = out_dict["Tem_av_ref"]
self.Pem_av_ref = out_dict["Pem_av_ref"]
if "Is_harm" in out_dict_harm:
# Create Data object
# Add f=0Hz
out_dict_harm["axes_list"][0].initial = 0
out_dict_harm["axes_list"][0].number += 1
values = insert(out_dict_harm["Is_harm"], 0, 0, axis=0)
Is_dqh = DataFreq(
name="Harmonic stator current",
unit="A",
symbol="I_s^{harm}",
axes=out_dict_harm["axes_list"],
values=values,
)
# ifft
Is_dqh_time = Is_dqh.freq_to_time()
qs = self.parent.simu.machine.stator.winding.qs
# back to ABC
Is_abc = dqh2n_DataTime(
Is_dqh_time, qs, is_n_rms=True, phase_dir=self.phase_dir
)
self.Is_harm = Is_abc.time_to_freq()
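# Illustrative sketch (not part of pyleecan): store() expects the scalar keys
# assigned above in out_dict, while out_dict_harm may be empty when no harmonic
# currents were computed; "out_elec" stands for an existing OutElec instance.
#
# out_dict = {"Id": 10.0, "Iq": 5.0, "Ud": 230.0, "Uq": 12.0,
# "Pj_losses": 150.0, "Tem_av_ref": 35.0, "Pem_av_ref": 11000.0}
# out_elec.store(out_dict, out_dict_harm={})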
|
from django.contrib import admin
from .models import Membership, UserMembership, Subcription
admin.site.register(Membership)
admin.site.register(UserMembership)
admin.site.register(Subcription)
|
from Expresion.Binaria import Binaria
from Expresion.Aritmetica import Aritmetica
from Expresion.Unaria import Unaria
from Expresion.Logica import Logica
from Expresion.FuncionesNativas import FuncionesNativas
from Entorno import Entorno
from Tipo import Tipo
from Expresion.Terminal import Terminal
from tkinter import *
from Expresion.variablesestaticas import *
from reportes import *
class Relacional(Binaria):
def __init__(self, exp1, exp2, operador):
        'The values from the parent classes are used'
Binaria.__init__(self, exp1, exp2, operador)
def getval(self,entorno):
if isinstance(self.exp1,Terminal) and isinstance(self.exp2,Terminal):
if (self.exp1.tipo.tipo == 'identificador' or self.exp2.tipo.tipo == 'identificador'):
return self
        valizq = self.exp1.getval(entorno)
        valder = self.exp2.getval(entorno)
        valizq = valizq.valor
        valder = valder.valor
        try:
            if self.operador == '>':
                self.valor = valizq > valder
            elif self.operador == '<':
                self.valor = valizq < valder
            elif self.operador == '>=':
                self.valor = valizq >= valder
            elif self.operador == '<=':
                self.valor = valizq <= valder
            elif self.operador == '<>':
                self.valor = valizq != valder
            elif self.operador == '=':
                self.valor = valizq == valder
self.tipo = 'boolean'
return self
        except:
            reporteerrores.append(Lerrores("Semantic Error",
                                           'The types being compared do not match',
                                           0, 0))
            variables.consola.insert(INSERT,
                                     'The types being compared do not match\n')
            return
|
def dostuff():
print("stuff happens here")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
wlanpi_webui
~~~~~~~~~~~~
a custom WebUI made to run locally on the WLAN Pi
"""
import logging
from logging.handlers import RotatingFileHandler
from wlanpi_webui.app import create_app
if __name__ == "__main__":
app = create_app()
log_filename = "app.log"
logging.basicConfig(
filename=log_filename,
level=logging.DEBUG,
format=f"%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s",
)
handler = RotatingFileHandler(log_filename, maxBytes=10000, backupCount=2)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
    app.run(host="0.0.0.0", port=5000, debug=True)
|
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models.fields.files import FieldFile
from django.test import TestCase
from django.urls import resolve
from django.utils import timezone
from apps.core.constants import MAX_DATASET_SIZE
from ..forms import CreateExperimentForm
from ..models import Experiment
from ..views import CreateExperimentView, MainExperimentView
UserModel = get_user_model()
class CreateExperimentViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserModel.objects.create_user(username='guest', email="guest@guest.gr")
cls.url = '/experiments/create/'
def setUp(self):
self.client.force_login(self.user)
def tearDown(self):
"""
To delete all dummy csv files from disk
"""
for experiment in Experiment.objects.all():
experiment.delete()
super().tearDown()
    def test_experiment_url_resolves_to_CreateExperimentView(self):
found = resolve(self.url)
self.assertEqual(found.func.__name__, CreateExperimentView.as_view().__name__)
def test_methods_allowed(self):
response = self.client.options(self.url)
self.assertEqual(response['allow'], 'GET, POST')
def test_GET_template(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'experiments/create_experiment.html')
def test_GET_has_CreateExperimentForm(self):
response = self.client.get(self.url)
self.assertIsInstance(response.context['form'], CreateExperimentForm)
def test_unauthorized_user_redirect(self):
self.client.logout()
response = self.client.options(self.url)
self.assertEqual(response.status_code, 302)
def test_unauthorized_user_redirect_page(self):
self.client.logout()
response = self.client.options(self.url)
self.assertRedirects(response, '/accounts/login/?next=/experiments/create/')
def test_POST_success_creates_new_experiment(self):
self.client.post(
self.url,
data={
"name": "test_demo",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(Experiment.objects.count(), 1)
self.assertEqual(Experiment.objects.first().name, "test_demo")
def test_POST_success_creates_saves_dataset(self):
self.client.post(
self.url,
data={
"name": "test_demo",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(
Experiment.objects.first().dataset.url,
f"/datasets/{self.user.username}/{timezone.now().date().strftime('%Y/%m')}/"
f"{Experiment.objects.first().id}.csv",
)
def test_POST_success_redirects_to_homepage(self):
response = self.client.post(
self.url,
data={
"name": "test_demo",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, f"/experiments/{Experiment.objects.first().id}/main/")
self.assertEqual(response['location'], f"/experiments/{Experiment.objects.first().id}/main/")
def test_POST_success_saves_description(self):
self.client.post(
self.url,
data={
"name": "test_demo",
"description": "Dummy",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(Experiment.objects.first().description, "Dummy")
def test_POST_success_saves_experiment_to_correct_user(self):
another_user = UserModel.objects.create_user(username='guest2', email="guest2@guest.gr")
self.client.post(
self.url,
data={
"name": "test_demo",
"description": "Dummy",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(Experiment.objects.filter(experimenter=self.user).count(), 1)
self.assertEqual(Experiment.objects.filter(experimenter=another_user).count(), 0)
def test_POST_validation_error_required_fields(self):
response = self.client.post(self.url, data={})
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.count(), 0)
self.assertTrue('required' in response.context['form'].errors['name'][0])
self.assertTrue('required' in response.context['form'].errors['dataset'][0])
def test_POST_validation_error_file_extension(self):
response = self.client.post(
self.url,
data={
"name": "test_demo",
"description": "Dummy",
"dataset": SimpleUploadedFile("demo_file.txt", b"Dummy"),
},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.count(), 0)
self.assertTrue('File extension “txt” is not allowed.' in response.context['form'].errors['dataset'][0])
def test_POST_validation_error_big_file(self):
with patch.object(FieldFile, 'size', MAX_DATASET_SIZE + 1):
response = self.client.post(
self.url,
data={
"name": "test_demo",
"description": "Dummy",
"dataset": SimpleUploadedFile("demo_file.csv", b"Dummy"),
},
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.count(), 0)
self.assertTrue('demo_file.csv must be less than 1MB' in response.context['form'].errors['dataset'][0])
class MainExperimentViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserModel.objects.create_user(username='guest', email="guest@guest.gr")
cls.experiment = Experiment.objects.create(
experimenter=cls.user, name="test_demo", dataset=SimpleUploadedFile("demo_file.csv", b"Dummy")
)
cls.url = f"/experiments/{cls.experiment.id}/main/"
def setUp(self):
self.client.force_login(self.user)
@classmethod
def tearDownClass(cls):
cls.experiment.delete()
super().tearDownClass()
    def test_experiment_url_resolves_to_MainExperimentView(self):
found = resolve(self.url)
self.assertEqual(found.func.__name__, MainExperimentView.as_view().__name__)
def test_methods_allowed(self):
response = self.client.options(self.url)
self.assertEqual(response['allow'], 'GET')
def test_GET_template(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'experiments/main_experiment.html')
def test_unauthorized_user_redirect(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_unauthorized_user_redirect_page(self):
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(response, f"/accounts/login/?next=/experiments/{self.experiment.id}/main/")
def test_object_in_context_data(self):
response = self.client.get(self.url)
self.assertEqual(response.context['object'].name, self.experiment.name)
def test_only_the_experiments_owner_can_see_the_main_page(self):
self.client.logout()
another_user = UserModel.objects.create_user(username='guest2', email="guest2@guest.gr")
self.client.force_login(another_user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
|
# Hacked from winnt.h
DELETE = (65536)
READ_CONTROL = (131072)
WRITE_DAC = (262144)
WRITE_OWNER = (524288)
SYNCHRONIZE = (1048576)
STANDARD_RIGHTS_REQUIRED = (983040)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (2031616)
SPECIFIC_RIGHTS_ALL = (65535)
ACCESS_SYSTEM_SECURITY = (16777216)
MAXIMUM_ALLOWED = (33554432)
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
GENERIC_EXECUTE = (536870912)
GENERIC_ALL = (268435456)
# file security permissions
FILE_READ_DATA= ( 1 )
FILE_LIST_DIRECTORY= ( 1 )
FILE_WRITE_DATA= ( 2 )
FILE_ADD_FILE= ( 2 )
FILE_APPEND_DATA= ( 4 )
FILE_ADD_SUBDIRECTORY= ( 4 )
FILE_CREATE_PIPE_INSTANCE= ( 4 )
FILE_READ_EA= ( 8 )
FILE_WRITE_EA= ( 16 )
FILE_EXECUTE= ( 32 )
FILE_TRAVERSE= ( 32 )
FILE_DELETE_CHILD= ( 64 )
FILE_READ_ATTRIBUTES= ( 128 )
FILE_WRITE_ATTRIBUTES= ( 256 )
FILE_ALL_ACCESS= (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 511)
FILE_GENERIC_READ= (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
FILE_GENERIC_WRITE= (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
FILE_GENERIC_EXECUTE= (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
SECURITY_NULL_SID_AUTHORITY = (0,0,0,0,0,0)
SECURITY_WORLD_SID_AUTHORITY = (0,0,0,0,0,1)
SECURITY_LOCAL_SID_AUTHORITY = (0,0,0,0,0,2)
SECURITY_CREATOR_SID_AUTHORITY = (0,0,0,0,0,3)
SECURITY_NON_UNIQUE_AUTHORITY = (0,0,0,0,0,4)
SECURITY_RESOURCE_MANAGER_AUTHORITY = (0,0,0,0,0,9)
SECURITY_NULL_RID = 0
SECURITY_WORLD_RID = 0
SECURITY_LOCAL_RID = 0X00000000
SECURITY_CREATOR_OWNER_RID = 0
SECURITY_CREATOR_GROUP_RID = 1
SECURITY_CREATOR_OWNER_SERVER_RID = 2
SECURITY_CREATOR_GROUP_SERVER_RID = 3
SECURITY_CREATOR_OWNER_RIGHTS_RID = 4
# NT well-known SIDs
SECURITY_NT_AUTHORITY = (0,0,0,0,0,5)
SECURITY_DIALUP_RID = 1
SECURITY_NETWORK_RID = 2
SECURITY_BATCH_RID = 3
SECURITY_INTERACTIVE_RID = 4
SECURITY_SERVICE_RID = 6
SECURITY_ANONYMOUS_LOGON_RID = 7
SECURITY_PROXY_RID = 8
SECURITY_SERVER_LOGON_RID = 9
SECURITY_LOGON_IDS_RID = 5
SECURITY_LOGON_IDS_RID_COUNT = 3
SECURITY_LOCAL_SYSTEM_RID = 18
SECURITY_NT_NON_UNIQUE = 21
SECURITY_BUILTIN_DOMAIN_RID = 32
# well-known domain relative sub-authority values (RIDs)...
DOMAIN_USER_RID_ADMIN = 500
DOMAIN_USER_RID_GUEST = 501
DOMAIN_USER_RID_KRBTGT = 502
DOMAIN_USER_RID_MAX = 999
# well-known groups ...
DOMAIN_GROUP_RID_ADMINS = 512
DOMAIN_GROUP_RID_USERS = 513
DOMAIN_GROUP_RID_GUESTS = 514
DOMAIN_GROUP_RID_COMPUTERS = 515
DOMAIN_GROUP_RID_CONTROLLERS = 516
DOMAIN_GROUP_RID_CERT_ADMINS = 517
DOMAIN_GROUP_RID_SCHEMA_ADMINS = 518
DOMAIN_GROUP_RID_ENTERPRISE_ADMINS = 519
DOMAIN_GROUP_RID_POLICY_ADMINS = 520
DOMAIN_GROUP_RID_READONLY_CONTROLLERS = 521
# well-known aliases ...
DOMAIN_ALIAS_RID_ADMINS = 544
DOMAIN_ALIAS_RID_USERS = 545
DOMAIN_ALIAS_RID_GUESTS = 546
DOMAIN_ALIAS_RID_POWER_USERS = 547
DOMAIN_ALIAS_RID_ACCOUNT_OPS = 548
DOMAIN_ALIAS_RID_SYSTEM_OPS = 549
DOMAIN_ALIAS_RID_PRINT_OPS = 550
DOMAIN_ALIAS_RID_BACKUP_OPS = 551
DOMAIN_ALIAS_RID_REPLICATOR = 552
DOMAIN_ALIAS_RID_RAS_SERVERS = 553
DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 554
DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 555
DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 556
DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 557
DOMAIN_ALIAS_RID_MONITORING_USERS = 558
DOMAIN_ALIAS_RID_LOGGING_USERS = 559
DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 560
DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 561
DOMAIN_ALIAS_RID_DCOM_USERS = 562
DOMAIN_ALIAS_RID_IUSERS = 568
DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 569
DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 571
DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 572
DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 573
SECURITY_MANDATORY_LABEL_AUTHORITY = (0,0,0,0,0,16)
SECURITY_MANDATORY_UNTRUSTED_RID = 0x00000000
SECURITY_MANDATORY_LOW_RID = 0x00001000
SECURITY_MANDATORY_MEDIUM_RID = 0x00002000
SECURITY_MANDATORY_HIGH_RID = 0x00003000
SECURITY_MANDATORY_SYSTEM_RID = 0x00004000
SECURITY_MANDATORY_PROTECTED_PROCESS_RID = 0x00005000
SECURITY_MANDATORY_MAXIMUM_USER_RID = SECURITY_MANDATORY_SYSTEM_RID
SYSTEM_LUID = (999, 0)
ANONYMOUS_LOGON_LUID = (998, 0)
LOCALSERVICE_LUID = (997, 0)
NETWORKSERVICE_LUID = (996, 0)
IUSER_LUID = (995, 0)
# Group attributes
SE_GROUP_MANDATORY = 1
SE_GROUP_ENABLED_BY_DEFAULT = 2
SE_GROUP_ENABLED = 4
SE_GROUP_OWNER = 8
SE_GROUP_USE_FOR_DENY_ONLY = 16
SE_GROUP_INTEGRITY = 32
SE_GROUP_INTEGRITY_ENABLED = 64
SE_GROUP_RESOURCE = 536870912
SE_GROUP_LOGON_ID = -1073741824
# User attributes
# (None yet defined.)
# ACE types
ACCESS_MIN_MS_ACE_TYPE = (0)
ACCESS_ALLOWED_ACE_TYPE = (0)
ACCESS_DENIED_ACE_TYPE = (1)
SYSTEM_AUDIT_ACE_TYPE = (2)
SYSTEM_ALARM_ACE_TYPE = (3)
ACCESS_MAX_MS_V2_ACE_TYPE = (3)
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = (4)
ACCESS_MAX_MS_V3_ACE_TYPE = (4)
ACCESS_MIN_MS_OBJECT_ACE_TYPE = (5)
ACCESS_ALLOWED_OBJECT_ACE_TYPE = (5)
ACCESS_DENIED_OBJECT_ACE_TYPE = (6)
SYSTEM_AUDIT_OBJECT_ACE_TYPE = (7)
SYSTEM_ALARM_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_V4_ACE_TYPE = (8)
ACCESS_MAX_MS_ACE_TYPE = (8)
ACCESS_ALLOWED_CALLBACK_ACE_TYPE = 9
ACCESS_DENIED_CALLBACK_ACE_TYPE = 10
ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE = 11
ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE = 12
SYSTEM_AUDIT_CALLBACK_ACE_TYPE = 13
SYSTEM_ALARM_CALLBACK_ACE_TYPE = 14
SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE = 15
SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE = 16
SYSTEM_MANDATORY_LABEL_ACE_TYPE = 17
ACCESS_MAX_MS_V5_ACE_TYPE = 17
# The following are the inherit flags that go into the AceFlags field
# of an Ace header.
OBJECT_INHERIT_ACE = 1
CONTAINER_INHERIT_ACE = 2
NO_PROPAGATE_INHERIT_ACE = 4
INHERIT_ONLY_ACE = 8
VALID_INHERIT_FLAGS = 15
SUCCESSFUL_ACCESS_ACE_FLAG = 64
FAILED_ACCESS_ACE_FLAG = 128
SE_OWNER_DEFAULTED = 1
SE_GROUP_DEFAULTED = 2
SE_DACL_PRESENT = 4
SE_DACL_DEFAULTED = 8
SE_SACL_PRESENT = 16
SE_SACL_DEFAULTED = 32
SE_SELF_RELATIVE = 32768
SE_PRIVILEGE_ENABLED_BY_DEFAULT = 1
SE_PRIVILEGE_ENABLED = 2
SE_PRIVILEGE_USED_FOR_ACCESS = -2147483648
PRIVILEGE_SET_ALL_NECESSARY = 1
# NT Defined Privileges
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
# Enum SECURITY_IMPERSONATION_LEVEL:
SecurityAnonymous = 0
SecurityIdentification = 1
SecurityImpersonation = 2
SecurityDelegation = 3
SECURITY_MAX_IMPERSONATION_LEVEL = SecurityDelegation
DEFAULT_IMPERSONATION_LEVEL = SecurityImpersonation
TOKEN_ASSIGN_PRIMARY = 1
TOKEN_DUPLICATE = 2
TOKEN_IMPERSONATE = 4
TOKEN_QUERY = 8
TOKEN_QUERY_SOURCE = 16
TOKEN_ADJUST_PRIVILEGES = 32
TOKEN_ADJUST_GROUPS = 64
TOKEN_ADJUST_DEFAULT = 128
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |\
TOKEN_ASSIGN_PRIMARY |\
TOKEN_DUPLICATE |\
TOKEN_IMPERSONATE |\
TOKEN_QUERY |\
TOKEN_QUERY_SOURCE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_READ = (STANDARD_RIGHTS_READ |\
TOKEN_QUERY)
TOKEN_WRITE = (STANDARD_RIGHTS_WRITE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_EXECUTE = (STANDARD_RIGHTS_EXECUTE)
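# For reference, the composite masks above expand to TOKEN_READ == 0x20008 and
# TOKEN_ALL_ACCESS == 0xF00FF; a granted mask can be tested with e.g. `mask & TOKEN_QUERY`.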
SidTypeUser = 1
SidTypeGroup = 2
SidTypeDomain = 3
SidTypeAlias = 4
SidTypeWellKnownGroup = 5
SidTypeDeletedAccount = 6
SidTypeInvalid = 7
SidTypeUnknown = 8
SidTypeComputer = 9
SidTypeLabel = 10
# Token types
TokenPrimary = 1
TokenImpersonation = 2
# TOKEN_INFORMATION_CLASS, used with Get/SetTokenInformation
TokenUser = 1
TokenGroups = 2
TokenPrivileges = 3
TokenOwner = 4
TokenPrimaryGroup = 5
TokenDefaultDacl = 6
TokenSource = 7
TokenType = 8
TokenImpersonationLevel = 9
TokenStatistics = 10
TokenRestrictedSids = 11
TokenSessionId = 12
TokenGroupsAndPrivileges = 13
TokenSessionReference = 14
TokenSandBoxInert = 15
TokenAuditPolicy = 16
TokenOrigin = 17
TokenElevationType = 18
TokenLinkedToken = 19
TokenElevation = 20
TokenHasRestrictions = 21
TokenAccessInformation = 22
TokenVirtualizationAllowed = 23
TokenVirtualizationEnabled = 24
TokenIntegrityLevel = 25
TokenUIAccess = 26
TokenMandatoryPolicy = 27
TokenLogonSid = 28
# DirectoryService related constants.
# Generated by h2py from NtDsAPI.h
DS_BEHAVIOR_WIN2000 = 0
DS_BEHAVIOR_WIN2003_WITH_MIXED_DOMAINS = 1
DS_BEHAVIOR_WIN2003 = 2
DS_SYNCED_EVENT_NAME = "NTDSInitialSyncsCompleted"
ACTRL_DS_OPEN = 0x00000000
ACTRL_DS_CREATE_CHILD = 0x00000001
ACTRL_DS_DELETE_CHILD = 0x00000002
ACTRL_DS_LIST = 0x00000004
ACTRL_DS_SELF = 0x00000008
ACTRL_DS_READ_PROP = 0x00000010
ACTRL_DS_WRITE_PROP = 0x00000020
ACTRL_DS_DELETE_TREE = 0x00000040
ACTRL_DS_LIST_OBJECT = 0x00000080
ACTRL_DS_CONTROL_ACCESS = 0x00000100
NTDSAPI_BIND_ALLOW_DELEGATION = (0x00000001)
DS_REPSYNC_ASYNCHRONOUS_OPERATION = 0x00000001
DS_REPSYNC_WRITEABLE = 0x00000002
DS_REPSYNC_PERIODIC = 0x00000004
DS_REPSYNC_INTERSITE_MESSAGING = 0x00000008
DS_REPSYNC_ALL_SOURCES = 0x00000010
DS_REPSYNC_FULL = 0x00000020
DS_REPSYNC_URGENT = 0x00000040
DS_REPSYNC_NO_DISCARD = 0x00000080
DS_REPSYNC_FORCE = 0x00000100
DS_REPSYNC_ADD_REFERENCE = 0x00000200
DS_REPSYNC_NEVER_COMPLETED = 0x00000400
DS_REPSYNC_TWO_WAY = 0x00000800
DS_REPSYNC_NEVER_NOTIFY = 0x00001000
DS_REPSYNC_INITIAL = 0x00002000
DS_REPSYNC_USE_COMPRESSION = 0x00004000
DS_REPSYNC_ABANDONED = 0x00008000
DS_REPSYNC_INITIAL_IN_PROGRESS = 0x00010000
DS_REPSYNC_PARTIAL_ATTRIBUTE_SET = 0x00020000
DS_REPSYNC_REQUEUE = 0x00040000
DS_REPSYNC_NOTIFICATION = 0x00080000
DS_REPSYNC_ASYNCHRONOUS_REPLICA = 0x00100000
DS_REPSYNC_CRITICAL = 0x00200000
DS_REPSYNC_FULL_IN_PROGRESS = 0x00400000
DS_REPSYNC_PREEMPTED = 0x00800000
DS_REPADD_ASYNCHRONOUS_OPERATION = 0x00000001
DS_REPADD_WRITEABLE = 0x00000002
DS_REPADD_INITIAL = 0x00000004
DS_REPADD_PERIODIC = 0x00000008
DS_REPADD_INTERSITE_MESSAGING = 0x00000010
DS_REPADD_ASYNCHRONOUS_REPLICA = 0x00000020
DS_REPADD_DISABLE_NOTIFICATION = 0x00000040
DS_REPADD_DISABLE_PERIODIC = 0x00000080
DS_REPADD_USE_COMPRESSION = 0x00000100
DS_REPADD_NEVER_NOTIFY = 0x00000200
DS_REPADD_TWO_WAY = 0x00000400
DS_REPADD_CRITICAL = 0x00000800
DS_REPDEL_ASYNCHRONOUS_OPERATION = 0x00000001
DS_REPDEL_WRITEABLE = 0x00000002
DS_REPDEL_INTERSITE_MESSAGING = 0x00000004
DS_REPDEL_IGNORE_ERRORS = 0x00000008
DS_REPDEL_LOCAL_ONLY = 0x00000010
DS_REPDEL_NO_SOURCE = 0x00000020
DS_REPDEL_REF_OK = 0x00000040
DS_REPMOD_ASYNCHRONOUS_OPERATION = 0x00000001
DS_REPMOD_WRITEABLE = 0x00000002
DS_REPMOD_UPDATE_FLAGS = 0x00000001
DS_REPMOD_UPDATE_ADDRESS = 0x00000002
DS_REPMOD_UPDATE_SCHEDULE = 0x00000004
DS_REPMOD_UPDATE_RESULT = 0x00000008
DS_REPMOD_UPDATE_TRANSPORT = 0x00000010
DS_REPUPD_ASYNCHRONOUS_OPERATION = 0x00000001
DS_REPUPD_WRITEABLE = 0x00000002
DS_REPUPD_ADD_REFERENCE = 0x00000004
DS_REPUPD_DELETE_REFERENCE = 0x00000008
DS_INSTANCETYPE_IS_NC_HEAD = 0x00000001
DS_INSTANCETYPE_NC_IS_WRITEABLE = 0x00000004
DS_INSTANCETYPE_NC_COMING = 0x00000010
DS_INSTANCETYPE_NC_GOING = 0x00000020
NTDSDSA_OPT_IS_GC = ( 1 << 0 )
NTDSDSA_OPT_DISABLE_INBOUND_REPL = ( 1 << 1 )
NTDSDSA_OPT_DISABLE_OUTBOUND_REPL = ( 1 << 2 )
NTDSDSA_OPT_DISABLE_NTDSCONN_XLATE = ( 1 << 3 )
NTDSCONN_OPT_IS_GENERATED = ( 1 << 0 )
NTDSCONN_OPT_TWOWAY_SYNC = ( 1 << 1 )
NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT = (1 << 2 )
NTDSCONN_OPT_USE_NOTIFY = (1 << 3)
NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION = (1 << 4)
NTDSCONN_OPT_USER_OWNED_SCHEDULE = (1 << 5)
NTDSCONN_KCC_NO_REASON = ( 0 )
NTDSCONN_KCC_GC_TOPOLOGY = ( 1 << 0 )
NTDSCONN_KCC_RING_TOPOLOGY = ( 1 << 1 )
NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY = ( 1 << 2 )
NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY = ( 1 << 3 )
NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY = ( 1 << 4 )
NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY = (1 << 5)
NTDSCONN_KCC_INTERSITE_TOPOLOGY = (1 << 6)
NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY = (1 << 7)
NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY = (1 << 8)
NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY = (1 << 9)
FRSCONN_PRIORITY_MASK = 0x70000000
FRSCONN_MAX_PRIORITY = 0x8
NTDSCONN_OPT_IGNORE_SCHEDULE_MASK = (-2147483648)
NTDSSETTINGS_OPT_IS_AUTO_TOPOLOGY_DISABLED = ( 1 << 0 )
NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED = ( 1 << 1 )
NTDSSETTINGS_OPT_IS_TOPL_MIN_HOPS_DISABLED = ( 1 << 2 )
NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = ( 1 << 3 )
NTDSSETTINGS_OPT_IS_INTER_SITE_AUTO_TOPOLOGY_DISABLED = ( 1 << 4 )
NTDSSETTINGS_OPT_IS_GROUP_CACHING_ENABLED = ( 1 << 5 )
NTDSSETTINGS_OPT_FORCE_KCC_WHISTLER_BEHAVIOR = ( 1 << 6 )
NTDSSETTINGS_OPT_FORCE_KCC_W2K_ELECTION = ( 1 << 7 )
NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED = ( 1 << 8 )
NTDSSETTINGS_OPT_IS_SCHEDULE_HASHING_ENABLED = ( 1 << 9 )
NTDSSETTINGS_OPT_IS_REDUNDANT_SERVER_TOPOLOGY_ENABLED = ( 1 << 10 )
NTDSSETTINGS_DEFAULT_SERVER_REDUNDANCY = 2
NTDSTRANSPORT_OPT_IGNORE_SCHEDULES = ( 1 << 0 )
NTDSTRANSPORT_OPT_BRIDGES_REQUIRED = (1 << 1 )
NTDSSITECONN_OPT_USE_NOTIFY = ( 1 << 0 )
NTDSSITECONN_OPT_TWOWAY_SYNC = ( 1 << 1 )
NTDSSITECONN_OPT_DISABLE_COMPRESSION = ( 1 << 2 )
NTDSSITELINK_OPT_USE_NOTIFY = ( 1 << 0 )
NTDSSITELINK_OPT_TWOWAY_SYNC = ( 1 << 1 )
NTDSSITELINK_OPT_DISABLE_COMPRESSION = ( 1 << 2 )
GUID_USERS_CONTAINER_A = "a9d1ca15768811d1aded00c04fd8d5cd"
GUID_COMPUTRS_CONTAINER_A = "aa312825768811d1aded00c04fd8d5cd"
GUID_SYSTEMS_CONTAINER_A = "ab1d30f3768811d1aded00c04fd8d5cd"
GUID_DOMAIN_CONTROLLERS_CONTAINER_A = "a361b2ffffd211d1aa4b00c04fd7d83a"
GUID_INFRASTRUCTURE_CONTAINER_A = "2fbac1870ade11d297c400c04fd8d5cd"
GUID_DELETED_OBJECTS_CONTAINER_A = "18e2ea80684f11d2b9aa00c04f79f805"
GUID_LOSTANDFOUND_CONTAINER_A = "ab8153b7768811d1aded00c04fd8d5cd"
GUID_FOREIGNSECURITYPRINCIPALS_CONTAINER_A = "22b70c67d56e4efb91e9300fca3dc1aa"
GUID_PROGRAM_DATA_CONTAINER_A = "09460c08ae1e4a4ea0f64aee7daa1e5a"
GUID_MICROSOFT_PROGRAM_DATA_CONTAINER_A = "f4be92a4c777485e878e9421d53087db"
GUID_NTDS_QUOTAS_CONTAINER_A = "6227f0af1fc2410d8e3bb10615bb5b0f"
GUID_USERS_CONTAINER_BYTE = "\xa9\xd1\xca\x15\x76\x88\x11\xd1\xad\xed\x00\xc0\x4f\xd8\xd5\xcd"
GUID_COMPUTRS_CONTAINER_BYTE = "\xaa\x31\x28\x25\x76\x88\x11\xd1\xad\xed\x00\xc0\x4f\xd8\xd5\xcd"
GUID_SYSTEMS_CONTAINER_BYTE = "\xab\x1d\x30\xf3\x76\x88\x11\xd1\xad\xed\x00\xc0\x4f\xd8\xd5\xcd"
GUID_DOMAIN_CONTROLLERS_CONTAINER_BYTE = "\xa3\x61\xb2\xff\xff\xd2\x11\xd1\xaa\x4b\x00\xc0\x4f\xd7\xd8\x3a"
GUID_INFRASTRUCTURE_CONTAINER_BYTE = "\x2f\xba\xc1\x87\x0a\xde\x11\xd2\x97\xc4\x00\xc0\x4f\xd8\xd5\xcd"
GUID_DELETED_OBJECTS_CONTAINER_BYTE = "\x18\xe2\xea\x80\x68\x4f\x11\xd2\xb9\xaa\x00\xc0\x4f\x79\xf8\x05"
GUID_LOSTANDFOUND_CONTAINER_BYTE = "\xab\x81\x53\xb7\x76\x88\x11\xd1\xad\xed\x00\xc0\x4f\xd8\xd5\xcd"
GUID_FOREIGNSECURITYPRINCIPALS_CONTAINER_BYTE = "\x22\xb7\x0c\x67\xd5\x6e\x4e\xfb\x91\xe9\x30\x0f\xca\x3d\xc1\xaa"
GUID_PROGRAM_DATA_CONTAINER_BYTE = "\x09\x46\x0c\x08\xae\x1e\x4a\x4e\xa0\xf6\x4a\xee\x7d\xaa\x1e\x5a"
GUID_MICROSOFT_PROGRAM_DATA_CONTAINER_BYTE = "\xf4\xbe\x92\xa4\xc7\x77\x48\x5e\x87\x8e\x94\x21\xd5\x30\x87\xdb"
GUID_NTDS_QUOTAS_CONTAINER_BYTE = "\x62\x27\xf0\xaf\x1f\xc2\x41\x0d\x8e\x3b\xb1\x06\x15\xbb\x5b\x0f"
DS_REPSYNCALL_NO_OPTIONS = 0x00000000
DS_REPSYNCALL_ABORT_IF_SERVER_UNAVAILABLE = 0x00000001
DS_REPSYNCALL_SYNC_ADJACENT_SERVERS_ONLY = 0x00000002
DS_REPSYNCALL_ID_SERVERS_BY_DN = 0x00000004
DS_REPSYNCALL_DO_NOT_SYNC = 0x00000008
DS_REPSYNCALL_SKIP_INITIAL_CHECK = 0x00000010
DS_REPSYNCALL_PUSH_CHANGES_OUTWARD = 0x00000020
DS_REPSYNCALL_CROSS_SITE_BOUNDARIES = 0x00000040
DS_LIST_DSA_OBJECT_FOR_SERVER = 0
DS_LIST_DNS_HOST_NAME_FOR_SERVER = 1
DS_LIST_ACCOUNT_OBJECT_FOR_SERVER = 2
DS_ROLE_SCHEMA_OWNER = 0
DS_ROLE_DOMAIN_OWNER = 1
DS_ROLE_PDC_OWNER = 2
DS_ROLE_RID_OWNER = 3
DS_ROLE_INFRASTRUCTURE_OWNER = 4
DS_SCHEMA_GUID_NOT_FOUND = 0
DS_SCHEMA_GUID_ATTR = 1
DS_SCHEMA_GUID_ATTR_SET = 2
DS_SCHEMA_GUID_CLASS = 3
DS_SCHEMA_GUID_CONTROL_RIGHT = 4
DS_KCC_FLAG_ASYNC_OP = (1 << 0)
DS_KCC_FLAG_DAMPED = (1 << 1)
DS_EXIST_ADVISORY_MODE = (0x1)
DS_REPL_INFO_FLAG_IMPROVE_LINKED_ATTRS = (0x00000001)
DS_REPL_NBR_WRITEABLE = (0x00000010)
DS_REPL_NBR_SYNC_ON_STARTUP = (0x00000020)
DS_REPL_NBR_DO_SCHEDULED_SYNCS = (0x00000040)
DS_REPL_NBR_USE_ASYNC_INTERSITE_TRANSPORT = (0x00000080)
DS_REPL_NBR_TWO_WAY_SYNC = (0x00000200)
DS_REPL_NBR_RETURN_OBJECT_PARENTS = (0x00000800)
DS_REPL_NBR_FULL_SYNC_IN_PROGRESS = (0x00010000)
DS_REPL_NBR_FULL_SYNC_NEXT_PACKET = (0x00020000)
DS_REPL_NBR_NEVER_SYNCED = (0x00200000)
DS_REPL_NBR_PREEMPTED = (0x01000000)
DS_REPL_NBR_IGNORE_CHANGE_NOTIFICATIONS = (0x04000000)
DS_REPL_NBR_DISABLE_SCHEDULED_SYNC = (0x08000000)
DS_REPL_NBR_COMPRESS_CHANGES = (0x10000000)
DS_REPL_NBR_NO_CHANGE_NOTIFICATIONS = (0x20000000)
DS_REPL_NBR_PARTIAL_ATTRIBUTE_SET = (0x40000000)
DS_REPL_NBR_MODIFIABLE_MASK = \
( \
DS_REPL_NBR_SYNC_ON_STARTUP | \
DS_REPL_NBR_DO_SCHEDULED_SYNCS | \
DS_REPL_NBR_TWO_WAY_SYNC | \
DS_REPL_NBR_IGNORE_CHANGE_NOTIFICATIONS | \
DS_REPL_NBR_DISABLE_SCHEDULED_SYNC | \
DS_REPL_NBR_COMPRESS_CHANGES | \
DS_REPL_NBR_NO_CHANGE_NOTIFICATIONS \
)
# from enum DS_NAME_FORMAT
DS_UNKNOWN_NAME = 0
DS_FQDN_1779_NAME = 1
DS_NT4_ACCOUNT_NAME = 2
DS_DISPLAY_NAME = 3
DS_UNIQUE_ID_NAME = 6
DS_CANONICAL_NAME = 7
DS_USER_PRINCIPAL_NAME = 8
DS_CANONICAL_NAME_EX = 9
DS_SERVICE_PRINCIPAL_NAME = 10
DS_SID_OR_SID_HISTORY_NAME = 11
DS_DNS_DOMAIN_NAME = 12
DS_DOMAIN_SIMPLE_NAME = DS_USER_PRINCIPAL_NAME
DS_ENTERPRISE_SIMPLE_NAME = DS_USER_PRINCIPAL_NAME
# from enum DS_NAME_FLAGS
DS_NAME_NO_FLAGS = 0x0
DS_NAME_FLAG_SYNTACTICAL_ONLY = 0x1
DS_NAME_FLAG_EVAL_AT_DC = 0x2
DS_NAME_FLAG_GCVERIFY = 0x4
DS_NAME_FLAG_TRUST_REFERRAL = 0x8
# from enum DS_NAME_ERROR
DS_NAME_NO_ERROR = 0
DS_NAME_ERROR_RESOLVING = 1
DS_NAME_ERROR_NOT_FOUND = 2
DS_NAME_ERROR_NOT_UNIQUE = 3
DS_NAME_ERROR_NO_MAPPING = 4
DS_NAME_ERROR_DOMAIN_ONLY = 5
DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING = 6
DS_NAME_ERROR_TRUST_REFERRAL = 7
# from enum DS_SPN_NAME_TYPE
DS_SPN_DNS_HOST = 0
DS_SPN_DN_HOST = 1
DS_SPN_NB_HOST = 2
DS_SPN_DOMAIN = 3
DS_SPN_NB_DOMAIN = 4
DS_SPN_SERVICE = 5
# from enum DS_SPN_WRITE_OP
DS_SPN_ADD_SPN_OP = 0
DS_SPN_REPLACE_SPN_OP = 1
DS_SPN_DELETE_SPN_OP = 2
# Generated by h2py from DsGetDC.h
DS_FORCE_REDISCOVERY = 0x00000001
DS_DIRECTORY_SERVICE_REQUIRED = 0x00000010
DS_DIRECTORY_SERVICE_PREFERRED = 0x00000020
DS_GC_SERVER_REQUIRED = 0x00000040
DS_PDC_REQUIRED = 0x00000080
DS_BACKGROUND_ONLY = 0x00000100
DS_IP_REQUIRED = 0x00000200
DS_KDC_REQUIRED = 0x00000400
DS_TIMESERV_REQUIRED = 0x00000800
DS_WRITABLE_REQUIRED = 0x00001000
DS_GOOD_TIMESERV_PREFERRED = 0x00002000
DS_AVOID_SELF = 0x00004000
DS_ONLY_LDAP_NEEDED = 0x00008000
DS_IS_FLAT_NAME = 0x00010000
DS_IS_DNS_NAME = 0x00020000
DS_RETURN_DNS_NAME = 0x40000000
DS_RETURN_FLAT_NAME = (-2147483648)
DSGETDC_VALID_FLAGS = ( \
DS_FORCE_REDISCOVERY | \
DS_DIRECTORY_SERVICE_REQUIRED | \
DS_DIRECTORY_SERVICE_PREFERRED | \
DS_GC_SERVER_REQUIRED | \
DS_PDC_REQUIRED | \
DS_BACKGROUND_ONLY | \
DS_IP_REQUIRED | \
DS_KDC_REQUIRED | \
DS_TIMESERV_REQUIRED | \
DS_WRITABLE_REQUIRED | \
DS_GOOD_TIMESERV_PREFERRED | \
DS_AVOID_SELF | \
DS_ONLY_LDAP_NEEDED | \
DS_IS_FLAT_NAME | \
DS_IS_DNS_NAME | \
DS_RETURN_FLAT_NAME | \
DS_RETURN_DNS_NAME )
DS_INET_ADDRESS = 1
DS_NETBIOS_ADDRESS = 2
DS_PDC_FLAG = 0x00000001
DS_GC_FLAG = 0x00000004
DS_LDAP_FLAG = 0x00000008
DS_DS_FLAG = 0x00000010
DS_KDC_FLAG = 0x00000020
DS_TIMESERV_FLAG = 0x00000040
DS_CLOSEST_FLAG = 0x00000080
DS_WRITABLE_FLAG = 0x00000100
DS_GOOD_TIMESERV_FLAG = 0x00000200
DS_NDNC_FLAG = 0x00000400
DS_PING_FLAGS = 0x0000FFFF
DS_DNS_CONTROLLER_FLAG = 0x20000000
DS_DNS_DOMAIN_FLAG = 0x40000000
DS_DNS_FOREST_FLAG = (-2147483648)
DS_DOMAIN_IN_FOREST = 0x0001
DS_DOMAIN_DIRECT_OUTBOUND = 0x0002
DS_DOMAIN_TREE_ROOT = 0x0004
DS_DOMAIN_PRIMARY = 0x0008
DS_DOMAIN_NATIVE_MODE = 0x0010
DS_DOMAIN_DIRECT_INBOUND = 0x0020
DS_DOMAIN_VALID_FLAGS = ( \
DS_DOMAIN_IN_FOREST | \
DS_DOMAIN_DIRECT_OUTBOUND | \
DS_DOMAIN_TREE_ROOT | \
DS_DOMAIN_PRIMARY | \
DS_DOMAIN_NATIVE_MODE | \
DS_DOMAIN_DIRECT_INBOUND )
DS_GFTI_UPDATE_TDO = 0x1
DS_GFTI_VALID_FLAGS = 0x1
DS_ONLY_DO_SITE_NAME = 0x01
DS_NOTIFY_AFTER_SITE_RECORDS = 0x02
DS_OPEN_VALID_OPTION_FLAGS = ( DS_ONLY_DO_SITE_NAME | DS_NOTIFY_AFTER_SITE_RECORDS )
DS_OPEN_VALID_FLAGS = ( \
DS_FORCE_REDISCOVERY | \
DS_ONLY_LDAP_NEEDED | \
DS_KDC_REQUIRED | \
DS_PDC_REQUIRED | \
DS_GC_SERVER_REQUIRED | \
DS_WRITABLE_REQUIRED )
## from aclui.h
# SI_OBJECT_INFO.dwFlags
SI_EDIT_PERMS = 0x00000000
SI_EDIT_OWNER = 0x00000001
SI_EDIT_AUDITS = 0x00000002
SI_CONTAINER = 0x00000004
SI_READONLY = 0x00000008
SI_ADVANCED = 0x00000010
SI_RESET = 0x00000020
SI_OWNER_READONLY = 0x00000040
SI_EDIT_PROPERTIES = 0x00000080
SI_OWNER_RECURSE = 0x00000100
SI_NO_ACL_PROTECT = 0x00000200
SI_NO_TREE_APPLY = 0x00000400
SI_PAGE_TITLE = 0x00000800
SI_SERVER_IS_DC = 0x00001000
SI_RESET_DACL_TREE = 0x00004000
SI_RESET_SACL_TREE = 0x00008000
SI_OBJECT_GUID = 0x00010000
SI_EDIT_EFFECTIVE = 0x00020000
SI_RESET_DACL = 0x00040000
SI_RESET_SACL = 0x00080000
SI_RESET_OWNER = 0x00100000
SI_NO_ADDITIONAL_PERMISSION = 0x00200000
SI_MAY_WRITE = 0x10000000
SI_EDIT_ALL = (SI_EDIT_PERMS | SI_EDIT_OWNER | SI_EDIT_AUDITS)
SI_AUDITS_ELEVATION_REQUIRED = 0x02000000
SI_VIEW_ONLY = 0x00400000
SI_OWNER_ELEVATION_REQUIRED = 0x04000000
SI_PERMS_ELEVATION_REQUIRED = 0x01000000
# SI_ACCESS.dwFlags
SI_ACCESS_SPECIFIC = 0x00010000
SI_ACCESS_GENERAL = 0x00020000
SI_ACCESS_CONTAINER = 0x00040000
SI_ACCESS_PROPERTY = 0x00080000
# SI_PAGE_TYPE enum
SI_PAGE_PERM = 0
SI_PAGE_ADVPERM = 1
SI_PAGE_AUDIT = 2
SI_PAGE_OWNER = 3
SI_PAGE_EFFECTIVE = 4
CFSTR_ACLUI_SID_INFO_LIST = "CFSTR_ACLUI_SID_INFO_LIST"
PSPCB_SI_INITDIALOG = 1025 ## WM_USER+1
|
from django.core.management.base import BaseCommand
from django.conf import settings
from django.apps import apps
import os
import sys
import multiprocessing
root = os.getcwd()
django_project = os.path.basename(root)
class Command(BaseCommand):
help = "Runs this project as a uWSGI application. Requires the uwsgi binary in system path."
http_port = os.getenv('PORT', '8000') # for heroku
socket_addr = None
def handle(self, *args, **options):
for arg in args:
k, v = arg.split('=')
if k == 'http':
if self.http_port:
self.http_port = v
elif k == 'socket':
self.http_port = None
self.socket_addr = v
# load the Django WSGI handler
os.environ['UWSGI_MODULE'] = '%s.wsgi' % django_project
# DJANGO settings
if options['settings']:
os.environ['DJANGO_SETTINGS_MODULE'] = options['settings']
else:
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % django_project
# set protocol as uwsgi
os.environ['UWSGI_PROTOCOL'] = 'uwsgi'
# bind the http server to the default port
if self.http_port:
os.environ['UWSGI_HTTP_SOCKET'] = ':%s' % self.http_port
elif self.socket_addr:
os.environ['UWSGI_UWSGI_SOCKET'] = self.socket_addr
os.environ['UWSGI_CHMOD_SOCKET'] = '664'
# set process names
os.environ['UWSGI_AUTO_PROCNAME'] = 'true'
os.environ['UWSGI_PROCNAME_PREFIX_SPACED'] = '[uWSGI %s]' % django_project
# remove sockets/pidfile at exit
os.environ['UWSGI_VACUUM'] = 'true'
# retrieve/set the PythonHome
os.environ['UWSGI_VIRTUALENV'] = sys.prefix
# add project to python path
os.environ['UWSGI_PP'] = root
os.environ['UWSGI_POST_BUFFERING'] = '1048576'
os.environ['UWSGI_RELOAD_ON_RSS'] = '300'
# increase buffer size a bit
os.environ['UWSGI_BUFFER_SIZE'] = '65535'
# some additions required by newrelic
os.environ['UWSGI_ENABLE_THREADS'] = 'true'
os.environ['UWSGI_LAZY_APPS'] = 'true'
os.environ['UWSGI_SINGLE_INTERPRETER'] = 'true'
os.environ['UWSGI_AUTOLOAD'] = 'true'
# set 12 workers and cheaper to number of cpus
os.environ['UWSGI_WORKERS'] = '12'
os.environ['UWSGI_CHEAPER'] = str(multiprocessing.cpu_count())
# enable the master process
os.environ['UWSGI_MASTER'] = 'true'
os.environ['UWSGI_NO_ORPHANS'] = 'true'
os.environ['UWSGI_MEMORY_REPORT'] = 'true'
os.environ['UWSGI_DISABLE_LOGGING'] = 'true'
# set harakiri
os.environ['UWSGI_HARAKIRI'] = '60'
os.environ['UWSGI_HARAKIRI_VERBOSE'] = 'true'
# set uid and gid
os.environ['UWSGI_UID'] = str(os.getuid())
os.environ['UWSGI_GID'] = str(os.getgid())
# TODO: Figure out cache
os.environ['UWSGI_CACHE2'] = 'name=%s,items=20000,keysize=128,blocksize=4096' % django_project
if settings.DEBUG:
if apps.is_installed('configurations'):
os.environ.setdefault('DJANGO_CONFIGURATION', 'Development')
import configurations
configurations.setup()
# map and serve static files
os.environ['UWSGI_STATIC_MAP'] = '%s=%s' % (settings.STATIC_URL, settings.STATIC_ROOT)
os.environ['UWSGI_PY_AUTORELOAD'] = '2'
# run spooler for mail task
if 'django_uwsgi' in settings.EMAIL_BACKEND:
os.environ['UWSGI_SPOOLER'] = '/tmp'
os.environ['UWSGI_SPOOLER_IMPORT'] = 'django_uwsgi.task'
# exec the uwsgi binary
if apps.ready:
os.execvp('uwsgi', ('uwsgi',))
    def usage(self, subcommand):
return r"""
run this project on the uWSGI server
http=PORT run the embedded http server on port PORT
socket=ADDR bind the uwsgi server on address ADDR (this will disable the http server)
"""
|
# An array A consists of n integers in locations A[0], A[1] ....A[n-1].
# It is required to shift the elements of the array cyclically to the left by k places, where 1 <= k <= (n-1).
def rotate(ar, k):
    # Cycle-leader rotation: shift the elements of ar cyclically to the left
    # by k places, in place, following index cycles until every slot is visited.
    n = min_visited = len(ar)
    i = 0
    while i < min_visited:
        temp = ar[i]
        j = i
        # Walk one cycle: slot j receives the element k places to its right.
        while j != (n + i - k) % n:
            ar[j] = ar[(j + k) % n]
            j = (j + k) % n
            if j < min_visited:
                min_visited = j
        ar[(n + i - k) % n] = temp
        i += 1
    print(ar)
ar = [0,1,2,3,4,5,6,7]
rotate(ar, 3)
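# Sanity check (illustrative): a left rotation by k should match simple slicing.
# For ar = [0,1,2,3,4,5,6,7] and k = 3 both give [3, 4, 5, 6, 7, 0, 1, 2].
expected = [0, 1, 2, 3, 4, 5, 6, 7][3:] + [0, 1, 2, 3, 4, 5, 6, 7][:3]
print(expected)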
|
# Python Native
import time
# 3rdparty
import sensor_harm
start = time.time()
safel1c = '/path/to/S2/L1C.SAFE'
sr_dir = '/path/to/S2/SR/images/' #can also use L2A.SAFE dir
target_dir = '/path/to/output/NBAR/'
sensor_harm.sentinel_harmonize(safel1c, sr_dir, target_dir, apply_bandpass=True)
end = time.time()
print(f'Duration time: {end - start}')
|
'''Neopixel wrapper that supports simulation.
This module supports 4 modes of operation:
- Running on a Raspberry PI via the Adafruit blinka library.
- Running on a Circuit Python board using the Adafruit Neopixel library.
- Running using ../circuitpy_sim, which uses tkinter to draw simulated
graphical LEDs on a Linux workstation
- Running with simulation=True passed to the constructor, which is
"headless" (i.e. no LED or graphical output). You can set() and get()
colors for testing, but that's about it.
Installing dependencies:
- Raspberry PI:
(https://learn.adafruit.com/neopixels-on-raspberry-pi/python-usage)
# sudo pip3 install rpi_ws281x adafruit-circuitpython-neopixel
# (not needed?) sudo python3 -m pip install --force-reinstall adafruit-blinka
- TODO: other modes...
Various pieces copied from Adafruit's libraries. Thanks Adafruit!
'''
# ---------- color def constants
AMBER = (255, 100, 0)
AQUA = (50, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
CYAN = (0, 255, 255)
GOLD = (255, 222, 30)
GREEN = (0, 255, 0)
JADE = (0, 255, 40)
MAGENTA = (255, 0, 20)
OFF = (0, 0, 0)
ORANGE = (255, 40, 0)
PINK = (242, 90, 255)
PURPLE = (180, 0, 255)
RED = (255, 0, 0)
TEAL = (0, 255, 120)
WHITE = (255, 255, 255)
YELLOW = (255, 150, 0)
RGBW_WHITE_RGB = (255, 255, 255, 0)
RGBW_WHITE_RGBW = (255, 255, 255, 255)
RGBW_WHITE_W = (0, 0, 0, 255)
# ---------- color mapping functions
# map circuit python pixel API into rpi_ws281x python API
# (based on http://circuitpython.readthedocs.io/projects/neopixel/en/latest/_modules/neopixel.html)
def wheel(pos):
'''Takes an int between 0 and 255, returns a color tuple r->g->b->r...'''
if pos < 85:
return (int(pos*3), int(255 - (pos*3)), 0)
elif pos < 170:
pos -= 85
return (int(255 - (pos*3)), 0, int(pos*3))
else:
pos -= 170
return (0, int(pos*3), int(255 - pos*3))
def color_to_rgb(color):
'''Takes a color number (e.g. 0x123456) and returns an RGB tuple (e.g. (0x12, 0x34, 0x56))'''
store = []
for i in range(3):
element = color & 0xff
store.append(element)
color = color >> 8
tmp = store[::-1] # reverses list order
return tuple(i for i in tmp)
def rgb_to_color(rgb):
'''Takes a rgb tuple and returns a merged color number.'''
color = 0
for i in rgb: color = (color << 8) + int(i)
return color
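# Examples (round trip): color_to_rgb(0x123456) == (0x12, 0x34, 0x56),
# rgb_to_color((0x12, 0x34, 0x56)) == 0x123456, and wheel(0) == (0, 255, 0).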
# ---------- Neopixel abstraction
class Neo(object):
'''Object oriented abstraction for Adafruit Neopixels.
n is the number of chained neopixels.
RPi in non-simulation mode:
- MUST BE RUN BY ROOT
- Allowed pins: GPIO10(doesn't work?), GPIO12, GPIO18, or GPIO21
- will autoselect D18 (Adafruit's default) if not otherwise specified.
reverse_rg is used for cases where the hardware has red and green LEDs in reverse order
(i.e. if you ask for red and get green, set this). include_w is for RGBW leds.'''
# ---------- general API
def __init__(self, n=1, pin=None, brightness=1.0,
auto_write=True, simulation=False,
reverse_rg=False, include_w=False):
self._auto_write = auto_write
self._brightness = brightness
self._n = n
self._vals = [0] * n
if simulation:
self._strip = None
return
import neopixel
if include_w:
order = neopixel.RGBW if reverse_rg else neopixel.GRBW
else:
order = neopixel.RGB if reverse_rg else neopixel.GRB
if not pin:
import board
pin = board.D18
self._strip = neopixel.NeoPixel(pin, n, brightness=brightness, auto_write=auto_write, pixel_order=order)
def get(self, index):
if index >= self._n or index < 0: raise IndexError
return self._vals[index]
def set(self, index, value):
if index < 0: index += len(self)
if index >= self._n or index < 0: raise IndexError
self._vals[index] = value # Raw value, without brightness applied.
if self._strip: self._strip[index] = value
@property
def brightness(self): return self._brightness
@brightness.setter
def brightness(self, brightness):
self._brightness = min(max(brightness, 0.0), 1.0)
if self._strip: self._strip.brightness = self._brightness
def redraw(self):
for i in range(self._n): self.set(i, self._vals[i])
if not self._auto_write: self.show()
def show(self):
if self._strip: self._strip.show()
# ----- helpers to set multiple LEDs
def black(self):
self.fill(0)
if not self._auto_write: self.show()
def fill(self, color):
for i in range(self._n): self.set(i, color)
if not self._auto_write: self.show()
def fill_wheel(self):
for i in range(self._n):
wheel_pos = int(255.0 * i / self._n)
self.set(i, wheel(wheel_pos))
if not self._auto_write: self.show()
def wipe(self):
self.black()
# ---------- internals
def __getitem__(self, index): return self._vals[index]
# basically just a wrapper around set() that supports slices.
    def __setitem__(self, index, val):
        if isinstance(index, slice):
            # Note: the slice stop is treated as inclusive here.
            for offset, pixel_index in enumerate(range(index.start, index.stop + 1, index.step or 1)):
                v = val[offset] if isinstance(val, list) else val
                self.set(pixel_index, v)
        else: self.set(index, val)
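# A minimal usage sketch: with simulation=True the neopixel/board imports are skipped,
# so no hardware or root access is needed. Values below are purely illustrative.
if __name__ == '__main__':
    strip = Neo(n=8, simulation=True, auto_write=False)
    strip.fill_wheel()                      # spread the color wheel across the pixels
    strip.brightness = 0.5                  # clamped to the [0.0, 1.0] range
    print([strip.get(i) for i in range(8)])
    strip.black()                           # turn everything off again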
|
# -------------------------------------------------------------------------------
# Name: Linguistics corpora construction
# Purpose: NLP use
#
# Author: Mohammed Belkacem
#
# Created: 10/12/2021
# Copyright: (c) Mohammed Belkacem 2021
# Licence: CCO
# -------------------------------------------------------------------------------
## This script creates a text file for each article
## Files are UTF-8 encoded
## This script is for NLP use only
## This script is tailored to this website and can't be used on others; if the website is updated, it may stop working
## Don't forget to cite me
from requests_html import HTMLSession
import re
import os
import urllib3
import time
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
categories = ['regions','sport', 'societe', 'culture', 'sante-science-technologie','algerie', 'economie', 'monde']
taille = [192, 166, 144, 162, 138, 710, 232, 207]
# Number of all pages. Check the number of pages displayed on the website at
# https://www.depechedekabylie.com/ddk-tamazight/ ; if it changes (e.g., to 690), update all_pages below accordingly
all_pages = 684
# Number of pages to retrieve. Adjust as needed; note that fetching the articles takes a lot of time
max_pages = 3
# construct pages urls
def retreive_pages(categories, taille):
pages = []
for i in categories:
#print(taille[categories.index(i)] * 10)
for j in range(taille[categories.index(i)]):
pages.append('https://www.aps.dz/tamazight-tal/' + i + "?start=" + str(j * 10)) # first page
print(pages)
#exit()
return pages
# remove html tags from content
def remove_tags(text, TAG_RE):
return TAG_RE.sub('', text)
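# e.g. remove_tags('<p>Tamurt</p>', re.compile(r'<[^>]+>')) returns 'Tamurt'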
# create article files retrieved from a page
def create_article(article, a):
TAG_RE = re.compile(r'<[^>]+>')
session = HTMLSession()
article = 'https://www.aps.dz' + article
print (article)
time.sleep(10)
try:
if not os.path.isfile(str(a) + ".txt"):
r = session.get(article, verify=False,proxies={"http": "http://111.233.225.166:1234"})
about = r.html.find('p')
g = open(str(a) + ".txt", "w+", encoding='utf-8')
for i in about:
#print (remove_tags(i.html, TAG_RE))
g.write(remove_tags(i.html, TAG_RE) + '\n')
g.close()
except:
print(article)
def fetch_pages(max_pages, all_pages, categories, taille):
articles = []
nb_page = 0
session = HTMLSession()
a = 0
for page in retreive_pages(categories, taille):
time.sleep(10)
try:
r = session.get(page, verify=False,proxies={"http": "http://111.233.225.166:1234"})
about = r.html.find('a')
for i in about:
if i.html.find('href="/tamazight-tal/algerie/') > 0 \
or i.html.find('href="/tamazight-tal/economie/') > 0 \
or i.html.find('href="/tamazight-tal/monde/') > 0 \
or i.html.find('href="/tamazight-tal/sport/') > 0 \
or i.html.find('href="/tamazight-tal/societe/') > 0 \
or i.html.find('href="/tamazight-tal/culture/') > 0 \
or i.html.find('href="/tamazight-tal/regions/') > 0 \
or i.html.find('href="/tamazight-tal/sante-science-technologie/') > 0: # and #i.html.find('rel="bookmark" class="td-image-wrap"') > 0:
j = i.html.split(' ')[1].split('"')[1]
if j not in articles:
articles.append(j)
create_article(j, a)
a = a + 1
nb_page = nb_page + 1
except:
print(page)
fetch_pages(max_pages, all_pages, categories, taille)
|
# Coded by : Pasan Manula Bandara - UofM
# Date : 31/01/2020
# Deep Learning Assignment 1 - Question 3 - Part C
import numpy as np
import cPickle
from numpy import linalg as LA
from PIL import Image
# Global Variable
Input_Training_Rows = 10000
def initialize_weights():
Weight_init = np.random.uniform(0,1,size = (1,1025))
# Weight_init = np.random.randn(1,1025)
W = Weight_init[0,0:1024]
b= Weight_init[0,1024]
W=W.reshape(1024,1)
return W, b
def sigmoid(w_sig,x_sig,b_sig,rows_x_sig,col_w_sig):
temp = np.array(np.add(np.dot(x_sig,w_sig),np.full((rows_x_sig, col_w_sig), b_sig))) #x.w+b
# print(w_sig)
out = 1/(1 + np.exp(-temp)) #overflow encountered in exp
# print(out)
# print(np.shape(out))
return out
def unpickle(cifar10_dataset_folder_path, batch_id):
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = cPickle.load(file)
features = batch['data']
labels = batch['labels']
return features, labels
def unpickle_test(cifar10_dataset_folder_path):
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = cPickle.load(file)
features = batch['data']
labels = batch['labels']
return features, labels
def reconstruct_class(lables,priority):
for check_class in range(Input_Training_Rows):
if lables[check_class] != priority: #e.g., If the image is an airplane --> class is 1 otherwise 0
lables[check_class] = 0
else:
lables[check_class] = 1 #Airplane
return lables
def rgb2gray(rgb_img):
red_channel = rgb_img[:,0:1024]
green_channel = rgb_img[:,1024:2048]
blue_channel = rgb_img[:,2048:3072]
gray_img =np.dot(0.2125,red_channel) + np.dot(0.7154,green_channel) + np.dot(0.0721,blue_channel)
return gray_img
def run_learn(X,Y,W,b):
alpha = 0.001
threshold = 0.0001
noofepoch = 1
marix_norm = [1]
combine_bias = np.full((1, Input_Training_Rows), 1)
for epoch in range(noofepoch):
if marix_norm < threshold:
break
else:
sig_return = sigmoid(W,X,b,Input_Training_Rows,1)
minus = Y.transpose() - sig_return
final_gradient = -(np.dot(X.transpose(),minus))/Input_Training_Rows
final_gradient_B = -(np.dot(combine_bias,minus))/Input_Training_Rows
norm_calc = np.append(final_gradient,final_gradient_B,axis =0)
marix_norm = LA.norm(norm_calc, axis=0)
W = W - (alpha*final_gradient)
b = b - (alpha*final_gradient_B)
return W, b
def main():
file_path = '/home/pasan/Documents/PythonCode/cifar-10-python/cifar-10-batches-py' #replace with your path
file_path_airplane = '/home/pasan/Documents/PythonCode/cifar-10-python/cifar-10-batches-py/airplane' #replace with your path
# airplane : 0 automobile : 1 bird : 2 cat : 3 deer : 4 dog : 5 frog : 6 horse : 7 ship : 8 truck : 9
Input_Test_Rows = 1000
classes = [0,1,2,3,4,5,6,7,8]
batch_number = [1,2,3,4,5]
weight_tensor = np.ndarray((len(classes),1024,1))
bias_tensor = np.ndarray((len(classes),1,1))
Y_hat_matrix = np.ndarray((Input_Test_Rows,len(classes)))
Y_hat_matrix = []
# Y_hat = np.ndarray((len(classes),Input_Test_Rows,1))
W,b = initialize_weights()
for class_id in classes:
for batch_id in batch_number:
print("Serving Now : Class ID :"+ str(class_id) + " Batch ID :" + str(batch_id))
batch_features,batch_class = unpickle(file_path,batch_id)
reconstruct_class_re = reconstruct_class(batch_class,class_id)
Gray = rgb2gray(batch_features)
            X = np.divide(Gray, 255) # normalize pixel values to [0, 1]
Y = np.matrix(reconstruct_class_re)
W,b = run_learn(X,Y,W,b)
weight_tensor[class_id][:][:] = W
bias_tensor[class_id][:][:] = b
print("All Tensors DIM: Weight Tensor")
print(np.shape(weight_tensor))
print("All Tensors DIM: Bias Tensor")
print(np.shape(bias_tensor))
print("***Testing has been started!***")
#Forward Pass
for classifier in classes:
trained_w = weight_tensor[classifier][:][:]
trained_b = bias_tensor[classifier][:][:]
batch_features,batch_class = unpickle_test(file_path_airplane)
reconstruct_class_re = reconstruct_class(batch_class,classifier)
Gray = rgb2gray(batch_features)
        X_input = np.divide(Gray, 255) # normalize pixel values to [0, 1]
# Y_real = np.matrix(reconstruct_class_re)
classi_out = sigmoid(trained_w,X_input,trained_b,Input_Test_Rows,1)
if classifier != 0:
Y_hat_matrix = np.append(Y_hat_matrix,classi_out,axis =1)
else:
Y_hat_matrix = classi_out
Y_argmax = np.empty([Input_Test_Rows, 1])
for i in range(Input_Test_Rows):
Y_argmax[i] = Y_hat_matrix[i,:].argmax()
arg_count = Y_argmax.reshape(Input_Test_Rows).astype(np.int64)
frequency = np.bincount(arg_count)
class_identifier = np.nonzero(frequency)[0]
print("NOTE :-> airplane : 0 automobile : 1 bird : 2 cat : 3 deer : 4 dog : 5 frog : 6 horse : 7 ship : 8 truck : 9")
print(zip(class_identifier,frequency[class_identifier]))
if __name__== "__main__":
main()
|
import numpy as np
def fcann2_trainer(x, y_, param_niter=1e4, param_delta=1e-3, param_lambda=1e-5, hidden_layer_dim=5, parameters=None):
n, d = x.shape
c = np.max(y_) + 1
y_one_hot = np.zeros((len(y_), c))
y_one_hot[np.arange(len(y_)), y_] = 1
if parameters is not None:
w1, b1, w2, b2 = parameters
else:
        w1 = np.random.randn(hidden_layer_dim, d)  # maps the d input features to the hidden layer (using c here only worked when c == d)
b1 = np.random.randn(hidden_layer_dim, 1)
w2 = np.random.randn(c, hidden_layer_dim)
b2 = np.random.randn(c, 1)
for it in range(int(param_niter) + 1):
p = fcann2_classifier(x, (w1, b1, w2, b2))
loss = -np.mean(np.log(np.sum(p * y_one_hot, axis=1)))
if it % 1000 == 0:
print("iteration: #{}, loss: {}".format(it, loss))
s1, h1, s2 = calculate_layers(w1, b1, w2, b2, x)
diff_p_y_ = p - y_one_hot
dw2 = np.dot(diff_p_y_.transpose(), h1) / n
db2 = np.mean(diff_p_y_, axis=0).reshape(-1, 1)
w2 -= param_delta * dw2 - param_lambda * w2
b2 -= param_delta * db2
diff_p_y__w2 = np.dot(diff_p_y_, w2)
diag_s1 = np.array([np.diag(s_ > 0) for s_ in s1])
diff_p_y__w2_diag_s1 = np.einsum('ij,ijk->ij', diff_p_y__w2, diag_s1)
dw1 = np.dot(diff_p_y__w2_diag_s1.transpose(), x) / n
db1 = np.mean(diff_p_y__w2_diag_s1, axis=0).reshape(-1, 1)
w1 -= param_delta * dw1 - param_lambda * w1
b1 -= param_delta * db1
return w1, b1, w2, b2
def fcann2_classifier(x, parameters):
w1, b1, w2, b2 = parameters
s1, h1, s2 = calculate_layers(w1, b1, w2, b2, x)
return np.apply_along_axis(soft_max, 1, s2)
def calculate_layers(w1, b1, w2, b2, x):
s1 = np.dot(np.array([x]), w1.transpose())[0] + b1.transpose()
h1 = np.maximum(0, s1)
s2 = np.dot(np.array([h1]), w2.transpose())[0] + b2.transpose()
return s1, h1, s2
def fcann2_single_classifier(x, parameters):
w1, b1, w2, b2 = parameters
s1 = np.dot(w1.transpose(), x) + b1
h1 = np.maximum(0, s1)
s2 = np.dot(w2.transpose(), h1) + b2
return soft_max(s2)
def divide_large(a, b):
if np.isinf(b):
if np.isinf(a):
return 1.
else:
return 0.
else:
return a / b
def soft_max(s):
exp_s = np.exp(s - np.max(s))
return exp_s / np.sum(exp_s)
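# A minimal smoke-test sketch, assuming the module is run directly; the random data
# and hyperparameters below are made up purely for illustration.
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(100, 2)
    y_ = (x[:, 0] * x[:, 1] > 0).astype(int)   # a simple non-linearly separable labeling
    params = fcann2_trainer(x, y_, param_niter=2000, hidden_layer_dim=8)
    probs = fcann2_classifier(x, params)
    print("train accuracy: {:.2f}".format(np.mean(np.argmax(probs, axis=1) == y_)))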
|
import random
from datetime import datetime
from sqlalchemy import or_, select
from app.middleware.Jwt import UserToken
from app.middleware.RedisManager import RedisHelper
from app.models import Session, async_session
from app.models.user import User
from app.utils.logger import Log
class UserDao(object):
log = Log("UserDao")
@staticmethod
def register_for_github(username, name, email, avatar):
try:
with Session() as session:
user = session.query(User).filter(or_(User.username == username, User.email == email)).first()
if user:
                    # If the user already exists, update their info
user.last_login_at = datetime.now()
user.name = name
user.avatar = avatar
else:
random_pwd = random.randint(100000, 999999)
user = User(username, name, UserToken.add_salt(str(random_pwd)), email, avatar)
session.add(user)
session.commit()
session.refresh(user)
return user
except Exception as e:
UserDao.log.error(f"Github用户登录失败: {str(e)}")
raise Exception("登录失败")
@staticmethod
def register_user(username, name, password, email):
"""
:param username: 用户名
:param name: 姓名
:param password: 密码
:param email: 邮箱
:return:
"""
try:
with Session() as session:
users = session.query(User).filter(or_(User.username == username, User.email == email)).all()
if users:
raise Exception("用户名或邮箱已存在")
                # Salt the password at registration time
pwd = UserToken.add_salt(password)
user = User(username, name, pwd, email)
session.add(user)
session.commit()
except Exception as e:
UserDao.log.error(f"用户注册失败: {str(e)}")
return str(e)
return None
@staticmethod
def login(username, password):
try:
pwd = UserToken.add_salt(password)
with Session() as session:
                # Look up a user whose username/password match and who has not been deleted
user = session.query(User).filter_by(username=username, password=pwd, deleted_at=None).first()
if user is None:
return None, "用户名或密码错误"
                # Update the user's last login time
user.last_login_at = datetime.now()
session.commit()
session.refresh(user)
return user, None
except Exception as e:
UserDao.log.error(f"用户{username}登录失败: {str(e)}")
return None, str(e)
@staticmethod
@RedisHelper.cache("user_list", 3 * 3600, True)
def list_users():
try:
with Session() as session:
return session.query(User).filter_by(deleted_at=None).all()
except Exception as e:
UserDao.log.error(f"获取用户列表失败: {str(e)}")
raise Exception("获取用户列表失败")
@staticmethod
@RedisHelper.cache("query_user", 72 * 3600)
async def query_user(id: int):
async with async_session() as session:
query = await session.execute(select(User).where(User.id == id))
result = query.scalars().first()
if result is None:
return "unknown"
return result.name
@staticmethod
@RedisHelper.cache("user_email", 3600)
async def list_user_email(*user):
try:
if not user:
return []
async with async_session() as session:
query = await session.execute(select(User).where(User.id.in_(user), User.deleted_at == None))
return [q.email for q in query.scalars().all()]
except Exception as e:
UserDao.log.error(f"获取用户邮箱失败: {str(e)}")
raise Exception(f"获取用户邮箱失败: {e}")
|
#!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""The implementation of the Graviti Series."""
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional, Sequence, Union, overload
from graviti.dataframe.column.indexing import ColumnSeriesILocIndexer, ColumnSeriesLocIndexer
from graviti.utility import MAX_REPR_ROWS
class Series:
"""One-dimensional array.
Arguments:
data: The data that needs to be stored in Series. Could be ndarray or Iterable.
schema: Data type to force. If None, will be inferred from ``data``.
name: The name to the Series.
index: Index of the data, must have the same length as ``data``.
Examples:
Constructing Series from a list.
>>> d = [1,2,3,4]
>>> series = Series(data=d)
>>> series
0 1
1 2
2 3
3 4
"""
_indices_data: Optional[Dict[int, int]]
_indices: Optional[List[int]]
def __init__(
self,
data: Sequence[Any],
schema: Any = None,
name: Union[str, int, None] = None,
index: Optional[Iterable[int]] = None,
) -> None:
if schema is not None:
# TODO: missing schema processing
pass
self._data = data
self.name = name
if index is None:
self._indices_data = index
self._indices = index
else:
self._indices_data = {raw_index: location for location, raw_index in enumerate(index)}
self._indices = list(index)
# @overload
# def __getitem__(self, key: slice) -> "Series":
# ...
@overload
def __getitem__(self, key: int) -> Any:
...
@overload
def __getitem__(self, key: Iterable[int]) -> "Series":
...
def __getitem__(self, key: Union[int, Iterable[int]]) -> Any:
integer_location = self._get_location_by_index(key)
if isinstance(integer_location, list):
# https://github.com/PyCQA/pylint/issues/3105
new_data = [
self._data[location]
for location in integer_location # pylint: disable=not-an-iterable
]
return Series(new_data, name=self.name, index=integer_location)
return self._data[integer_location]
@overload
def __setitem__(self, key: slice, value: Iterable[Any]) -> None:
...
@overload
def __setitem__(self, key: int, value: Any) -> None:
...
def __setitem__(
self,
key: Union[int, slice],
value: Union[Iterable[Any], Any],
) -> None:
pass
def __len__(self) -> int:
return self._data.__len__()
def __repr__(self) -> str:
indices = list(self._get_repr_indices())
indice_width = len(str(max(indices)))
body = []
body_item_width = 0
for i in indices:
item = self.loc[i]
name = item._repr_folding() if hasattr(item, "_repr_folding") else str(item)
body.append(name)
body_item_width = max(len(name), body_item_width)
lines = []
for indice, value in zip(indices, body):
lines.append(f"{indice:<{indice_width+2}}{value:<{body_item_width+2}}")
if self.__len__() > MAX_REPR_ROWS:
lines.append(f"...({self.__len__()})")
if self.name:
lines.append(f"Name: {self.name}")
return "\n".join(lines)
def _get_repr_indices(self) -> Iterable[int]:
length = self.__len__()
# pylint: disable=protected-access
if self._indices is None:
return range(min(length, MAX_REPR_ROWS))
if length >= MAX_REPR_ROWS:
return islice(self._indices, MAX_REPR_ROWS)
return self._indices
@overload
def _get_location_by_index(self, key: Iterable[int]) -> List[int]:
...
@overload
def _get_location_by_index(self, key: int) -> int:
...
def _get_location_by_index(self, key: Union[int, Iterable[int]]) -> Union[int, List[int]]:
if self._indices_data is None:
if isinstance(key, Iterable):
return list(key)
return key
if isinstance(key, Iterable):
return [self._indices_data[index] for index in key]
return self._indices_data[key]
# @overload
# def _getitem_by_location(self, key: slice) -> "Series":
# ...
@overload
def _getitem_by_location(self, key: int) -> Any:
...
@overload
def _getitem_by_location(self, key: Iterable[int]) -> "Series":
...
def _getitem_by_location(self, key: Union[int, Iterable[int]]) -> Any:
if isinstance(key, int):
return self._data[key]
if self._indices is None:
indices = list(key)
else:
indices = [self._indices[index] for index in key]
new_data = [self._data[index] for index in key]
return Series(new_data, name=self.name, index=indices)
@property
def iloc(self) -> ColumnSeriesILocIndexer:
"""Purely integer-location based indexing for selection by position.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array of the same length as the axis being sliced.
Returns:
The instance of the ILocIndexer.
Examples:
>>> series = Series([1, 2, 3])
            >>> series.iloc[0]
1
            >>> series.iloc[[0]]
0 1
dtype: int64
"""
return ColumnSeriesILocIndexer(self)
@property
def loc(self) -> ColumnSeriesLocIndexer:
"""Access a group of rows and columns by indexes or a boolean array.
Allowed inputs are:
- A single index, e.g. ``5``.
- A list or array of indexes, e.g. ``[4, 3, 0]``.
- A slice object with indexes, e.g. ``1:7``.
- A boolean array of the same length as the axis being sliced.
Returns:
The instance of the LocIndexer.
Examples:
>>> series = Series([1, 2, 3])
>>> series.loc[0]
1
            >>> series.loc[[0]]
0 1
dtype: int64
"""
return ColumnSeriesLocIndexer(self)
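# A small illustrative session for the index-based lookup above (shown as comments;
# the values follow directly from __getitem__ and _get_location_by_index):
#   >>> s = Series(["a", "b", "c"], name="letters", index=[10, 20, 30])
#   >>> s[20]
#   'b'
#   >>> s[[10, 30]].name
#   'letters'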
|
import math
import jarray
from ij import WindowManager, IJ, ImageStack, ImagePlus
from ij.measure import ResultsTable
from org.apache.commons.math3.ml.clustering import DoublePoint
from itertools import groupby
from operator import itemgetter
def main(action,tableName1,tableName2,moreArgs):
if action == "DistanceMatrix":
calculateDistanceMatrix(tableName1,tableName2)
if action == "CumulatedNeighbors":
calculateCumulatedNeighbors(tableName1,tableName2)
if action == "PlotDistanceDistribution":
plotDistanceDistribution(tableName1,tableName2,moreArgs)
if action == "CountCloserNeighbors":
countCloserNeighbors(tableName1,tableName2,moreArgs)
if action == "GetCloserPairs":
getCloseCoordPairs(tableName1,tableName2,moreArgs)
if action == "GetNearestNeighbors":
getNearestNeighbors(tableName1,tableName2)
if action == "GetMeanDistances":
getMeanDistances(tableName1,tableName2)
def calculateDistanceMatrix(tableName1,tableName2):
pointsA = pointList3DFromRT(tableName1)
pointsB = pointList3DFromRT(tableName2)
matrix = getDistanceMatrix(pointsA,pointsB)
copyMatrixToImage2D(matrix,"Distance Matrix",len(matrix),len(matrix[0]))
copyMatrixToRt2D(matrix,"Distance Matrix")
def calculateCumulatedNeighbors(tableName1,tableName2):
xShift = [-15,16,1]
yShift = [-15,16,1]
zShift = [-4 ,5 ,1]
cumulatedNeighbors, _ = getNeighborsWhileShifting(tableName1,tableName2,xShift,yShift,zShift,-1)
copyMatrixToImage3D(cumulatedNeighbors,"Cumulated Neighbors Distance")
def countCloserNeighbors(tableName1,tableName2,threshold):
if threshold != "":
distanceThreshold = float(threshold)
else:
distanceThreshold = 1
xShift = [-15,16,1]
yShift = [-15,16,1]
zShift = [-4 ,5 ,1]
_, closeNeighborsCount = getNeighborsWhileShifting(tableName1,tableName2,xShift,yShift,zShift,distanceThreshold)
copyMatrixToImage3D(closeNeighborsCount,"Neighbors Count with distance <"+str(distanceThreshold))
def getCloseCoordPairs(tableName1,tableName2,threshold):
IJ.log("Entering Get Close Pairs")
if threshold != "":
distanceThreshold = float(threshold)
else:
distanceThreshold = 1
pointsA = pointList3DFromRT(tableName1)
pointsB = pointList3DFromRT(tableName2)
neighborsDistancesA, neighborsDistancesB, idNearestA, idNearestB = calculateNearestNeighbors(pointsA,pointsB)
pairs = getPairsCloserThan(distanceThreshold,neighborsDistancesA,idNearestA)+getPairsCloserThan(distanceThreshold,neighborsDistancesB,idNearestB,True)
pairsCoords = getCoordsOfPairs(pointsA,pointsB,pairs)
copyMatrixToRt2D(pairsCoords,"Pairs Coords",useFirstRowAsHeader=True)
def getNearestNeighbors(tableName1,tableName2):
neighborsDistancesA, neighborsDistancesB, idNearestA, idNearestB = calculateNearestNeighborsFromRT(tableName1,tableName2)
nearestNeighborsA = []
nearestNeighborsA.append(("Distance",))
nearestNeighborsA.append(("ID Neighbor",))
nearestNeighborsA[0]=(nearestNeighborsA[0]+tuple(neighborsDistancesA))
nearestNeighborsA[1]=(nearestNeighborsA[1]+tuple(idNearestA))
copyMatrixToRt2D(nearestNeighborsA,"Nearest Neighbors "+tableName1+">"+tableName2,useFirstRowAsHeader=True)
nearestNeighborsB = []
nearestNeighborsB.append(("Distance",))
nearestNeighborsB.append(("ID Neighbor",))
nearestNeighborsB[0]=(nearestNeighborsB[0]+tuple(neighborsDistancesB))
nearestNeighborsB[1]=(nearestNeighborsB[1]+tuple(idNearestB))
copyMatrixToRt2D(nearestNeighborsB,"Nearest Neighbors "+tableName2+">"+tableName1,useFirstRowAsHeader=True)
return nearestNeighborsA,nearestNeighborsB
def calculateNearestNeighborsFromRT(tableName1,tableName2):
pointsA = pointList3DFromRT(tableName1)
pointsB = pointList3DFromRT(tableName2)
return calculateNearestNeighbors(pointsA,pointsB)
def getMeanDistances(tableName1,tableName2):
neighborsDistancesA, neighborsDistancesB, idNearestA, idNearestB = calculateNearestNeighborsFromRT(tableName1,tableName2)
minA, maxA, sumA, meanA, varianceA, stdDevA = getStatistics(neighborsDistancesA)
IJ.log("Statistics Distances from "+tableName1+">"+tableName2)
IJ.log("Min ="+str(minA))
IJ.log("Max ="+str(maxA))
IJ.log("Sum ="+str(sumA))
IJ.log("Mean ="+str(meanA))
IJ.log("Variance ="+str(varianceA))
IJ.log("Std Dev ="+str(stdDevA))
IJ.log(" ")
minA, maxA, sumA, meanA, varianceA, stdDevA = getStatistics(neighborsDistancesB)
IJ.log("Statistics Distances from "+tableName2+">"+tableName1)
IJ.log("Min ="+str(minA))
IJ.log("Max ="+str(maxA))
IJ.log("Sum ="+str(sumA))
IJ.log("Mean ="+str(meanA))
IJ.log("Variance ="+str(varianceA))
IJ.log("Std Dev ="+str(stdDevA))
def getStatistics(dataset):
min = dataset[0]
max = dataset[0]
sum = 0
for d in dataset:
sum += d
if d < min:
min = d
if d > max:
max = d
mean = sum / len(dataset)
sumOfSquare = 0
for d in dataset:
sumOfSquare += (d - mean)**2
    variance = sumOfSquare / (len(dataset) - 1)  # sample variance (n - 1 denominator)
stdDev = math.sqrt(variance)
return min, max, sum, mean, variance, stdDev
def plotDistanceDistribution(tableName1,tableName2,nbCategories):
if nbCategories != "":
histogramSize = int(nbCategories)
else:
histogramSize = 256
neighborsDistancesA, neighborsDistancesB, idNearestA, idNearestB = calculateNearestNeighborsFromRT(tableName1,tableName2)
histo1 = makeHistogram(neighborsDistancesA,histogramSize)
copyMatrixToRt2D(histo1,"Distance Distribution "+tableName1+">"+tableName2,useFirstRowAsHeader=True)
histo2 = makeHistogram(neighborsDistancesB,histogramSize)
copyMatrixToRt2D(histo2,"Distance Distribution "+tableName2+">"+tableName1,useFirstRowAsHeader=True)
def makeHistogram(neighborsDistances,nbCategories):
count = [0] * nbCategories
matrix = []
matrix.append(("Values",))
matrix.append(("Count",))
matrix.append(("Probability",))
minD,maxD = getMinMax(neighborsDistances)
stepD = (maxD - minD)/nbCategories
for d in neighborsDistances:
        correctI = (d - minD)/stepD
        count[min(int(correctI), nbCategories - 1)] += 1  # clamp the maximum value into the last bin
for i in range(nbCategories):
val = minD + stepD * i
matrix[0]=(matrix[0]+(val,))
matrix[1]=(matrix[1]+(count[i],))
proba = float(count[i])/float(len(neighborsDistances))
matrix[2]=(matrix[2]+(proba,))
return matrix
def getMinMax(distribution):
minD = distribution[0]
maxD = distribution[0]
for d in distribution:
if d > maxD:
maxD = d
if d < minD:
minD = d
return minD,maxD
def getNeighborsWhileShifting(tableName1,tableName2,xShift,yShift,zShift,distanceThreshold):
minModifierX = xShift[0]
maxModifierX = xShift[1]
stpModifierX = xShift[2]
minModifierY = yShift[0]
maxModifierY = yShift[1]
stpModifierY = yShift[2]
minModifierZ = zShift[0]
maxModifierZ = zShift[1]
stpModifierZ = zShift[2]
cumulatedNeighbors = []
closeNeighbors = []
for modifierX in range(minModifierX,maxModifierX,stpModifierX):
idX = (modifierX - minModifierX)/stpModifierX
cumulatedNeighbors.append([])
closeNeighbors.append([])
for modifierY in range(minModifierY,maxModifierY,stpModifierY):
idY = (modifierY - minModifierY)/stpModifierY
cumulatedNeighbors[idX].append([])
closeNeighbors[idX].append([])
for modifierZ in range(minModifierZ,maxModifierZ,stpModifierZ):
idZ = (modifierZ - minModifierZ)/stpModifierZ
pointsA = pointList3DFromRT(tableName1,modifierX,modifierY,modifierZ)
pointsB = pointList3DFromRT(tableName2)
neighborsDistancesA, neighborsDistancesB, idNearestA, idNearestB = calculateNearestNeighbors(pointsA,pointsB)
#Cumulated Neighbors
distanceA = sum(neighborsDistancesA)
distanceB = sum(neighborsDistancesB)
cumulatedNearestDistance = (distanceA+distanceB)
cumulatedNeighbors[idX][idY].append(cumulatedNearestDistance)
#Count neighbor close to each other
closeNeighbors[idX][idY].append(countNeighborCloserThan(distanceThreshold,neighborsDistancesA)+countNeighborCloserThan(distanceThreshold,neighborsDistancesB))
return cumulatedNeighbors, closeNeighbors
def getCoordsOfPairs(pointsA,pointsB,pairs):
coords = []
coords.append(("xA",))
coords.append(("yA",))
coords.append(("zA",))
coords.append(("xB",))
coords.append(("yB",))
coords.append(("zB",))
coords.append(("Dist",))
for indexP in range(len(pairs)):
indexA = pairs[indexP][0]
indexB = pairs[indexP][1]
coordsA = pointsA[indexA].getPoint()
coordsB = pointsB[indexB].getPoint()
coords[0]=(coords[0]+(coordsA[0],))
coords[1]=(coords[1]+(coordsA[1],))
coords[2]=(coords[2]+(coordsA[2],))
coords[3]=(coords[3]+(coordsB[0],))
coords[4]=(coords[4]+(coordsB[1],))
coords[5]=(coords[5]+(coordsB[2],))
coords[6]=(coords[6]+(pairs[indexP][2],))
#coords.append([coordsA[0],coordsA[1],coordsA[2],coordsB[0],coordsB[1],coordsB[2],pairs[indexP][2]])
return coords
def countNeighborCloserThan(value,neighborsDistances):
count = 0
neighbors = sorted(neighborsDistances)
for i in neighbors:
if i < value:
count+=1
else:
break
return count
def getPairsCloserThan(threshold,neighborsDistances,idNearest,reverseOrder=False):
pairs = []
for i in range(len(neighborsDistances)):
# IJ.log(str(neighborsDistances[i])+"<"+str(threshold)+" ?")
if neighborsDistances[i] < threshold:
# IJ.log("Small enough")
if reverseOrder:
pairs.append([idNearest[i],i,neighborsDistances[i]])
else:
pairs.append([i,idNearest[i],neighborsDistances[i]])
return pairs
def calculateNearestNeighbors(pointsA,pointsB):
nearestDistancesA = [10000 for i in range(len(pointsA))]
nearestDistancesB = [10000 for i in range(len(pointsB))]
idNearestA = [-1 for i in range(len(pointsA))]
idNearestB = [-1 for i in range(len(pointsB))]
for indexA in range(len(pointsA)):
for indexB in range(len(pointsB)):
dist = getDistance(pointsA[indexA],pointsB[indexB])
if nearestDistancesA[indexA]>dist :
nearestDistancesA[indexA] = dist
idNearestA[indexA] = indexB
#IJ.log("Current closer iA A"+str(indexA)+" > B"+str(indexB)+" with dist = "+str(dist))
if nearestDistancesB[indexB]>dist :
nearestDistancesB[indexB] = dist
idNearestB[indexB] = indexA
#IJ.log("Current closer iB B"+str(indexB)+" > A"+str(indexA)+" with dist = "+str(dist))
#IJ.log("Distance between"+str(pointsA[indexA])+"and"+str(pointsB[indexB])+"="+str(dist))
return nearestDistancesA, nearestDistancesB, idNearestA, idNearestB
def getDistanceMatrix(pointsA,pointsB):
matrix = []
for indexA in range(len(pointsA)):
matrix.append([])
for indexB in range(len(pointsB)):
dist = getDistance(pointsA[indexA],pointsB[indexB])
matrix[indexA].append(dist)
return matrix
def getDistance(pointA,pointB):
dist = 0
pA = pointA.getPoint()
pB = pointB.getPoint()
for dim in range(len(pA)):
dist += (pA[dim]-pB[dim])**2
#dist += math.fabs(pA[dim]-pB[dim])
dist=math.sqrt(dist)
return dist
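# For reference: getDistance is a plain Euclidean distance over the DoublePoint
# coordinates, e.g. points at (0,0,0) and (1,2,2) are sqrt(1+4+4) = 3.0 apart.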
def copyMatrixToRt2D(matrix,tableName="Results",sizeX=-1,sizeY=-1,useFirstRowAsHeader=False):
if sizeX == -1:
sizeX = len(matrix)
if sizeY == -1:
sizeY = len(matrix[0])
table = ResultsTable()
for indexX in range(sizeX):
for indexY in range(sizeY):
if useFirstRowAsHeader:
if indexY == 0:
continue
table.setValue(str(matrix[indexX][0]),indexY-1,matrix[indexX][indexY])
else:
table.setValue(indexX,indexY,matrix[indexX][indexY])
table.show(tableName)
def copyMatrixToImage2D(matrix,imageName,sizeX = -1,sizeY = -1):
if sizeX == -1:
sizeX = len(matrix)
if sizeY == -1:
sizeY = len(matrix[0])
imp = ImageStack.create(sizeX, sizeY, 1,16)
processor = imp.getProcessor(1)
for indexX in range(sizeX):
for indexY in range(sizeY):
processor.set(indexX,indexY,int(matrix[indexX][indexY]))
ImagePlus(imageName, imp).show()
def copyMatrixToImage3D(matrix,imageName,sizeX = -1,sizeY = -1,sizeZ = -1):
if sizeX == -1:
sizeX = len(matrix)
if sizeY == -1:
sizeY = len(matrix[0])
if sizeZ == -1:
sizeZ = len(matrix[0][0])
imp = ImageStack.create(sizeX, sizeY, sizeZ,16)
for indexZ in range(sizeZ):
processor = imp.getProcessor(indexZ+1)
for indexX in range(sizeX):
for indexY in range(sizeY):
processor.set(indexX,indexY,int(matrix[indexX][indexY][indexZ]))
ImagePlus(imageName, imp).show()
def pointList3DFromRT(tableName,modifierX = 0,modifierY = 0,modifierZ = 0, XColumn='X (microns)', YColumn='Y (microns)', ZColumn='Z (microns)'):
win = WindowManager.getWindow(tableName)
rt = win.getResultsTable()
X = rt.getColumn(rt.getColumnIndex(XColumn))
Y = rt.getColumn(rt.getColumnIndex(YColumn))
Z = rt.getColumn(rt.getColumnIndex(ZColumn))
# X = rt.getColumn(XColumn)
# Y = rt.getColumn(YColumn)
# Z = rt.getColumn(ZColumn)
dplist = []
for x, y, z in zip(X, Y, Z):
array = []
array.append(x+modifierX)
array.append(y+modifierY)
array.append(z+modifierZ)
jArray = jarray.array(array, 'd')
dp = DoublePoint(jArray)
dplist.append(dp)
return dplist
XColumn = 'X'
YColumn = 'Y'
ZColumn = 'Z'
if 'getArgument' in globals():
if not hasattr(zip, '__call__'):
        del zip # the python function zip got overridden by java.util.zip, so it must be deleted to get the zip function to work.
parameter = getArgument()
args = parameter.split(",")
action = args[0].split("=")[1]
tableA = args[1].split("=")[1]
tableB = args[2].split("=")[1]
moreArgs = args[3].split("=")[1]
main(action,tableA, tableB, moreArgs)
|
class Solution:
def reverse(self, x: int) -> int:
ans = 0
remainder = 0
isNegative = x < 0
if isNegative:
x = x * -1
while x > 0:
remainder = x % 10
x = x // 10
ans = (ans * 10) + remainder
if isNegative:
ans = ans * -1
        if ans < -2 ** 31 or ans > 2 ** 31 - 1:
return 0
return ans
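# Quick illustrative checks (as comments):
#   Solution().reverse(123)        -> 321
#   Solution().reverse(-120)       -> -21
#   Solution().reverse(1534236469) -> 0   (the reversal overflows a signed 32-bit int)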
|
"""
==========================================================================
map_helper.py
==========================================================================
Helper map and functions to get corresponding functional unit and ctrl.
Author : Cheng Tan
Date : Feb 22, 2020
"""
from .opt_type import *
from ..fu.single.AdderRTL import AdderRTL
from ..fu.single.ShifterRTL import ShifterRTL
from ..fu.single.LogicRTL import LogicRTL
from ..fu.single.MulRTL import MulRTL
from ..fu.single.MemUnitRTL import MemUnitRTL
from ..fu.single.CompRTL import CompRTL
from ..fu.single.PhiRTL import PhiRTL
from ..fu.single.RetRTL import RetRTL
from ..fu.single.BranchRTL import BranchRTL
from ..fu.single.SelRTL import SelRTL
# -----------------------------------------------------------------------
# Global dictionary for UnitType and OptType
# -----------------------------------------------------------------------
unit_map = { "Adder" : AdderRTL,
"Mul" : MulRTL,
"Phi" : PhiRTL,
"Comp" : CompRTL,
"Branch" : BranchRTL,
"Ret" : RetRTL,
"Logic" : LogicRTL,
"Shifter" : ShifterRTL,
"Selecter" : SelRTL,
"MemUnit" : MemUnitRTL }
opt_map = { "OPT_START" : OPT_START,
"OPT_NAH" : OPT_NAH,
"OPT_ADD" : OPT_ADD,
"OPT_ADD_CONST" : OPT_ADD_CONST,
"OPT_INC" : OPT_INC,
"OPT_SUB" : OPT_SUB,
"OPT_LLS" : OPT_LLS,
"OPT_LRS" : OPT_LRS,
"OPT_MUL" : OPT_MUL,
"OPT_OR" : OPT_OR,
"OPT_XOR" : OPT_XOR,
"OPT_AND" : OPT_AND,
"OPT_NOT" : OPT_NOT,
"OPT_LD" : OPT_LD,
"OPT_STR" : OPT_STR,
"OPT_EQ" : OPT_EQ,
"OPT_EQ_CONST" : OPT_EQ_CONST,
"OPT_LE" : OPT_LE,
"OPT_RET" : OPT_RET,
"OPT_BRH" : OPT_BRH,
"OPT_BRH_START" : OPT_BRH_START,
"OPT_PHI" : OPT_PHI,
"OPT_PHI_CONST" : OPT_PHI_CONST,
"OPT_MUL_ADD" : OPT_MUL_ADD,
"OPT_MUL_SUB" : OPT_MUL_SUB,
"OPT_MUL_LLS" : OPT_MUL_LLS,
"OPT_MUL_LRS" : OPT_MUL_LRS,
"OPT_MUL_ADD_LLS" : OPT_MUL_ADD_LLS,
"OPT_MUL_SUB_LLS" : OPT_MUL_SUB_LLS,
"OPT_MUL_SUB_LRS" : OPT_MUL_SUB_LRS }
def getUnitType( fu_name ):
return unit_map[ fu_name ]
def getOptType( opt_name ):
return opt_map[ opt_name ]
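# e.g. getUnitType("Adder") returns the AdderRTL class and getOptType("OPT_ADD")
# returns the OPT_ADD constant imported from opt_type.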
|
from maju.log import LoggerManager
from maju.utils.deezer import DeezerSearch
from maju.utils.spotify import SpotifySearch
from maju.utils.weather import Weather
from maju.config import read_config
from werkzeug.exceptions import BadRequest, Unauthorized, UnprocessableEntity
class PlaylistServices:
def __init__(self, city_name):
self._city_name = city_name.lower()
self._provider = None
self._city_temper = None
self._provider_response = None
self._weather_response = None
self._uf = None
def get_city_temper(self):
try:
weather_response = Weather().get_weather(self._city_name)
if weather_response.get('by') == "city_name":
self._city_temper = weather_response.get('results').get('temp')
self._uf = weather_response.get('results').get('city').split(',')[1].strip()
self._weather_response = weather_response
response = {'status': True, 'temp': self._city_temper}
else:
response = {'status': False, 'message': 'City not found.'}
self._weather_response = response
return response
except Exception as e:
print(
f"Error in function get_city_temper in class PlaylistServices - msg: '{self._weather_response.get('message')}' - err: {str(e)}")
self._weather_response = Weather().get_weather(self._city_name)
return {'status': False, 'temp': self._weather_response.get('message'), 'err': str(e)}
def parser_temp_to_gender(self):
get_temper = self.get_city_temper()
temp = get_temper.get('temp')
gender = None
if get_temper.get('status'):
gender = 'classic'
if temp >= 10 and temp <= 25:
gender = 'rock'
elif temp > 25:
gender = 'pop'
return gender
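    # In plain terms (when the weather lookup succeeds): temp below 10 -> 'classic',
    # 10 to 25 -> 'rock', above 25 -> 'pop'; if the lookup fails, None is returned.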
def get_playlist(self):
gender = self.parser_temp_to_gender()
errors = []
playlist = []
count = 0
try:
while playlist == []:
count += 1
musics = SpotifySearch().get_playlist(gender)
if musics.get('status'):
self._provider_response = musics
self._provider = 'Spotify'
playlist = musics.get('playlist')
break
else:
errors.append({'Spotify': {'attempt': count, 'err': musics.get('err')}})
self._provider_response = errors
musics = DeezerSearch().get_playlist(gender)
if musics.get('status'):
self._provider_response = musics
self._provider = 'Deezer'
playlist = musics.get('playlist')
break
else:
errors.append({'Deezer': {'attempt': count, 'err': musics.get('err')}})
self._provider_response = errors
if count == 5:
break
if playlist == []:
LoggerManager(city_name=self._city_name, uf=self._uf, temp=self._city_temper, gender=gender, provider=self._provider,
                    provider_response=self._provider_response, weather_response=self._weather_response, status='failed').save_log()
return "Oh no.. ): Something went wrong, please try again later."
LoggerManager(city_name=self._city_name, uf=self._uf, temp=self._city_temper, gender=gender, provider=self._provider,
provider_response=self._provider_response, weather_response=self._weather_response).save_log()
return playlist
except Exception as e:
print(f"Error in function get_playlist in class PlaylistServices - {str(e)}")
LoggerManager(city_name=self._city_name, uf=self._uf, temp=self._city_temper, gender=gender, provider=self._provider,
                    provider_response=self._provider_response, weather_response=self._weather_response, status='failed').save_log()
return {'status': False, 'err': str(e)}
|
import json
import requests
from PIL import Image
from vk_api import bot_longpoll, VkUpload
from .BasePlug import BasePlug
class KonachanPlug(BasePlug):
name = "Konachan"
description = "Присылает арты с konachan.net"
keywords = ('konachan', 'коначан')
def __init__(self, bot):
super(KonachanPlug, self).__init__(bot)
def on_start(self):
pass
def work(self, peer_id: int, msg: str, event: bot_longpoll.VkBotEvent):
limit = msg.split(maxsplit=1)[1]
try:
if limit is None or int(limit) <= 1:
limit = 1
except ValueError:
self.bot.send_message(peer_id, "Самый умный?")
return
if event.message["from_id"] not in self.bot.admins and int(limit) >= 3:
limit = 5
r = requests.get(f"https://konachan.net/post.json?limit={limit}&tags=order%3Arandom")
json_parsed = json.loads(r.text)
attachments = []
self.bot.send_message(peer_id, "Начинаю выкачку...")
for i, jsonx in enumerate(json_parsed):
img_r = requests.get(jsonx["file_url"], stream=True)
img_r.raw.decode_content = True
img: Image = Image.open(img_r.raw)
img.save(f"/tmp/foo{i}.png", "PNG")
# logging.info(msg)
upload = VkUpload(self.bot.vk)
photo = upload.photo_messages(f"/tmp/foo{i}.png")[0]
attachments.append(f"photo{photo['owner_id']}_{photo['id']},")
attachment_str = ""
for attachment in attachments:
attachment_str += attachment
self.bot.send_message(peer_id=peer_id, msg=None, attachment=attachment_str)
return
|
#!/usr/bin/env python3
"""
Created on 17 Jun 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.data.json import JSONify
from scs_host.sys.nmcli import NMCLi
# --------------------------------------------------------------------------------------------------------------------
response = [
'eth0: connected to Wired connection 1',
' "TP-LINK USB 10/100/1000 LAN"',
' ethernet (r8152), 98:DE:D0:04:9B:CC, hw, mtu 1500',
' ip4 default',
' inet4 192.168.1.88/24',
' inet6 fe80::131d:325a:f7bd:e3e/64',
'',
'wlan0: connected to TP-Link_0F04',
' "Broadcom "',
' wifi (brcmfmac), B8:27:EB:56:50:8F, hw, mtu 1500',
' inet4 192.168.1.122/24',
' inet6 fe80::212a:9d31:4b3e:59c/64',
'',
'lo: unmanaged',
' loopback (unknown), 00:00:00:00:00:00, sw, mtu 65536']
conns = NMCLi.parse(response)
print(conns)
print("-")
nmcli = NMCLi(conns)
print(nmcli)
print("-")
print(JSONify.dumps(nmcli))
print("-")
print("find...")
nmcli = NMCLi.find()
print(nmcli)
|
from core.entities import make_user
def build_add_user(insert_user):
def add_user(first_name, last_name, phone, email, password, birthday, gender) -> bool:
user = make_user(first_name, last_name, phone,
email, password, None, birthday, gender)
return insert_user(user)
return add_user
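# A minimal wiring sketch, assuming any callable can serve as insert_user; the
# in-memory list and the field values below are purely illustrative, and make_user's
# own validation rules are not shown here.
if __name__ == '__main__':
    store = []
    def insert_user(user):
        store.append(user)
        return True
    add_user = build_add_user(insert_user)
    ok = add_user("Ada", "Lovelace", "+1-555-0100", "ada@example.com",
                  "s3cret", "1815-12-10", "female")
    print(ok, len(store))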
|
from PIL import Image
def photoFlip(img):
(w, h) = img.size
mw = img.width // 2
a = img.crop((0, 0, mw, h))
b = img.crop((mw, 0, w, h))
flip = Image.new(img.mode, img.size)
flip.paste(b, (0, 0))
flip.paste(a, (mw, 0))
return flip
from sys import argv
for fname in argv[1:]:
photoFlip(Image.open(fname)).save(f"flip_{fname}")
|
""" run with
python setup.py install; nosetests -v --nocapture tests/cm_cloud/test_limits.py:Test_limits.test_001
nosetests -v --nocapture tests/test_limits.py
or
nosetests -v tests/test_limits.py
"""
from cloudmesh_client.common.ConfigDict import ConfigDict
from cloudmesh_client.common.Shell import Shell
from cloudmesh_client.common.dotdict import dotdict
from cloudmesh_client.common.util import HEADING
from cloudmesh_client.common.util import banner
from cloudmesh_client.default import Default
# noinspection PyPep8Naming
class Test_limits:
"""
This class tests the LimitsCommand
"""
data = dotdict({
"cloud": Default.cloud,
"wrong_cloud": "no_cloud",
"tenant": "TBD",
"format": 'table'
})
config = ConfigDict("cloudmesh.yaml")
credentials = config["cloudmesh"]["clouds"][data.cloud]["credentials"]
data.tenant = credentials.get("OS_TENANT_NAME") or credentials.get("OS_TENANT_ID")
def run(self, command):
command = command.format(**self.data)
banner(command, c="-")
print(command)
parameter = command.split(" ")
shell_command = parameter[0]
args = parameter[1:]
result = Shell.execute(shell_command, args)
print(result)
return result
def setup(self):
pass
# noinspection PyPep8Naming
def tearDown(self):
pass
def test_001(self):
HEADING("test limits list")
result = self.run("cm limits list --cloud={cloud}")
assert "Name" in result
def test_002(self):
HEADING("test limits list with csv output")
result = self.run("cm limits list --format={format}").split('\n')
assert "maxTotalFloatingIps" in str(result)
def test_003(self):
HEADING("test limits class where cloud doesnt exist")
result = self.run("cm limits list --cloud={wrong_cloud}")
assert "ERROR" in str(result)
# --tenant=
# not allowed on chameleon cloud, so we outcomment.
#
# def test_004(self):
# HEADING("test limits class with unauthorised access")
# result = self.run("cm limits list --tenant={tenant}")
# assert "Not authorized" in str(result)
|
# Copyright 2015 Cisco Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ..models import Experiment, Component
from toposort import toposort_flatten
from rest_framework import viewsets
from django.http import HttpResponse
from django.conf import settings
from django.core import serializers
from collections import defaultdict
# TODO: [refactor] this import statement should specify some file instead of '*'
from pandas import *
import redis
import threading
import json
CACHE = {}
class storm_client (threading.Thread):
def __init__(self, thread_id, name, experiment, component_id, max_results, cache_results):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.experiment = experiment
self.comp_id = component_id
self.result = {}
self.max_results = max_results
self.cache_results = cache_results
print "Submitting topology to storm. End component", self.comp_id
exp = Experiment.objects.get(pk=self.experiment)
graph = exp.workflow.graph_data
graph_data = {}
print graph
tmp = graph.split(',')
for elem in tmp:
first_node = elem.split(":")[0]
second_node = elem.split(":")[1]
if second_node in graph_data:
depend_nodes = graph_data[second_node]
depend_nodes.add(first_node)
else:
graph_data[second_node] = set()
graph_data[second_node].add(first_node)
topological_graph = toposort_flatten(graph_data)
print "Graph after topological sort", topological_graph
message = {
'exp_id': self.experiment, 'result': self.comp_id,
'graph': topological_graph, 'components': defaultdict()}
for data in topological_graph:
component_id = int(data)
comp = Component.objects.get(pk=component_id)
if comp.operation_type.function_type == 'Create':
if comp.operation_type.function_arg == 'Table':
filename = comp.operation_type.function_subtype_arg
input_data = read_csv(filename)
message['input'] = {}
for elem in list(input_data.columns):
message['input'][elem] = list(input_data[elem])
message['cols'] = list(input_data.columns)
# message['input'] = input_data.to_dict()
serialized_obj = serializers.serialize('json', [comp.operation_type, ])
print "Component_id", component_id, " ", comp.operation_type
message['components'][data] = serialized_obj
print "Message ", message
r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
self.pubsub = r.pubsub(ignore_subscribe_messages=True)
self.pubsub.subscribe("Exp " + str(self.experiment))
ret = r.publish('workflow', json.dumps(message))
print "return", ret
def run(self):
print "Listening for results"
for message in self.pubsub.listen():
self.result = json.loads(message['data'])
print self.result
break
self.pubsub.unsubscribe()
self.pubsub.close()
class ResultViewSet(viewsets.ViewSet):
def list(self, request):
exp_id = int(request.GET.get('experiment', ''))
component_id = int(request.GET.get('component_id', ''))
print "Experiment ", exp_id
try:
client_thread = storm_client(1, "WorkFlow Thread", exp_id, component_id, 10, False)
client_thread.start()
client_thread.join()
except Exception, e:
print "Exception Raised during storm cluster connection", str(e)
client_thread.result = {'status': 'failed', 'message': str(e)}
print "Client thread status ", client_thread.result
return HttpResponse(json.dumps(client_thread.result), content_type="application/json")
|
import pyautogui
pyautogui.PAUSE = 0.2
import os
import sys
class Screen():
def setMousePositions(self):
file = open("mouse_positions.txt", "w")
file.write("mouseX_enumerator,mouseY_enumerator=")
pyautogui.alert("Posicione o mouse sobre o enumerador do Word.")
mouseX_enumerator, mouseY_enumerator = pyautogui.position()
file.write(f"({mouseX_enumerator},{mouseY_enumerator})\n")
file.write("mouseX_boxFont,mouseY_boxFont=")
pyautogui.alert("Posicione o mouse sobre a caixa de fonte.")
mouseX_boxFont, mouseY_boxFont = pyautogui.position()
file.write(f"({mouseX_boxFont},{mouseY_boxFont})\n")
file.write("mouseX_boxFontSize,mouseY_boxFontSize=")
pyautogui.alert("Posicione o mouse sobre a caixa de tamanho da fonte.")
mouseX_boxFontSize, mouseY_boxFontSize = pyautogui.position()
file.write(f"({mouseX_boxFontSize},{mouseY_boxFontSize})\n")
file.write("mouseX_sectionInsert,mouseY_sectionInsert=")
pyautogui.alert("Posicione o mouse sobre a seção 'Inserir' no topo do documento.")
mouseX_sectionInsert, mouseY_sectionInsert = pyautogui.position()
file.write(f"({mouseX_sectionInsert},{mouseY_sectionInsert})\n")
file.write("mouseX_optionTabela,mouseY_optionTabela=")
pyautogui.alert("Clique na seção 'Inserir' e posicione o mouse sobre a opção 'Tabela'.")
mouseX_optionTabela, mouseY_optionTabela = pyautogui.position()
file.write(f"({mouseX_optionTabela},{mouseY_optionTabela})\n")
file.write("mouseX_createTabela,mouseY_createTabela=")
pyautogui.alert("Clique na opção 'Tabela' e posicione o mouse sobre o primeiro quadrado à esquerda para a criação de uma tabela 1x1.")
mouseX_createTabela, mouseY_createTabela = pyautogui.position()
file.write(f"({mouseX_createTabela},{mouseY_createTabela})\n")
file.write("mouseX_sectionPagInitial,mouseY_sectionPagInitial=")
pyautogui.alert("Posicione o mouse sobre a seção 'Página Inicial' no topo do documento.")
mouseX_sectionPagInitial, mouseY_sectionPagInitial = pyautogui.position()
file.write(f"({mouseX_sectionPagInitial},{mouseY_sectionPagInitial})")
file.close()
self.getMousePositions()
def getMousePositions(self):
isEmpty = os.stat("mouse_positions.txt").st_size==0
if (isEmpty):
setMousePositions = pyautogui.confirm(text="Antes de utilizar a aplicação é necessário setar algumas configurações de tela. Deseja fazer isso?", title="Configurações", buttons=["Sim", "Não"])
if (setMousePositions[0].lower()=="s"):
self.setMousePositions()
else:
pyautogui.alert("Não será possível utilizar a aplicação.")
sys.exit()
else:
file = open("mouse_positions.txt", "r")
reading = file.readlines()
self.mouse_positions = []
# REF: https://pt.stackoverflow.com/questions/521595/pegar-trecho-de-uma-string-entre-dois-caracteres
for line in reading:
position = line.split('(')[1].split(')')[0].split(',')
self.mouse_positions.append(position)
file.close()
def __init__(self):
self.getMousePositions()
self.mouseX_enumerator, self.mouseY_enumerator = (int(self.mouse_positions[0][0]), int(self.mouse_positions[0][1]))
self.mouseX_boxFont, self.mouseY_boxFont = (int(self.mouse_positions[1][0]), int(self.mouse_positions[1][1]))
self.mouseX_boxFontSize, self.mouseY_boxFontSize = (int(self.mouse_positions[2][0]), int(self.mouse_positions[2][1]))
self.mouseX_sectionInserir, self.mouseY_sectionInserir = (int(self.mouse_positions[3][0]), int(self.mouse_positions[3][1]))
self.mouseX_optionTabela, self.mouseY_optionTabela = (int(self.mouse_positions[4][0]), int(self.mouse_positions[4][1]))
self.mouseX_createTabela, self.mouseY_createTabela = (int(self.mouse_positions[5][0]), int(self.mouse_positions[5][1]))
self.mouseX_sectionPagInitial, self.mouseY_sectionPagInitial = (int(self.mouse_positions[6][0]), int(self.mouse_positions[6][1]))
|
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df= pd.read_csv(path)
df.head(5)
X= df[['age','sex','bmi','children','smoker','region','charges']]
y=df['insuranceclaim']
X_train,X_test,y_train,y_test= train_test_split(X,y, test_size=0.2, random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.figure(figsize=(10,5))
plt.boxplot(X_train['bmi'])
plt.title("Box Plot")
q_value= np.quantile(X_train['bmi'],0.95)
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
import seaborn as sns
relation= X_train.corr()
print("Correlation between the features is: \n", relation)
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols=['children','sex','region','smoker']
fig, axes= plt.subplots(nrows = 2 , ncols = 2, figsize=(10,10))
for i in range(0,2):
for j in range(0,2):
col= cols[i*2+j]
sns.countplot(x=X_train[col],hue= y_train, ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr= LogisticRegression(random_state=9)
grid=GridSearchCV(estimator=lr, param_grid= parameters)
grid.fit(X_train,y_train)
y_pred= grid.predict(X_test)
accuracy= accuracy_score(y_test,y_pred)
print("accuracy of the grid: \n", accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score= roc_auc_score(y_test,y_pred)
y_pred_proba= grid.predict_proba(X_test)[:,1]
fpr, tpr,_=metrics.roc_curve(y_test,y_pred_proba)
roc_auc= roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
|
from cfp.resolver_factories.parameter_store_resolver_factory import (
ParameterStoreResolverFactory,
)
from cfp.resolver_factories.resolver_factory import AnyResolverFactory, ResolverFactory
from cfp.resolver_factories.string_resolver_factory import StringResolverFactory
from cfp.resolver_factories.use_previous_resolver_factory import (
UsePreviousValueResolverFactory,
)
__all__ = [
"AnyResolverFactory",
"ParameterStoreResolverFactory",
"ResolverFactory",
"StringResolverFactory",
"UsePreviousValueResolverFactory",
]
|
'''
Check if a given number is prime
'''
from math import sqrt
def isPrime(num):
if num < 0:
raise ValueError
if num < 2:
return False
if num < 4:
return True
    # Check if the number is divisible by 2
if num % 2 == 0:
return False
    # Check divisibility by odd numbers only, up to and including sqrt(num);
    # this halves the number of candidates that need to be tested
    for divisor in range(3, int(sqrt(num)) + 1, 2):
if num % divisor == 0:
return False
return True
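# Illustrative spot checks: isPrime(2) -> True, isPrime(9) -> False,
# isPrime(97) -> True, isPrime(1) -> False.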
print('N = ', end='')
try:
result = isPrime(int(input()))
print('Output =', 'True' if result else 'False')
except ValueError:
print('Only positive numbers are supported')
|
from math import sqrt
from jmetal.algorithm.multiobjective.nsgaii import NSGAII
from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.observer import ProgressBarObserver, VisualizerObserver
from jmetal.util.termination_criterion import StoppingByEvaluations
class ZDT1Modified(ZDT1):
"""
Problem ZDT1.
.. note:: Version including a loop for increasing the computing time of the evaluation functions.
"""
def evaluate(self, solution):
g = self.__eval_g(solution)
h = self.__eval_h(solution.variables[0], g)
solution.objectives[0] = solution.variables[0]
solution.objectives[1] = h * g
s: float = 0.0
for i in range(5000000):
s += i * 0.235 / 1.234
return solution
def __eval_g(self, solution):
g = sum(solution.variables) - solution.variables[0]
constant = 9.0 / (solution.number_of_variables - 1)
g = constant * g
g = g + 1.0
return g
def __eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g)
def get_name(self):
return 'ZDT1m'
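# For reference, the ZDT1 objectives computed above (matching __eval_g and __eval_h) are:
#   f1(x) = x1
#   g(x)  = 1 + 9/(n-1) * sum(x2..xn)
#   f2(x) = g(x) * (1 - sqrt(f1(x)/g(x)))
# The busy loop in evaluate() only inflates computing time; it does not change the
# objective values.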
if __name__ == '__main__':
# creates the problem
problem = ZDT1Modified()
# creates the algorithm
max_evaluations = 25000
algorithm = NSGAII(
problem=problem,
population_size=100,
offspring_population_size=100,
mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
crossover=SBXCrossover(probability=1.0, distribution_index=20),
termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
)
algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
algorithm.observable.register(observer=VisualizerObserver())
algorithm.run()
front = algorithm.get_result()
print('Computing time: ' + str(algorithm.total_computing_time))
|
import pandas as pd
import yaml
import os
import argparse
from imblearn.over_sampling import RandomOverSampler
def read_params(config_path):
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
def balance(config_path):
config = read_params(config_path)
train_class_path=config["balanced_data"]["train_class"]
train_label_path=config["balanced_data"]["train_label"]
test_class_path=config["balanced_data"]["test_class"]
test_label_path=config["balanced_data"]["test_label"]
train_processed_path =config["processed"]["train_path"]
test_processed_path =config["processed"]["test_path"]
train_data=pd.read_csv(train_processed_path)
test_data=pd.read_csv(test_processed_path)
train_class=train_data["Class"].copy()
train_label=train_data.drop('Class',axis=1).copy()
test_class=test_data["Class"].copy()
test_label=test_data.drop('Class',axis=1).copy()
test_label.to_csv(test_label_path,index=False)
test_class.to_csv(test_class_path,index=False)
ros=RandomOverSampler(sampling_strategy='all')
X_train_res,y_train_res=ros.fit_resample(train_label,train_class)
X_train_res.to_csv(train_label_path,index=False)
y_train_res.to_csv(train_class_path,index=False)
if __name__=="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
data = balance(config_path=parsed_args.config)
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ufora.FORA.python.ParseException as ParseException
import ufora.FORA.python.ForaValue as ForaValue
import ufora.native.FORA as ForaNative
def freshVarname(startingName, namesUsed):
"""pick a variant on 'startingName' that's not in namesUsed"""
if startingName not in namesUsed:
return startingName
ix = 1
while True:
name = startingName + "_" + str(ix)
if name not in namesUsed:
return name
ix = ix + 1
class NameExistsButDoesntParseException(object):
def __init__(self, projectNode, name):
self.projectNode = projectNode
self.name = name
@property
def childrenWhoDontParse(self):
return self.projectNode.childrenWhoDontParse
class Expression(object):
"""Represents a FORA expression."""
def __init__(self, nativeExpression, codeDefinitionPoint):
assert isinstance(nativeExpression, ForaNative.Expression)
self.nativeExpression_ = nativeExpression
self.codeDefinitionPoint_ = codeDefinitionPoint
#returns a list of variables that are 'free' in the expression
freeVariables = property(lambda self: self.nativeExpression_.freeVariables)
#returns a list of variables that are mentioned anywhere in the expression
mentionedVariables = property(lambda self: self.nativeExpression_.mentionedVariables)
#returns a list of free variables that are assigned to by the expression
assignedVariables = property(lambda self: self.nativeExpression_.assignedVariables)
#returns a list of pairs of strings representing expressions like "x.y"
#where "x" is a free variable and "y" is a member. used to rebind things in
#the expression
freeVariableMemberAccesses = property(lambda self:
self.nativeExpression_.freeVariableMemberAccesses)
def packAssignedVarsIntoTuple(self):
"""Return an expression that returns the value of the expression and also any assigned variables.
If 'self' is 'e', this expression returns (e, (a1, a2, ...)) where the a's are assigned
variables in the environment sorted alphabetically.
If the expression throws an exception/stacktrace pair, the new expression throws
(exception, (a1, a2, ...))
from the same stacktrace.
This function may not be called on an expression with return statements in it.
"""
return Expression(self.nativeExpression_.packAssignedVarsIntoTuple(), self.codeDefinitionPoint_)
def freeVariableUsePoints(self, varname):
"""return a list of CodeLocations where 'varname' is free"""
return self.nativeExpression_.getFreeVariableRanges(varname)
def getReturnStatementPoints(self):
"""return a list of CodeLocations where the expression has a 'return' statement"""
return self.nativeExpression_.getReturnStatementRanges()
def assignedVariableUsePoints(self, varname):
"""return a list of CodeLocations where a free variable 'varname' is assigned to"""
return self.nativeExpression_.getAssignedVariableRanges(varname)
def toFunction(self):
"""convert to a function with one argument per free variable"""
return ForaValue.FORAValue(self.nativeExpression_.toFunctionImplval(False))
def toFunctionAsImplval(self):
"""convert to a function with one argument per free variable"""
return self.nativeExpression_.toFunctionImplval(False)
def toFunctionAsImplvalWithPassthroughArgument(self):
"""convert to a function with one argument per free variable, plus an extra argument
that gets added to the final result as a tuple."""
return self.nativeExpression_.toFunctionImplvalWithPassthroughArgument(False)
def rebindFreeVariableMemberAccess(self, varname, memberName, newName):
"""convert expressions like 'x.y' where 'x' is free to a new free
variable"""
return Expression(
self.nativeExpression_.rebindFreeVariableMemberAccess(
varname,
memberName,
newName
),
self.codeDefinitionPoint_
)
def raiseParseException_(self, functionParseError):
raise ParseException.ParseException(
functionParseError,
self.codeDefinitionPoint_
)
def raiseIfUsesUndefinedVariables(self, knownVariables):
for variable in self.assignedVariables:
self.checkVariableInKnownVariables(variable, knownVariables)
def checkVariableInKnownVariables(self, variable, knownVariables):
if variable not in knownVariables:
assignmentLocations = self.assignedVariableUsePoints(variable)
assert assignmentLocations, "Can't find assignment point for %s in '%s'." % (variable, str(self))
self.raiseParseException_(
ForaNative.FunctionParseError(
"can't assign to free variable " + variable,
assignmentLocations[0].range
)
)
def raiseParseErrorIfHasReturnStatements(self):
getReturnStatementPoints = self.getReturnStatementPoints()
if getReturnStatementPoints:
self.raiseParseException_(
ForaNative.FunctionParseError(
"can't use a return statement in a command-line expression",
getReturnStatementPoints[0].range
)
)
def specializeFreeVariableMapping(self,
freeVariableMapping,
specializationFunc,
lookupFunc,
finalVariableValueValidator
):
"""Allow an expression containing expressions like 'm.x' to be specialized.
If we know what 'm' binds to, and
specializationFunc(freeVariableMapping[m], 'x')
produces a non-null value, we rebind the free variable to a new name with the result
        of specializationFunc.
Finally, we call 'finalVariableValueValidator' with the value each variable chain
has been resolved to, along with a string of dot-separated identifiers. This function
should return an error string if the intermediate value that has been bound is invalid.
"""
done = False
checkedNames = set()
expr = self
dotSequences = {}
# Search through all the free variables and look up their values using the given lookup
# function.
for varname in self.freeVariables:
dotSequences[varname] = varname
if varname not in freeVariableMapping:
varValue = lookupFunc(varname)
if varValue is None:
codeLocations = self.freeVariableUsePoints(varname)
raise ParseException.ParseException(
ForaNative.FunctionParseError(
"Couldn't resolve free variable '%s'" % (varname,),
codeLocations[0].range
),
self.codeDefinitionPoint_
)
elif isinstance(varValue, NameExistsButDoesntParseException):
raise ParseException.ParseException(
ForaNative.FunctionParseError(
"Some modules did not parse correctly.",
self.freeVariableUsePoints(varname)[0].range
),
self.codeDefinitionPoint_
)
freeVariableMapping[varname] = varValue
while not done:
done = True
#freeVariables = expr.freeVariables
freeVariablesBound = expr.freeVariableMemberAccesses
for varname, memberName in freeVariablesBound:
if varname not in freeVariableMapping:
freeVariableMapping[varname] = lookupFunc(varname)
mapsTo = freeVariableMapping[varname]
if (mapsTo is not None and varname not in checkedNames):
subNode = specializationFunc(mapsTo, memberName)
if subNode is not None:
#we can bind this value to the node
newName = freshVarname(varname + "_" + memberName, set(expr.mentionedVariables))
dotSequences[newName] = dotSequences[varname] + "." + memberName
expr = expr.rebindFreeVariableMemberAccess(varname, memberName, newName)
freeVariableMapping[newName] = subNode
done = False
else:
checkedNames.add(varname)
for var in expr.freeVariables:
errString = finalVariableValueValidator(freeVariableMapping[var], dotSequences[var])
if errString is not None:
raise ParseException.ParseException(
ForaNative.FunctionParseError(
errString,
expr.freeVariableUsePoints(var)[0].range
),
self.codeDefinitionPoint_
)
return expr
def returnFreeVariableValues(self, locals, importFunction):
"""Return arguments for the free variables in expression.
importFunction = a function to call with free symbols that aren't in
locals to look them up
"""
boundVariables = []
for freeVariable in self.freeVariables:
if freeVariable in locals:
boundVariables += [locals[freeVariable]]
else:
binding = importFunction(freeVariable)
if binding is None:
raise ParseException.ParseException(
ForaNative.FunctionParseError(
"Couldn't resolve free variable '%s'" % freeVariable,
self.freeVariableUsePoints(freeVariable)[0].range
),
self.codeDefinitionPoint_
)
else:
boundVariables += [binding]
return boundVariables
@staticmethod
def parse(textToParse, codeDefinitionPoint = None, nameScope = "<eval>", parseAsModule=False):
"""parse a string to an Expression object or throw a FunctionParseError.
textToParse - a string containing the FORA expression
        codeDefinitionPoint - an optional CodeDefinitionPoint identifying the code
            being parsed. This will show up in any stacktraces thrown by this
            expression or by code called by this expression. If None, one is
            constructed from 'nameScope'.
        nameScope - the name that functions and objects should descend from. If the
            expression is a simple 'fun' or 'object', this will be its name
        parseAsModule - if True, parse the text as the body of an object (module)
            definition rather than as a plain expression
"""
if codeDefinitionPoint is None:
codeDefinitionPoint = \
ForaNative.CodeDefinitionPoint.ExternalFromStringList(
[nameScope]
)
if parseAsModule:
nativeExpressionOrParseError = \
ForaNative.parseObjectDefinitionBodyToExpression(
textToParse,
["Tsunami", nameScope + ".fora"],
False,
nameScope,
nameScope
)
else:
nativeExpressionOrParseError = \
ForaNative.parseStringToExpression(
textToParse,
codeDefinitionPoint,
nameScope
)
if isinstance(nativeExpressionOrParseError, ForaNative.FunctionParseError):
raise ParseException.ParseException(
nativeExpressionOrParseError,
codeDefinitionPoint
)
return Expression(nativeExpressionOrParseError, codeDefinitionPoint)
def __str__(self):
return str(self.nativeExpression_)
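# Hedged usage sketch for this module (assumes a working ufora/FORA build; the
# return values shown are illustrative):
#   expr = Expression.parse("x + y", nameScope="<eval>")
#   expr.freeVariables        # e.g. ['x', 'y']
#   fun = expr.toFunction()   # FORAValue wrapping a function with one argument per free variable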
|
import json, boto3, uuid
def save(event, context):
print("Event: %s" % json.dumps(event))
holidays = event['holidays']
client = boto3.resource('dynamodb')
table = client.Table('Holidays')
for holiday in holidays:
response = table.put_item(
Item={
'id' : str(uuid.uuid4()),
'name': holiday['name'],
'date': holiday['date'],
'country': holiday['country']
}
)
print(response)
return {
'statusCode': 201,
'body': 'Holidays added'
}
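# Illustrative event payload for local testing (the field names match the keys
# read above; the values are placeholders):
#   {"holidays": [{"name": "New Year's Day", "date": "2022-01-01", "country": "US"}]}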
|
import git,os,tempfile,fnmatch,argparse,sys,shutil,stat,github
from github import Github
from subprocess import call
def RepoDuplicate(username,password,input_repo_url,output_repo_name):
tempdir,path,flag = clone_repo(input_repo_url)
if flag == 1:
#error in cloning
shutil.rmtree(tempdir,ignore_errors=True)
sys.exit(1)
flag = delete_files(path)
if flag == 1:
#error deleting readme and license files
sys.exit(1)
#print("Removing branches and all history....")
flag = delete_git_file(path)
if flag == 1:
#print("Error deleting history and branches.")
sys.exit(1)
elif flag == 0:
#print("Done")
pass
flag = git_operations(username,password,output_repo_name,path)
if flag == 1:
#print("Reverting changes")
delete_git_file(path)
shutil.rmtree(tempdir,ignore_errors=True)
return 1
elif flag == 0:
#print("Deleting local clone...")
delete_git_file(path)
shutil.rmtree(tempdir,ignore_errors=True)
#print("Done")
return 0
def clone_repo(input_repo_url):
try:
tempdir = tempfile.mkdtemp(prefix="",suffix="")
predictable_filename = 'clonedfile'
saved_umask = os.umask(0o077)
path = os.path.join(tempdir,predictable_filename)
#Splitting the input url.
url_user = input_repo_url.split('/')[3]
url_repo = input_repo_url.split('/')[4]
#print("Begining tasks...")
#Cloning a public repository to a temporary local directory
#print("Cloning the repository at "+path)
#Check if repository is public else stop execution
repo = git.Repo.clone_from(f'https://github.com/{url_user}/{url_repo}.git',path, branch="master",depth=1)
#print("Done")
return tempdir,path,0
except git.exc.GitError as err:
#If there's an error cloning the repository
#print(f'ERROR! : https://github.com/{url_user}/{url_repo}.git maybe not a public repository,\n check url format [-h].')
#print(err)
return tempdir,path,1
def delete_files(path):
pattern1="LICENSE*"
pattern2="README*"
#Removing README and LICENSE files from the cloned repository
#print("Deleting README and LICENSE files....")
try:
for roots,dirs,files in os.walk(os.path.join(path,'')):
for file in fnmatch.filter(files,pattern1):
os.remove(os.path.join(roots,file))
for file in fnmatch.filter(files,pattern2):
os.remove(os.path.join(roots,file))
except Exception as err:
#print("Error in deleting file:"+os.path.join(roots,file))
#print(err)
return 1
#print("Done")
return 0
def git_operations(username,password,output_repo_name,path):
#Fetching the github user account and creating a github empty repository
try:
g = Github(username,password)
user = g.get_user()
repo1 = user.create_repo(output_repo_name)
#creating target url to push cloned local repo using username and output_repo_name
target_url = f'https://github.com/{username}/{output_repo_name}.git'
#print("Pushing the cloned repository to: "+target_url)
#initialize the repo after deleting .git directory
new_repo = git.Repo.init(path)
new_repo.git.add(A=True)
new_repo.create_remote("new",url=target_url)
new_repo.git.commit(m='initial commit')
new_repo.git.push('new','master')
except github.GithubException as err:
#print(err)
return 1
except Exception as err:
#delete repository if pushing fails
repo1.delete()
#print(err)
return 1
#print("Git operations done.")
return 0
def delete_git_file(path):
def on_rm_error(func, dir, exc_info):
os.chmod(dir, stat.S_IWRITE)
os.unlink(dir)
try:
for i in os.listdir(path):
if i.endswith('git'):
tmp = os.path.join(path, i)
# We want to unhide the .git folder before unlinking it.
while True:
call(['attrib', '-H', tmp])
break
shutil.rmtree(tmp, onerror=on_rm_error)
except Exception as err:
#print(err)
return 1
return 0
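# Illustrative call (credentials and names are placeholders):
#   RepoDuplicate('my_user', 'my_password',
#                 'https://github.com/someuser/somerepo', 'somerepo_copy')
# The input URL must point to a public repository in the usual
# https://github.com/<user>/<repo> form, since the code splits it on '/'.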
|
import copy
import logging
import os
logger = logging.getLogger(__name__)
class Context():
ENV_VARIABLE = 'DAISY_CONTEXT'
def __init__(self, **kwargs):
self.__dict = dict(**kwargs)
def copy(self):
return copy.deepcopy(self)
def to_env(self):
return ':'.join('%s=%s' % (k, v) for k, v in self.__dict.items())
def __setitem__(self, k, v):
k = str(k)
v = str(v)
if '=' in k or ':' in k:
raise RuntimeError("Context variables must not contain = or :.")
if '=' in v or ':' in v:
raise RuntimeError("Context values must not contain = or :.")
self.__dict[k] = v
def __getitem__(self, k):
return self.__dict[k]
def get(self, k, v=None):
return self.__dict.get(k, v)
def __repr__(self):
return self.to_env()
@staticmethod
def from_env():
try:
tokens = os.environ[Context.ENV_VARIABLE].split(':')
except KeyError:
logger.error(
"%s environment variable not found!",
Context.ENV_VARIABLE)
raise
context = Context()
for token in tokens:
k, v = token.split('=')
context[k] = v
return context
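# Illustrative round trip through the DAISY_CONTEXT environment variable
# (values are placeholders):
#   c = Context(host='localhost', port='5000')
#   os.environ[Context.ENV_VARIABLE] = c.to_env()   # e.g. "host=localhost:port=5000"
#   restored = Context.from_env()
#   restored['port']                                # "5000"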
|
from ipykernel.kernelbase import Kernel
import subprocess
import json
import tempfile
class CbshKernel(Kernel):
implementation = 'Couchbase Shell'
implementation_version = '1.0'
language = 'no-op'
language_version = '0.1'
language_info = {
'name': 'Any text',
'mimetype': 'text/plain',
'file_extension': '.txt',
}
banner = "Couchbase Shell - shell yeah!"
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=False):
if not silent:
temp = tempfile.NamedTemporaryFile(suffix=".nu")
for line in code.splitlines():
line = line + " | to html\n"
temp.write(line.encode('utf-8'))
temp.flush()
command = 'cbsh --script ' + temp.name
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
output = output.decode('utf-8')
err = err.decode()
if err:
display_data = {
'data': {
"text/plain": err,
},
'metadata': {},
}
self.send_response(self.iopub_socket, 'display_data', display_data)
else:
display_data = {
'data': {
"text/html": output,
},
'metadata': {},
}
self.send_response(self.iopub_socket, 'display_data', display_data)
temp.close()
return {'status': 'ok',
# The base class increments the execution count
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=CbshKernel)
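# Hedged launch note: like other ipykernel-based kernels, this module is
# normally started by Jupyter with a connection file, e.g.
#   python -m <module_name> -f /path/to/connection.json
# (<module_name> is a placeholder for wherever this file is installed; a
# matching kernel.json spec is assumed but not shown here.)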
|
import pandas as pd
from db_connector import FixturesDB
from db_connector import LeagueDB
from db_connector import PlayersDB
from db_connector import TeamsDB
class DataInit():
def __init__(self, league='EN_PR', season='2019'):
self.league = league
self.season = season
@staticmethod
def to_df(query):
return pd.DataFrame.from_dict(query)
def team_standing(self):
"""Returns all documents with a t_id key"""
db = TeamsDB(self.league, self.season)
query = db.get_teams_standing()
df = self.to_df(query)
df = df.drop('_id', 1)
return df
def team_names(self):
"""Returns all the unique team names"""
db = LeagueDB(self.league, self.season)
query = db.get_league_teams()
df = self.to_df({'teams': query})
return df
def league_standings(self):
"""Returns the league standings"""
db = LeagueDB(self.league, self.season)
query = db.get_league_standings_overall()
df = self.to_df(query)
cols = df.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
df = df[cols]
df = df.rename(columns={'team_shortName': 'Club', 'position': 'Position', 'overall_played': 'Played',
'overall_won': 'W','overall_draw': 'D', 'overall_lost': 'L',
'overall_goalsFor': 'GF', 'overall_goalsAgainst':'GA',
'overall_goalsDifference': 'GD', 'overall_points': 'Points'})
return df
def fixture_form_decending(self, team_shortName: str, limit=5):
"""Return the latest five games for a team,
Args:
team_shortName: A teams shortname
limit: The number of latest games
"""
db = TeamsDB(self.league, self.season)
query = db.get_latest_fixtures(team_shortName, limit)
df = self.to_df(query)
cols = df.columns.tolist()
cols = cols[4:5] + cols[0:1] + cols[2:4]+ cols[1:2] + cols[5:]
df = df[cols]
return df
if __name__ == '__main__':
init = DataInit()
print(init.fixture_form_decending('Arsenal'))
|
import argparse
import textwrap
from inspera import InsperaReader
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='InsperaReader',
epilog=textwrap.dedent('''\
Info:
            - For each candidate given in the source data, a record is created for each response, with the following fields:
- question id -> int
- response (raw) -> str
- response (parsed) -> str
- grading -> list[int]
- max_score -> int
- duration -> int
''')
)
file_help = 'The path for the .json file from Inspera'
parser.add_argument('-f', '--file', required=True, type=str, help=file_help)
args = parser.parse_args()
reader = InsperaReader(args.file)
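    # Illustrative invocation (the export path is a placeholder):
    #   python <this_script>.py --file path/to/inspera_export.json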
|
from .gcn import GCN, DenseGCN
from .gat import GAT
from .clustergcn import ClusterGCN
from .fastgcn import FastGCN
from .dagnn import DAGNN
from .pairnorm import *
from .simpgcn import SimPGCN
from .mlp import MLP
from .tagcn import TAGCN
from .appnp import APPNP, PPNP
# experimental models
from .experimental.median_gcn import MedianGCN
from .experimental.trimmed_gcn import TrimmedGCN
|
import setuptools
with open("description", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='Fugue-generator',
author="Adam Bradley",
author_email='adam_bradley@brown.edu',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/brown-university-library/fugue',
version='0.9a1',
packages=setuptools.find_packages(),
install_requires=[
"certifi>=2018.8.24",
"Click>=7.0",
"lxml>=4.1.0",
"markdown2==2.3.8",
"pytidylib>=0.3.2",
"PyYAML>=5.1",
"urllib3>=1.25.3",
],
package_data={
# Include mimetypes data.
'': ['mime.types'],
},
entry_points='''
[console_scripts]
fugue=fugue:fugue
''',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML",
],
python_requires='>=3.6',
)
|
"""
- INFO_VERSION: 1
"""
import functools
import hashlib
import inspect
from typing import Dict
import omegaconf
from pydantic import validate_arguments
from gdsfactory.component import Component, clean_dict
from gdsfactory.name import MAX_NAME_LENGTH, clean_name, clean_value
CACHE: Dict[str, Component] = {}
INFO_VERSION = 1
class CellReturnTypeError(ValueError):
pass
def clear_cache() -> None:
"""Clears the cache of components."""
global CACHE
CACHE = {}
def print_cache():
for k in CACHE:
print(k)
def clean_doc(name: str) -> str:
"""Returns a clean docstring"""
# replace_map = {
# " ": " ",
# " ": " ",
# " ": " ",
# " ": ",",
# " ": " ",
# "\n": " ",
# "\n\n": " ",
# }
# for k, v in list(replace_map.items()):
# name = name.replace(k, v)
# name = ",".join(name.split('\n'))
# name = " ".join(name.split())
return name
def cell_without_validator(func):
"""Cell Decorator.
Args:
        autoname (bool): renames the Component based on its keyword arguments
name (str): Optional (ignored when autoname=True)
uid (bool): adds a unique id to the name
cache (bool): get component from the cache if it already exists
    Implements a cache so that if a component has already been built,
    it will be returned from the cache.
    This avoids building two identical cells that are not references to the same cell.
    You can always override this with `cache=False`.
.. plot::
:include-source:
import gdsfactory as gf
@gf.cell
def rectangle(size=(4,2), layer=0)->gf.Component:
c = gf.Component()
w, h = size
points = [[w, h], [w, 0], [0, 0], [0, h]]
c.add_polygon(points, layer=layer)
return c
c = rectangle(layer=(1,0))
c.plot()
"""
@functools.wraps(func)
def _cell(*args, **kwargs):
prefix = kwargs.pop("prefix", func.__name__)
autoname = kwargs.pop("autoname", True)
cache = kwargs.pop("cache", True)
info = kwargs.pop("info", omegaconf.DictConfig({}))
sig = inspect.signature(func)
args_as_kwargs = dict(zip(sig.parameters.keys(), args))
args_as_kwargs.update(**kwargs)
args_as_kwargs_string_list = [
f"{key}={clean_value(args_as_kwargs[key])}"
for key in sorted(args_as_kwargs.keys())
]
arguments = "_".join(args_as_kwargs_string_list)
arguments_hash = hashlib.md5(arguments.encode()).hexdigest()[:8]
name_long = name = (
clean_name(f"{prefix}_{arguments_hash}") if arguments else prefix
)
decorator = kwargs.pop("decorator", None)
if len(name) > MAX_NAME_LENGTH:
name_hash = hashlib.md5(name.encode()).hexdigest()[:8]
name = f"{name[:(MAX_NAME_LENGTH - 9)]}_{name_hash}"
name_component = kwargs.pop("name", name)
if (
"args" not in sig.parameters
and "kwargs" not in sig.parameters
and "settings" not in sig.parameters
):
for key in kwargs.keys():
if key not in sig.parameters.keys():
raise TypeError(
f"{func.__name__}() got invalid argument `{key}`\n"
f"valid arguments are {list(sig.parameters.keys())}"
)
if cache and name in CACHE:
# print(f"CACHE {func.__name__}({kwargs_repr})")
return CACHE[name]
else:
# print(f"BUILD {name} {func.__name__}({arguments})")
# print(f"BUILD {name}, {name_long}")
assert callable(
func
), f"{func} got decorated with @cell! @cell decorator is only for functions"
component = func(*args, **kwargs)
if decorator:
assert callable(
decorator
), f"decorator = {type(decorator)} needs to be callable"
decorator(component)
if not isinstance(component, Component):
raise CellReturnTypeError(
f"function `{func.__name__}` return type = `{type(component)}`",
"make sure that functions with @cell decorator return a Component",
)
if autoname and getattr(component, "_autoname", True):
component.name = name_component
# docstring = func.__doc__ if hasattr(func, "__doc__") else func.func.__doc__
# component.info.doc = docstring
component.info.module = func.__module__
component.info.function_name = func.__name__
component.info.info_version = INFO_VERSION
component.info.name_long = name_long
component.info.name = component.name
component.info.update(**info)
default = {
p.name: p.default
for p in sig.parameters.values()
if not callable(p.default)
}
full = default.copy()
full.update(**args_as_kwargs)
changed = args_as_kwargs.copy()
clean_dict(full)
clean_dict(default)
clean_dict(changed)
component.info.changed = changed
component.info.default = default
component.info.full = full
CACHE[name] = component
return component
return
return _cell
def cell(func, *args, **kwargs):
return cell_without_validator(validate_arguments(func), *args, **kwargs)
@cell
def wg(length: int = 3, width: float = 0.5) -> Component:
from gdsfactory.component import Component
c = Component("straight")
w = width / 2
layer = (1, 0)
c.add_polygon([(0, -w), (length, -w), (length, w), (0, w)], layer=layer)
c.add_port(name="o1", midpoint=[0, 0], width=width, orientation=180, layer=layer)
c.add_port(name="o2", midpoint=[length, 0], width=width, orientation=0, layer=layer)
return c
def test_autoname_true() -> None:
c = wg(length=3)
# assert c.name == "wg_length3", c.name
assert c.name == "wg_2dcab9f2", c.name
def test_autoname_false() -> None:
c = wg(length=3.32, autoname=False)
assert c.name == "straight", c.name
def test_set_name() -> None:
c = wg(length=3, name="hi_there")
assert c.name == "hi_there", c.name
@cell
def _dummy(length: int = 3, wg_width: float = 0.5) -> Component:
"""Dummy cell"""
c = Component()
w = length
h = wg_width
points = [
[-w / 2.0, -h / 2.0],
[-w / 2.0, h / 2],
[w / 2, h / 2],
[w / 2, -h / 2.0],
]
c.add_polygon(points)
return c
def test_autoname() -> None:
name_base = _dummy().name
assert name_base == "_dummy", name_base
name_int = _dummy(length=3).name
assert name_int == "_dummy_2dcab9f2", name_int
dummy2 = functools.partial(_dummy, length=3)
component_int = dummy2(length=3)
name_int = component_int.name
assert name_int == "_dummy_2dcab9f2", name_int
# assert component_int.info.doc == "Dummy cell"
name_float = _dummy(wg_width=0.5).name
assert name_float == "_dummy_b78ec006", name_float
name_length_first = _dummy(length=3, wg_width=0.5).name
name_width_first = _dummy(wg_width=0.5, length=3).name
assert (
name_length_first == name_width_first
), f"{name_length_first} != {name_width_first}"
name_args = _dummy(3).name
assert name_int == "_dummy_2dcab9f2", name_int
name_with_prefix = _dummy(prefix="hi").name
assert name_with_prefix == "hi", name_with_prefix
name_args = _dummy(3).name
name_kwargs = _dummy(length=3).name
assert name_args == name_kwargs, name_with_prefix
if __name__ == "__main__":
# dummy2 = functools.partial(_dummy, length=3)
# c = dummy2()
# c = _dummy()
# test_raise_error_args()
# c = gf.components.straight()
# test_autoname_false()
# test_autoname_true()
# test_autoname()
# test_set_name()
# c = wg(length=3)
# c = wg(length=3, autoname=False)
import gdsfactory as gf
info = dict(polarization="te")
c = gf.components.straight()
c = gf.components.straight(info=info)
# c = gf.components.straight(length=3, info=info)
print(c.info.polarization)
# print(c.settings.info.doc)
# c = gf.components.spiral_inner_io(length=1e3)
# c = gf.components.straight(length=3)
# print(c.name)
# c.show()
# D = gf.Component()
# arc = D << gf.components.bend_circular(
# radius=10, width=0.5, angle=90, layer=(1, 0), info=dict(polarization="te")
# )
# arc.rotate(90)
# rect = D << gf.components.bbox(bbox=arc.bbox, layer=(0, 0))
|
from settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SESSION_COOKIE_SECURE = False
# Allows you to run Kitsune without running Celery---all tasks
# will be done synchronously.
CELERY_ALWAYS_EAGER = True
# Allows you to specify waffle settings in the querystring.
WAFFLE_OVERRIDE = True
# Change this to True if you're going to be doing search-related
# work.
ES_LIVE_INDEXING = False
# Basic cache configuration for development.
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.CacheClass',
'LOCATION': 'localhost:11211',
'PREFIX': 'sumo:',
}
}
# Basic database configuration for development.
DATABASES = {
'default': {
'NAME': 'kitsune',
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'USER': 'kitsune',
'PASSWORD': '',
'OPTIONS': {'init_command': 'SET storage_engine=InnoDB'},
'TEST_CHARSET': 'utf8',
'TEST_COLLATION': 'utf8_unicode_ci',
},
}
LESS_PREPROCESS = True
LESS_BIN = '/usr/bin/lessc'
|
from django.conf.urls import include, url
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import login
from django.contrib.auth.views import logout
from django.contrib.auth.decorators import user_passes_test
from soal.admin import admin_site_tu
# create forbidden page
# ---------------------
# After logging in, users are automatically redirected away from the login page,
# so authenticated users cannot access it.
# This lambda checks whether the user is anonymous: only anonymous users may
# open the login page; everyone else is redirected to '/'.
login_forbidden = user_passes_test(lambda u: u.is_anonymous(), '/')
urlpatterns = [
# localhost/admin
url(r'^admin/', include(admin.site.urls)),
# localhost/tu
url(r'tu/', include(admin_site_tu.urls)),
# localhost
url(r'', include('soal.urls', namespace='soal')),
# localhost/login
url(
r'^login/',
login_forbidden(login),
kwargs = {
'template_name':'soal/login.html',
}, name='login'
),
# localhost/logout
url(r'^logout/', 'django.contrib.auth.views.logout',
kwargs={
'next_page':reverse_lazy('login')
}, name='logout')
]
handler404 = 'errorhandling.views.custom_page_not_found'
|
import os,subprocess
from shlex import quote
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.utils.fileutils import get_temp_dir, remove_dir
from igf_data.utils.fileutils import copy_remote_file
from igf_data.utils.project_status_utils import Project_status
class UpdateProjectStatus(IGFBaseProcess):
'''
An ehive runnable class for updating data for project info page
'''
def param_defaults(self):
params_dict=super(UpdateProjectStatus,self).param_defaults()
params_dict.update({
'remote_project_path':None,
'remote_user':None,
'remote_host':None,
'status_data_json':'status_data.json',
'demultiplexing_pipeline_name':None,
'analysis_pipeline_name':None,
'sample_igf_id':None,
'use_ephemeral_space':0,
})
return params_dict
def run(self):
try:
project_igf_id = self.param_required('project_igf_id')
sample_igf_id = self.param_required('sample_igf_id')
remote_project_path = self.param_required('remote_project_path')
igf_session_class = self.param_required('igf_session_class')
remote_user = self.param_required('remote_user')
remote_host = self.param_required('remote_host')
status_data_json = self.param('status_data_json')
demultiplexing_pipeline_name = self.param_required('demultiplexing_pipeline_name')
analysis_pipeline_name = self.param_required('analysis_pipeline_name')
use_ephemeral_space = self.param('use_ephemeral_space')
temp_work_dir = get_temp_dir(use_ephemeral_space=use_ephemeral_space) # get a temp dir
ps = \
Project_status(\
igf_session_class=igf_session_class,
project_igf_id=project_igf_id)
temp_status_output = \
os.path.join(\
temp_work_dir,
status_data_json) # get path for temp status file
remote_project_dir = \
os.path.join(\
remote_project_path,
project_igf_id) # get remote project directory path
ps.generate_gviz_json_file(\
output_file=temp_status_output,
demultiplexing_pipeline=demultiplexing_pipeline_name,
analysis_pipeline=analysis_pipeline_name) # write data to output json file
remote_file_path = \
os.path.join(\
remote_project_dir,
status_data_json)
self._check_and_copy_remote_file(\
remote_user=remote_user,
remote_host=remote_host,
source_file=temp_status_output,
remote_file=remote_file_path) # copy file to remote
self.param('dataflow_params',
{'remote_project_info':'done'})
remove_dir(temp_work_dir) # remove temp dir
except Exception as e:
message = \
'project: {2}, sample:{3}, Error in {0}: {1}'.\
format(\
self.__class__.__name__,
e,
project_igf_id,
sample_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise
@staticmethod
def _check_and_copy_remote_file(remote_user,remote_host,
source_file,remote_file):
'''
An internal static method for copying files to remote path
:param remote_user: Username for the remote server
:param remote_host: Hostname for the remote server
:param source_file: Source filepath
:param remote_file: Remote filepath
'''
try:
if not os.path.exists(source_file):
raise IOError('Source file {0} not found for copy'.\
format(source_file))
remote_config='{0}@{1}'.format(remote_user,remote_host)
os.chmod(source_file,
mode=0o754)
copy_remote_file(\
source_path=source_file,
destination_path=remote_file,
destination_address=remote_config) # create dir and copy file to remote
except:
raise
|
"""
34. Find First and Last Position of Element in Sorted Array
Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
If target is not found in the array, return [-1, -1].
Follow up: Could you write an algorithm with O(log n) runtime complexity?
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
Example 3:
Input: nums = [], target = 0
Output: [-1,-1]
Constraints:
0 <= nums.length <= 10^5
-10^9 <= nums[i] <= 10^9
nums is a non-decreasing array.
-10^9 <= target <= 10^9
"""
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
        # Linear scan: record every index where nums[i] equals target.
        Hash = []
        for i in range(len(nums)):
            if nums[i] == target:
                Hash.append(i)
        if len(Hash) == 0:
            return [-1, -1]
        return [min(Hash), max(Hash)]
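# A hedged O(log n) alternative for the follow-up question, using two binary
# searches from the standard-library bisect module. This is an illustrative
# sketch, not part of the original submission; the class name SolutionLogN is
# made up to keep it separate from the linear-scan solution above.
from bisect import bisect_left, bisect_right
class SolutionLogN:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        left = bisect_left(nums, target)  # first index at which target could be inserted
        if left == len(nums) or nums[left] != target:
            return [-1, -1]  # target is not present
        right = bisect_right(nums, target) - 1  # index of the last occurrence
        return [left, right]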
|
from random import sample
N, S, E, W, V = 1, 2, 4, 8, 16
opposite = {N:S, S:N, E:W, W:E}
move = {N: lambda x, y: (x, y-1),
S: lambda x, y: (x, y+1),
E: lambda x, y: (x+1, y),
W: lambda x, y: (x-1, y)}
directions = lambda: sample((N, S, E, W), 4)
def maze_generation(width, height):
maze = [[0] * width for i in range(height)]
total_cells = width * height
x, y = 0, 0
while total_cells > 1:
for direction in directions():
nx, ny = move[direction](x, y)
if not 0 <= nx < width or not 0 <= ny < height:
continue
elif maze[ny][nx] == 0:
maze[y][x] |= direction
maze[ny][nx] |= opposite[direction]
x, y = nx, ny
break
else:
for direction in directions():
nx, ny = move[direction](x, y)
if (not 0 <= nx < width
or not 0 <= ny < height
or not maze[ny][nx] & V == 0):
continue
elif not maze[ny][nx] & opposite[direction] == 0:
maze[y][x] |= V
total_cells -= 1
x, y = nx, ny
break
return maze
def draw_maze(maze):
#print(maze)
start = False
finish = False
width = len(maze[0])
height = len(maze)
with open('Tanks_new/Maps/level_1.goose', 'w') as f:
f.truncate(0)
print('1' * (width*2+1), file=f)
for i in range(height*2):
line = ['1']
for j in range(width*2):
if i % 2 == 0 and j % 2 == 0:
if start == False:
line.append('3')
start = True
else:
line.append('0')
elif i % 2 == 0 and j % 2 != 0:
if not maze[i//2][j//2] & E == 0:
line.append('0')
else:
line.append('1')
elif i % 2 != 0 and j % 2 == 0:
if not maze[i//2][j//2] & S == 0:
line.append('0')
else:
line.append('1')
else:
line.append('0')
print(''.join(line),file=f)
print('1'*12+'0'*2+'2', file=f)
# Edit the values to change the height and width of the labyrinth
#draw_maze(maze_generation(8,6))
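# Illustrative check of the bit-flag encoding used above (N/S/E/W/V = 1/2/4/8/16;
# a set direction bit means a passage has been carved that way):
#   maze = maze_generation(8, 6)
#   bool(maze[0][0] & E)   # True if the top-left cell has a passage to the east
#   bool(maze[0][0] & S)   # True if it has a passage to the south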
|
# -*- coding: utf-8 -*-
from .IsobarImg import *
__version__ = '0.4.2'
|
from socket import *
import sys, time
if len(sys.argv) <= 1:
print 'Usage: "python proxy.py server_ip"\n[server_ip : It is the IP Address of the Proxy Server'
sys.exit(2)
# Create a server socket, bind it to a port and start listening
tcpSERVERPort = 8080
tcpSERVERSock = socket(AF_INET, SOCK_STREAM)
fp = open('log.txt','w')
# Prepare a server socket
tcpSERVERSock.bind((sys.argv[1], tcpSERVERPort))
tcpSERVERSock.listen(5)
while True:
# Start receiving data from the client
print 'Ready to serve...'
tcpCLIENTSock, addr = tcpSERVERSock.accept()
print 'Received a connection from: ', addr
t = time.time()
message = tcpCLIENTSock.recv(4096)
print "message= Hello ",message
fp.write(message)
a = len(message)
print 'number of bytes sent =',a
# Extract the filename from the given message
if message == '':
print "No data"
else:
print "m2=::::",message.split()[1]
filename = message.split()[1].partition("/")[2]
print "filename = ",filename
fileExist = "false"
filetouse = "/" + filename
print "filetouse= :",filetouse
try:
# Check whether the file exists in the cache
f = open(filetouse[1:], "r")
outputdata = f.readlines()
b = len(outputdata)
print "bytes received from server = ",b
print "outputdata = ",outputdata
fileExist = "true"
print 'File Exists!'
# ProxyServer finds a cache hit and generates a response message
tcpCLIENTSock.send("HTTP/1.0 200 OK\r\n")
print "HTTP/1.0 200 OK\r\n"
tcpCLIENTSock.send("Content-Type:text/html\r\n")
# Send the content of the requested file to the client
for i in range(0, len(outputdata)):
tcpCLIENTSock.send(outputdata[i])
print 'Read from cache'
# Error handling for file not found in cache
except IOError:
print 'File Exist: ', fileExist
if fileExist == "false":
# Create a socket on the proxyserver
print 'Creating socket on proxyserver'
c = socket(AF_INET, SOCK_STREAM)
hostn = filename.replace("www.", "", 1)
print 'Host Name: ', hostn
try:
# Connect to the socket to port 80
c.connect((hostn, 80))
print 'Socket connected to port 80 of the host'
# Create a temporary file on this socket and ask port 80
# for the file requested by the client
fileobj = c.makefile('r', 0)
fileobj.write("GET " + "http://" + filename + " HTTP/1.0\n\n")
# Read the response into buffer
buffer = fileobj.readlines()
b = len(buffer)
print 'bytes received =' ,b
#resp = c.recv(4096)
#response = ""
#while resp:
#response += resp
# Create a new file in the cache for the requested file.
# Also send the response in the buffer to client socket
# and the corresponding file in the cache
tempFile = open("./" + filename, "wb")
#tempFile.write(response)
#tempFile.close()
#tcpcLIENTsock.send(response)
for i in range(0, len(buffer)):
tempFile.write(buffer[i])
tcpCLIENTSock.send(buffer[i])
except:
print 'illegal request'
else:
# HTTP response message for file not found
print 'File Not Found...'
elap = time.time()
diff = elap - t
# Close the socket and the server sockets
tcpCLIENTSock.close()
fp.write("\n time taken =" + str(diff))
fp.write("\n bytes sent =" + str(a))
fp.write("\n bytes received =" + str(b))
fp.write("\n")
fp.close()
print "Closing the server connection"
tcpSERVERSock.close()
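# Illustrative run (the address is a placeholder): start the proxy with
#   python proxy.py 127.0.0.1
# and point the browser's HTTP proxy setting at 127.0.0.1:8080 (the port bound above).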
|
from PIL import Image
import torch.nn as nn
from torch.autograd import Variable
import torch
import numpy as np
from data import CreateTrgDataSSLLoader
from model import CreateSSLModel
import os
from options.test_options import TestOptions
import scipy.io as sio
import gzip # to compress numpy files so that less cache space is required
##### This is the optimized pseudo-label generation file, which takes at most 12 GB of CPU RAM and 3 GB of GPU RAM. #####
def main():
opt = TestOptions()
args = opt.initialize()
os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
if not os.path.exists(args.save):
os.makedirs(args.save)
args.restore_from = args.restore_opt1
model1 = CreateSSLModel(args)
model1.eval()
model1.cuda()
args.restore_from = args.restore_opt2
model2 = CreateSSLModel(args)
model2.eval()
model2.cuda()
args.restore_from = args.restore_opt3
model3 = CreateSSLModel(args)
model3.eval()
model3.cuda()
targetloader = CreateTrgDataSSLLoader(args)
# change the mean for different dataset
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
IMG_MEAN = torch.reshape( torch.from_numpy(IMG_MEAN), (1,3,1,1) )
mean_img = torch.zeros(1, 1)
image_name = []
x = [None]*19 # x values for all 19 classes
cachepath = "../cache" # Directory to save the numpy files as cache.
with torch.no_grad():
for index, batch in enumerate(targetloader):
if index % 1 == 0:
print( '%d processd' % index )
image, _, name = batch
if mean_img.shape[-1] < 2:
B, C, H, W = image.shape
mean_img = IMG_MEAN.repeat(B,1,H,W)
image = image.clone() - mean_img
image = Variable(image).cuda()
# forward
output1 = model1(image)
output1 = nn.functional.softmax(output1, dim=1)
output2 = model2(image)
output2 = nn.functional.softmax(output2, dim=1)
output3 = model3(image)
output3 = nn.functional.softmax(output3, dim=1)
a, b = 0.3333, 0.3333
output = a*output1 + b*output2 + (1.0-a-b)*output3
output = nn.functional.interpolate(output, (512, 1024), mode='bilinear', align_corners=True).cpu().data[0].numpy()
output = output.transpose(1,2,0)
label, prob = np.argmax(output, axis=2), np.max(output, axis=2)
            # Save the prob and label arrays for each index separately so that the whole
            # array never has to be loaded at once, which saves a lot of CPU RAM.
f1 = gzip.GzipFile(os.path.join(cachepath, "label_" + str(index))+'.npy.gz', "w")
f2 = gzip.GzipFile(os.path.join(cachepath, "prob_" + str(index))+'.npy.gz', "w")
np.save(f1,label)
np.save(f2, prob)
f1.close()
f2.close()
for i in range(19):
d = prob[label==i]
if(len(d)==0):
continue
if x[i] is None:
x[i]=d
else:
x[i]= np.concatenate((x[i],d))
image_name.append(name[0])
        thres = []
        for i in range(19):
            if x[i] is None:
                thres.append(0)
                continue
            temp = np.sort(x[i])
            # Per the paper, use the probability at the 66th percentile of each class as
            # its threshold (capped at 0.9 below).
            thres.append(temp[int(np.round(len(temp) * 0.66))].item())
thres = np.array(thres)
thres[thres > 0.9] = 0.9
print("Cuda", thres)
for index in range(len(targetloader)):
name = image_name[index]
#Loading the prob and label files for each index.
f3 = gzip.GzipFile(os.path.join(cachepath, "label_" + str(index))+'.npy.gz', "r")
f4 = gzip.GzipFile(os.path.join(cachepath, "prob_" + str(index))+'.npy.gz', "r")
label = np.load(f3)
prob = np.load(f4)
for i in range(19):
label[ (prob<thres[i]) * (label==i) ] = 255
output = np.asarray(label, dtype=np.uint8)
output = Image.fromarray(output)
name = name.split('/')[-1]
#Deleting the prob and label files to clear the cache space.
os.remove(os.path.join(cachepath,"label_"+str(index)+".npy.gz"))
os.remove(os.path.join(cachepath,"prob_"+str(index)+".npy.gz"))
output.save('%s/%s' % (args.save, name))
if __name__ == '__main__':
main()
|
from __future__ import print_function, absolute_import, division
import KratosMultiphysics as KM
import KratosMultiphysics.MappingApplication as KratosMapping
import KratosMultiphysics.KratosUnittest as KratosUnittest
'''
This is a fast test of the mappers; it WORKS ONLY IN SERIAL
It stems from the patch tests in the StructuralMechanicsApplication
It covers only 2D!
Setup:
ModelPart Origin:
x-----x-----x---x
Modelpart Destination:
x--x--x----x---x
'''
class TestPatchTestMappers(KratosUnittest.TestCase):
def setUp(self):
self.test_model = KM.Model()
self.mp_origin = self.test_model.CreateModelPart("origin_part")
self.mp_destination = self.test_model.CreateModelPart("destination_part")
self._add_variables()
self._create_nodes()
self._create_elements()
def _add_variables(self):
self.mp_origin.AddNodalSolutionStepVariable(KM.PRESSURE)
self.mp_origin.AddNodalSolutionStepVariable(KM.FORCE)
self.mp_destination.AddNodalSolutionStepVariable(KM.TEMPERATURE)
self.mp_destination.AddNodalSolutionStepVariable(KM.VELOCITY)
def _create_nodes(self):
self.mp_origin.CreateNewNode(1, -5.0, 0.0, 0.0)
self.mp_origin.CreateNewNode(2, 0.0, 0.0, 0.0)
self.mp_origin.CreateNewNode(3, 5.0, 0.0, 0.0)
self.mp_origin.CreateNewNode(4, 8.0, 0.0, 0.0)
self.mp_destination.CreateNewNode(1, -5.0, 0.0, 0.0)
self.mp_destination.CreateNewNode(2, -3.0, 0.0, 0.0)
self.mp_destination.CreateNewNode(3, -1.0, 0.0, 0.0)
self.mp_destination.CreateNewNode(4, 3.0, 0.0, 0.0)
self.mp_destination.CreateNewNode(5, 6.0, 0.0, 0.0)
def _create_elements(self):
element_name = "Element2D2N"
# This seems to create properties on the fly
        props = self.mp_origin.GetProperties()[1]
self.mp_origin.CreateNewElement(element_name, 1, [1,2], props)
self.mp_origin.CreateNewElement(element_name, 2, [2,3], props)
self.mp_origin.CreateNewElement(element_name, 3, [3,4], props)
        props = self.mp_destination.GetProperties()[1]
self.mp_destination.CreateNewElement(element_name, 1, [1,2], props)
self.mp_destination.CreateNewElement(element_name, 2, [2,3], props)
self.mp_destination.CreateNewElement(element_name, 3, [3,4], props)
self.mp_destination.CreateNewElement(element_name, 4, [4,5], props)
def _set_values_origin(self):
value = 0
for node in self.mp_origin.Nodes:
node.SetSolutionStepValue(KM.PRESSURE, value+0.2)
node.SetSolutionStepValue(KM.FORCE, [value, value+0.1, value-0.3])
value += 1
def _set_values_destination(self):
value = 0
for node in self.mp_destination.Nodes:
node.SetSolutionStepValue(KM.TEMPERATURE, value-0.3)
node.SetSolutionStepValue(KM.VELOCITY, [value, value-0.1, value+0.4])
value += 1
def _set_values_mp_const(self, mp, variable, value):
for node in mp.Nodes:
node.SetSolutionStepValue(variable, value)
def _create_mapper(self, mapper_name):
mapper_settings = KM.Parameters("""{
"mapper_type" : \"""" + mapper_name + """\"
}""")
self.mapper = KratosMapping.MapperFactory.CreateMapper(self.mp_origin,
self.mp_destination,
mapper_settings)
def _check_results_scalar(self, mp, results, variable):
if len(results) != mp.NumberOfNodes():
raise RuntimeError("Number of results does not match number of Nodes!")
for index, node in enumerate(mp.Nodes):
self.assertAlmostEqual(node.GetSolutionStepValue(variable), results[index], 10)
def _check_results_vector(self, mp, results, variable):
if len(results) != mp.NumberOfNodes():
raise RuntimeError("Number of results does not match number of Nodes!")
for index, node in enumerate(mp.Nodes):
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[0], results[index][0], 10)
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[1], results[index][1], 10)
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[2], results[index][2], 10)
def _check_results_scalar_const(self, mp, value, variable):
for node in mp.Nodes:
self.assertAlmostEqual(node.GetSolutionStepValue(variable), value)
def _check_results_vector_const(self, mp, value, variable):
for node in mp.Nodes:
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[0], value[0])
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[1], value[1])
self.assertAlmostEqual(node.GetSolutionStepValue(variable)[2], value[2])
def _execute_constant_value_test(self):
# Check mapping of a constant field and the basic functionalities
### Map ###
# Scalar Mapping
mapping_value = 1.33
self._set_values_mp_const(self.mp_origin, KM.PRESSURE, mapping_value)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
self._check_results_scalar_const(self.mp_destination, mapping_value, KM.TEMPERATURE)
self.mapper.UpdateInterface()
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._check_results_scalar_const(self.mp_destination, 2*mapping_value, KM.TEMPERATURE)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._check_results_scalar_const(self.mp_destination, mapping_value, KM.TEMPERATURE)
# Vector Mapping
mapping_value = [1.443, -5.874, 7.99]
self._set_values_mp_const(self.mp_origin, KM.FORCE, mapping_value)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
self._check_results_vector_const(self.mp_destination, mapping_value, KM.VELOCITY)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._check_results_vector_const(self.mp_destination, [2*x for x in mapping_value], KM.VELOCITY)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._check_results_vector_const(self.mp_destination, mapping_value, KM.VELOCITY)
### InverseMap ###
# Scalar Mapping
mapping_value = -71.33
self._set_values_mp_const(self.mp_destination, KM.TEMPERATURE, mapping_value)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
self._check_results_scalar_const(self.mp_origin, mapping_value, KM.PRESSURE)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._check_results_scalar_const(self.mp_origin, 2*mapping_value, KM.PRESSURE)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._check_results_scalar_const(self.mp_origin, mapping_value, KM.PRESSURE)
# Vector Mapping
mapping_value = [-5.443, 44.874, -7.9779]
self._set_values_mp_const(self.mp_destination, KM.VELOCITY, mapping_value)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
self._check_results_vector_const(self.mp_origin, mapping_value, KM.FORCE)
self.mapper.UpdateInterface(KratosMapping.Mapper.REMESHED)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._check_results_vector_const(self.mp_origin, [2*x for x in mapping_value], KM.FORCE)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._check_results_vector_const(self.mp_origin, mapping_value, KM.FORCE)
def _execute_non_constant_value_test(self, results, use_transpose=False):
# Check mapping of a non-constant field
if use_transpose:
mapper_flag = KratosMapping.Mapper.USE_TRANSPOSE
else:
mapper_flag=KM.Flags()
### Map ###
# Scalar Mapping
self._set_values_origin()
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, mapper_flag)
self._check_results_scalar(self.mp_destination, results[0], KM.TEMPERATURE)
if use_transpose:
self.__CheckValuesSum(self.mp_origin, self.mp_destination, KM.PRESSURE, KM.TEMPERATURE, True)
# Vector Mapping
self.mapper.Map(KM.FORCE, KM.VELOCITY, mapper_flag)
self._check_results_vector(self.mp_destination, results[1], KM.VELOCITY)
if use_transpose:
self.__CheckValuesSum(self.mp_origin, self.mp_destination, KM.FORCE, KM.VELOCITY, True)
### InverseMap ###
# Scalar Mapping
self._set_values_destination()
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, mapper_flag)
self._check_results_scalar(self.mp_origin, results[2], KM.PRESSURE)
if use_transpose:
self.__CheckValuesSum(self.mp_origin, self.mp_destination, KM.PRESSURE, KM.TEMPERATURE, True)
# Vector Mapping
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, mapper_flag)
self._check_results_vector(self.mp_origin, results[3], KM.FORCE)
if use_transpose:
self.__CheckValuesSum(self.mp_origin, self.mp_destination, KM.FORCE, KM.VELOCITY, True)
def test_nearest_neighbor_mapper(self):
mapper_name = "nearest_neighbor"
map_results_scalar = [0.2, 0.2, 1.2, 2.2, 2.2]
map_results_vector = [[0.0,0.1,-0.3], [0.0,0.1,-0.3], [1.0,1.1,0.7], [2.0,2.1,1.7], [2.0,2.1,1.7]]
inverse_map_results_scalar = [-0.3, 1.7, 3.7, 3.7]
inverse_map_results_vector = [[0.0,-0.1,0.4], [2.0,1.9,2.4], [4.0,3.9,4.4], [4.0,3.9,4.4]]
map_results_scalar_conservative = [0.2, 0.0, 1.2, 0.0, 5.4]
map_results_vector_conservative = [[0.0,0.1,-0.3], [0.0,0.0,0.0], [1.0,1.1,0.7], [0.0,0.0,0.0], [5.0,5.2,4.4]]
inverse_map_results_scalar_conservative = [0.4, 1.7, 6.4, 0.0]
inverse_map_results_vector_conservative = [[1.0,0.8,1.8], [2.0,1.9,2.4], [7.0,6.8,7.8], [0.0,0.0,0.0]]
results = [map_results_scalar, map_results_vector]
results.extend([inverse_map_results_scalar, inverse_map_results_vector])
results_conservative = [map_results_scalar_conservative, map_results_vector_conservative]
results_conservative.extend([inverse_map_results_scalar_conservative, inverse_map_results_vector_conservative])
self._create_mapper(mapper_name)
self._execute_constant_value_test()
self._execute_non_constant_value_test(results)
# Test Mapping with transpose
self._execute_non_constant_value_test(results_conservative, True)
def test_nearest_element_mapper(self):
mapper_name = "nearest_element"
map_results_scalar = [0.2, 0.6, 1.0, 1.8, 7.6/3]
map_results_vector = [[0.0,0.1,-0.3], [0.4,0.5,0.1], [0.8,0.9,0.5], [1.6,1.7,1.3], [7/3,7.3/3,6.1/3]]
inverse_map_results_scalar = [-0.3, 1.95, 10.1/3, 3.7]
inverse_map_results_vector = [[0.0,-0.1,0.4], [2.25,2.15,2.65], [11/3,10.7/3,12.2/3], [4.0,3.9,4.4]]
map_results_scalar_conservative = [0.2, 0.0, 0.9, 1.033333333333337, 14/3]
map_results_vector_conservative = [[0.0,0.1,-0.3], [0.0,0.0,0.0], [0.75,0.825,0.525], [0.0,0.0,0.0], [1.6+7/3,1.7+7.3/3,1.3+6.1/3]]
inverse_map_results_scalar_conservative = [999, 1.95, 999, 0.0]
inverse_map_results_vector_conservative = [[999,999,999], [2.25,2.15,2.65], [999,999,999], [0.0,0.0,0.0]]
results = [map_results_scalar, map_results_vector]
results.extend([inverse_map_results_scalar, inverse_map_results_vector])
results_conservative = [map_results_scalar_conservative, map_results_vector_conservative]
results_conservative.extend([inverse_map_results_scalar_conservative, inverse_map_results_vector_conservative])
self._create_mapper(mapper_name)
self._execute_constant_value_test()
self._execute_non_constant_value_test(results)
# # Test conservative Mapping
self._execute_non_constant_value_test(results_conservative, True) # TODO check the values!
def __CheckValuesSum(self, mp1, mp2, var1, var2, value_is_historical=True):
var_type = KM.KratosGlobals.GetVariableType(var1.Name())
if var_type != KM.KratosGlobals.GetVariableType(var2.Name()):
raise TypeError("Variable types-mismatch!")
if value_is_historical:
if var_type == "Double":
val_1 = KM.VariableUtils().SumHistoricalNodeScalarVariable(var1, mp1, 0)
val_2 = KM.VariableUtils().SumHistoricalNodeScalarVariable(var2, mp2, 0)
self.assertAlmostEqual(val_1, val_2)
else:
val_1 = KM.VariableUtils().SumHistoricalNodeVectorVariable(var1, mp1, 0)
val_2 = KM.VariableUtils().SumHistoricalNodeVectorVariable(var2, mp2, 0)
self.assertAlmostEqual(val_1[0], val_2[0])
self.assertAlmostEqual(val_1[1], val_2[1])
self.assertAlmostEqual(val_1[2], val_2[2])
else:
if var_type == "Double":
val_1 = KM.VariableUtils().SumNonHistoricalNodeScalarVariable(var1, mp1)
val_2 = KM.VariableUtils().SumNonHistoricalNodeScalarVariable(var2, mp2)
self.assertAlmostEqual(val_1, val_2)
else:
val_1 = KM.VariableUtils().SumNonHistoricalNodeVectorVariable(var1, mp1)
val_2 = KM.VariableUtils().SumNonHistoricalNodeVectorVariable(var2, mp2)
self.assertAlmostEqual(val_1[0], val_2[0])
self.assertAlmostEqual(val_1[1], val_2[1])
self.assertAlmostEqual(val_1[2], val_2[2])
if __name__ == '__main__':
KratosUnittest.main()
|
# Copyright 2021 Ginkgo Bioworks
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Input/Output capabilities."""
import os
import pytest
import geckopy
def test_read_geckopy_from_file(path_ecoli_core):
"""Read model directly from file."""
model = geckopy.io.read_sbml_ec_model(path_ecoli_core)
assert len(model.proteins) == 55
def test_copy_geckopy(ec_model_core):
"""Check that deepcopy works."""
copied = ec_model_core.copy()
assert len(copied.proteins) == len(ec_model_core.proteins)
assert len(copied.reactions) == len(ec_model_core.reactions)
assert len(copied.metabolites) == len(ec_model_core.metabolites)
def test_parsing_captures_naming_convention(dummy_ec_model):
"""Check proteins rely on the naming convention prot_UNIPROT are parsed."""
assert dummy_ec_model.proteins.query("prot_P0A805")
def test_parsing_captures_protein_group(dummy_ec_model):
"""Check members of Protein group are parsed as proteins."""
assert dummy_ec_model.groups.query("Protein")
assert dummy_ec_model.proteins.query("prot_P0A825")
assert dummy_ec_model.proteins.query("dummy_prot")
def test_protein_parsing_does_not_get_normal_metabolites(dummy_ec_model):
"""Check normal metabolites are not parsed as proteins."""
assert not dummy_ec_model.proteins.query("normal_met")
assert dummy_ec_model.metabolites.query("normal_met")
mets = set(dummy_ec_model.metabolites)
prots = set(dummy_ec_model.proteins)
assert mets ^ prots == mets | prots
def test_serialized_model_grows(slim_solution_core, ec_model_core):
"""Check that deserialized model grows at the same rate."""
geckopy.io.write_sbml_ec_model(ec_model_core, "_tmpfull.xml")
redeserialized = geckopy.io.read_sbml_ec_model(
"_tmpfull.xml", hardcoded_rev_reactions=False
)
assert pytest.approx(redeserialized.slim_optimize()) == pytest.approx(
slim_solution_core
)
os.remove("_tmpfull.xml")
def test_serialized_model_has_concentrations(dummy_ec_model):
"""Check that concentrations are properly saved on SBML serialization."""
dummy_ec_model.proteins.prot_P0A825.concentration = 123
geckopy.io.write_sbml_ec_model(dummy_ec_model, "_tmp.xml")
redeserialized = geckopy.io.read_sbml_ec_model(
"_tmp.xml", hardcoded_rev_reactions=False
)
assert redeserialized.proteins.prot_P0A825.concentration == 123
os.remove("_tmp.xml")
def test_proteins_are_grouped_on_write(dummy_ec_model):
"""Check that grouped proteins not following naming are properly handled."""
dummy_ec_model.add_proteins([geckopy.Protein("my_unconventional_protein")])
assert (
dummy_ec_model.proteins.prot_P0A805
not in dummy_ec_model.groups.get_by_id("Protein").members
)
geckopy.io.write_sbml_ec_model(
dummy_ec_model, "_tmp_auto_grouping.xml", group_untyped_proteins=True # default
)
# proteins that were not grouped but follow the conventions are not grouped
assert (
dummy_ec_model.proteins.prot_P0A805
not in dummy_ec_model.groups.get_by_id("Protein").members
)
redeserialized = geckopy.io.read_sbml_ec_model(
"_tmp_auto_grouping.xml", hardcoded_rev_reactions=False
)
assert (
redeserialized.proteins.my_unconventional_protein.id
== "my_unconventional_protein"
)
os.remove("_tmp_auto_grouping.xml")
def test_grouped_proteins_are_correctly_deserialized(ec_model_core):
"""Check that deepcopy works."""
copied = ec_model_core.copy()
copied.reactions.NH4t.add_protein("W", 60)
geckopy.io.write_sbml_ec_model(
copied, "_tmp_with_prot.xml", group_untyped_proteins=True # default
)
model = geckopy.io.read_sbml_ec_model("_tmp_with_prot.xml")
assert pytest.approx(model.proteins.W.kcats["NH4t"]) == 60
os.remove("_tmp_with_prot.xml")
def test_gene_and_proteins_point_to_each_other(ec_model_core):
"""Check that annotating genes and proteins point to each other."""
geckopy.io.standard.annotate_gene_protein_rules(ec_model_core)
assert ec_model_core.genes.get_by_id(
"b1241"
).protein == ec_model_core.proteins.get_by_id("prot_P0A9Q7")
assert (
ec_model_core.proteins.get_by_id("prot_P0A9Q7")
== ec_model_core.genes.get_by_id("b1241").protein
)
# all proteins in the EC core model are in a gene
assert sum(gene.protein is not None for gene in ec_model_core.genes) == len(
ec_model_core.proteins
)
|
# Copyright (C) 2017-2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from unittest.mock import patch
from unittest import TestCase
from hamcrest import ( all_of,
assert_that,
calling,
empty,
ends_with,
equal_to,
contains_exactly,
has_entries,
has_entry,
has_items,
has_key,
is_not,
raises )
from ycmd.completers import completer
from ycmd.completers.language_server import language_server_completer as lsc
from ycmd.completers.language_server.language_server_completer import (
NoHoverInfoException,
NO_HOVER_INFORMATION )
from ycmd.completers.language_server import language_server_protocol as lsp
from ycmd.tests.language_server import MockConnection
from ycmd.request_wrap import RequestWrap
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
DummyCompleter,
LocationMatcher,
RangeMatcher )
from ycmd.tests.language_server import IsolatedYcmd, PathToTestFile
from ycmd import handlers, utils, responses
import os
class MockCompleter( lsc.LanguageServerCompleter, DummyCompleter ):
def __init__( self, custom_options = {} ):
user_options = handlers._server_state._user_options.copy()
user_options.update( custom_options )
super().__init__( user_options )
self._connection = MockConnection(
lambda request: self.WorkspaceConfigurationResponse( request ) )
self._started = False
def Language( self ):
return 'foo'
def StartServer( self, request_data, **kwargs ):
self._started = True
self._project_directory = self.GetProjectDirectory( request_data )
return True
def GetConnection( self ):
return self._connection
def HandleServerCommand( self, request_data, command ):
return super().HandleServerCommand( request_data, command )
def ServerIsHealthy( self ):
return self._started
def GetCommandLine( self ):
return [ 'server' ]
def GetServerName( self ):
return 'mock_completer'
def _TupleToLSPRange( tuple ):
return { 'line': tuple[ 0 ], 'character': tuple[ 1 ] }
def _Check_Distance( point, start, end, expected ):
point = _TupleToLSPRange( point )
start = _TupleToLSPRange( start )
end = _TupleToLSPRange( end )
range = { 'start': start, 'end': end }
result = lsc._DistanceOfPointToRange( point, range )
assert_that( result, equal_to( expected ) )
class LanguageServerCompleterTest( TestCase ):
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs', 'settings_extra_conf.py' ) } )
def test_LanguageServerCompleter_ExtraConf_ServerReset( self, app ):
filepath = PathToTestFile( 'extra_confs', 'foo' )
app.post_json( '/event_notification',
BuildRequest( filepath = filepath,
filetype = 'foo',
contents = '',
event_name = 'FileReadyToParse' ) )
request_data = RequestWrap( BuildRequest() )
completer = MockCompleter()
assert_that( None, equal_to( completer._project_directory ) )
completer.OnFileReadyToParse( request_data )
assert_that( completer._project_directory, is_not( None ) )
assert_that( completer._settings.get( 'ls', {} ), is_not( empty() ) )
completer.ServerReset()
assert_that( completer._settings.get( 'ls', {} ), empty() )
assert_that( None, equal_to( completer._project_directory ) )
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs', 'empty_extra_conf.py' ) } )
def test_LanguageServerCompleter_ExtraConf_FileEmpty( self, app ):
filepath = PathToTestFile( 'extra_confs', 'foo' )
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = filepath,
filetype = 'ycmtest',
contents = '' ) )
completer.OnFileReadyToParse( request_data )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
# We shouldn't have used the extra_conf path for the project directory, but
# that _also_ happens to be the path of the file we opened.
assert_that( PathToTestFile( 'extra_confs' ),
equal_to( completer._project_directory ) )
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs',
'settings_none_extra_conf.py' ) } )
def test_LanguageServerCompleter_ExtraConf_SettingsReturnsNone( self, app ):
filepath = PathToTestFile( 'extra_confs', 'foo' )
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = filepath,
filetype = 'ycmtest',
contents = '' ) )
completer.OnFileReadyToParse( request_data )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
# We shouldn't have used the extra_conf path for the project directory, but
# that _also_ happens to be the path of the file we opened.
assert_that( PathToTestFile( 'extra_confs' ),
equal_to( completer._project_directory ) )
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs', 'settings_extra_conf.py' ) } )
def test_LanguageServerCompleter_ExtraConf_SettingValid( self, app ):
filepath = PathToTestFile( 'extra_confs', 'foo' )
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = filepath,
filetype = 'ycmtest',
working_dir = PathToTestFile(),
contents = '' ) )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
completer.OnFileReadyToParse( request_data )
assert_that( { 'java.rename.enabled' : False },
equal_to( completer._settings.get( 'ls', {} ) ) )
# We use the working_dir not the path to the global extra conf (which is
# ignored)
assert_that( PathToTestFile(), equal_to( completer._project_directory ) )
@IsolatedYcmd( { 'extra_conf_globlist': [ '!*' ] } )
def test_LanguageServerCompleter_ExtraConf_NoExtraConf( self, app ):
filepath = PathToTestFile( 'extra_confs', 'foo' )
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = filepath,
filetype = 'ycmtest',
working_dir = PathToTestFile(),
contents = '' ) )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
completer.OnFileReadyToParse( request_data )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
# We use the client working directory
assert_that( PathToTestFile(), equal_to( completer._project_directory ) )
@IsolatedYcmd( { 'extra_conf_globlist': [ '*' ] } )
def test_LanguageServerCompleter_ExtraConf_NonGlobal( self, app ):
filepath = PathToTestFile( 'project',
'settings_extra_conf',
'foo' )
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = filepath,
filetype = 'ycmtest',
# ignored; ycm conf path used
working_dir = 'ignore_this',
contents = '' ) )
assert_that( {}, equal_to( completer._settings.get( 'ls', {} ) ) )
completer.OnFileReadyToParse( request_data )
assert_that( { 'java.rename.enabled' : False },
equal_to( completer._settings.get( 'ls', {} ) ) )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
assert_that( PathToTestFile( 'project', 'settings_extra_conf' ),
equal_to( completer._project_directory ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_Initialise_Aborted( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
with patch.object( completer.GetConnection(),
'ReadData',
side_effect = RuntimeError ):
assert_that( completer.ServerIsReady(), equal_to( False ) )
completer.OnFileReadyToParse( request_data )
with patch.object( completer,
'_HandleInitializeInPollThread' ) as handler:
completer.GetConnection().run()
handler.assert_not_called()
assert_that( completer._initialize_event.is_set(), equal_to( False ) )
assert_that( completer.ServerIsReady(), equal_to( False ) )
with patch.object( completer, 'ServerIsHealthy', return_value = False ):
assert_that( completer.ServerIsReady(), equal_to( False ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_Initialise_Shutdown( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
with patch.object( completer.GetConnection(),
'ReadData',
side_effect = lsc.LanguageServerConnectionStopped ):
assert_that( completer.ServerIsReady(), equal_to( False ) )
completer.OnFileReadyToParse( request_data )
with patch.object( completer,
'_HandleInitializeInPollThread' ) as handler:
completer.GetConnection().run()
handler.assert_not_called()
assert_that( completer._initialize_event.is_set(), equal_to( False ) )
assert_that( completer.ServerIsReady(), equal_to( False ) )
with patch.object( completer, 'ServerIsHealthy', return_value = False ):
assert_that( completer.ServerIsReady(), equal_to( False ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_GoTo( self, app ):
if utils.OnWindows():
filepath = 'C:\\test.test'
uri = 'file:///c:/test.test'
else:
filepath = '/test.test'
uri = 'file:/test.test'
contents = 'line1\nline2\nline3'
completer = MockCompleter()
# LSP server supports all code navigation features.
completer._server_capabilities = {
'definitionProvider': True,
'declarationProvider': True,
'typeDefinitionProvider': True,
'implementationProvider': True,
'referencesProvider': True
}
request_data = RequestWrap( BuildRequest(
filetype = 'ycmtest',
filepath = filepath,
contents = contents,
line_num = 2,
column_num = 3
) )
@patch.object( completer, '_ServerIsInitialized', return_value = True )
def Test( responses, command, exception, throws, *args ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = responses ):
if throws:
assert_that(
calling( completer.OnUserCommand ).with_args( [ command ],
request_data ),
raises( exception )
)
else:
result = completer.OnUserCommand( [ command ], request_data )
print( f'Result: { result }' )
assert_that( result, exception )
location = {
'uri': uri,
'range': {
'start': { 'line': 0, 'character': 0 },
'end': { 'line': 0, 'character': 0 },
}
}
goto_response = has_entries( {
'filepath': filepath,
'column_num': 1,
'line_num': 1,
'description': 'line1'
} )
cases = [
( [ { 'result': None } ], 'GoToDefinition', RuntimeError, True ),
( [ { 'result': location } ], 'GoToDeclaration', goto_response, False ),
( [ { 'result': {} } ], 'GoToType', RuntimeError, True ),
( [ { 'result': [] } ], 'GoToImplementation', RuntimeError, True ),
( [ { 'result': [ location ] } ],
'GoToReferences', goto_response, False ),
( [ { 'result': [ location, location ] } ],
'GoToReferences',
contains_exactly( goto_response, goto_response ),
False ),
]
for response, goto_handlers, exception, throws in cases:
Test( response, goto_handlers, exception, throws )
# All requests return an invalid URI.
with patch(
'ycmd.completers.language_server.language_server_protocol.UriToFilePath',
side_effect = lsp.InvalidUriException ):
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 0, 'character': 0 },
'end': { 'line': 0, 'character': 0 } }
}
} ], 'GoTo', LocationMatcher( '', 1, 1 ), False )
with patch( 'ycmd.completers.completer_utils.GetFileContents',
side_effect = IOError ):
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 0, 'character': 0 },
'end': { 'line': 0, 'character': 0 } }
}
} ], 'GoToDefinition', LocationMatcher( filepath, 1, 1 ), False )
# Both requests return the location where the cursor is.
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 1, 'character': 0 },
'end': { 'line': 1, 'character': 4 } }
}
}, {
'result': {
'uri': uri,
'range': {
'start': { 'line': 1, 'character': 0 },
'end': { 'line': 1, 'character': 4 },
}
}
} ], 'GoTo', LocationMatcher( filepath, 2, 1 ), False )
# First request returns two locations.
Test( [ {
'result': [ {
'uri': uri,
'range': {
'start': { 'line': 0, 'character': 0 },
'end': { 'line': 0, 'character': 4 } }
}, {
'uri': uri,
'range': {
'start': { 'line': 1, 'character': 0 },
'end': { 'line': 1, 'character': 4 },
}
} ],
} ], 'GoTo', contains_exactly(
LocationMatcher( filepath, 1, 1 ),
LocationMatcher( filepath, 2, 1 )
), False )
# First request returns the location where the cursor is and second request
# returns a different URI.
if utils.OnWindows():
other_filepath = 'C:\\another.test'
other_uri = 'file:///c:/another.test'
else:
other_filepath = '/another.test'
other_uri = 'file:/another.test'
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 1, 'character': 0 },
'end': { 'line': 1, 'character': 4 } }
}
}, {
'result': {
'uri': other_uri,
'range': {
'start': { 'line': 1, 'character': 0 },
'end': { 'line': 1, 'character': 4 },
}
}
} ], 'GoTo', LocationMatcher( other_filepath, 2, 1 ), False )
# First request returns a location before the cursor.
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 0, 'character': 1 },
'end': { 'line': 1, 'character': 1 } }
}
} ], 'GoTo', LocationMatcher( filepath, 1, 2 ), False )
# First request returns a location after the cursor.
Test( [ {
'result': {
'uri': uri,
'range': {
'start': { 'line': 1, 'character': 3 },
'end': { 'line': 2, 'character': 3 } }
}
} ], 'GoTo', LocationMatcher( filepath, 2, 4 ), False )
def test_GetCompletions_RejectInvalid( self ):
if utils.OnWindows():
filepath = 'C:\\test.test'
else:
filepath = '/test.test'
contents = 'line1.\nline2.\nline3.'
request_data = RequestWrap( BuildRequest(
filetype = 'ycmtest',
filepath = filepath,
contents = contents,
line_num = 1,
column_num = 7
) )
text_edit = {
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 6 },
'end': { 'line': 0, 'character': 6 },
}
}
assert_that( lsc._GetCompletionItemStartCodepointOrReject( text_edit,
request_data ),
equal_to( 7 ) )
text_edit = {
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 6 },
'end': { 'line': 1, 'character': 6 },
}
}
assert_that(
calling( lsc._GetCompletionItemStartCodepointOrReject ).with_args(
text_edit, request_data ),
raises( lsc.IncompatibleCompletionException ) )
text_edit = {
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 20 },
'end': { 'line': 0, 'character': 20 },
}
}
assert_that(
lsc._GetCompletionItemStartCodepointOrReject( text_edit, request_data ),
equal_to( 7 ) )
text_edit = {
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 6 },
'end': { 'line': 0, 'character': 5 },
}
}
assert_that(
lsc._GetCompletionItemStartCodepointOrReject( text_edit, request_data ),
equal_to( 7 ) )
def test_WorkspaceEditToFixIt( self ):
if utils.OnWindows():
filepath = 'C:\\test.test'
uri = 'file:///c:/test.test'
else:
filepath = '/test.test'
uri = 'file:/test.test'
contents = 'line1\nline2\nline3'
request_data = RequestWrap( BuildRequest(
filetype = 'ycmtest',
filepath = filepath,
contents = contents
) )
# Null response to textDocument/codeActions is valid
assert_that( lsc.WorkspaceEditToFixIt( request_data, None ),
equal_to( None ) )
# Empty WorkspaceEdit is not explicitly forbidden
assert_that( lsc.WorkspaceEditToFixIt( request_data, {} ),
equal_to( None ) )
# We don't support versioned documentChanges
workspace_edit = {
'documentChanges': [
{
'textDocument': {
'version': 1,
'uri': uri
},
'edits': [
{
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 5 },
'end': { 'line': 0, 'character': 5 },
}
}
]
}
]
}
response = responses.BuildFixItResponse( [
lsc.WorkspaceEditToFixIt( request_data, workspace_edit, 'test' )
] )
print( f'Response: { response }' )
assert_that(
response,
has_entries( {
'fixits': contains_exactly( has_entries( {
'text': 'test',
'chunks': contains_exactly(
ChunkMatcher( 'blah',
LocationMatcher( filepath, 1, 6 ),
LocationMatcher( filepath, 1, 6 ) ) )
} ) )
} )
)
workspace_edit = {
'changes': {
uri: [
{
'newText': 'blah',
'range': {
'start': { 'line': 0, 'character': 5 },
'end': { 'line': 0, 'character': 5 },
}
},
]
}
}
response = responses.BuildFixItResponse( [
lsc.WorkspaceEditToFixIt( request_data, workspace_edit, 'test' )
] )
print( f'Response: { response }' )
print( f'Type Response: { type( response ) }' )
assert_that(
response,
has_entries( {
'fixits': contains_exactly( has_entries( {
'text': 'test',
'chunks': contains_exactly(
ChunkMatcher( 'blah',
LocationMatcher( filepath, 1, 6 ),
LocationMatcher( filepath, 1, 6 ) ) )
} ) )
} )
)
@IsolatedYcmd( { 'extra_conf_globlist': [ '!*' ] } )
def test_LanguageServerCompleter_DelayedInitialization( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( filepath = 'Test.ycmtest' ) )
with patch.object( completer, '_UpdateServerWithFileContents' ) as update:
with patch.object( completer, '_PurgeFileFromServer' ) as purge:
completer.OnFileReadyToParse( request_data )
completer.OnBufferUnload( request_data )
update.assert_not_called()
purge.assert_not_called()
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
update.assert_called_with( request_data )
purge.assert_called_with( 'Test.ycmtest' )
@IsolatedYcmd()
def test_LanguageServerCompleter_RejectWorkspaceConfigurationRequest(
self, app ):
completer = MockCompleter()
notification = {
'jsonrpc': '2.0',
'method': 'workspace/configuration',
'id': 1234,
'params': {
'items': [ { 'section': 'whatever' } ]
}
}
with patch( 'ycmd.completers.language_server.'
'language_server_protocol.Reject' ) as reject:
completer.GetConnection()._DispatchMessage( notification )
reject.assert_called_with( notification, lsp.Errors.MethodNotFound )
@IsolatedYcmd()
def test_LanguageServerCompleter_ShowMessage( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
notification = {
'method': 'window/showMessage',
'params': {
'message': 'this is a test'
}
}
assert_that( completer.ConvertNotificationToMessage( request_data,
notification ),
has_entries( { 'message': 'this is a test' } ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCompletions_List( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
completion_response = { 'result': [ { 'label': 'test' } ] }
resolve_responses = [
{ 'result': { 'label': 'test' } },
]
with patch.object( completer, '_is_completion_provider', True ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = [ completion_response ] +
resolve_responses ):
assert_that(
completer.ComputeCandidatesInner( request_data, 1 ),
contains_exactly(
has_items( has_entries( { 'insertion_text': 'test' } ) ),
False
)
)
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCompletions_UnsupportedKinds( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
completion_response = { 'result': [ { 'label': 'test',
'kind': len( lsp.ITEM_KIND ) + 1 } ] }
resolve_responses = [
{ 'result': { 'label': 'test' } },
]
with patch.object( completer, '_is_completion_provider', True ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = [ completion_response ] +
resolve_responses ):
assert_that(
completer.ComputeCandidatesInner( request_data, 1 ),
contains_exactly(
has_items( all_of( has_entry( 'insertion_text', 'test' ),
is_not( has_key( 'kind' ) ) ) ),
False
)
)
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCompletions_NullNoError( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
complete_response = { 'result': None }
resolve_responses = []
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
with patch.object( completer,
'_is_completion_provider',
return_value = True ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = [ complete_response ] +
resolve_responses ):
assert_that(
completer.ComputeCandidatesInner( request_data, 1 ),
contains_exactly(
empty(),
False
)
)
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCompletions_CompleteOnStartColumn(
self, app ):
completer = MockCompleter()
completer._resolve_completion_items = False
complete_response = {
'result': {
'items': [
{ 'label': 'aa' },
{ 'label': 'ac' },
{ 'label': 'ab' }
],
'isIncomplete': False
}
}
with patch.object( completer, '_is_completion_provider', True ):
request_data = RequestWrap( BuildRequest(
column_num = 2,
contents = 'a',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = complete_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aa' ),
has_entry( 'insertion_text', 'ab' ),
has_entry( 'insertion_text', 'ac' )
)
)
# Nothing cached yet.
assert_that( response.call_count, equal_to( 1 ) )
request_data = RequestWrap( BuildRequest(
column_num = 3,
contents = 'ab',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = complete_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'ab' )
)
)
# Since the server returned a complete list of completions on the
# starting column, no request should be sent to the server and the
# cache should be used instead.
assert_that( response.call_count, equal_to( 0 ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCompletions_CompleteOnCurrentColumn(
self, app ):
completer = MockCompleter()
completer._resolve_completion_items = False
a_response = {
'result': {
'items': [
{ 'label': 'aba' },
{ 'label': 'aab' },
{ 'label': 'aaa' }
],
'isIncomplete': True
}
}
aa_response = {
'result': {
'items': [
{ 'label': 'aab' },
{ 'label': 'aaa' }
],
'isIncomplete': False
}
}
aaa_response = {
'result': {
'items': [
{ 'label': 'aaa' }
],
'isIncomplete': False
}
}
ab_response = {
'result': {
'items': [
{ 'label': 'abb' },
{ 'label': 'aba' }
],
'isIncomplete': False
}
}
with patch.object( completer, '_is_completion_provider', True ):
# User starts by typing the character "a".
request_data = RequestWrap( BuildRequest(
column_num = 2,
contents = 'a',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = a_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aaa' ),
has_entry( 'insertion_text', 'aab' ),
has_entry( 'insertion_text', 'aba' )
)
)
# Nothing cached yet.
assert_that( response.call_count, equal_to( 1 ) )
# User types again the character "a".
request_data = RequestWrap( BuildRequest(
column_num = 3,
contents = 'aa',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = aa_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aaa' ),
has_entry( 'insertion_text', 'aab' )
)
)
# The server returned an incomplete list of completions the first time
# so a new completion request should have been sent.
assert_that( response.call_count, equal_to( 1 ) )
# User types the character "a" a third time.
request_data = RequestWrap( BuildRequest(
column_num = 4,
contents = 'aaa',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = aaa_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aaa' )
)
)
# The server returned a complete list of completions the second time
# and the new query is a prefix of the cached one ("aa" is a prefix of
# "aaa") so the cache should be used.
assert_that( response.call_count, equal_to( 0 ) )
# User deletes the third character.
request_data = RequestWrap( BuildRequest(
column_num = 3,
contents = 'aa',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = aa_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aaa' ),
has_entry( 'insertion_text', 'aab' )
)
)
# The new query is still a prefix of the cached one ("aa" is a prefix of
# "aa") so the cache should again be used.
assert_that( response.call_count, equal_to( 0 ) )
# User deletes the second character.
request_data = RequestWrap( BuildRequest(
column_num = 2,
contents = 'a',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = a_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aaa' ),
has_entry( 'insertion_text', 'aab' ),
has_entry( 'insertion_text', 'aba' )
)
)
# The new query is not anymore a prefix of the cached one ("aa" is not a
# prefix of "a") so the cache is invalidated and a new request is sent.
assert_that( response.call_count, equal_to( 1 ) )
# Finally, user inserts the "b" character.
request_data = RequestWrap( BuildRequest(
column_num = 3,
contents = 'ab',
force_semantic = True
) )
with patch.object( completer.GetConnection(),
'GetResponse',
return_value = ab_response ) as response:
assert_that(
completer.ComputeCandidates( request_data ),
contains_exactly(
has_entry( 'insertion_text', 'aba' ),
has_entry( 'insertion_text', 'abb' )
)
)
# Last response was incomplete so the cache should not be used.
assert_that( response.call_count, equal_to( 1 ) )
def test_FindOverlapLength( self ):
for line, text, overlap in [
( '', '', 0 ),
( 'a', 'a', 1 ),
( 'a', 'b', 0 ),
( 'abcdef', 'abcdefg', 6 ),
( 'abcdefg', 'abcdef', 0 ),
( 'aaab', 'aaab', 4 ),
( 'abab', 'ab', 2 ),
( 'aab', 'caab', 0 ),
( 'abab', 'abababab', 4 ),
( 'aaab', 'baaa', 1 ),
( 'test.', 'test.test', 5 ),
( 'test.', 'test', 0 ),
( 'test', 'testtest', 4 ),
( '', 'testtest', 0 ),
( 'test', '', 0 ),
( 'Some CoCo', 'CoCo Beans', 4 ),
( 'Have some CoCo and CoCo', 'CoCo and CoCo is here.', 13 ),
( 'TEST xyAzA', 'xyAzA test', 5 ),
]:
with self.subTest( line = line, text = text, overlap = overlap ):
assert_that( lsc.FindOverlapLength( line, text ), equal_to( overlap ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_GetCodeActions_CursorOnEmptyLine(
self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
contents = '' ) )
fixit_response = { 'result': [] }
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = [ fixit_response ] ):
with patch( 'ycmd.completers.language_server.language_server_protocol.'
'CodeAction' ) as code_action:
assert_that( completer.GetCodeActions( request_data ),
has_entry( 'fixits', empty() ) )
assert_that(
# Range passed to lsp.CodeAction.
        # LSP requires using the start of the next line as the end position
# for a range that ends with a newline.
code_action.call_args[ 0 ][ 2 ],
has_entries( {
'start': has_entries( {
'line': 0,
'character': 0
} ),
'end': has_entries( {
'line': 1,
'character': 0
} )
} )
)
@IsolatedYcmd()
def test_LanguageServerCompleter_Diagnostics_MaxDiagnosticsNumberExceeded(
self, app ):
completer = MockCompleter( { 'max_diagnostics_to_display': 1 } )
filepath = os.path.realpath( '/foo' )
uri = lsp.FilePathToUri( filepath )
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
filepath = filepath,
contents = '' ) )
notification = {
'jsonrpc': '2.0',
'method': 'textDocument/publishDiagnostics',
'params': {
'uri': uri,
'diagnostics': [ {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'First error'
}, {
'range': {
'start': { 'line': 4, 'character': 7 },
'end': { 'line': 4, 'character': 13 }
},
'severity': 1,
'message': 'Second error [8]'
} ]
}
}
completer.GetConnection()._notifications.put( notification )
completer.HandleNotificationInPollThread( notification )
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
completer.OnFileReadyToParse( request_data )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
diagnostics = contains_exactly(
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'First error' ),
'fixit_available': False
} ),
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 1, 1 ),
'location_extent': RangeMatcher( filepath, ( 1, 1 ), ( 1, 1 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 1, 1 ), ( 1, 1 ) ) ),
'text': equal_to( 'Maximum number of diagnostics exceeded.' ),
'fixit_available': False
} )
)
assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
assert_that(
completer.PollForMessages( request_data ),
contains_exactly( has_entries( {
'diagnostics': diagnostics,
'filepath': filepath
} ) )
)
@IsolatedYcmd()
def test_LanguageServerCompleter_Diagnostics_NoLimitToNumberOfDiagnostics(
self, app ):
completer = MockCompleter( { 'max_diagnostics_to_display': 0 } )
filepath = os.path.realpath( '/foo' )
uri = lsp.FilePathToUri( filepath )
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
filepath = filepath,
contents = '' ) )
notification = {
'jsonrpc': '2.0',
'method': 'textDocument/publishDiagnostics',
'params': {
'uri': uri,
'diagnostics': [ {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'First error'
}, {
'range': {
'start': { 'line': 4, 'character': 7 },
'end': { 'line': 4, 'character': 13 }
},
'severity': 1,
'message': 'Second error'
} ]
}
}
completer.GetConnection()._notifications.put( notification )
completer.HandleNotificationInPollThread( notification )
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
completer.OnFileReadyToParse( request_data )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
diagnostics = contains_exactly(
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'First error' ),
'fixit_available': False
} ),
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 5, 8 ),
'location_extent': RangeMatcher( filepath, ( 5, 8 ), ( 5, 14 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 5, 8 ), ( 5, 14 ) ) ),
'text': equal_to( 'Second error' ),
'fixit_available': False
} )
)
assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
assert_that(
completer.PollForMessages( request_data ),
contains_exactly( has_entries( {
'diagnostics': diagnostics,
'filepath': filepath
} ) )
)
@IsolatedYcmd()
def test_LanguageServerCompleter_GetHoverResponse( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
contents = '' ) )
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
with patch.object( completer.GetConnection(),
'GetResponse',
side_effect = [ { 'result': None } ] ):
assert_that(
calling( completer.GetHoverResponse ).with_args( request_data ),
raises( NoHoverInfoException, NO_HOVER_INFORMATION )
)
with patch.object(
completer.GetConnection(),
'GetResponse',
side_effect = [ { 'result': { 'contents': 'test' } } ] ):
assert_that( completer.GetHoverResponse( request_data ),
equal_to( 'test' ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_Diagnostics_Code( self, app ):
completer = MockCompleter()
filepath = os.path.realpath( '/foo.cpp' )
uri = lsp.FilePathToUri( filepath )
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
filepath = filepath,
contents = '' ) )
notification = {
'jsonrpc': '2.0',
'method': 'textDocument/publishDiagnostics',
'params': {
'uri': uri,
'diagnostics': [ {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'First error',
'code': 'random_error'
}, {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'Second error',
'code': 8
}, {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'Third error',
'code': '8'
} ]
}
}
completer.GetConnection()._notifications.put( notification )
completer.HandleNotificationInPollThread( notification )
with patch.object( completer, 'ServerIsReady', return_value = True ):
completer.OnFileReadyToParse( request_data )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
diagnostics = contains_exactly(
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'First error [random_error]' ),
'fixit_available': False
} ),
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'Second error [8]' ),
'fixit_available': False
} ),
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'Third error [8]' ),
'fixit_available': False
} )
)
assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
assert_that(
completer.PollForMessages( request_data ),
contains_exactly( has_entries( {
'diagnostics': diagnostics,
'filepath': filepath
} ) )
)
@IsolatedYcmd()
def test_LanguageServerCompleter_Diagnostics_PercentEncodeCannonical(
self, app ):
completer = MockCompleter()
filepath = os.path.realpath( '/foo?' )
uri = lsp.FilePathToUri( filepath )
assert_that( uri, ends_with( '%3F' ) )
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
filepath = filepath,
contents = '' ) )
notification = {
'jsonrpc': '2.0',
'method': 'textDocument/publishDiagnostics',
'params': {
'uri': uri.replace( '%3F', '%3f' ),
'diagnostics': [ {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'First error'
} ]
}
}
completer.GetConnection()._notifications.put( notification )
completer.HandleNotificationInPollThread( notification )
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
completer.OnFileReadyToParse( request_data )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
diagnostics = contains_exactly(
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( filepath, 4, 11 ),
'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'First error' ),
'fixit_available': False
} )
)
assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
assert_that(
completer.PollForMessages( request_data ),
contains_exactly( has_entries( {
'diagnostics': diagnostics,
'filepath': filepath
} ) )
)
@IsolatedYcmd()
@patch.object( completer, 'MESSAGE_POLL_TIMEOUT', 0.01 )
def test_LanguageServerCompleter_PollForMessages_ServerNotStarted(
self, app ):
server = MockCompleter()
request_data = RequestWrap( BuildRequest() )
assert_that( server.PollForMessages( request_data ), equal_to( True ) )
@IsolatedYcmd()
def test_LanguageServerCompleter_OnFileSave_BeforeServerReady( self, app ):
completer = MockCompleter()
request_data = RequestWrap( BuildRequest() )
with patch.object( completer, 'ServerIsReady', return_value = False ):
with patch.object( completer.GetConnection(),
'SendNotification' ) as send_notification:
completer.OnFileSave( request_data )
send_notification.assert_not_called()
@IsolatedYcmd()
def test_LanguageServerCompleter_OnFileReadyToParse_InvalidURI( self, app ):
completer = MockCompleter()
filepath = os.path.realpath( '/foo?' )
uri = lsp.FilePathToUri( filepath )
request_data = RequestWrap( BuildRequest( line_num = 1,
column_num = 1,
filepath = filepath,
contents = '' ) )
notification = {
'jsonrpc': '2.0',
'method': 'textDocument/publishDiagnostics',
'params': {
'uri': uri,
'diagnostics': [ {
'range': {
'start': { 'line': 3, 'character': 10 },
'end': { 'line': 3, 'character': 11 }
},
'severity': 1,
'message': 'First error'
} ]
}
}
completer.GetConnection()._notifications.put( notification )
completer.HandleNotificationInPollThread( notification )
with patch.object( completer, '_ServerIsInitialized', return_value = True ):
completer.OnFileReadyToParse( request_data )
# Simulate receipt of response and initialization complete
initialize_response = {
'result': {
'capabilities': {}
}
}
completer._HandleInitializeInPollThread( initialize_response )
diagnostics = contains_exactly(
has_entries( {
'kind': equal_to( 'ERROR' ),
'location': LocationMatcher( '', 4, 11 ),
'location_extent': RangeMatcher( '', ( 4, 11 ), ( 4, 12 ) ),
'ranges': contains_exactly(
RangeMatcher( '', ( 4, 11 ), ( 4, 12 ) ) ),
'text': equal_to( 'First error' ),
'fixit_available': False
} )
)
with patch( 'ycmd.completers.language_server.language_server_protocol.'
'UriToFilePath', side_effect = lsp.InvalidUriException ) as \
uri_to_filepath:
assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
uri_to_filepath.assert_called()
def test_LanguageServerCompleter_DistanceOfPointToRange_SingleLineRange(
self ):
# Point to the left of range.
_Check_Distance( ( 0, 0 ), ( 0, 2 ), ( 0, 5 ) , 2 )
# Point inside range.
_Check_Distance( ( 0, 4 ), ( 0, 2 ), ( 0, 5 ) , 0 )
# Point to the right of range.
_Check_Distance( ( 0, 8 ), ( 0, 2 ), ( 0, 5 ) , 3 )
def test_LanguageServerCompleter_DistanceOfPointToRange_MultiLineRange(
self ):
# Point to the left of range.
_Check_Distance( ( 0, 0 ), ( 0, 2 ), ( 3, 5 ) , 2 )
# Point inside range.
_Check_Distance( ( 1, 4 ), ( 0, 2 ), ( 3, 5 ) , 0 )
# Point to the right of range.
_Check_Distance( ( 3, 8 ), ( 0, 2 ), ( 3, 5 ) , 3 )
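# --- Illustrative sketch ( not part of the test-suite ) ---------------------
# The recurring pattern in the diagnostics tests above: queue an LSP
# publishDiagnostics notification, let the poll-thread handler record it, then
# complete initialization so OnFileReadyToParse returns the converted
# diagnostics.
#
#     completer = MockCompleter()
#     completer.GetConnection()._notifications.put( notification )
#     completer.HandleNotificationInPollThread( notification )
#     completer.OnFileReadyToParse( request_data )
#     completer._HandleInitializeInPollThread( { 'result': { 'capabilities': {} } } )
#     diagnostics = completer.OnFileReadyToParse( request_data )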
|
'''
Created on August 2nd, 2018
Created By: Brandon Robinson (brobinson2111)
'''
import logging
"""
Sets the provided logger for the application.
:logger The logger object to be configured.
"""
def set(logger):
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
file_handler = logging.FileHandler('application.log')
file_handler.setLevel(logging.DEBUG)
# create console handler with a debug level
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
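if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): configure a
    # logger and emit one record to both the console and application.log.
    demo_logger = logging.getLogger("application")
    set(demo_logger)
    demo_logger.debug("Logging configured: console + application.log")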
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import calendar
import logging
import struct
from datetime import datetime
from google.appengine.ext import ndb
from .testname import TestName
from .jsonresults import JsonResults
class StepResult(ndb.Model):
"""Stores results for a single buildbot step (e.g, browser_tests).
The results for all test cases included in the step are stored in a
bit-packed blob, for compactness. The TestName class maintains a
global dict of all known test names, mapped to integer keys; this
class stores the integer keys rather than test name strings. The layout
of a test result entry in the packed struct is:
test name key: unsigned integer, 4 bytes
test run time: float, 4 bytes
expected result: unsigned integer, 1 byte
number of actual results: unsigned integer, 1 byte
actual results: array of 1-byte unsigned integers
"""
# Test status constants
(PASS,
FAIL,
SKIP,
NOTRUN,
CRASH,
TIMEOUT,
MISSING,
LEAK,
SLOW,
TEXT,
AUDIO,
IMAGE,
IMAGETEXT,
REBASELINE,
NEEDSREBASELINE,
NEEDSMANUALREBASELINE) = range(16)
STR2RESULT = {
'PASS': PASS,
'FAIL': FAIL,
'SKIP': SKIP,
'NOTRUN': NOTRUN,
'CRASH': CRASH,
'TIMEOUT': TIMEOUT,
'MISSING': MISSING,
'LEAK': LEAK,
'SLOW': SLOW,
'TEXT': TEXT,
'AUDIO': AUDIO,
'IMAGE': IMAGE,
'IMAGETEXT': IMAGETEXT,
'REBASELINE': REBASELINE,
'NEEDSREBASELINE': NEEDSREBASELINE,
'NEEDSMANUALREBASELINE': NEEDSMANUALREBASELINE,
}
RESULT2STR = [
'PASS',
'FAIL',
'SKIP',
'NOTRUN',
'CRASH',
'TIMEOUT',
'MISSING',
'LEAK',
'SLOW',
'TEXT',
'AUDIO',
'IMAGE',
'IMAGETEXT',
'REBASELINE',
'NEEDSREBASELINE',
'NEEDSMANUALREBASELINE',
]
# This is used as an argument to struct.pack to implement the first four
# fields in the struct layout described above (up to number of actual
  # results). The array of actual results is appended with a format of
# '<n>B', where <n> is the number of actual results.
TEST_PACK_FORMAT = '>IfBB'
TEST_PACK_FORMAT_SIZE = struct.calcsize(TEST_PACK_FORMAT)
master = ndb.StringProperty('m')
builder_name = ndb.StringProperty('b')
build_number = ndb.IntegerProperty('n')
test_type = ndb.StringProperty('tp')
blink_revision = ndb.StringProperty('br')
chromium_revision = ndb.StringProperty('cr')
version = ndb.IntegerProperty('v')
time = ndb.DateTimeProperty('t')
tests = ndb.BlobProperty('r')
@classmethod
def _encodeTests(cls, test_json):
result = ''
for test_name, test_result in test_json.iteritems():
try:
test_name_key = TestName.getKey(test_name)
except: # pragma: no cover
logging.error('Could not get global key for test name %s', test_name)
raise
try:
expected = cls.STR2RESULT[test_result['expected']]
actual = tuple(
[cls.STR2RESULT[a] for a in test_result['actual'].split()][:255])
elapsed = float(test_result['time'])
except: # pragma: no cover
logging.error('Could not parse numeric values from test result json')
raise
try:
result += struct.pack(
cls.TEST_PACK_FORMAT, test_name_key, elapsed, expected, len(actual))
result += struct.pack('%dB' % len(actual), *actual)
except: # pragma: no cover
logging.error('Could not struct pack test result')
raise
return result
def _decodeTests(self):
results = {}
failures = [0] * len(self.RESULT2STR)
i = 0
while i + self.TEST_PACK_FORMAT_SIZE < len(self.tests):
test_name_key, elapsed, expected, num_actual = struct.unpack(
self.TEST_PACK_FORMAT, self.tests[i:i+self.TEST_PACK_FORMAT_SIZE])
i += self.TEST_PACK_FORMAT_SIZE
assert i + num_actual <= len(self.tests)
test_name = TestName.getTestName(test_name_key)
actual = struct.unpack('%dB' % num_actual, self.tests[i:i+num_actual])
i += num_actual
for a in actual:
failures[a] += 1
results[str(test_name)] = {
'expected': self.RESULT2STR[expected],
'actual': ' '.join([self.RESULT2STR[a] for a in actual]),
'time': str(elapsed)
}
assert i == len(self.tests)
return results, failures
@classmethod
def fromJson(cls, master, test_type, data):
"""Instantiate a new StepResult from parsed json.
The expected json schema is what full-results.json contains. Note that the
returned StepResult instance has NOT been saved to the datastore.
"""
return cls(
master=master,
builder_name=data['builder_name'],
build_number=int(data['build_number']),
test_type=test_type,
blink_revision=data['blink_revision'],
chromium_revision=data['chromium_revision'],
version=int(data['version']),
time=datetime.utcfromtimestamp(float(data['seconds_since_epoch'])),
tests=cls._encodeTests(data['tests']),
)
def toJson(self):
"""Convert a StepResult object to parsed json.
The json schema is the same as what full-results.json contains.
"""
tests, failures = self._decodeTests()
failures = dict(zip(self.RESULT2STR, failures))
data = {
'builder_name': self.builder_name,
'build_number': str(self.build_number),
'blink_revision': self.blink_revision,
'chromium_revision': self.chromium_revision,
'version': str(self.version),
'seconds_since_epoch': str(calendar.timegm(self.time.utctimetuple())),
'tests': tests,
'num_failures_by_type': failures
}
return (self.master, self.test_type, data)
@classmethod
def fromTestFile(cls, test_file):
"""Convert a TestFile object to a StepResult object.
The returned StepResult has NOT been saved to the datastore.
"""
if not test_file.data:
test_file.load_data() # pragma: no cover
j = JsonResults.load_json(test_file.data)
return cls.fromJson(test_file.master, test_file.test_type, j)
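# --- Illustrative sketch (not part of the original module) -------------------
# How one packed test entry is laid out, following the TEST_PACK_FORMAT
# described in the class docstring (the values below are made up):
#
#     header = struct.pack(StepResult.TEST_PACK_FORMAT,
#                          42,               # test name key (4-byte unsigned int)
#                          0.25,             # run time in seconds (4-byte float)
#                          StepResult.PASS,  # expected result (1 byte)
#                          2)                # number of actual results (1 byte)
#     actual = struct.pack('2B', StepResult.FAIL, StepResult.PASS)
#     entry = header + actual                # 10 + 2 = 12 bytes total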
|
#!python
# Author: Chris Huskey
"""
Module of functions for working with our AWS S3 buckets for video storage.
"""
# Import libraries we will use:
import boto3 # boto3 is AWS's Python SDK, by AWS
from botocore.exceptions import ClientError
from dotenv import load_dotenv
import json
import logging
import os
# -------------------------------------------------------------------------
# SETUP:
# Get access info from .env file:
load_dotenv()
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_DEFAULT_REGION = os.getenv("AWS_DEFAULT_REGION")
# os.environ['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
# Create an S3 Service Resource:
s3 = boto3.resource('s3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_DEFAULT_REGION
)
# Make an S3 client with boto3:
s3_client = boto3.client('s3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_DEFAULT_REGION
)
# -------------------------------------------------------------------------
# Function that downloads the specified file from the specified S3 bucket:
def s3_download_file(bucket, filename, key=None):
"""
Download the specified file from the specified S3 bucket.
Parameters:
bucket: Name of the bucket to get the file from
        filename: Local path to save the downloaded file as
key: S3 key of the file to download
Returns: True if file was downloaded, else False.
"""
    # If the S3 key was not specified, derive it from the filename:
if key is None:
key = 'videos/' + filename
    # Download the file from the specified S3 bucket:
try:
response = s3_client.download_file(Bucket=bucket,
Filename=filename,
Key=key)
except ClientError as e:
logging.error(e)
return False
return True
# -------------------------------------------------------------------------
# Function that uploads the specified file to the specified S3 bucket:
def s3_upload_file(bucket, filename, key=None):
"""
Upload a file to an S3 bucket.
Parameters:
bucket: Name of the bucket to upload to
filename: File to upload
key: S3 key to upload the file as (e.g., 'videos/<file-name>')
Returns: True if file was uploaded, else False.
"""
    # If the S3 key was not specified, derive it from the filename:
if key is None:
key = 'videos/' + filename
# Upload the file to the specified S3 bucket:
try:
response = s3_client.upload_file(Bucket=bucket,
Filename=filename,
Key=key)
except ClientError as e:
logging.error(e)
return False
return True
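# --- Illustrative usage sketch (not part of the original module) -------------
# 'clip.mp4' is a placeholder filename; both helpers return True on success
# and False when botocore raises a ClientError, and both default the key to
# 'videos/<filename>':
#
#     if s3_upload_file(S3_BUCKET_NAME, 'clip.mp4'):      # stored as videos/clip.mp4
#         s3_download_file(S3_BUCKET_NAME, 'clip.mp4')    # fetched back from videos/clip.mp4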
|
IBBQ_MAC = "4C:24:98:D0:xx:xx"
influx_host = "influx.mycooldomain.com"
influx_db = "annoying_internet_devices"
|
from profess.Profess import *
from profess.JSONparser import *
#domain = "http://localhost:8080/v1/"
domain = "http://192.168.99.100:8080/v1/"
dummyInputData = open('inputData.json').read()
jsonInputDataFile=json.loads(dummyInputData)
IEEE13=open("IEEE13_changed.json").read()
jsonIEEE = json.loads(IEEE13)
modelDataFile = open('model.json').read()
#p1 = Profess("http://localhost:8080/v1/", dummyInputData)
p1 = Profess(domain)
dummyprofile= [3] * 24
dummyLoads=[]
dummyPrice=[]
dummyPV=[]
p1.json_parser.set_topology(jsonIEEE)
dummyPVdict=[]
print("p1.json_parser.get_node_name_list(): " + str(p1.json_parser.get_node_name_list()))
for element in p1.json_parser.get_node_name_list():
dummyDict={element:{element+".1": copy.deepcopy(dummyprofile),element+".2": copy.deepcopy(dummyprofile),element+".3": copy.deepcopy(dummyprofile)}}
dummyLoads.append(dummyDict)
dummyPVdict={element:{element+".1.2.3": copy.deepcopy(dummyprofile)}}
dummyPV.append(dummyPVdict)
dummyPrice = copy.deepcopy(dummyprofile)
element="671"
dummyDict = {element: [{element + ".1.2.3": copy.deepcopy(dummyprofile)}]}
print("dummyDict: " + str(dummyDict))
print("dummyLoads " + str(dummyLoads))
print("dummyPV " + str(dummyPV))
print("dummyPrice " + str(dummyPrice))
print("dummyLoads[1] " + str(dummyLoads[1]))
print("dummyLoads len: " + str(len(dummyLoads)))
dummyLoads[1]= dummyDict
dummyGESSCON=[{'633':
{'633.1.2.3': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0]}},
{'671': {'671.1.2.3': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]}}]
#print(dummyLoads)
#print(dummyPV)
#print(dummyPrice)
#print(p1.json_parser.get_node_element_list())
p1.set_up_profess(jsonIEEE, dummyLoads, dummyPV, dummyPrice, dummyGESSCON)
#print(p1.json_parser.get_node_element_list())
p1.start_all()
#print(p1.dataList)
print(p1.wait_and_get_output())
soc_list=[{"633":{"SoC":0.5}},{"671":{"SoC":0.4}},{"634":{"SoC":0.2}}]
p1.update(dummyLoads, dummyPV, dummyPrice,soc_list,dummyGESSCON)
print(p1.dataList)
#print(sorted(test))
#p1.translate_output(test)
|
import subprocess
import os
import argparse
import pipes
# Use this script to download files from ftp links.
# It contains a dictionary {ID: ftp_path} from Metabolights and
# Metabolomics Workbench, so you only have to specify
# an id.
# Otherwise, you can specify an http or ftp link yourself.
# User defined variables
s3_path = "s3://almlab.bucket/isaac/revo_healthcare_data/"
def download_data(ftp_path, study, cwd, s3_path=None, to_s3=False):
'''
    GOAL - Given an {ftp_path} and a unique {study} name, download all files
associated with that ftp path to {cwd}. If you want, send
those files to s3 ({to_s3}=True)
'''
directory = '{cwd}/{study}/'.format(cwd=cwd, study=study)
if to_s3:
        # Check whether this study already exists in the S3 bucket and
        # prompt before overwriting it.
ls_s3 = 'nohup aws s3 ls {s3_path}{study} &'.format(s3_path=s3_path,
study=study)
check_bucket = subprocess.call(ls_s3, shell=True)
if check_bucket == 0: # aws returns 1 if error, zero otherwise
response = raw_input('Bucket for {study} already exists.'
'Do you want to overwrite it? (y/n):'.format(
study=study))
if response == 'y':
pass
if response == 'n':
print 'Ending script'
return
os.mkdir(directory)
# Recursively download all files from ftp path into your directory
# pipes.quote puts quotes around a path so that bash will
    # play nicely with whitespace and weird characters like '()'.
# cut-dirs removes parts of the ftp url that would otherwise
# be assigned to directories (very annoying)
# Always three entries when split [ftp:, '', 'hostname']
# that we handle with -nH, ftp://hostname.org/path/to/things/*
# Note that we also have a /*, so we exclude the last / when counting
    # directory structures to ignore.
url_dirs_to_cut = len(ftp_path.split('/')[3:-1])
print url_dirs_to_cut
wget_command = 'nohup wget -r -nH --cut-dirs={cut} '\
'{ftp} -P {dir} --no-verbose &'.format(
ftp=ftp_path, dir=pipes.quote(directory),
cut=url_dirs_to_cut)
subprocess.call(wget_command, shell=True)
if to_s3:
send_s3 = 'nohup aws s3 sync {dir} {s3_path}{study}'.format(
dir=pipes.quote(directory),
s3_path=s3_path, study=study)
subprocess.call(send_s3, shell=True)
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--study',
                    help='Required. Name of the study, e.g. MTBLS315 '
                         'or ST000392')
parser.add_argument('-p', '--path',
                    help='Optional. Base directory where you want to download '
                         'files to (default is current directory). '
                         'Will make a new directory for the study.')
parser.add_argument('-s3',
help='Optional. Path to an s3 storage bucket '
'where you want files deposited')
parser.add_argument('-ftp',
                    help='Optional. Path to an ftp link. '
                         'All files will be downloaded from that link')
args = parser.parse_args()
print args.study
print args.path
print args.s3
print args.ftp
# Make a dictionary of ftp links to download
# These are the directory names and paths to download
# Unzipped files, copy all of them
lights = 'ftp://ftp.ebi.ac.uk/pub/databases/metabolights/studies/public/'
bench = 'ftp://www.metabolomicsworkbench.org/Studies/'
studies = {'MTBLS200': lights+'MTBLS200/*',
'ST000392': bench+'ST000392.zip',
'MTBLS191': lights+'MTBLS191/*',
'ST000383': bench+'ST000383.zip',
'MTBLS105': lights+'MTBLS105/*',
'ST000397': bench + 'ST000397.7z',
'ST000368': bench + 'ST000368.zip',
'ST000369': bench + 'ST000369.zip',
'ST000385': bench + 'ST000385.zip',
'ST000386': bench + 'ST000386.zip',
'ST000062': bench + 'ST000062.zip',
'ST000063': bench + 'ST000063.zip',
'ST000396': bench + 'ST000396.7z',
'ST000381': bench + 'ST000381.zip',
'ST000382': bench + 'ST000382.zip',
'ST000329': bench + 'ST000329.zip',
'ST000388': bench + 'ST000388.zip',
'ST000389': bench + 'ST000389.zip',
'MTBLS72': lights + 'MTBLS72/*',
'MTBLS124': lights + 'MTBLS124/*',
'ST000421': bench + 'ST000421.zip',
'ST000422': bench + 'ST000422.zip',
'ST000578': bench + 'ST000578.zip',
'ST000041': bench + 'ST000041.zip',
'MTBLS146': lights + 'MTBLS146/*',
'MTBLS266': lights + 'MTBLS266/*',
'MTBLS264': lights + 'MTBLS264/*',
'ST000355': bench + 'ST000355.zip',
'ST000356': bench + 'ST000356.zip',
'MTBLS92': lights + 'MTBLS92/*',
'MTBLS90': lights + 'MTBLS90/*',
'MTBLS93': lights + 'MTBLS93/*',
'ST000284': bench + 'ST000284.zip',
'MTBLS253': lights + 'MTBLS253/*',
'MTBLS280': lights + 'MTBLS280/*',
'MTBLS279': lights + 'MTBLS279/*',
'MTBLS19': lights + 'MTBLS19/*',
'MTBLS17': lights + 'MTBLS17/*',
'MTBLS218': lights + 'MTBLS218/*',
'MTBLS20': lights + 'MTBLS20/*',
'MTBLS404': lights + 'MTBLS404/*',
'MTBLS148': lights + 'MTBLS148/*',
'ST000450': bench + 'ST000450.zip',
'MTBLS364': lights + 'MTBLS364/*',
'MTBLS315': lights + 'MTBLS315/*',
'ST000608': bench + 'ST000608.zip',
'MTBLS352': lights + 'MTBLS352/*',
'MTBLS358': lights + 'MTBLS358/*',
'ST000405': bench + 'ST000405.zip',
'MTBLS354': lights + 'MTBLS354/*',
'MTBLS28': lights + 'MTBLS28/*',
'MTBLS427': lights + 'MTBLS427/*',
'ST000291': bench + 'ST000291.zip',
'ST000292': bench + 'ST000292.zip',
'ST000046': bench + 'ST000046.7z',
'ST000091': bench + 'ST000091.zip',
'ST000045': bench + 'ST000045.7z',
}
# If they're giving you a study name, but not an ftp address, and you can't
# find the name in your dictionary, raise an error
if (args.study is not None) and \
(args.study not in studies.keys()) \
and (not args.ftp):
raise NameError("Couldn't find the study you were looking for. Add it to "
"the dictionary, or specify your own ftp link")
# If they gave an ftp link, use it. If not, find it in the {studies} dict
if args.ftp:
ftp_path = args.ftp
else:
ftp_path = studies[args.study]
if args.path:
cwd = args.path
else:
cwd = os.getcwd()
print cwd
# If you gave an s3 path, send the data to s3 as well
if args.s3:
download_data(ftp_path, args.study, cwd, args.s3, to_s3=True)
else:
download_data(ftp_path, args.study, cwd)
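# --- Illustrative invocations (not part of the original script; the script
# filename is a placeholder) ---------------------------------------------------
#
#     # Download a known study into the current directory:
#     python2 download_study_data.py -s MTBLS315
#
#     # Download to a specific directory and mirror it to S3:
#     python2 download_study_data.py -s ST000392 -p /data/raw \
#         -s3 s3://almlab.bucket/isaac/revo_healthcare_data/
#
#     # Download from a custom ftp link under a new study name:
#     python2 download_study_data.py -s MY_STUDY -ftp ftp://example.org/path/to/files/*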
|
from unicodedata import normalize, category
def strip_tones(w):
"""Removes tones form a word."""
s = normalize("NFD", w)
return "".join(c for c in s if category(c) != "Mn")
|
#!/usr/bin/env python3
import sys, csv, argparse
def read_csv(f):
with open(f) as fd:
content = fd.readlines()[1:]
return list(csv.DictReader(content))
def analyze_file(f, potential_errors=False):
"""Analyze result file {f} (which should be a .csv file).
Print per-solver analysis, and errors which happen quickly (true errors, not timeouts).
"""
print(f"## analyze `{f}`")
table = read_csv(f)
print(f"read {len(table)} records")
if not table: return
provers = [x for x in table[0].keys() if ".time" not in x and x != "problem"]
print(f"provers: {provers}")
sat = {}
unsat = {}
unknown = {}
error = {}
if potential_errors:
quick_errors = []
for row in table:
for prover in provers:
res = row[prover]
if res == 'unsat':
unsat[prover] = 1 + unsat.get(prover, 0)
elif res == 'sat':
sat[prover] = 1 + sat.get(prover, 0)
elif res == 'unknown':
unknown[prover] = 1 + unknown.get(prover, 0)
elif res == 'error':
error[prover] = 1 + error.get(prover, 0)
time = float(row[prover + '.time'])
if potential_errors and time < 5:
quick_errors.append((prover, row['problem'], time))
else:
print(f"unknown result for {prover} on {row}: {res}")
for prover in provers:
print(f"{prover:{12}}: sat {sat.get(prover,0):6}" \
f" | unsat {unsat.get(prover,0):6}" \
f" | solved {sat.get(prover,0)+unsat.get(prover,0):6}" \
f" | unknown {unknown.get(prover,0):6}" \
f" | error {error.get(prover,0):6}")
if potential_errors:
for (prover,filename,time) in quick_errors:
print(f"potential error: {prover} on `{filename}` after {time}")
def main(files, potential_errors=False) -> None:
for f in files:
analyze_file(f, potential_errors=potential_errors)
if __name__ == "__main__":
p = argparse.ArgumentParser('analyze result files')
p.add_argument('files', nargs='+', help='files to analyze')
p.add_argument('--errors', dest='potential_errors', \
action='store_true', help='detect potential errors')
args = p.parse_args()
main(files=args.files, potential_errors=args.potential_errors)
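# Example usage (the result-file name is hypothetical):
#   ./analyze_results.py results.csv --errors
# This prints per-prover sat/unsat/unknown/error counts for each file and, with
# --errors, flags "error" results produced in under 5 seconds as potential errors.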
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import numpy as np
import omegaconf
import torch
import transformers
import wandb
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch import nn
from torch.nn import functional as F
from transformers import AlbertTokenizer
from nemo.collections.tts.helpers.helpers import (
binarize_attention_parallel,
get_mask_from_lengths,
plot_pitch_to_numpy,
plot_spectrogram_to_numpy,
)
from nemo.collections.tts.losses.aligner_loss import BinLoss, ForwardSumLoss
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.modules.fastpitch import average_pitch, regulate_len
from nemo.collections.tts.torch.tts_tokenizers import EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import (
LengthsType,
LogprobsType,
MelSpectrogramType,
ProbsType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
from nemo.utils import logging, model_utils
class MixerTTSModel(SpectrogramGenerator, Exportable):
"""Mixer-TTS and Mixer-TTS-X models (https://arxiv.org/abs/2110.03584) that is used to generate mel spectrogram from text."""
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
# Convert to Hydra 1.0 compatible DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
# Setup normalizer
self.normalizer = None
self.text_normalizer_call = None
self.text_normalizer_call_kwargs = {}
self._setup_normalizer(cfg)
# Setup tokenizer
self.tokenizer = None
self._setup_tokenizer(cfg)
assert self.tokenizer is not None
num_tokens = len(self.tokenizer.tokens)
self.tokenizer_pad = self.tokenizer.pad
self.tokenizer_unk = self.tokenizer.oov
super().__init__(cfg=cfg, trainer=trainer)
self.pitch_loss_scale = cfg.pitch_loss_scale
self.durs_loss_scale = cfg.durs_loss_scale
self.mel_loss_scale = cfg.mel_loss_scale
self.aligner = instantiate(cfg.alignment_module)
self.forward_sum_loss = ForwardSumLoss()
self.bin_loss = BinLoss()
self.add_bin_loss = False
self.bin_loss_scale = 0.0
self.bin_loss_start_ratio = cfg.bin_loss_start_ratio
self.bin_loss_warmup_epochs = cfg.bin_loss_warmup_epochs
self.cond_on_lm_embeddings = cfg.get("cond_on_lm_embeddings", False)
if self.cond_on_lm_embeddings:
self.lm_padding_value = (
self._train_dl.dataset.lm_padding_value
if self._train_dl is not None
else self._get_lm_padding_value(cfg.lm_model)
)
self.lm_embeddings = self._get_lm_embeddings(cfg.lm_model)
self.lm_embeddings.weight.requires_grad = False
self.self_attention_module = instantiate(
cfg.self_attention_module, n_lm_tokens_channels=self.lm_embeddings.weight.shape[1]
)
self.encoder = instantiate(cfg.encoder, num_tokens=num_tokens, padding_idx=self.tokenizer_pad)
self.symbol_emb = self.encoder.to_embed
self.duration_predictor = instantiate(cfg.duration_predictor)
self.pitch_mean, self.pitch_std = float(cfg.pitch_mean), float(cfg.pitch_std)
self.pitch_predictor = instantiate(cfg.pitch_predictor)
self.pitch_emb = instantiate(cfg.pitch_emb)
self.preprocessor = instantiate(cfg.preprocessor)
self.decoder = instantiate(cfg.decoder)
self.proj = nn.Linear(self.decoder.d_model, cfg.n_mel_channels)
def _setup_normalizer(self, cfg):
if "text_normalizer" in cfg:
normalizer_kwargs = {}
if "whitelist" in cfg.text_normalizer:
normalizer_kwargs["whitelist"] = self.register_artifact(
'text_normalizer.whitelist', cfg.text_normalizer.whitelist
)
self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
self.text_normalizer_call = self.normalizer.normalize
if "text_normalizer_call_kwargs" in cfg:
self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
def _setup_tokenizer(self, cfg):
text_tokenizer_kwargs = {}
if "g2p" in cfg.text_tokenizer:
g2p_kwargs = {}
if "phoneme_dict" in cfg.text_tokenizer.g2p:
g2p_kwargs["phoneme_dict"] = self.register_artifact(
'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
)
if "heteronyms" in cfg.text_tokenizer.g2p:
g2p_kwargs["heteronyms"] = self.register_artifact(
'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
)
text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
def _get_lm_model_tokenizer(self, lm_model="albert"):
if getattr(self, "_lm_model_tokenizer", None) is not None:
return self._lm_model_tokenizer
if self._train_dl is not None and self._train_dl.dataset is not None:
self._lm_model_tokenizer = self._train_dl.dataset.lm_model_tokenizer
if lm_model == "albert":
self._lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
return self._lm_model_tokenizer
def _get_lm_embeddings(self, lm_model="albert"):
if lm_model == "albert":
return transformers.AlbertModel.from_pretrained('albert-base-v2').embeddings.word_embeddings
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def _get_lm_padding_value(self, lm_model="albert"):
if lm_model == "albert":
return transformers.AlbertTokenizer.from_pretrained('albert-base-v2')._convert_token_to_id('<pad>')
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def _metrics(
self,
true_durs,
true_text_len,
pred_durs,
true_pitch,
pred_pitch,
true_spect=None,
pred_spect=None,
true_spect_len=None,
attn_logprob=None,
attn_soft=None,
attn_hard=None,
attn_hard_dur=None,
):
text_mask = get_mask_from_lengths(true_text_len)
mel_mask = get_mask_from_lengths(true_spect_len)
loss = 0.0
# Dur loss and metrics
durs_loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')
durs_loss = durs_loss * text_mask.float()
durs_loss = durs_loss.sum() / text_mask.sum()
durs_pred = pred_durs.exp() - 1
durs_pred = torch.clamp_min(durs_pred, min=0)
durs_pred = durs_pred.round().long()
acc = ((true_durs == durs_pred) * text_mask).sum().float() / text_mask.sum() * 100
acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * text_mask).sum().float() / text_mask.sum() * 100
acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * text_mask).sum().float() / text_mask.sum() * 100
pred_spect = pred_spect.transpose(1, 2)
# Mel loss
mel_loss = F.mse_loss(pred_spect, true_spect, reduction='none').mean(dim=-2)
mel_loss = mel_loss * mel_mask.float()
mel_loss = mel_loss.sum() / mel_mask.sum()
loss = loss + self.durs_loss_scale * durs_loss + self.mel_loss_scale * mel_loss
# Aligner loss
bin_loss, ctc_loss = None, None
ctc_loss = self.forward_sum_loss(attn_logprob=attn_logprob, in_lens=true_text_len, out_lens=true_spect_len)
loss = loss + ctc_loss
if self.add_bin_loss:
bin_loss = self.bin_loss(hard_attention=attn_hard, soft_attention=attn_soft)
loss = loss + self.bin_loss_scale * bin_loss
true_avg_pitch = average_pitch(true_pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
# Pitch loss
pitch_loss = F.mse_loss(pred_pitch, true_avg_pitch, reduction='none') # noqa
pitch_loss = (pitch_loss * text_mask).sum() / text_mask.sum()
loss = loss + self.pitch_loss_scale * pitch_loss
return loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss
@torch.jit.unused
def run_aligner(self, text, text_len, text_mask, spect, spect_len, attn_prior):
text_emb = self.symbol_emb(text)
attn_soft, attn_logprob = self.aligner(
spect, text_emb.permute(0, 2, 1), mask=text_mask == 0, attn_prior=attn_prior,
)
attn_hard = binarize_attention_parallel(attn_soft, text_len, spect_len)
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
assert torch.all(torch.eq(attn_hard_dur.sum(dim=1), spect_len))
return attn_soft, attn_logprob, attn_hard, attn_hard_dur
@typecheck(
input_types={
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"text_len": NeuralType(('B',), LengthsType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType(), optional=True),
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"spect_len": NeuralType(('B',), LengthsType(), optional=True),
"attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
},
output_types={
"pred_spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
"attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
"attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
},
)
def forward(self, text, text_len, pitch=None, spect=None, spect_len=None, attn_prior=None, lm_tokens=None):
if self.training:
assert pitch is not None
text_mask = get_mask_from_lengths(text_len).unsqueeze(2)
enc_out, enc_mask = self.encoder(text, text_mask)
# Aligner
attn_soft, attn_logprob, attn_hard, attn_hard_dur = None, None, None, None
if spect is not None:
attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
text, text_len, text_mask, spect, spect_len, attn_prior
)
if self.cond_on_lm_embeddings:
lm_emb = self.lm_embeddings(lm_tokens)
lm_features = self.self_attention_module(
enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
)
# Duration predictor
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)
# Pitch predictor
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
# Avg pitch, add pitch_emb
if not self.training:
if pitch is not None:
pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
else:
pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.cond_on_lm_embeddings:
enc_out = enc_out + lm_features
# Regulate length
len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)
dec_out, dec_lens = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
pred_spect = self.proj(dec_out)
return (
pred_spect,
durs_predicted,
log_durs_predicted,
pitch_predicted,
attn_soft,
attn_logprob,
attn_hard,
attn_hard_dur,
)
def infer(
self,
text,
text_len=None,
text_mask=None,
spect=None,
spect_len=None,
attn_prior=None,
use_gt_durs=False,
lm_tokens=None,
pitch=None,
):
if text_mask is None:
text_mask = get_mask_from_lengths(text_len).unsqueeze(2)
enc_out, enc_mask = self.encoder(text, text_mask)
# Aligner
attn_hard_dur = None
if use_gt_durs:
attn_soft, attn_logprob, attn_hard, attn_hard_dur = self.run_aligner(
text, text_len, text_mask, spect, spect_len, attn_prior
)
if self.cond_on_lm_embeddings:
lm_emb = self.lm_embeddings(lm_tokens)
lm_features = self.self_attention_module(
enc_out, lm_emb, lm_emb, q_mask=enc_mask.squeeze(2), kv_mask=lm_tokens != self.lm_padding_value
)
# Duration predictor
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = torch.clamp(log_durs_predicted.exp() - 1, 0)
# Avg pitch, pitch predictor
if use_gt_durs and pitch is not None:
pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
# Add pitch emb
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.cond_on_lm_embeddings:
enc_out = enc_out + lm_features
if use_gt_durs:
if attn_hard_dur is not None:
len_regulated_enc_out, dec_lens = regulate_len(attn_hard_dur, enc_out)
else:
raise NotImplementedError
else:
len_regulated_enc_out, dec_lens = regulate_len(durs_predicted, enc_out)
dec_out, _ = self.decoder(len_regulated_enc_out, get_mask_from_lengths(dec_lens).unsqueeze(2))
pred_spect = self.proj(dec_out)
return pred_spect
def on_train_epoch_start(self):
bin_loss_start_epoch = np.ceil(self.bin_loss_start_ratio * self._trainer.max_epochs)
# Add bin loss when current_epoch >= bin_start_epoch
if not self.add_bin_loss and self.current_epoch >= bin_loss_start_epoch:
logging.info(f"Using hard attentions after epoch: {self.current_epoch}")
self.add_bin_loss = True
if self.add_bin_loss:
self.bin_loss_scale = min((self.current_epoch - bin_loss_start_epoch) / self.bin_loss_warmup_epochs, 1.0)
def training_step(self, batch, batch_idx):
attn_prior, lm_tokens = None, None
if self.cond_on_lm_embeddings:
audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
else:
audio, audio_len, text, text_len, attn_prior, pitch, _ = batch
spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)
# pitch normalization
zero_pitch_idx = pitch == 0
pitch = (pitch - self.pitch_mean) / self.pitch_std
pitch[zero_pitch_idx] = 0.0
(pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur,) = self(
text=text,
text_len=text_len,
pitch=pitch,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
(loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss,) = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
train_log = {
'train_loss': loss,
'train_durs_loss': durs_loss,
'train_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
'train_mel_loss': mel_loss,
'train_durs_acc': acc,
'train_durs_acc_dist_3': acc_dist_3,
'train_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
'train_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
}
return {'loss': loss, 'progress_bar': {k: v.detach() for k, v in train_log.items()}, 'log': train_log}
def validation_step(self, batch, batch_idx):
attn_prior, lm_tokens = None, None
if self.cond_on_lm_embeddings:
audio, audio_len, text, text_len, attn_prior, pitch, _, lm_tokens = batch
else:
audio, audio_len, text, text_len, attn_prior, pitch, _ = batch
spect, spect_len = self.preprocessor(input_signal=audio, length=audio_len)
# pitch normalization
zero_pitch_idx = pitch == 0
pitch = (pitch - self.pitch_mean) / self.pitch_std
pitch[zero_pitch_idx] = 0.0
(pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur,) = self(
text=text,
text_len=text_len,
pitch=pitch,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
(loss, durs_loss, acc, acc_dist_1, acc_dist_3, pitch_loss, mel_loss, ctc_loss, bin_loss,) = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
# without ground truth internal features except for durations
pred_spect, _, pred_log_durs, pred_pitch, attn_soft, attn_logprob, attn_hard, attn_hard_dur = self(
text=text,
text_len=text_len,
pitch=None,
spect=spect,
spect_len=spect_len,
attn_prior=attn_prior,
lm_tokens=lm_tokens,
)
*_, with_pred_features_mel_loss, _, _ = self._metrics(
pred_durs=pred_log_durs,
pred_pitch=pred_pitch,
true_durs=attn_hard_dur,
true_text_len=text_len,
true_pitch=pitch,
true_spect=spect,
pred_spect=pred_spect,
true_spect_len=spect_len,
attn_logprob=attn_logprob,
attn_soft=attn_soft,
attn_hard=attn_hard,
attn_hard_dur=attn_hard_dur,
)
val_log = {
'val_loss': loss,
'val_durs_loss': durs_loss,
'val_pitch_loss': torch.tensor(1.0).to(durs_loss.device) if pitch_loss is None else pitch_loss,
'val_mel_loss': mel_loss,
'val_with_pred_features_mel_loss': with_pred_features_mel_loss,
'val_durs_acc': acc,
'val_durs_acc_dist_3': acc_dist_3,
'val_ctc_loss': torch.tensor(1.0).to(durs_loss.device) if ctc_loss is None else ctc_loss,
'val_bin_loss': torch.tensor(1.0).to(durs_loss.device) if bin_loss is None else bin_loss,
}
self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)
if batch_idx == 0 and self.current_epoch % 5 == 0 and isinstance(self.logger, WandbLogger):
specs = []
pitches = []
for i in range(min(3, spect.shape[0])):
specs += [
wandb.Image(
plot_spectrogram_to_numpy(spect[i, :, : spect_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_spect.transpose(1, 2)[i, :, : spect_len[i]].data.cpu().numpy()),
caption=f"pred mel {i}",
),
]
pitches += [
wandb.Image(
plot_pitch_to_numpy(
average_pitch(pitch.unsqueeze(1), attn_hard_dur)
.squeeze(1)[i, : text_len[i]]
.data.cpu()
.numpy(),
ylim_range=[-2.5, 2.5],
),
caption=f"gt pitch {i}",
),
]
pitches += [
wandb.Image(
plot_pitch_to_numpy(pred_pitch[i, : text_len[i]].data.cpu().numpy(), ylim_range=[-2.5, 2.5]),
caption=f"pred pitch {i}",
),
]
self.logger.experiment.log({"specs": specs, "pitches": pitches})
@typecheck(
input_types={
"tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),
"tokens_len": NeuralType(('B'), LengthsType(), optional=True),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
"raw_texts": [NeuralType(optional=True)],
"lm_model": NeuralType(optional=True),
},
output_types={"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),},
)
def generate_spectrogram(
self,
tokens: Optional[torch.Tensor] = None,
tokens_len: Optional[torch.Tensor] = None,
lm_tokens: Optional[torch.Tensor] = None,
raw_texts: Optional[List[str]] = None,
norm_text_for_lm_model: bool = True,
lm_model: str = "albert",
):
if tokens is not None:
if tokens_len is None:
# It is assumed that padding is consecutive and only at the end
tokens_len = (tokens != self.tokenizer.pad).sum(dim=-1)
else:
if raw_texts is None:
raise ValueError("raw_texts must be specified if tokens is None")
t_seqs = [self.tokenizer(t) for t in raw_texts]
tokens = torch.nn.utils.rnn.pad_sequence(
sequences=[torch.tensor(t, dtype=torch.long, device=self.device) for t in t_seqs],
batch_first=True,
padding_value=self.tokenizer.pad,
)
tokens_len = torch.tensor([len(t) for t in t_seqs], dtype=torch.long, device=tokens.device)
if self.cond_on_lm_embeddings and lm_tokens is None:
if raw_texts is None:
raise ValueError("raw_texts must be specified if lm_tokens is None")
lm_model_tokenizer = self._get_lm_model_tokenizer(lm_model)
lm_padding_value = lm_model_tokenizer._convert_token_to_id('<pad>')
lm_space_value = lm_model_tokenizer._convert_token_to_id('▁')
assert isinstance(self.tokenizer, EnglishCharsTokenizer) or isinstance(
self.tokenizer, EnglishPhonemesTokenizer
)
if norm_text_for_lm_model and self.text_normalizer_call is not None:
raw_texts = [self.text_normalizer_call(t, **self.text_normalizer_call_kwargs) for t in raw_texts]
preprocess_texts_as_tts_input = [self.tokenizer.text_preprocessing_func(t) for t in raw_texts]
lm_tokens_as_ids_list = [
lm_model_tokenizer.encode(t, add_special_tokens=False) for t in preprocess_texts_as_tts_input
]
if self.tokenizer.pad_with_space:
lm_tokens_as_ids_list = [[lm_space_value] + t + [lm_space_value] for t in lm_tokens_as_ids_list]
lm_tokens = torch.full(
(len(lm_tokens_as_ids_list), max([len(t) for t in lm_tokens_as_ids_list])),
fill_value=lm_padding_value,
device=tokens.device,
)
for i, lm_tokens_i in enumerate(lm_tokens_as_ids_list):
lm_tokens[i, : len(lm_tokens_i)] = torch.tensor(lm_tokens_i, device=tokens.device)
pred_spect = self.infer(tokens, tokens_len, lm_tokens=lm_tokens).transpose(1, 2)
return pred_spect
def parse(self, text: str, normalize=True) -> torch.Tensor:
if self.training:
logging.warning("parse() is meant to be called in eval mode.")
if normalize and self.text_normalizer_call is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
with self.tokenizer.set_phone_prob(prob=1.0):
tokens = self.tokenizer.encode(text)
return torch.tensor(tokens).long().unsqueeze(0).to(self.device)
def _loader(self, cfg):
try:
_ = cfg.dataset.manifest_filepath
except omegaconf.errors.MissingMandatoryValue:
logging.warning("manifest_filepath was skipped. No dataset for this model.")
return None
dataset = instantiate(
cfg.dataset,
text_normalizer=self.normalizer,
text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
text_tokenizer=self.tokenizer,
)
return torch.utils.data.DataLoader( # noqa
dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
)
def setup_training_data(self, cfg):
self._train_dl = self._loader(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self._loader(cfg)
def setup_test_data(self, cfg):
"""Omitted."""
pass
@classmethod
def list_available_models(cls) -> 'List[PretrainedModelInfo]':
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_mixertts",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_mixertts/versions/1.6.0/files/tts_en_lj_mixertts.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent.",
class_=cls, # noqa
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_mixerttsx",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_mixerttsx/versions/1.6.0/files/tts_en_lj_mixerttsx.nemo",
description="This model is trained on LJSpeech sampled at 22050Hz with and can be used to generate female English voices with an American accent.",
class_=cls, # noqa
)
list_of_models.append(model)
return list_of_models
# Methods for model exportability
@property
def input_types(self):
return {
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"lm_tokens": NeuralType(('B', 'T_lm_tokens'), TokenIndex(), optional=True),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
}
def input_example(self, max_text_len=10, max_lm_tokens_len=10):
text = torch.randint(
low=0, high=len(self.tokenizer.tokens), size=(1, max_text_len), device=self.device, dtype=torch.long,
)
inputs = {'text': text}
if self.cond_on_lm_embeddings:
inputs['lm_tokens'] = torch.randint(
low=0,
high=self.lm_embeddings.weight.shape[0],
size=(1, max_lm_tokens_len),
device=self.device,
dtype=torch.long,
)
return (inputs,)
def forward_for_export(self, text, lm_tokens=None):
text_mask = (text != self.tokenizer_pad).unsqueeze(2)
spect = self.infer(text=text, text_mask=text_mask, lm_tokens=lm_tokens).transpose(1, 2)
return spect.to(torch.float)
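# A minimal inference sketch (illustrative only, not part of the original file):
# it assumes NeMo's standard ModelPT.from_pretrained() loader and the NGC
# checkpoint name listed in list_available_models(); a separate vocoder (e.g.
# HiFi-GAN) is still needed to turn the predicted spectrogram into audio.
if __name__ == "__main__":
    model = MixerTTSModel.from_pretrained("tts_en_lj_mixertts").eval()
    spect = model.generate_spectrogram(raw_texts=["Mixer TTS converts text into a mel spectrogram."])
    print(spect.shape)  # (batch, n_mel_channels, T_spec)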
|
"""
Test script using pytest for make_dict
Author: Varisht Ghedia
"""
from make_dict import make_dict
def test_same_size():
"""
Test that it can make dictionaries from lists of same size.
"""
list1 = ['a', 'b', 'c']
list2 = [1,3,3]
assert make_dict(list1, list2) == {'a': 1, 'b': 3, 'c': 3}
def test_large_list2():
"""
Test that it can make dictionaries when list of values is larger than list of keys.
"""
list1 = ['a', 'b', 'c']
list2 = [1,3,3,4]
assert make_dict(list1, list2) == {'a': 1, 'b': 3, 'c': 3}
def test_large_list1():
"""
Test that it can make dictionaries when list of keys is larger than list of values.
"""
list1 = ['a', 'b', 'c', 'd']
list2 = [1,3,4]
assert make_dict(list1, list2) == {'a': 1, 'b': 3, 'c': 4, 'd': None}
|
from gql import Client
from gql.transport.local_schema import LocalSchemaTransport
from . import custom_scalars, graphql_server
class LocalSchemaTest:
async def asyncSetUp(self):
transport = LocalSchemaTransport(schema=graphql_server.schema._schema)
self.client = Client(
transport=transport,
fetch_schema_from_transport=True,
parse_results=True,
serialize_variables=True,
)
async with self.client:
custom_scalars.register_parsers(self.client.schema)
|
from . import db, User, Message
PER_PAGE = 30
class Timeline:
def __init__(self, messages, type, user=None):
self.messages = messages
self.type = type
self.user = user
def to_dict(self):
return {
'messages': [msg.to_dict() for msg in self.messages],
'type': self.type,
'user': self.user.to_dict() if self.user else None,
}
@classmethod
def public(cls):
messages = db.query('''
SELECT messages.*, users.name, users.email
FROM messages
JOIN users ON messages.user_id = users.id
ORDER BY messages.pub_date DESC
LIMIT %s''', [PER_PAGE])
return cls(list(map(cls.format_message, messages)), 'public')
@classmethod
def user(cls, user):
messages = db.query('''
SELECT messages.*, users.name, users.email
FROM messages
JOIN users ON users.id = messages.user_id
WHERE users.id = %s
ORDER BY messages.pub_date DESC
LIMIT %s''', [user.id, PER_PAGE])
return cls(list(map(cls.format_message, messages)), 'user', user)
@classmethod
def following(cls, user):
messages = db.query('''
SELECT messages.*, users.name, users.email
FROM messages
JOIN users ON messages.user_id = users.id
WHERE users.id = %s
OR users.id IN (SELECT whom_id FROM followers WHERE who_id = %s)
ORDER BY messages.pub_date DESC
LIMIT %s''', [user.id, user.id, PER_PAGE])
return cls(list(map(cls.format_message, messages)), 'following', user)
@staticmethod
def format_message(message):
user = {
'id': message['user_id'],
'name': message['name'],
'email': message['email'],
}
message = {
'id': message['id'],
'user_id': message['user_id'],
'text': message['text'],
'pub_date': message['pub_date'],
}
return Message(message, User(user))
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Code is based on examples from https://realpython.com/pysimplegui-python/
import PySimpleGUI as sg
import cv2
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
# Create the setup
def main():
sg.theme("LightBlue6")
# Define the window layout
layout = [
[sg.Image(filename="", key="-IMAGE-")],
[
sg.Button("LOG", size=(10, 1)),
sg.Slider(
(-10, 10),
0,
0.1,
orientation="h",
size=(40, 10),
key="-LOG SLIDER-",
),
sg.Button("GAMMA", size=(10, 1)),
sg.Slider(
(0, 25),
1,
0.1,
orientation="h",
size=(40, 10),
key="-GAMMA SLIDER-",
),
],
[
sg.Button("AVERAGE", size=(10, 1)),
sg.Slider(
(1, 21),
3,
1,
orientation="h",
size=(40, 10),
key="-BLUR SLIDER-",
),
sg.Button("MEDIAN", size=(10, 1)),
sg.Slider(
(1, 21),
3,
1,
orientation="h",
size=(40, 10),
key="-MEDIAN SLIDER-",
),
],
[
sg.Button("HSV_THS", size=(10, 1)),
sg.Text('H low'),
sg.Slider(
(0, 179),
90,
1,
orientation="h",
size=(15, 10),
key="-HSV SLIDER H LOW-",
),
sg.Text('H high'),
sg.Slider(
(0, 179),
179,
1,
orientation="h",
size=(15, 10),
key="-HSV SLIDER H HIGH-",
),
sg.Text('S Low'),
sg.Slider(
(0, 255),
125,
1,
orientation="h",
size=(18, 10),
key="-HSV SLIDER S LOW-",
),
sg.Text('S High'),
sg.Slider(
(0, 255),
255,
1,
orientation="h",
size=(18, 10),
key="-HSV SLIDER S HIGH-",
),
sg.Text('V Low'),
sg.Slider(
(0, 255),
125,
1,
orientation="h",
size=(18, 10),
key="-HSV SLIDER V LOW-",
),
sg.Text('V High'),
sg.Slider(
(0, 255),
255,
1,
orientation="h",
size=(18, 10),
key="-HSV SLIDER V HIGH-",
),
],
[
sg.Button("ERODE", size=(10, 1)),
sg.Slider(
(1, 15),
3,
1,
orientation="h",
size=(40, 10),
key="-ERODE SLIDER-",
),
sg.Button("DILATE", size=(10, 1)),
sg.Slider(
(1, 15),
3,
1,
orientation="h",
size=(40, 10),
key="-DILATE SLIDER-",
),
],
[sg.Button("Reset changes", size=(12, 1)),sg.Button("Histogram", size=(10, 1)),sg.Button("Outline", size=(10, 1)),sg.Button("Exit", size=(10, 1))],
]
# Create the window
window = sg.Window("Bacteria detection", layout, location=(800, 400))# 800,400 - default
img = cv2.imread('test3.jpg')# Change the file here by typing the filename
img = cv2.resize(img, (800, 600))
#(M, N, channels) = img.shape # can be used to check whether the image is read or not
#print(M, N)
image = img.copy()
img_tmp = img.copy()
frame = np.concatenate((img_tmp, image), axis=1)
# Create event loop
while True:
event, values = window.read(timeout=200)
if event == "Exit" or event == sg.WIN_CLOSED:
print("exit")
break
elif event == "Reset changes":
img_tmp = img.copy()
frame = np.concatenate((img_tmp, image), axis=1)
print('ResetRGB')
# Image restoration filters
elif event == "AVERAGE":
b_val = int(values["-BLUR SLIDER-"])
if (b_val % 2) == 0:
b_val = b_val+1
img_tmp = cv2.blur(img_tmp, (b_val, b_val), )
frame = np.concatenate((img_tmp, image), axis=1)
print('average')
elif event == "MEDIAN":
m_val = int(values["-MEDIAN SLIDER-"])
if (m_val % 2) == 0:
m_val = m_val+1
img_tmp = cv2.medianBlur(img_tmp, m_val)
frame = np.concatenate((img_tmp, image), axis=1)
print('median')
#Thresholding
elif event == "HSV_THS":
img_hsv = cv2.cvtColor(img_tmp,cv2.COLOR_BGR2HSV)
lower = np.array([int(values["-HSV SLIDER H LOW-"]),int(values["-HSV SLIDER S LOW-"]),int(values["-HSV SLIDER V LOW-"])])
upper = np.array([int(values["-HSV SLIDER H HIGH-"]),int(values["-HSV SLIDER S HIGH-"]),int(values["-HSV SLIDER V HIGH-"])])
mask = cv2.inRange(img_hsv,lower,upper)
# You can leave the thresholded image in black and white
#img_tmp = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
# Initially, the thresholded mask is applied to image allowing the preservation of bacteria colour
masked_img = cv2.bitwise_and(img_hsv, img_hsv, mask = mask)
img_tmp = cv2.cvtColor(masked_img,cv2.COLOR_HSV2BGR)
frame = np.concatenate((img_tmp, image), axis=1)
print('HSV_THS')
# Image enhancement functions
elif event == "ERODE":
e_val = int(values["-ERODE SLIDER-"])
kernel = np.ones((e_val,e_val),np.uint8)
img_tmp = cv2.erode(img_tmp,kernel,iterations = 1)
frame = np.concatenate((img_tmp, image), axis=1)
print('erode')
elif event == "DILATE":
d_val = int(values["-DILATE SLIDER-"])
kernel = np.ones((d_val,d_val),np.uint8)
img_tmp = cv2.dilate(img_tmp,kernel,iterations = 1)
frame = np.concatenate((img_tmp, image), axis=1)
print('dilate')
# Image transformations functions
elif event == "LOG":
const = values["-LOG SLIDER-"]
# For a gray-scale logarithmic transformation
#img_tmp = cv2.cvtColor(img_tmp,cv2.COLOR_BGR2GRAY)
img_tmp = img_tmp.astype('float')
img_tmp = ((const * (np.log10(img_tmp + 1)))*255).astype("uint8")
#img_tmp = cv2.cvtColor(img_tmp,cv2.COLOR_GRAY2BGR)
frame = np.concatenate((img_tmp, image), axis=1)
print('log')
elif event == "GAMMA":
c = 1.0
gamma = values["-GAMMA SLIDER-"]
print(gamma)
img_tmp = (c*((img_tmp/ 255.0)**gamma)*255).astype("uint8")
frame = np.concatenate((img_tmp, image), axis=1)
print('gamma')
#Draw and display detected bacteria outline
elif event == "Outline":
contours, hier = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_tmp = cv2.drawContours(img_tmp, contours, -1, (180,130,70),2)
frame = np.concatenate((img_tmp, image), axis=1)
print('outline')
#Display statistics about bacteria area (pixel count) as a histogram (x axis - area, y axis bacteria count)
elif event == "Histogram":
contours, hier = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
areas=np.empty([0, 0])
for contour in range(0,len(contours)):
areas = np.append(areas,round(cv2.contourArea(contours[contour]),2))
# If one wants to check the existed areas to choose the best x limit range, then this line can be uncommented:
#print('Areas:',areas)
hist, bins = np.histogram(areas,range(0,len(areas)))
plt.hist(hist, bins, histtype = 'stepfilled')
plt.xlabel("area")
# The limit can be changed by user
plt.xlim(-10,100)
plt.ylabel("number of cells")
plt.title("Histogram")
plt.show()
print('hist')
imgbytes = cv2.imencode(".png", frame)[1].tobytes()
window["-IMAGE-"].update(data=imgbytes)
window.close()
main()
|
from recipe_scrapers.cookstr import Cookstr
from tests import ScraperTest
class TestCookstrScraper(ScraperTest):
scraper_class = Cookstr
def test_host(self):
self.assertEqual("cookstr.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.Cookstr.com/recipes/chocolate-cake-nicole-axworthy",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Chocolate Cake")
def test_total_time(self):
self.assertEqual(60, self.harvester_class.total_time())
def test_total_yields(self):
self.harvester_class.exception_handling = True
self.assertEqual("", self.harvester_class.yields())
def test_ingredients(self):
self.assertCountEqual(
[
"1 recipe Chocolate Cake Mix",
"1/2 cup coffee or water",
"1/2 cup almond or soy milk (vanilla flavor preferred)",
"1/2 cup canola oil",
"1/2 cup pure maple syrup",
"2 tablespoons apple cider vinegar",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"Preheat the oven to 350°F. Lightly grease a 9-inch cake pan with coconut oil or line a 12-cup muffin tin with paper liners.\nIn a large bowl, sift the dry cake mix ingredients using a fine-mesh sieve.\nIn a medium bowl, mix together the coffee, almond milk, oil, maple syrup, and vinegar.\nAdd the liquid ingredients to the bowl with the cake mix and whisk gently until there are no large clumps remaining.\nPour the batter into the prepared pan. Bake for 22 to 27 minutes in the cake pan or 20 to 25 minutes in the muffin tin. The cake/cupcakes can be stored in an airtight container in the fridge for up to 5 days or frozen for 2 to 3 months.",
self.harvester_class.instructions(),
)
|
dic2 = {"name":"홍길동", "job":"도둑", "address":["울릉도", "제주도", "함경도"]}
# Print the whole dictionary
print(dic2)
# Print only the address list
print(dic2["address"])
# Print only "제주도" (the second address)
print(dic2["address"][1])
dic2["age"] = 33
print(dic2)
dic2["name"] = "전우치"
print(dic2)
del dic2["job"]
# dic2에서 "job"이라는 키를 가진 데이터 삭제
print(dic2)
# {'name': '전우치', 'address': ['울릉도', '제주도', '함경도'], 'age': 33}
# del dic2["job"]
# Deleting a key that does not exist raises a KeyError
# dic2에 "address"라는 키가 존재하는지 검사 후 존재하면 그 값을 출력
# 존재하지 않으면 "존재하지 않는 키입니다."라고 출력
if "address" in dic2 :
print(dic2["address"])
else :
print("존재하지 않는 키입니다.")
# Ternary (conditional) expression
print(dic2["job"] if "job" in dic2 else "존재하지 않는 키입니다.")
print(dic2.get("job"))
# Same behavior as above, using get() instead of the in operator
# if dic2.get("address") is not None :
if dic2.get("address") != None :
print(dic2["address"])
else :
print("존재하지 않는 키입니다.")
print("-----------------------")
for key in dic2 :
print(key, ":", dic2[key])
lst = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
print("-----------------------")
# Print the whole list : index 0 ~ len(lst) - 1
for i in range(len(lst)) :
print(lst[i])
# a b c d e f g h i j k
print("-----------------------")
# Print the list from c to h : index 2 ~ len(lst) - 3
for i in range(2, len(lst) - 3) :
print(lst[i])
# c d e f g h
print("-----------------------")
# Print every second element starting from a : index 0, 2, 4, .. , len(lst) - 1
for i in range(0, len(lst), 2) :
print(lst[i])
# a c e g i k
print("-----------------------")
# Print the list in reverse order, from k to a
for i in range(len(lst), 0, -1) :
print(lst[i - 1])
# k j i h g f e d c b a
print("-----------------------")
for i in reversed(range(len(lst))) :
print(lst[i])
# k j i h g f e d c b a
print("-----------------------")
import datetime
now = datetime.datetime.now()
print(now)
print(type(now))
print(now.year, "년", type(now.year))
print(now.month, "월", type(now.month))
print(now.day, "일", type(now.day))
print(now.hour, "시", type(now.hour))
print(now.minute, "분", type(now.minute))
print(now.second, "초", type(now.second))
print(now.microsecond, "마이크로초", type(now.microsecond))
# Months 12, 1, 2 -> "겨울" (winter) / 3, 4, 5 -> "봄" (spring) / 6, 7, 8 -> "여름" (summer) / 9, 10, 11 -> "가을" (autumn)
# Print the season based on today's date
now = datetime.datetime.now()
month = now.month
#if month == 12 or (1 <= month <= 2) : # written this way, readability is poor
# print("겨울")
#elif 3 <= month <= 5 :
if 3 <= month <= 5 :
print("봄")
elif 6 <= month <= 8 :
print("여름")
elif 9 <= month <= 11 :
print("가을")
else :
print("겨울")
i = 0
while i < 10 :
print("{}번째 반복".format(i))
i += 1
# 0번째 반복 .. 9번째 반복
lst = [1, 2, 1, 2, 3, 4]
val = 2
while val in lst :
lst.remove(val)
    # Remove val with remove() until the loop condition fails and the loop exits
print(lst)
# [1, 1, 3, 4]
import time
print(time.time())
cnt = 0
target = time.time() + 2
#while time.time() < target :
# cnt += 1
print("2초 동안 {}번 반복".format(cnt))
# Infinite loop
'''
i = 1
while True :
print("{}번째 반복".format(i))
i += 1
ipt = input("> 종료하시겠습니까? y/n :")
if ipt.lower() == "y" :
print("반복 종료")
break
'''
lst = [1, 3, 12, 15, 4, 55, 9]
# Print only the values in lst that are greater than 10
for item in lst :
if item > 10 :
print(item)
print("-----------------------")
# Using continue
for item in lst :
if item < 10 :
continue
print(item)
print("-----------------------")
# Among the natural numbers up to 100, find and print the least common multiple of 5 and 7
for i in range(1, 100) :
if i % 5 == 0 and i % 7 == 0 :
print(i)
break
print("-----------------------")
# Print the sum of the multiples of 9 among the natural numbers up to 100
ret = 0
i = 0
while i <= 100 :
ret += i
i += 9
print(ret)
print("-----------------------")
# Count the natural numbers up to 1000 that contain the digit 8
cnt = 0
for i in range(1001) :
if str(i).find("8") != -1 :
cnt += 1
print(cnt)
print("-----------------------")
lst = [112, 20, 4, 34, 56]
print(min(lst))
print(max(lst))
print(sum(lst))
lst_r = reversed(lst)
print(lst_r)
print(list(lst_r))
print(enumerate(lst))
print(list(enumerate(lst)))
for i, val in enumerate(lst) :
print(i, ":", val)
print(dic2.items())
for key, val in dic2.items() :
print(key, ":", val)
s = "::".join(["ab", "cd", "ef", "gh", "ij"])
print(s)
# ab::cd::ef::gh::ij
|
# CAN controls for MQB platform Volkswagen, Audi, Skoda and SEAT.
# PQ35/PQ46/NMS, and any future MLB, to come later.
def create_mqb_steering_control(packer, bus, apply_steer, idx, lkas_enabled):
values = {
"SET_ME_0X3": 0x3,
"Assist_Torque": abs(apply_steer),
"Assist_Requested": lkas_enabled,
"Assist_VZ": 1 if apply_steer < 0 else 0,
"HCA_Available": 1,
"HCA_Standby": not lkas_enabled,
"HCA_Active": lkas_enabled,
"SET_ME_0XFE": 0xFE,
"SET_ME_0X07": 0x07,
}
return packer.make_can_msg("HCA_01", bus, values, idx)
def create_mqb_hud_control(packer, bus, enabled, steering_pressed, hud_alert, left_lane_visible, right_lane_visible,
ldw_lane_warning_left, ldw_lane_warning_right, ldw_side_dlc_tlc, ldw_dlc, ldw_tlc,
standstill, left_lane_depart, right_lane_depart):
# Lane color reference:
# 0 (LKAS disabled) - off
# 1 (LKAS enabled, no lane detected) - dark gray
  # 2 (LKAS enabled, lane detected) - light gray on VW, green or white on Audi depending on year or virtual cockpit. On a color MFD on a 2015 A3 TDI it is white; with the virtual cockpit on a 2018 A3 e-Tron it's green.
# 3 (LKAS enabled, lane departure detected) - white on VW, red on Audi
values = {
"LDW_Status_LED_gelb": 1 if enabled and steering_pressed else 0,
"LDW_Status_LED_gruen": 1 if enabled and not steering_pressed else 0,
"LDW_Lernmodus_links": 3 if enabled and left_lane_visible else 1 + left_lane_visible,
"LDW_Lernmodus_rechts": 3 if enabled and right_lane_visible else 1 + right_lane_visible,
"LDW_Texte": hud_alert,
"LDW_SW_Warnung_links": ldw_lane_warning_left,
"LDW_SW_Warnung_rechts": ldw_lane_warning_right,
"LDW_Seite_DLCTLC": ldw_side_dlc_tlc,
"LDW_DLC": ldw_dlc,
"LDW_TLC": ldw_tlc
}
return packer.make_can_msg("LDW_02", bus, values)
def create_mqb_acc_buttons_control(packer, bus, buttonStatesToSend, CS, idx):
values = {
"GRA_Hauptschalter": CS.graHauptschalter,
"GRA_Abbrechen": buttonStatesToSend["cancel"],
"GRA_Tip_Setzen": buttonStatesToSend["setCruise"],
"GRA_Tip_Hoch": buttonStatesToSend["accelCruise"],
"GRA_Tip_Runter": buttonStatesToSend["decelCruise"],
"GRA_Tip_Wiederaufnahme": buttonStatesToSend["resumeCruise"],
"GRA_Verstellung_Zeitluecke": 3 if buttonStatesToSend["gapAdjustCruise"] else 0,
"GRA_Typ_Hauptschalter": CS.graTypHauptschalter,
"GRA_Codierung": 2,
"GRA_Tip_Stufe_2": CS.graTipStufe2,
"GRA_ButtonTypeInfo": CS.graButtonTypeInfo
}
return packer.make_can_msg("GRA_ACC_01", bus, values, idx)
|
'''
Description:
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def helper(self, prev, cur):
if cur:
# locate next hopping node
next_hop = cur.next
# reverse direction
cur.next = prev
return self.helper( cur, next_hop)
else:
# new head of reverse linked list
return prev
def reverseList(self, head: ListNode) -> ListNode:
return self.helper( None, head)
# n : the length of the linked list
## Time Complexity: O( n )
#
# The time overhead is the recursion depth, which is O( n )
## Space Complexity: O( n )
#
# The space overhead is the call stack maintained for the recursion, which is O( n )
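# The "Follow up" above also asks for an iterative version; a sketch of that
# approach (added for illustration) walks the list once, re-pointing each node's
# next pointer to the previous node.
class SolutionIterative:
    def reverseList(self, head: ListNode) -> ListNode:
        prev = None
        cur = head
        while cur:
            next_hop = cur.next  # remember the rest of the list
            cur.next = prev      # reverse the link
            prev = cur
            cur = next_hop
        return prev
## Time Complexity: O( n )
## Space Complexity: O( 1 ), since only a constant number of pointers is kept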
def traverse( node ):
while node:
print( node.val, end = ' ')
node = node.next
return
def test_bench():
'''
1 -> 2 -> 3 -> 4 -> 5
'''
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
head_of_reverse = Solution().reverseList( head )
# expected output:
'''
5 4 3 2 1
'''
traverse( head_of_reverse)
return
if __name__ == '__main__':
test_bench()
|
from core import constants
from core.entities.movable import Movable
from core.entities.particles.projectile import Projectile
from core.texture_manager import TextureManager
class Tank(Movable):
"""
A class representing a tank in the game world.
"""
def __init__(self, **kwargs):
self.texture1 = TextureManager.tank_player_1 # Movement animation frames
self.texture2 = TextureManager.tank_player_2
super().__init__(img=self.texture1, **kwargs)
self.speed = constants.TANK_SPEED
self.move_skip = constants.TANK_MOVE_SKIP
self.scale = constants.TANK_SCALE # For 16x16 pixel textures
self.health = constants.TANK_HEALTH
self.fire_cooldown = 0
self.last_fired_bullet = None
self.bullet_spawn_position = {
(-1, 0): (0, self.rect.height // 2),
(1, 0): (self.rect.width, self.rect.height // 2),
(0, 1): (self.rect.width // 2, self.rect.height),
(0, -1): (self.rect.width // 2, 0)
}
self.track_anim = False
def logic_update(self, game, tick):
super().logic_update(game, tick)
# Movement direction rotation
super().resolve_rotation_4_axis()
# Fire cooldown
if self.fire_cooldown > 0:
self.fire_cooldown -= 1
# Animation
if self.move_dir[0] != 0 or self.move_dir[1] != 0:
if tick % constants.TANK_TRACK_ANIM_DURATION == 0:
self.track_anim = not self.track_anim
if self.track_anim:
self.image = self.texture1
else:
self.image = self.texture2
def shoot(self, game, player_invoked=False) -> None:
"""
Makes the tank shoot by creating a projectile entity.
Each tank can only fire a single projectile at a time and it can't shoot again until
the projectile it previously fired has been destroyed (by hitting something or its lifetime ending).
Also there is a short minimum fire cooldown in place.
"""
if self.can_shoot():
spawn_position = self.bullet_spawn_position[self.facing_direction]
new_bullet = Projectile(self, x=self.rect.x + spawn_position[0], y=self.rect.y + spawn_position[1],
batch=game.batch)
new_bullet.move_dir = self.facing_direction
new_bullet.player_owned = player_invoked
game.entity_manager.add_entity(new_bullet)
self.last_fired_bullet = new_bullet
self.fire_cooldown = constants.TANK_MIN_FIRE_COOLDOWN
def can_shoot(self) -> bool:
"""
Determines whether the tank can currently shoot a new projectile.
"""
        return (self.last_fired_bullet is None or self.last_fired_bullet.to_remove) and self.fire_cooldown <= 0
def damage(self, game, other_entity=None) -> bool:
"""
Applies damage to this tank or destroys it, deleting the entity.
"""
self.health -= 1
if self.health <= 0:
game.entity_manager.remove_entity(self)
return False
|
#---------------------------
# organize imports
#---------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import os
import warnings
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
#---------------------------
# library specific options
#---------------------------
pd.options.display.float_format = '{:,.2f}'.format
sns.set(color_codes=True)
def warn(*args, **kwargs):
pass
warnings.warn = warn
warnings.filterwarnings("ignore", category=FutureWarning)
#---------------------------
# analyze the dataset
#---------------------------
def analyze_dataset(dataset):
print("[INFO] keys : {}".format(dataset.keys()))
print("[INFO] features shape : {}".format(dataset.data.shape))
print("[INFO] target shape : {}".format(dataset.target.shape))
print("[INFO] feature names")
print(dataset.feature_names)
print("[INFO] dataset summary")
print(dataset.DESCR)
df = pd.DataFrame(dataset.data)
print("[INFO] df type : {}".format(type(df)))
print("[INFO] df shape: {}".format(df.shape))
print(df.head())
df.columns = dataset.feature_names
print(df.head())
df["PRICE"] = dataset.target
print(df.head())
print("[INFO] dataset datatypes")
print(df.dtypes)
print("[INFO] dataset statistical summary")
print(df.describe())
# correlation between attributes
print("PEARSON CORRELATION")
print(df.corr(method="pearson"))
sns.heatmap(df.corr(method="pearson"))
plt.savefig("heatmap_pearson.png")
plt.clf()
plt.close()
print("SPEARMAN CORRELATION")
print(df.corr(method="spearman"))
sns.heatmap(df.corr(method="spearman"))
plt.savefig("heatmap_spearman.png")
plt.clf()
plt.close()
print("KENDALL CORRELATION")
print(df.corr(method="kendall"))
sns.heatmap(df.corr(method="kendall"))
plt.savefig("heatmap_kendall.png")
plt.clf()
plt.close()
# show missing values
print(pd.isnull(df).any())
file_report = "boston_housing.txt"
with open(file_report, "w") as f:
f.write("Features shape : {}".format(df.drop("PRICE", axis=1).shape))
f.write("\n")
f.write("Target shape : {}".format(df["PRICE"].shape))
f.write("\n")
f.write("\nColumn names")
f.write("\n")
f.write(str(df.columns))
f.write("\n")
f.write("\nStatistical summary")
f.write("\n")
f.write(str(df.describe()))
f.write("\n")
f.write("\nDatatypes")
f.write("\n")
f.write(str(df.dtypes))
f.write("\n")
f.write("\nPEARSON correlation")
f.write("\n")
f.write(str(df.corr(method="pearson")))
f.write("\n")
f.write("\nSPEARMAN correlation")
f.write("\n")
f.write(str(df.corr(method="spearman")))
f.write("\n")
f.write("\nKENDALL correlation")
f.write("\n")
f.write(str(df.corr(method="kendall")))
f.write("\nMissing Values")
f.write("\n")
f.write(str(pd.isnull(df).any()))
return df
#---------------------------
# visualize the dataset
#---------------------------
def visualize_dataset(df):
colors = ["y", "b", "g", "r"]
cols = list(df.columns.values)
if not os.path.exists("plots/univariate/box"):
os.makedirs("plots/univariate/box")
if not os.path.exists("plots/univariate/density"):
os.makedirs("plots/univariate/density")
# draw a boxplot with vertical orientation
for i, col in enumerate(cols):
sns.boxplot(df[col], color=random.choice(colors), orient="v")
plt.savefig("plots/univariate/box/box_" + str(i) + ".png")
plt.clf()
plt.close()
# draw a histogram and fit a kernel density estimate (KDE)
for i, col in enumerate(cols):
sns.distplot(df[col], color=random.choice(colors))
plt.savefig("plots/univariate/density/density_" + str(i) + ".png")
plt.clf()
plt.close()
if not os.path.exists("plots/multivariate"):
os.makedirs("plots/multivariate")
# bivariate plot between target and feature
for i, col in enumerate(cols):
if (i == len(cols) - 1):
pass
else:
sns.jointplot(x=col, y="PRICE", data=df);
plt.savefig("plots/multivariate/target_vs_" + str(i) + ".png")
plt.clf()
plt.close()
# pairplot
sns.pairplot(df)
plt.savefig("plots/pairplot.png")
plt.clf()
plt.close()
#---------------------------
# train the model
#---------------------------
def train_model(df, dataset):
X = df.drop("PRICE", axis=1)
Y = df["PRICE"]
print(X.shape)
print(Y.shape)
scaler = MinMaxScaler().fit(X)
scaled_X = scaler.transform(X)
seed = 9
test_size = 0.20
X_train, X_test, Y_train, Y_test = train_test_split(scaled_X, Y, test_size = test_size, random_state = seed)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
# user variables to tune
folds = 10
metric = "neg_mean_squared_error"
# hold different regression models in a single dictionary
models = {}
models["Linear"] = LinearRegression()
models["Lasso"] = Lasso()
models["ElasticNet"] = ElasticNet()
models["KNN"] = KNeighborsRegressor()
models["DecisionTree"] = DecisionTreeRegressor()
models["SVR"] = SVR()
models["AdaBoost"] = AdaBoostRegressor()
models["GradientBoost"] = GradientBoostingRegressor()
models["RandomForest"] = RandomForestRegressor()
models["ExtraTrees"] = ExtraTreesRegressor()
# 10-fold cross validation for each model
model_results = []
model_names = []
for model_name in models:
model = models[model_name]
k_fold = KFold(n_splits=folds, random_state=seed)
results = cross_val_score(model, X_train, Y_train, cv=k_fold, scoring=metric)
model_results.append(results)
model_names.append(model_name)
print("{}: {}, {}".format(model_name, round(results.mean(), 3), round(results.std(), 3)))
# box-whisker plot to compare regression models
figure = plt.figure()
figure.suptitle('Regression models comparison')
axis = figure.add_subplot(111)
plt.boxplot(model_results)
axis.set_xticklabels(model_names, rotation = 45, ha="right")
axis.set_ylabel("Mean Squared Error (MSE)")
plt.margins(0.05, 0.1)
plt.savefig("model_mse_scores.png")
plt.clf()
plt.close()
# create and fit the best regression model
best_model = GradientBoostingRegressor(random_state=seed)
best_model.fit(X_train, Y_train)
# make predictions using the model
predictions = best_model.predict(X_test)
print("[INFO] MSE : {}".format(round(mean_squared_error(Y_test, predictions), 3)))
# plot between predictions and Y_test
x_axis = np.array(range(0, predictions.shape[0]))
plt.plot(x_axis, predictions, linestyle="--", marker="o", alpha=0.7, color='r', label="predictions")
plt.plot(x_axis, Y_test, linestyle="--", marker="o", alpha=0.7, color='g', label="Y_test")
plt.xlabel('Row number')
plt.ylabel('PRICE')
plt.title('Predictions vs Y_test')
plt.legend(loc='lower right')
plt.savefig("predictions_vs_ytest.png")
plt.clf()
plt.close()
# plot model's feature importance
feature_importance = best_model.feature_importances_
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, dataset.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig("feature_importance.png")
plt.clf()
plt.close()
#--------------------------
# MAIN FUNCTION
#--------------------------
if __name__ == '__main__':
dataset = load_boston()
df = analyze_dataset(dataset)
visualize_dataset(df)
train_model(df, dataset)
|
__author__ = 'NovikovII'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Read the integer width and height of a rectangle with separate input statements, then create a function (def) that takes the figure's width and height as parameters.
Inside the function, create two nested functions (lambda) that compute the area and the perimeter of the figure.
Print the area and the perimeter on one line, separated by a space (for example '38 90').
'''
def rectangle(a, b, name='perimeter'):
f = [lambda a, b: (a + b) * 2, lambda a, b: a * b]
s = {'3': lambda a, b: (a + b) * 2, '4': lambda a, b: a * b}
if name == 'perimeter':
return f[0](a,b)
else:
return s['4'](a,b)
#a = int(input())
#b = int(input())
a = 5
b = 6
print(rectangle(a, b, name=''), rectangle(a, b, name='perimeter'))
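# A more direct sketch of the same exercise (an illustrative alternative, not part
# of the original submission): the two nested lambdas compute area and perimeter,
# and both values can be printed on one line separated by a space.
def rectangle_simple(width, height):
    area = lambda w, h: w * h
    perimeter = lambda w, h: (w + h) * 2
    return area(width, height), perimeter(width, height)

#print("{} {}".format(*rectangle_simple(a, b)))  # for a=5, b=6 -> "30 22"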
|
"""
File that contains the Python code to be executed in "group_by_tag" Notebook.
"""
TAG_TABLE_HEADER = """\
<table>
<tr>
<td colspan="3" class="group_by_header">
Tag i
</td>
</tr>
"""
# 21/09/2018 16h57m :)
|
"""JSON serializers and deserializers for common datatypes."""
import datetime as dt
def numpy_encode(obj):
"""Encode a numpy array."""
return obj.tolist()
def numpy_decode(obj):
"""Decode a numpy array."""
import numpy # noqa
return numpy.array(obj)
def datetime_encode(obj):
"""Encode a datetime."""
return obj.isoformat()
def datetime_decode(obj):
"""Decode a datetime."""
return dt.datetime.fromisoformat(obj)
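# A small round-trip sketch (added for illustration; how these helpers are
# registered with a serializer is not shown in this file, so they are called
# directly here):
if __name__ == "__main__":
    import json
    import numpy as np
    payload = {
        "when": datetime_encode(dt.datetime(2021, 9, 21, 16, 57)),
        "values": numpy_encode(np.array([1.0, 2.0, 3.0])),
    }
    text = json.dumps(payload)
    decoded = json.loads(text)
    print(datetime_decode(decoded["when"]), numpy_decode(decoded["values"]))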
|
# -*- coding: utf-8 -*-
from copy import copy
from reportlab.platypus import (SimpleDocTemplate, Paragraph, Spacer, Table,
TableStyle)
#from reportlab.lib import colors
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from .personal import address_paragraph
PAGE_HEIGHT = 792.0
PAGE_WIDTH = 612.0
stylesheet = getSampleStyleSheet()
TABLE_STYLE = TableStyle([
('ALIGN', (2,0), (-1,-1), 'RIGHT'),
('VALIGN', (0,0), (-1,-1), 'TOP'),
])
#('LINEABOVE', (0,0), (-1,0), 2, colors.green),
#('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
#('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
class Layout(object):
def __init__(self, title):
self.title = title
def myFirstPage(self, canvas, doc):
canvas.saveState()
canvas.setFont('Times-Bold',16)
canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-108, self.title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Invoice page %d" % (doc.page,))
canvas.restoreState()
def myLaterPages(self, canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Invoice page %d" % (doc.page,))
canvas.restoreState()
def format_invoice(title, entries):
doc = SimpleDocTemplate('invoice.pdf', pagesize=letter)
style = stylesheet["Normal"]
times = copy(style)
times.fontName = 'Times-Roman'
times.fontSize = 12
times.leading = 14
story = [Spacer(1,0.75*inch), Paragraph(address_paragraph, times)]
entries = sorted(entries, key=lambda entry: entry.start)
data = [[u'', u'', u'Time', u'Rate', u'Total']]
for i, entry in enumerate(entries):
d0 = entry.start.date()
d1 = entry.end.date()
if d0 == d1:
dates = '{:%B %d}'.format(d0).replace(u' 0', u' ')
else:
dates = u'{:%b %d} – {:%b %d}'.format(d0, d1).replace(u' 0', u' ')
h, m = divmod(entry.minutes, 60)
elapsed = u'{}:{:02}'.format(int(h), int(m))
p = Paragraph(entry.description, style)
#story.append(p)
#story.append(Spacer(1,0.2*inch))
dollar = u'' if i else u'$'
data.append([dates, p, elapsed,
u'{} {:,}'.format(dollar, entry.rate),
u'{} {:,}'.format(dollar, entry.amount)])
total_amount = sum(entry.amount for entry in entries)
total_elapsed = sum(entry.minutes for entry in entries)
data.append([u'', u'Total ' + '.' * 80,
u'{}:{:02}'.format(*divmod(total_elapsed, 60)),
u'', u'$ {:,}'.format(total_amount)])
table = Table(data, colWidths=[84.0, 260.0, 36.0, 30.0, 40.0],
style=TABLE_STYLE)
story.append(table)
layout = Layout(title)
doc.build(
story,
onFirstPage=layout.myFirstPage,
onLaterPages=layout.myLaterPages,
)
|
import sys
sys.path.append("python")
from PIL import Image
from PIL import ImageCms
import os
from CompareOverHtml import createCompareHtml
rootpath = "./greyramp-fulltv"
if not os.path.exists(rootpath):
os.makedirs(rootpath)
colwidth = 4
height = 150
width = 256*colwidth
img = Image.new( mode = "RGB", size = (width, height) )
for icol in range(0, 256):
imgpaste = Image.new( mode = "RGB", size = (colwidth, height), color=(icol, icol, icol) )
img.paste(imgpaste, box=(icol*colwidth, 0))
source_image = os.path.join(rootpath, "greyscale-raw.png")
img.save(source_image)
#source_image = os.path.join(rootpath, "greyscale-srgb-photoshop.png")
listimages = []
# Now let's make the mp4s.
listimages.append({'id': 'none', 'label': 'Test-1: Greyramp'})
listimages.append({'id': 'greyramppng', 'label': 'Source png file', 'description': '', 'image': os.path.join("..", source_image)})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 1 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/greyscale-{fileext}.mp4'.format(outrange="tv", source_image=source_image, fileext="tv", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'greytv', 'label': 'Normal encode', 'video': "greyscale-tv.mp4", 'description': 'Using the PNG greyramp, with the normal "TV" out range of 16-235, and using yuv420p encoding. ', 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/greyscale-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="full", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'greyfull', 'label': 'Full range encoding.', 'description': 'Greyramp encoded using out_range=full yuv420p encoding; here we also set color_range=2 so the decoder knows to process it correctly.', 'video': "greyscale-full.mp4", 'cmd': cmd})
source_image = "../sourceimages/radialgrad.png"
listimages.append({'id': 'none', 'label': 'Test-2: Radial gradient'})
listimages.append({'id': 'radialgradpng', 'label': 'Source png file', 'description': 'This is a less forgiving test image', 'image': os.path.join("..", source_image)})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 1 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="tv", source_image=source_image, fileext="tv", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradtv', 'label': 'Normal encode', 'description': 'Using the radial gradient with the normal "TV" out range and yuv420p encoding.', 'video': "radialgrad-tv.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="full", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradfull', 'label': 'Full range encoding.', 'description': 'Using the full range yuv420p encoding with color_range=2', 'video': "radialgrad-full.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuvj420p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="fullj", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradfullj', 'label': 'Alternate full range encoding', 'description': 'This is an older alternative to full-range encoding, which ffmpeg is currently deprecating.', 'video': "radialgrad-fullj.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 0 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="full0", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradfull0', 'label': 'Using full range without color_range flag', 'description': 'This is what happens if you specify full range without the color_range flag (color_range=0). DO NOT DO THIS!', 'video': "radialgrad-full0.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv444p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="full444", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradfull444', 'label': 'Full range with yuv444 (chrome only)', 'description': 'This is testing yuv444p (still 8-bit); this will only work on Chrome.', 'video': "radialgrad-full444.mp4", 'cmd': cmd})
#cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx265 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/radialgrad-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="h265", rootpath=rootpath)
#os.system(cmd)
#listimages.append({'id': 'radialgradh265', 'label': 'Full range with yuv420 h265', 'description': 'This is testing yuv444p but still 8-bit, this will only work on chrome.', 'video': "radialgrad-h265.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -c:v libx264 -t 5 -preset placebo -qp 0 -x264-params "keyint=15:no-deblock=1" -pix_fmt yuv444p10le -sws_flags spline+accurate_rnd+full_chroma_int -vf "colorspace=bt709:iall=bt601-6-625:fast=1" {rootpath}/radialgrad-raw-10bit.mp4'.format(source_image=source_image, rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradtv10', 'label': '10-bit encoding, tv-range (chrome only)', 'description': 'This is testing 10-bit encoding, yuv444p10le', 'video': "radialgrad-raw-10bit.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -c:v libx264rgb -t 5 -preset placebo -qp 0 -x264-params "keyint=15:no-deblock=1" {rootpath}/radialgrad-raw-rgb.mp4'.format(source_image=source_image, rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'radialgradrgb', 'label': 'This is using RGB encoding', 'description': 'libx264rgb 8-bit mp4 (not supported)', 'video': "radialgrad-raw-rgb.mp4", 'cmd': cmd})
source_image = "../sourceimages/Digital_LAD_raw.png"
listimages.append({'id': 'none', 'label': 'Test-3: Marcie'})
listimages.append({'id': 'marcieraw', 'label': 'Source png file', 'image': os.path.join("..", source_image), 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 1 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/marcie-{fileext}.mp4'.format(outrange="tv", source_image=source_image, fileext="tv", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'marcietv', 'label': 'Normal encoding', 'description': 'marcie out_range=tv -pix_fmt yuv420p 16-235 range', 'video': "marcie-tv.mp4", 'cmd': cmd})
cmd = 'ffmpeg -y -loop 1 -i {source_image} -sws_flags spline+accurate_rnd+full_chroma_int -vf "scale=in_range=full:in_color_matrix=bt709:out_range={outrange}:out_color_matrix=bt709" -c:v libx264 -t 5 -pix_fmt yuv420p -qscale:v 1 -color_range 2 -colorspace 1 -color_primaries 1 -color_trc 13 {rootpath}/marcie-{fileext}.mp4'.format(outrange="full", source_image=source_image, fileext="full", rootpath=rootpath)
os.system(cmd)
listimages.append({'id': 'marciefull', 'label': 'Full Range', 'video': "marcie-full.mp4", 'cmd': cmd})
createCompareHtml(outputpath=rootpath+"/compare.html",
listimages=listimages,
                  introduction="<H1>Full range vs TV Range</H1><p> Comparing full range encoding vs. TV range, but also yuv420p vs. yuvj420p. We believe that this is well supported in web browsers, and don't see a downside to it. There may be cases where other applications do not read it. The code to generate these files is <a href='../%s'>here</a>. </p>" % os.path.basename(__file__),
videohtml = ' width=920 ')
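# Optional sanity check (added for illustration): ffprobe can confirm which colour
# metadata actually landed in each encode. pix_fmt/color_range/color_space/
# color_transfer/color_primaries are standard ffprobe stream entries.
for entry in listimages:
    if 'video' in entry:
        os.system('ffprobe -v error -select_streams v:0 '
                  '-show_entries stream=pix_fmt,color_range,color_space,color_transfer,color_primaries '
                  '-of default=noprint_wrappers=1 {rootpath}/{video}'.format(rootpath=rootpath,
                                                                             video=entry['video']))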
|
""" This is custom bot class """
import json
import aiohttp
from discord.ext import commands
import motor.motor_asyncio
#check discord.py rewrite documentation for why AutoShardedBot was used.
class Bot(commands.AutoShardedBot):
"""
    This is a custom bot which extends the commands.AutoShardedBot class and provides
    a configuration handler and a common aiohttp ClientSession.
"""
def __init__(self, *args, **kwargs):
"""`config_file`: A string representing name of .json file which contains
configurations for bot. Can be anything but will default to `config.json`
Instance variables:
`session` - An `aiohttp.ClientSession` used for performing API hooks.
`config` - A `dictionary` containing key value pairs represnting bot configs.
"""
#pass arguments to constructor of parent class
super().__init__(*args, **kwargs)
self.config = {}
        #in case I ever forget why the kwargs.get method is used (god help me), just google it
self.config_file = kwargs.get("config_file","config.json")
        #also apparently you can do Bot.loop, did not know this.
self.session = aiohttp.ClientSession(loop = self.loop)
#create mongoDB client. MongoDB database server must be running in background
self.DBClient = motor.motor_asyncio.AsyncIOMotorClient()
        #create Bot database
self.db = self.DBClient.RoboX86
        #get settings of all guilds the bot is part of; it's basically a collection
#self.guildSettings = self.db.guilds
#get collection containing details of all users subscribed to one piece updates
self.onePieceCollection = self.db.onePieceCollection
#get collection containing details of all users subscribed to unOrdinary updates
self.unOrdinaryCollection = self.db.unOrdinaryCollection
def load_config(self, filename: str=None):
"""
        Load config from a .JSON file. If no filename is specified, defaults to
        `Bot.config_file`.
"""
if not filename:
filename = self.config_file
#pro tip: google difference between json `load,dump` and `loads,dumps`.
with open(filename) as file_object:
config = json.load(file_object)
#also google `isinstance` vs `type`. Hint: `isinstance` is better.
if isinstance(config,dict):
for key,value in config.items():
self.config[key] = value
def save_config(self, filename:str=None):
"""save config to a .JSON file. Defaults to `Bot.config_file`."""
if not filename:
filename = self.config_file
with open(filename,'w') as file_object:
            json.dump(self.config, file_object, indent=4, sort_keys=True)
#add database connection..
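# --- Launch sketch (added for illustration; assumes the discord.py "rewrite"/1.x API
# this module targets, a local MongoDB server, and a config.json holding a "token" key) ---
if __name__ == "__main__":
    bot = Bot(command_prefix="!", config_file="config.json")
    bot.load_config()
    bot.run(bot.config["token"])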
|
import logging
import copy
import glob
import math
import os
import numpy as np
import tqdm
import pickle
from util import io_util
from info import data_info, hardware_info
from data_class import global_model_data, grouped_op_unit_data
import global_model_config
from type import OpUnit, ConcurrentCountingMode, Target, ExecutionFeature
def get_data(input_path, mini_model_map, model_results_path, warmup_period, use_query_predict_cache, add_noise,
ee_sample_interval, txn_sample_interval, network_sample_interval):
"""Get the data for the global models
Read from the cache if exists, otherwise save the constructed data to the cache.
:param input_path: input data file path
:param mini_model_map: mini models used for prediction
:param model_results_path: directory path to log the result information
:param warmup_period: warmup period for pipeline data
    :param use_query_predict_cache: whether to cache the prediction result per query for acceleration
:param add_noise: whether to add noise to the cardinality estimations
:param ee_sample_interval: sampling interval for the EE OUs
:param txn_sample_interval: sampling interval for the transaction OUs
:param network_sample_interval: sampling interval for the network OUs
:return: (GlobalResourceData list, GlobalImpactData list)
"""
cache_file = input_path + '/global_model_data.pickle'
headers_file = input_path + '/global_model_headers.pickle'
if os.path.exists(cache_file):
with open(cache_file, 'rb') as pickle_file:
resource_data_list, impact_data_list, data_info.RAW_FEATURES_CSV_INDEX, data_info.RAW_TARGET_CSV_INDEX, data_info.INPUT_CSV_INDEX, data_info.TARGET_CSV_INDEX = pickle.load(pickle_file)
else:
data_list = _get_grouped_opunit_data_with_prediction(input_path, mini_model_map, model_results_path,
warmup_period, use_query_predict_cache, add_noise,
ee_sample_interval, txn_sample_interval,
network_sample_interval)
resource_data_list, impact_data_list = _construct_interval_based_global_model_data(data_list,
model_results_path)
with open(cache_file, 'wb') as file:
pickle.dump((resource_data_list, impact_data_list, data_info.RAW_FEATURES_CSV_INDEX, data_info.RAW_TARGET_CSV_INDEX, data_info.INPUT_CSV_INDEX, data_info.TARGET_CSV_INDEX), file)
return resource_data_list, impact_data_list
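# Note (added for clarity): delete '<input_path>/global_model_data.pickle' to force
# get_data() to rebuild the resource/impact data instead of loading the cached pickle above.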
def _get_grouped_opunit_data_with_prediction(input_path, mini_model_map, model_results_path, warmup_period,
use_query_predict_cache, add_noise, ee_sample_interval,
txn_sample_interval, network_sample_interval):
"""Get the grouped opunit data with the predicted metrics and elapsed time
:param input_path: input data file path
:param mini_model_map: mini models used for prediction
:param model_results_path: directory path to log the result information
:param warmup_period: warmup period for pipeline data
:return: The list of the GroupedOpUnitData objects
"""
data_list = _get_data_list(input_path, warmup_period, ee_sample_interval, txn_sample_interval,
network_sample_interval)
_predict_grouped_opunit_data(data_list, mini_model_map, model_results_path, use_query_predict_cache, add_noise)
logging.info("Finished GroupedOpUnitData prediction with the mini models")
return data_list
def _construct_interval_based_global_model_data(data_list, model_results_path):
"""Construct the GlobalImpactData used for the global model training
:param data_list: The list of GroupedOpUnitData objects
:param model_results_path: directory path to log the result information
:return: (GlobalResourceData list, GlobalImpactData list)
"""
prediction_path = "{}/global_resource_data.csv".format(model_results_path)
io_util.create_csv_file(prediction_path, ["Elapsed us", "# Concurrent OpUnit Groups"])
start_time_list = sorted([d.get_start_time(ConcurrentCountingMode.INTERVAL) for d in data_list])
rounded_start_time_list = [_round_to_second(start_time_list[0])]
# Map from interval start time to the data in this interval
interval_data_map = {rounded_start_time_list[0]: []}
# Get all the interval start times and initialize the map
for t in start_time_list:
rounded_time = _round_to_second(t)
if rounded_time > rounded_start_time_list[-1]:
rounded_start_time_list.append(rounded_time)
interval_data_map[rounded_time] = []
for data in tqdm.tqdm(data_list, desc="Find Interval Data"):
# For each data, find the intervals that might overlap with it
interval_start_time = _round_to_second(data.get_start_time(ConcurrentCountingMode.EXACT) -
global_model_config.INTERVAL_SIZE + global_model_config.INTERVAL_SEGMENT)
while interval_start_time <= data.get_end_time(ConcurrentCountingMode.ESTIMATED):
if interval_start_time in interval_data_map:
interval_data_map[interval_start_time].append(data)
interval_start_time += global_model_config.INTERVAL_SEGMENT
# Get the global resource data
resource_data_map = {}
for start_time in tqdm.tqdm(rounded_start_time_list, desc="Construct GlobalResourceData"):
resource_data_map[start_time] = _get_global_resource_data(start_time, interval_data_map[start_time],
prediction_path)
# Now construct the global impact data
impact_data_list = []
for data in data_list:
interval_start_time = _round_to_second(data.get_start_time(ConcurrentCountingMode.INTERVAL))
resource_data_list = []
while interval_start_time <= data.get_end_time(ConcurrentCountingMode.ESTIMATED):
if interval_start_time in resource_data_map:
resource_data_list.append(resource_data_map[interval_start_time])
interval_start_time += global_model_config.INTERVAL_SIZE
impact_data_list.append(global_model_data.GlobalImpactData(data, resource_data_list))
return list(resource_data_map.values()), impact_data_list
def _round_to_second(time):
"""
:param time: in us
:return: time in us rounded to the earliest second
"""
return time - time % 1000000
def _get_global_resource_data(start_time, concurrent_data_list, log_path):
"""Get the input feature and the target output for the global resource utilization metrics during an interval
The calculation is adjusted by the overlapping ratio between the opunit groups and the time range.
:param start_time: of the interval
:param concurrent_data_list: the concurrent running opunit groups
:param log_path: the file path to log the data construction results
:return: (the input feature, the resource utilization on the other logical core of the same physical core,
the output resource targets)
"""
# Define a secondary_counting_mode corresponding to the concurrent_counting_mode to derive the concurrent operations
# in different scenarios
end_time = start_time + global_model_config.INTERVAL_SIZE - 1
elapsed_us = global_model_config.INTERVAL_SIZE
# The adjusted resource metrics per logical core.
# TODO: Assuming each physical core has two logical cores via hyper threading for now. Can extend to other scenarios
physical_core_num = hardware_info.PHYSICAL_CORE_NUM
adjusted_x_list = [0] * 2 * physical_core_num
adjusted_y = 0
logging.debug(concurrent_data_list)
logging.debug("{} {}".format(start_time, end_time))
for data in concurrent_data_list:
data_start_time = data.get_start_time(ConcurrentCountingMode.ESTIMATED)
data_end_time = data.get_end_time(ConcurrentCountingMode.ESTIMATED)
ratio = _calculate_range_overlap(start_time, end_time, data_start_time, data_end_time) / (data_end_time -
data_start_time + 2)
#print(start_time, end_time, data_start_time, data_end_time, data_end_time - data_start_time + 1)
sample_interval = data.sample_interval
logging.debug("{} {} {}".format(data_start_time, data_end_time, ratio))
logging.debug("{} {}".format(data.y, data.y_pred))
logging.debug("Sampling interval: {}".format(sample_interval))
# Multiply the resource metrics based on the sampling interval
adjusted_y += data.y * ratio * (sample_interval + 1)
cpu_id = data.cpu_id
if cpu_id > physical_core_num:
cpu_id -= physical_core_num
# Multiply the mini-model predictions based on the sampling interval
adjusted_x_list[cpu_id] += data.y_pred * ratio * (sample_interval + 1)
# change the number to per time unit (us) utilization
for x in adjusted_x_list:
x /= elapsed_us
adjusted_y /= elapsed_us
sum_adjusted_x = np.sum(adjusted_x_list, axis=0)
std_adjusted_x = np.std(adjusted_x_list, axis=0)
ratio_error = abs(adjusted_y - sum_adjusted_x) / (adjusted_y + 1e-6)
logging.debug(sum_adjusted_x)
logging.debug(adjusted_y)
logging.debug("")
io_util.write_csv_result(log_path, elapsed_us, [len(concurrent_data_list)] + list(sum_adjusted_x) + [""] +
list(adjusted_y) + [""] + list(ratio_error))
adjusted_x = np.concatenate((sum_adjusted_x, std_adjusted_x))
return global_model_data.GlobalResourceData(start_time, adjusted_x_list, adjusted_x, adjusted_y)
def _calculate_range_overlap(start_timel, end_timel, start_timer, end_timer):
return min(end_timel, end_timer) - max(start_timel, start_timer) + 1
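# Worked example (added for illustration, using a hypothetical 1-second interval in us):
# an interval starting at 3,000,000 us covers [3,000,000, 3,999,999]; an OU group running
# from 3,500,000 to 4,200,000 overlaps it for exactly 500,000 us.
assert _calculate_range_overlap(3000000, 3999999, 3500000, 4200000) == 500000
assert _round_to_second(4200000) == 4000000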
def _get_data_list(input_path, warmup_period, ee_sample_interval, txn_sample_interval,
network_sample_interval):
"""Get the list of all the operating units (or groups of operating units) stored in GlobalData objects
:param input_path: input data file path
:param warmup_period: warmup period for pipeline data
:return: the list of all the operating units (or groups of operating units) stored in GlobalData objects
"""
data_list = []
# First get the data for all mini runners
for filename in glob.glob(os.path.join(input_path, '*.csv')):
data_list += grouped_op_unit_data.get_grouped_op_unit_data(filename, warmup_period,
ee_sample_interval, txn_sample_interval,
network_sample_interval)
logging.info("Loaded file: {}".format(filename))
return data_list
def _add_estimation_noise(opunit, x):
"""Add estimation noise to the OUs that may use the cardinality estimation
"""
if opunit not in data_info.OUS_USING_CAR_EST:
return
tuple_num_index = data_info.INPUT_CSV_INDEX[ExecutionFeature.NUM_ROWS]
cardinality_index = data_info.INPUT_CSV_INDEX[ExecutionFeature.EST_CARDINALITIES]
tuple_num = x[tuple_num_index]
cardinality = x[cardinality_index]
if tuple_num > 1000:
logging.debug("Adding noise to tuple num (%)".format(tuple_num))
x[tuple_num_index] += np.random.normal(0, tuple_num * 0.3)
x[tuple_num_index] = max(1, x[tuple_num_index])
if cardinality > 1000:
logging.debug("Adding noise to cardinality (%)".format(x[cardinality_index]))
x[cardinality_index] += np.random.normal(0, cardinality * 0.3)
x[cardinality_index] = max(1, x[cardinality_index])
def _predict_grouped_opunit_data(data_list, mini_model_map, model_results_path, use_query_predict_cache, add_noise):
"""Use the mini-runner to predict the resource consumptions for all the GlobalData, and record the prediction
result in place
:param data_list: The list of the GroupedOpUnitData objects
:param mini_model_map: The trained mini models
:param model_results_path: file path to log the prediction results
    :param use_query_predict_cache: whether to cache the prediction result per query for acceleration
:param add_noise: whether to add noise to the cardinality estimations
"""
prediction_path = "{}/grouped_opunit_prediction.csv".format(model_results_path)
pipeline_path = "{}/grouped_pipeline.csv".format(model_results_path)
io_util.create_csv_file(prediction_path, ["Pipeline", "", "Actual", "", "Predicted", "", "Ratio Error"])
io_util.create_csv_file(pipeline_path, ["Number", "Percentage", "Pipeline", "Actual Us", "Predicted Us",
"Us Error", "Absolute Us", "Absolute Us %"])
# Track pipeline cumulative numbers
num_pipelines = 0
total_actual = None
total_predicted = []
actual_pipelines = {}
predicted_pipelines = {}
count_pipelines = {}
query_prediction_path = "{}/grouped_query_prediction.csv".format(model_results_path)
io_util.create_csv_file(query_prediction_path, ["Query", "", "Actual", "", "Predicted", "", "Ratio Error"])
current_query_id = None
query_y = None
query_y_pred = None
# Have to use a prediction cache when having lots of global data...
prediction_cache = {}
# use a prediction cache based on queries to accelerate
query_prediction_cache = {}
# First run a prediction on the global running data with the mini model results
for i, data in enumerate(tqdm.tqdm(data_list, desc="Predict GroupedOpUnitData")):
y = data.y
if data.name[0] != 'q' or (data.name not in query_prediction_cache) or not use_query_predict_cache:
logging.debug("{} pipeline elapsed time: {}".format(data.name, y[-1]))
pipeline_y_pred = 0
for opunit_feature in data.opunit_features:
opunit = opunit_feature[0]
opunit_model = mini_model_map[opunit]
x = np.array(opunit_feature[1]).reshape(1, -1)
if add_noise:
_add_estimation_noise(opunit, x[0])
key = (opunit, x.tobytes())
if key not in prediction_cache:
y_pred = opunit_model.predict(x)
y_pred = np.clip(y_pred, 0, None)
prediction_cache[key] = y_pred
else:
y_pred = prediction_cache[key]
logging.debug("Predicted {} elapsed time with feature {}: {}".format(opunit_feature[0].name,
x[0], y_pred[0, -1]))
if opunit in data_info.MEM_ADJUST_OPUNITS:
                    # Compute the number of "slots" (based on the row feature or the cardinality feature)
num_tuple = opunit_feature[1][data_info.INPUT_CSV_INDEX[ExecutionFeature.NUM_ROWS]]
if opunit == OpUnit.AGG_BUILD:
num_tuple = opunit_feature[1][data_info.INPUT_CSV_INDEX[ExecutionFeature.EST_CARDINALITIES]]
# SORT/AGG/HASHJOIN_BUILD all allocate a "pointer" buffer
# that contains the first pow2 larger than num_tuple entries
pow_high = 2 ** math.ceil(math.log(num_tuple, 2))
buffer_size = pow_high * data_info.POINTER_SIZE
if opunit == OpUnit.AGG_BUILD and num_tuple <= 256:
# For AGG_BUILD, if slots <= AggregationHashTable::K_DEFAULT_INITIAL_TABLE_SIZE
# the buffer is not recorded as part of the pipeline
buffer_size = 0
pred_mem = y_pred[0][data_info.TARGET_CSV_INDEX[Target.MEMORY_B]]
if pred_mem <= buffer_size:
logging.debug("{} feature {} {} with prediction {} exceeds buffer {}"
.format(data.name, opunit_feature, opunit_feature[1], y_pred[0], buffer_size))
# For hashjoin_build, there is still some inaccuracy due to the
# fact that we do not know about the hash table's load factor.
scale = data_info.INPUT_CSV_INDEX[ExecutionFeature.MEM_FACTOR]
adj_mem = (pred_mem - buffer_size) * opunit_feature[1][scale] + buffer_size
# Don't modify prediction cache
y_pred = copy.deepcopy(y_pred)
y_pred[0][data_info.TARGET_CSV_INDEX[Target.MEMORY_B]] = adj_mem
pipeline_y_pred += y_pred[0]
pipeline_y = copy.deepcopy(pipeline_y_pred)
query_prediction_cache[data.name] = pipeline_y
else:
pipeline_y_pred = query_prediction_cache[data.name]
pipeline_y = copy.deepcopy(pipeline_y_pred)
# Grouping when we're predicting queries
if data.name[0] == 'q':
query_id = data.name[1:data.name.rfind(" p")]
if query_id != current_query_id:
if current_query_id is not None:
io_util.write_csv_result(query_prediction_path, current_query_id, [""] + list(query_y) + [""] +
list(query_y_pred) + [""] +
list(abs(query_y - query_y_pred) / (query_y + 1)))
current_query_id = query_id
query_y = copy.deepcopy(y)
query_y_pred = copy.deepcopy(pipeline_y_pred)
else:
query_y += y
query_y_pred += pipeline_y_pred
data.y_pred = pipeline_y
logging.debug("{} pipeline prediction: {}".format(data.name, pipeline_y))
logging.debug("{} pipeline predicted time: {}".format(data.name, pipeline_y[-1]))
ratio_error = abs(y - pipeline_y) / (y + 1)
logging.debug("|Actual - Predict| / Actual: {}".format(ratio_error[-1]))
io_util.write_csv_result(prediction_path, data.name, [""] + list(y) + [""] + list(pipeline_y) + [""] +
list(ratio_error))
logging.debug("")
# Record cumulative numbers
if data.name not in actual_pipelines:
actual_pipelines[data.name] = copy.deepcopy(y)
predicted_pipelines[data.name] = copy.deepcopy(pipeline_y)
count_pipelines[data.name] = 1
else:
actual_pipelines[data.name] += y
predicted_pipelines[data.name] += pipeline_y
count_pipelines[data.name] += 1
# Update totals
if total_actual is None:
total_actual = copy.deepcopy(y)
total_predicted = copy.deepcopy(pipeline_y)
else:
total_actual += y
total_predicted += pipeline_y
num_pipelines += 1
total_elapsed_err = 0
for pipeline in actual_pipelines:
actual = actual_pipelines[pipeline]
predicted = predicted_pipelines[pipeline]
total_elapsed_err = total_elapsed_err + (abs(actual - predicted))[-1]
for pipeline in actual_pipelines:
actual = actual_pipelines[pipeline]
predicted = predicted_pipelines[pipeline]
num = count_pipelines[pipeline]
ratio_error = abs(actual - predicted) / (actual + 1)
abs_error = abs(actual - predicted)[-1]
pabs_error = abs_error / total_elapsed_err
io_util.write_csv_result(pipeline_path, pipeline, [num, num*1.0/num_pipelines, actual[-1],
predicted[-1], ratio_error[-1], abs_error, pabs_error] +
[""] + list(actual) + [""] + list(predicted) + [""] + list(ratio_error))
ratio_error = abs(total_actual - total_predicted) / (total_actual + 1)
io_util.write_csv_result(pipeline_path, "Total Pipeline", [num_pipelines, 1, total_actual[-1],
total_predicted[-1], ratio_error[-1], total_elapsed_err, 1] +
[""] + list(total_actual) + [""] + list(total_predicted) + [""] + list(ratio_error))
|
"""
Pre-processing of the page metadata before rendering.
"""
from datetime import datetime
import subprocess
def process_info(info, site):
"""
Alter the page metadata before rendering.
"""
# Urubu doesn't split the 'tags' into multiple strings
if "tags" in info:
if isinstance(info["tags"], str):
info["tags"] = info["tags"].split(", ")
# Identify to which folder the item belongs (paper, blog, etc)
if "type" not in info:
info["type"] = "/{}".format(info["id"].split("/")[1])
# Add the current date to the site metadata
if "now" not in site:
site["now"] = datetime.utcnow()
# Add the last git commit hash to the site metadata
if "commit" not in site:
completed = subprocess.run(
["git", "rev-parse", "--short", "HEAD"], capture_output=True, text=True
)
site["commit"] = completed.stdout.strip()
|
# Get the file paths in a directory
import os
def get_filePathList(dirPath, partOfFileName=''):
allFileName_list = list(os.walk(dirPath))[0][2]
fileName_list = [k for k in allFileName_list if partOfFileName in k]
filePath_list = [os.path.join(dirPath, k) for k in fileName_list]
return filePath_list
# Modify a single xml file in the directory
import xml.etree.ElementTree as ET
def single_xmlCompress(old_xmlFilePath, new_xmlFilePath, new_size):
new_width, new_height = new_size
with open(old_xmlFilePath) as file:
fileContent = file.read()
root = ET.XML(fileContent)
    # Get the image width scaling factor and update the value of the width node in the xml file
width = root.find('size').find('width')
old_width = int(width.text)
width_times = new_width / old_width
width.text = str(new_width)
    # Get the image height scaling factor and update the value of the height node in the xml file
height = root.find('size').find('height')
old_height = int(height.text)
height_times = new_height / old_height
height.text = str(new_height)
    # Get the list of annotated objects and rescale their xmin, ymin, xmax, ymax nodes
object_list = root.findall('object')
for object_item in object_list:
bndbox = object_item.find('bndbox')
xmin = bndbox.find('xmin')
xminValue = int(xmin.text)
xmin.text = str(int(xminValue * width_times))
ymin = bndbox.find('ymin')
yminValue = int(ymin.text)
ymin.text = str(int(yminValue * height_times))
xmax = bndbox.find('xmax')
xmaxValue = int(xmax.text)
xmax.text = str(int(xmaxValue * width_times))
ymax = bndbox.find('ymax')
ymaxValue = int(ymax.text)
ymax.text = str(int(ymaxValue * height_times))
tree = ET.ElementTree(root)
tree.write(new_xmlFilePath)
# Modify multiple xml files in the directory
def batch_xmlCompress(old_dirPath, new_dirPath, new_size):
xmlFilePath_list = get_filePathList(old_dirPath, '.xml')
for xmlFilePath in xmlFilePath_list:
old_xmlFilePath = xmlFilePath
xmlFileName = os.path.split(old_xmlFilePath)[1]
new_xmlFilePath = os.path.join(new_dirPath, xmlFileName)
single_xmlCompress(xmlFilePath, new_xmlFilePath, new_size)
# Modify a single jpg file in the directory
from PIL import Image
def single_imageCompress(old_imageFilePath, new_imageFilePath, new_size):
old_image = Image.open(old_imageFilePath)
new_image = old_image.resize(new_size, Image.ANTIALIAS)
new_image.save(new_imageFilePath)
# Modify multiple jpg files in the directory
def batch_imageCompress(old_dirPath, new_dirPath, new_size, suffix):
if not os.path.isdir(new_dirPath):
os.makedirs(new_dirPath)
imageFilePath_list = get_filePathList(old_dirPath, suffix)
for imageFilePath in imageFilePath_list:
old_imageFilePath = imageFilePath
jpgFileName = os.path.split(old_imageFilePath)[1]
new_imageFilePath = os.path.join(new_dirPath, jpgFileName)
single_imageCompress(old_imageFilePath, new_imageFilePath, new_size)
# Parse the arguments passed in when running the script
import argparse
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dirPath', type=str, help='directory path', default='../resources/selected_images')
parser.add_argument('-w', '--width', type=int, default=416)
parser.add_argument('-he', '--height', type=int, default=416)
parser.add_argument('-s', '--suffix', type=str, default='.jpg')
argument_namespace = parser.parse_args()
return argument_namespace
# Main function
if __name__ == '__main__':
argument_namespace = parse_args()
old_dirPath = argument_namespace.dirPath
    assert os.path.exists(old_dirPath), 'path does not exist: %s' % old_dirPath
width = argument_namespace.width
height = argument_namespace.height
new_size = (width, height)
new_dirPath = '../resources/images_%sx%s' %(str(width), str(height))
suffix = argument_namespace.suffix
batch_imageCompress(old_dirPath, new_dirPath, new_size, suffix)
    print('All image files have been compressed')
batch_xmlCompress(old_dirPath, new_dirPath, new_size)
    print('All xml files have been compressed')
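# Example invocation (added for illustration; the script filename below is hypothetical):
#   python compress_images_and_xml.py -d ../resources/selected_images -w 416 -he 416 -s .jpg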
|
"""Tests for tools for manipulation of expressions using paths. """
from sympy.simplify.epathtools import epath, EPath
from sympy.testing.pytest import raises
from sympy import sin, cos, E
from sympy.abc import x, y, z, t
def test_epath_select():
expr = [((x, 1, t), 2), ((3, y, 4), z)]
assert epath("/*", expr) == [((x, 1, t), 2), ((3, y, 4), z)]
assert epath("/*/*", expr) == [(x, 1, t), 2, (3, y, 4), z]
assert epath("/*/*/*", expr) == [x, 1, t, 3, y, 4]
assert epath("/*/*/*/*", expr) == []
assert epath("/[:]", expr) == [((x, 1, t), 2), ((3, y, 4), z)]
assert epath("/[:]/[:]", expr) == [(x, 1, t), 2, (3, y, 4), z]
assert epath("/[:]/[:]/[:]", expr) == [x, 1, t, 3, y, 4]
assert epath("/[:]/[:]/[:]/[:]", expr) == []
assert epath("/*/[:]", expr) == [(x, 1, t), 2, (3, y, 4), z]
assert epath("/*/[0]", expr) == [(x, 1, t), (3, y, 4)]
assert epath("/*/[1]", expr) == [2, z]
assert epath("/*/[2]", expr) == []
assert epath("/*/int", expr) == [2]
assert epath("/*/Symbol", expr) == [z]
assert epath("/*/tuple", expr) == [(x, 1, t), (3, y, 4)]
assert epath("/*/__iter__?", expr) == [(x, 1, t), (3, y, 4)]
assert epath("/*/int|tuple", expr) == [(x, 1, t), 2, (3, y, 4)]
assert epath("/*/Symbol|tuple", expr) == [(x, 1, t), (3, y, 4), z]
assert epath("/*/int|Symbol|tuple", expr) == [(x, 1, t), 2, (3, y, 4), z]
assert epath("/*/int|__iter__?", expr) == [(x, 1, t), 2, (3, y, 4)]
assert epath("/*/Symbol|__iter__?", expr) == [(x, 1, t), (3, y, 4), z]
assert epath(
"/*/int|Symbol|__iter__?", expr) == [(x, 1, t), 2, (3, y, 4), z]
assert epath("/*/[0]/int", expr) == [1, 3, 4]
assert epath("/*/[0]/Symbol", expr) == [x, t, y]
assert epath("/*/[0]/int[1:]", expr) == [1, 4]
assert epath("/*/[0]/Symbol[1:]", expr) == [t, y]
assert epath("/Symbol", x + y + z + 1) == [x, y, z]
assert epath("/*/*/Symbol", t + sin(x + 1) + cos(x + y + E)) == [x, x, y]
def test_epath_apply():
expr = [((x, 1, t), 2), ((3, y, 4), z)]
func = lambda expr: expr**2
assert epath("/*", expr, list) == [[(x, 1, t), 2], [(3, y, 4), z]]
assert epath("/*/[0]", expr, list) == [([x, 1, t], 2), ([3, y, 4], z)]
assert epath("/*/[1]", expr, func) == [((x, 1, t), 4), ((3, y, 4), z**2)]
assert epath("/*/[2]", expr, list) == expr
assert epath("/*/[0]/int", expr, func) == [((x, 1, t), 2), ((9, y, 16), z)]
assert epath("/*/[0]/Symbol", expr, func) == [((x**2, 1, t**2), 2),
((3, y**2, 4), z)]
assert epath(
"/*/[0]/int[1:]", expr, func) == [((x, 1, t), 2), ((3, y, 16), z)]
assert epath("/*/[0]/Symbol[1:]", expr, func) == [((x, 1, t**2),
2), ((3, y**2, 4), z)]
assert epath("/Symbol", x + y + z + 1, func) == x**2 + y**2 + z**2 + 1
assert epath("/*/*/Symbol", t + sin(x + 1) + cos(x + y + E), func) == \
t + sin(x**2 + 1) + cos(x**2 + y**2 + E)
def test_EPath():
assert EPath("/*/[0]")._path == "/*/[0]"
assert EPath(EPath("/*/[0]"))._path == "/*/[0]"
assert isinstance(epath("/*/[0]"), EPath) is True
assert repr(EPath("/*/[0]")) == "EPath('/*/[0]')"
raises(ValueError, lambda: EPath(""))
raises(ValueError, lambda: EPath("/"))
raises(ValueError, lambda: EPath("/|x"))
raises(ValueError, lambda: EPath("/["))
raises(ValueError, lambda: EPath("/[0]%"))
raises(NotImplementedError, lambda: EPath("Symbol"))
|